diff --git a/.gitattributes b/.gitattributes index 1ef325f1b111266a6b26e0196871bd78baa8c2f3..daf9e8f39f09886f8ca8b6631d863eef4b4e4335 100644 --- a/.gitattributes +++ b/.gitattributes @@ -57,3 +57,30 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text # Video files - compressed *.mp4 filter=lfs diff=lfs merge=lfs -text *.webm filter=lfs diff=lfs merge=lfs -text +2024/2S-ODIS_[[:space:]]Two-Stage[[:space:]]Omni-Directional[[:space:]]Image[[:space:]]Synthesis[[:space:]]by[[:space:]]Geometric[[:space:]]Distortion[[:space:]]Correction/47960505-1990-491c-a65e-6611419353a0_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Congealing_[[:space:]]3D-Aware[[:space:]]Image[[:space:]]Alignment[[:space:]]in[[:space:]]the[[:space:]]Wild/757b034b-7105-4846-a434-665e5b237ea6_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Gaussian[[:space:]]Parametric[[:space:]]Head[[:space:]]Model/0c0538a8-3292-41a6-ad76-710b8fc8de37_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Hand[[:space:]]Pose[[:space:]]Estimation[[:space:]]in[[:space:]]Everyday[[:space:]]Egocentric[[:space:]]Images/332a64fb-af20-4857-af36-e23eeaad9f91_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Hand[[:space:]]Sequence[[:space:]]Recovery[[:space:]]from[[:space:]]Real[[:space:]]Blurry[[:space:]]Images[[:space:]]and[[:space:]]Event[[:space:]]Stream/c806d671-8f45-4954-8ba0-1ca55bf4fc0d_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Human[[:space:]]Pose[[:space:]]Estimation[[:space:]]via[[:space:]]Non-Causal[[:space:]]Retentive[[:space:]]Networks/38d38aa6-38d5-486e-8643-4f15fed7d372_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Open-Vocabulary[[:space:]]Panoptic[[:space:]]Segmentation[[:space:]]with[[:space:]]2D-3D[[:space:]]Vision-Language[[:space:]]Distillation/eb0bea0f-431f-4835-9237-239fd0d64e99_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Reconstruction[[:space:]]of[[:space:]]Objects[[:space:]]in[[:space:]]Hands[[:space:]]without[[:space:]]Real[[:space:]]World[[:space:]]3D[[:space:]]Supervision/1ec33038-4034-4272-be45-88734c621c33_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Single-object[[:space:]]Tracking[[:space:]]in[[:space:]]Point[[:space:]]Clouds[[:space:]]with[[:space:]]High[[:space:]]Temporal[[:space:]]Variation/c2204d64-6706-4e48-94d5-09db9f8770f0_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Small[[:space:]]Object[[:space:]]Detection[[:space:]]with[[:space:]]Dynamic[[:space:]]Spatial[[:space:]]Pruning/2b6d04de-265e-4c48-b6b1-d03973f89d8a_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D[[:space:]]Weakly[[:space:]]Supervised[[:space:]]Semantic[[:space:]]Segmentation[[:space:]]with[[:space:]]2D[[:space:]]Vision-Language[[:space:]]Guidance/b6e9bc2f-30dd-47bb-97e3-5a3cad4d6faf_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3D-GOI_[[:space:]]3D[[:space:]]GAN[[:space:]]Omni-Inversion[[:space:]]for[[:space:]]Multifaceted[[:space:]]and[[:space:]]Multi-object[[:space:]]Editing/c22d5df4-9f40-4c3a-8e7a-40f3a1d6dbe5_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3DEgo_[[:space:]]3D[[:space:]]Editing[[:space:]]on[[:space:]]the[[:space:]]Go!/58ba5dca-f01b-44f3-bd07-614e4a4b113d_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3DFG-PIFu_[[:space:]]3D[[:space:]]Feature[[:space:]]Grids[[:space:]]for[[:space:]]Human[[:space:]]Digitization[[:space:]]from[[:space:]]Sparse[[:space:]]Views/7528968f-06f7-4c18-aa8f-783ee6c0a1d6_origin.pdf filter=lfs diff=lfs 
merge=lfs -text +2024/3DGazeNet_[[:space:]]Generalizing[[:space:]]Gaze[[:space:]]Estimation[[:space:]]with[[:space:]]Weak[[:space:]]Supervision[[:space:]]from[[:space:]]Synthetic[[:space:]]Views/7d049317-38f5-44ac-a691-e07c992f4970_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3DSA_Multi-View[[:space:]]3D[[:space:]]Human[[:space:]]Pose[[:space:]]Estimation[[:space:]]With[[:space:]]3D[[:space:]]Space[[:space:]]Attention[[:space:]]Mechanisms/61874178-0339-4f9c-84c1-e29c381e8d91_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3R-INN_[[:space:]]How[[:space:]]to[[:space:]]be[[:space:]]climate[[:space:]]friendly[[:space:]]while[[:space:]]consuming_delivering[[:space:]]videos_/f281395b-e7ef-449c-8738-d5a976fca3fe_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3iGS_[[:space:]]Factorised[[:space:]]Tensorial[[:space:]]Illumination[[:space:]]for[[:space:]]3D[[:space:]]Gaussian[[:space:]]Splatting/25df5a9d-fc43-4ff8-b4e4-a2a9b9e269ba_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/3x2_[[:space:]]3D[[:space:]]Object[[:space:]]Part[[:space:]]Segmentation[[:space:]]by[[:space:]]2D[[:space:]]Semantic[[:space:]]Correspondences/4a9028d8-b05c-4422-ac23-0a7be9202087_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/4D[[:space:]]Contrastive[[:space:]]Superflows[[:space:]]are[[:space:]]Dense[[:space:]]3D[[:space:]]Representation[[:space:]]Learners/3b016017-cefc-4a8b-a706-93b64616c878_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/4Diff_[[:space:]]3D-Aware[[:space:]]Diffusion[[:space:]]Model[[:space:]]for[[:space:]]Third-to-First[[:space:]]Viewpoint[[:space:]]Translation/26bdb530-b8d4-43d7-9337-55a1116b4a83_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/6DGS_[[:space:]]6D[[:space:]]Pose[[:space:]]Estimation[[:space:]]from[[:space:]]a[[:space:]]Single[[:space:]]Image[[:space:]]and[[:space:]]a[[:space:]]3D[[:space:]]Gaussian[[:space:]]Splatting[[:space:]]Model/76913771-7094-44e1-8b30-8ea4e2210b42_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/6DoF[[:space:]]Head[[:space:]]Pose[[:space:]]Estimation[[:space:]]through[[:space:]]Explicit[[:space:]]Bidirectional[[:space:]]Interaction[[:space:]]with[[:space:]]Face[[:space:]]Geometry/2a1e442c-8a76-4afd-b0e2-7a3c115bb3f2_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Cephalometric[[:space:]]Landmark[[:space:]]Regression[[:space:]]Method[[:space:]]based[[:space:]]on[[:space:]]Dual-encoder[[:space:]]for[[:space:]]High-resolution[[:space:]]X-ray[[:space:]]Image/2cca7425-9c6a-47c2-b889-8be913ae41cc_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Closer[[:space:]]Look[[:space:]]at[[:space:]]GAN[[:space:]]Priors_[[:space:]]Exploiting[[:space:]]Intermediate[[:space:]]Features[[:space:]]for[[:space:]]Enhanced[[:space:]]Model[[:space:]]Inversion[[:space:]]Attacks/ae02311f-0ba1-4342-a539-c7ea4e71402f_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Compact[[:space:]]Dynamic[[:space:]]3D[[:space:]]Gaussian[[:space:]]Representation[[:space:]]for[[:space:]]Real-Time[[:space:]]Dynamic[[:space:]]View[[:space:]]Synthesis/7472726a-f5ca-4354-bd16-63b6aa3c1be0_origin.pdf filter=lfs diff=lfs merge=lfs -text +2024/A[[:space:]]Comparative[[:space:]]Study[[:space:]]of[[:space:]]Image[[:space:]]Restoration[[:space:]]Networks[[:space:]]for[[:space:]]General[[:space:]]Backbone[[:space:]]Network[[:space:]]Design/76c19445-7741-420c-b1a4-d913d41c13ff_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion 
Correction/47960505-1990-491c-a65e-6611419353a0_content_list.json b/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction/47960505-1990-491c-a65e-6611419353a0_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..6772239e63043b95751fb027da8f312bb5bce3e8 --- /dev/null +++ b/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction/47960505-1990-491c-a65e-6611419353a0_content_list.json @@ -0,0 +1,1441 @@ +[ + { + "type": "text", + "text": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction", + "text_level": 1, + "bbox": [ + 238, + 141, + 764, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Atsuya Nakata and Takao Yamanaka", + "bbox": [ + 354, + 212, + 647, + 227 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Sophia University, Tokyo, Japan \na-nakata-7r0@eagle.sophia.ac.jp, takao-y@sophia.ac.jp", + "bbox": [ + 292, + 239, + 707, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. Omni-directional images have been increasingly used in various applications, including virtual reality and SNS (Social Networking Services). However, their availability is comparatively limited in contrast to normal field of view (NFOV) images, since specialized cameras are required to take omni-directional images. Consequently, several methods have been proposed based on generative adversarial networks (GAN) to synthesize omni-directional images, but these approaches have shown difficulties in training of the models, due to instability and/or significant time consumption in the training. To address these problems, this paper proposes a novel omni-directional image synthesis method, 2S-ODIS (Two-Stage Omni-Directional Image Synthesis), which generated high-quality omni-directional images but drastically reduced the training time. This was realized by utilizing the VQGAN (Vector Quantized GAN) model pre-trained on a large-scale NFOV image database such as ImageNet without fine-tuning. Since this pre-trained model does not represent distortions of omni-directional images in the equi-rectangular projection (ERP), it cannot be applied directly to the omni-directional image synthesis in ERP. Therefore, two-stage structure was adopted to first create a global coarse image in ERP and then refine the image by integrating multiple local NFOV images in the higher resolution to compensate the distortions in ERP, both of which are based on the pre-trained VQGAN model. As a result, the proposed method, 2S-ODIS, achieved the reduction of the training time from 14 days in OmniDreamer to four days in higher image quality.", + "bbox": [ + 261, + 292, + 743, + 625 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c48c7689ffa9052a87c7d9a0584498d5c5ef344e33dd9c361f13d46c9f3bed18.jpg", + "image_caption": [ + "Fig. 1: Overview of advantages of proposed method, 2S-ODIS. OmniDreamer [2] requires 14 days for training of the model, including 1-week training of the VQGAN model. In contrast, the proposed method only required 4 days for the training of the model since no training of VQGAN model was required." 
+ ], + "image_footnote": [], + "bbox": [ + 217, + 667, + 787, + 742 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 217, + 143, + 374, + 160 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "An omni-directional image is taken by an omni-directional camera, also known as a 360-degree camera, which captures lights from all directions falling onto the focal point to cover a full sphere, and is usually represented in the equi-rectangular projection (ERP) to represent it in a 2-dimensional plane. These images have been applied to various applications such as virtual reality, social networking services, and map tools such as Google Street View. However, the availability of the omni-directional images are still limited compared with Normal Field of View (NFoV) images captured by a standard camera, since the specialized camera is required to capture the omni-directional images.", + "bbox": [ + 212, + 178, + 785, + 313 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recently, several methods have been proposed to synthesize omni-directional images from a NFoV image [2,6,8,10,13], a text condition [5], or both [12]. These methods have been based on Generative Adversarial Networks (GAN) [6,8,10, 13], Vector Quantized GAN (VQGAN) [2,5], or auto-regressive outpainting using stable diffusion [12]. However, the GAN-based methods have faced challenges of instability in training, while the VQGAN-based methods and the auto-regression with stable diffusion require long training and inference time, respectively.", + "bbox": [ + 212, + 314, + 785, + 419 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we propose a novel method to synthesize omni-directional images from a NFoV image based on pre-trained VQGAN, trained on a large-scale NFoV-image dataset. The previous method with VQGAN [2] has required to train the VQGAN encoder and decoder to represent geometric distortions in omni-directional images in ERP, especially at poles (top and bottom regions in ERP). This training has taken long time, such as 1 week in the method [2]. In the proposed method, the pre-trained VQGAN encoder and decoder were applied without fine-tuning by synthesizing multiple NFoV images to integrate them into an omni-directional image based on geometric distortion correction. Since no training of VQGAN was required, the training of the model was shortened by removing the step of the fine-tuning of VQGAN, as shown in Fig. 1. Furthermore, a two-stage structure was adopted in the proposed method. At the first stage, a global coarse image in ERP is created using the pre-trained VQGAN encoder and decoder without the geometric distortion correction. Therefore, the generated omni-directional image at the first stage includes distortions. For example, a straight line in NFoV images at poles can not be reproduced at the first stage. At the second stage, this global coarse image is refined by synthesizing an omni-directional image from multiple NFoV images generated using the pre-trained VQGAN encoder and decoder. This second stage compensates the geometric distortions at the first stage, in addition to representing local detailed textures in a higher resolution. 
By using the two-stage structure, the model can produce globally plausible yet locally detailed omni-directional images without the geometric distortions.", + "bbox": [ + 212, + 421, + 785, + 767 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The contributions of this paper include:", + "bbox": [ + 238, + 768, + 527, + 782 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- A novel method to synthesize omni-directional images from a NFoV image was proposed using pre-trained VQGAN. Since no training of VQGAN was required, the training time was drastically reduced.", + "bbox": [ + 225, + 794, + 785, + 839 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "A. Nakata and T. Yamanaka", + "bbox": [ + 271, + 114, + 464, + 127 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- A two-stage structure was adopted to generate a global coarse omni-directional image at the first stage, and then generate a locally detailed image with geometric distortion correction at the second stage.", + "bbox": [ + 223, + 146, + 792, + 190 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- Experimental results demonstrated that the proposed method synthesized higher quality omni-directional images in shortened training and inference time than the previous methods such as OmniDreamer [2].", + "bbox": [ + 225, + 191, + 785, + 237 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Works", + "text_level": 1, + "bbox": [ + 215, + 261, + 395, + 277 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Image Generation", + "text_level": 1, + "bbox": [ + 215, + 294, + 410, + 309 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "VQVAE (Vector Quantized Variational AutoEncoder) [17] has been proposed to improve the generated image blurriness in VAE [11] by representing image patches with quantized latent vectors based on vector quantization. Furthermore, the adversarial loss has been introduced in VQVAE to make the generated images clearer, called VQGAN [7]. In this method, Transformer [18] has been used to sequentially predict image patches from neighbor patches based on auto-regressive prediction. The patches are represented with the quantized latent vectors called VQGAN codes to generate clear images with low computational cost. To improve the slow inference in VQGAN due to the sequential predictions of patches, MaskGIT [4] has been proposed by predicting multiple patches simultaneously. Although MaskGIT has succeeded in improving the inference speed, it has been difficult to generate high quality images in the high resolution. To solve this problem, Muse [3] has been proposed using a two-stage structure, where a low-resolution image is generated at the first stage, and then is refined to generate a higher-resolution image at the second stage. In our proposed method, this two-stage structure was adopted for the omni-directional image synthesis in the high resolution.", + "bbox": [ + 212, + 321, + 787, + 578 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 Omni-directional Image Generation", + "text_level": 1, + "bbox": [ + 215, + 602, + 553, + 617 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Several methods have been proposed for synthesizing omni-directional images from NFoV images. 
Okubo and Yamanaka [10] have proposed a method of generating omni-directional images from a single NFoV image with a class label, based on a conditional GAN. Hara et al. [8] have also proposed a method based on the symmetric property of omni-directional images using GAN and VAE. Another work to synthesize omni-directional images is Guided ImmerseGAN [6], which generates omni-directional images from a NFoV image with the modulation guided by a given class label, which does not need to be the true class of the input NFoV image. In the work of OmniDreamer [2], VQGAN has been applied to the omni-directional image synthesis by using Transformer for auto-regressive prediction. In this method, the VQGAN encoder and decoder have to be fine-tuned on an omni-directional image dataset since the geometric distortion in ERP has to be represented in the latent codes of VQGAN. Text2Light [5] also uses VQGAN with auto-regressive prediction for the generation, though only text", "bbox": [ 212, 628, 787, 840 ], "page_idx": 2 }, { "type": "header", "text": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis", "bbox": [ 361, 114, 730, 128 ], "page_idx": 2 }, { "type": "page_number", "text": "3", "bbox": [ 774, 116, 784, 126 ], "page_idx": 2 }, { "type": "image", "img_path": "images/407fb1a0dae65374d1d25b47d8d445944ded0cc981c3bfecf5fa21ff99113475.jpg", "image_caption": [ "Fig. 2: Qualitative comparison of omni-directional image reconstruction using pre-trained VQGAN encoder and decoder. Omni-directional Image: original omni-directional image, Reconstructed in ERP: reconstructed in equi-rectangular projection, Reconstructed in Extracted Images: reconstructed by integrating multiple NFoV images in different directions. By extracting NFoV images, an omni-directional image can be correctly reconstructed without distortions." ], "image_footnote": [], "bbox": [ 217, 143, 787, 287 ], "page_idx": 3 }, { "type": "text", "text": "information is taken as the input instead of the NFoV image. Nakata et al. [13] have proposed a method to increase the diversity of generated omni-directional images based on MLP-Mixer by efficiently propagating the information of the NFoV image embedded at the center in ERP. AOGNet [12] has generated omni-directional images by out-painting an incomplete 360-degree image progressively with NFoV and text guidances, jointly or individually. This has been realized using auto-regressive prediction based on the stable-diffusion backbone model. Due to the nature of sequential auto-regressive prediction, it takes a long inference time.", "bbox": [ 212, 410, 787, 542 ], "page_idx": 3 }, { "type": "text", "text": "In our proposed method, the pre-trained VQGAN model was used without fine-tuning on the omni-directional image dataset, since multiple NFoV images are synthesized based on the pre-trained VQGAN and then integrated into omni-directional images. By removing the step of VQGAN training, the overall training of the model was drastically shortened compared with the previous VQGAN-based method, OmniDreamer [2].
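The geometric core of this design is the conversion between ERP panoramas and perspective (NFoV) crops, so that the pre-trained VQGAN only ever sees undistorted views. A NumPy/OpenCV sketch of the extraction step is given below; the function is our illustration rather than the authors' code, and enumerating the 26 view directions of Sec. 3.1 as the non-zero vectors in {-1, 0, 1}^3 (the face normals of a rhombicuboctahedron) is our interpretation.

```python
# Sketch of NFoV extraction from an ERP panorama (not the authors' code).
# Assumes an equirectangular image of shape (H, W, 3) and a pinhole NFoV camera.
import numpy as np
import cv2

def extract_nfov(erp, direction, fov_deg=60.0, size=256):
    """Sample a perspective (NFoV) crop of the ERP panorama along `direction`."""
    h, w = erp.shape[:2]
    f = 0.5 * size / np.tan(np.radians(fov_deg) / 2)          # focal length in pixels
    u, v = np.meshgrid(np.arange(size) - size / 2 + 0.5,
                       np.arange(size) - size / 2 + 0.5)      # image-plane pixel grid
    forward = np.asarray(direction, float)
    forward /= np.linalg.norm(forward)
    up = np.array([0., 0., 1.]) if abs(forward[2]) < 0.999 else np.array([1., 0., 0.])
    right = np.cross(up, forward); right /= np.linalg.norm(right)
    down = np.cross(forward, right)
    rays = forward + (u[..., None] * right + v[..., None] * down) / f
    rays /= np.linalg.norm(rays, axis=-1, keepdims=True)
    lon = np.arctan2(rays[..., 1], rays[..., 0])              # longitude in [-pi, pi]
    lat = np.arcsin(np.clip(rays[..., 2], -1, 1))             # latitude in [-pi/2, pi/2]
    x = ((lon / (2 * np.pi) + 0.5) * w).astype(np.float32)    # ERP pixel coordinates
    y = ((0.5 - lat / np.pi) * h).astype(np.float32)
    return cv2.remap(erp, x, y, cv2.INTER_LINEAR, borderMode=cv2.BORDER_WRAP)

# 26 view directions: all non-zero vectors with components in {-1, 0, 1}
# (our reading of the rhombicuboctahedron face normals described in Sec. 3.1).
directions = [np.array(d) - 1 for d in np.ndindex(3, 3, 3) if d != (1, 1, 1)]
erp = np.zeros((512, 1024, 3), np.uint8)                      # placeholder panorama
views = [extract_nfov(erp, d) for d in directions]
```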
In addition, the proposed method is based on simultaneous synthesis of multiple NFoV images in different directions, whose inference was faster than auto-regressive prediction such as OmniDreamer [2] and AOGNet [12].", + "bbox": [ + 212, + 546, + 789, + 683 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Proposed Method", + "text_level": 1, + "bbox": [ + 215, + 705, + 426, + 724 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Two-Stage Structure", + "text_level": 1, + "bbox": [ + 215, + 738, + 433, + 753 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The proposed method consists of the two-stage structure, where a global coarse omni-directional image in ERP is synthesized in a low resolution $(256\\times 512$ pixels) at the first stage without geometric distortion correction, and then is refined at the second stage by integrating the multiple synthesized NFoV images in different directions based on the geometric distortion correction, producing", + "bbox": [ + 212, + 763, + 787, + 840 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "A. Nakata and T. Yamanaka", + "bbox": [ + 271, + 114, + 464, + 127 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/6c79fc6bfdc27e7c17b4b54547a191508566b01fe547c6c47c7a74a1a1f595fd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 217, + 143, + 787, + 297 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/970736200ab83ab8dca4e0dde01b63a366d4facbec65e00012a9144392de1ec5.jpg", + "image_caption": [ + "Fig. 3: Diagram of the proposed method. (a)Inference, (b)Training." + ], + "image_footnote": [], + "bbox": [ + 217, + 305, + 751, + 465 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "a high-quality omni-directional image in ERP in a high resolution (1024×2048 pixels). At both stages, the pre-trained VQGAN was utilized without fine-tuning on the omni-directional image dataset.", + "bbox": [ + 212, + 527, + 784, + 573 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As a preliminary experiment, an omni-directional image was reconstructed in ERP or in multiple NFoV images using pre-trained VQGAN encoder and decoder without the fine-tuning, as shown in Fig. 2. It can be seen from the figure that the reconstruction in ERP cannot correctly reproduce the texture in the region toward the ground (blue frame) and the continuity in the region at both edges (yellow frame), although it can reproduce the region at center in ERP (red frame). On the contrary, all the regions can be correctly reproduced in the extracted NFoV images. This indicates that the pre-trained VQGAN model can be applied without fine-tuning if it is applied to NFoV images.", + "bbox": [ + 212, + 579, + 787, + 715 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Thus, the generated omni-directional image in ERP at the first stage in the proposed method includes distortions since the pre-trained VQGAN cannot represent the texture and continuities in the omni-directional images in ERP. However, these distortions are correctly compensated at the second stage by synthesizing the multiple NFoV images which can be correctly reproduced by the pre-trained VQGAN model. If only the second stage is used in the proposed method, it is difficult to synthesize multiple NFoV images simultaneously with global compatibility. 
Therefore, the two-stage structure was adopted in the pro", + "bbox": [ + 212, + 719, + 787, + 840 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis", + "bbox": [ + 361, + 114, + 732, + 128 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/c193417ba274395797d3249e91cb80049f3edf2008b360866fb54e93c907c902.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 217, + 142, + 477, + 292 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3c4f60946dbe6548b7b5925c16cc53346bbdda694916f288c0d2896f7a0da92a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 480, + 142, + 779, + 292 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3a29181286e2a1b3da8c4beb6a803848db06ccb7db9fb1435e862e25329bf345.jpg", + "image_caption": [ + "Fig. 4: Structure of the proposed method. The range of attention differs between the high-resolution and low-resolution models." + ], + "image_footnote": [], + "bbox": [ + 215, + 305, + 785, + 444 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "posed method to produce globally plausible coarse omni-directional image at the first stage.", + "bbox": [ + 212, + 513, + 784, + 542 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The structure of the proposed method is shown in Fig. 3. At the first stage, the low resolution model produces the low resolution codes, which are converted into patches of omni-directional images in ERP using the pre-trained VQGAN decoder. At the second stage, the high resolution model produces the high resolution codes, which are corresponding to the patches in the NFoV images in multiple directions (26 directions in our implementation) in an omni-directional image with overlapping. These 26 directions for the NFoV images were the same directions as the normal vectors in the faces of a rhombicuboctahedron. The field of view was set to 60 degrees for all directions. The generated NFoV images with the size of $256 \\times 256$ pixels were integrated into an omni-directional image with the size of $1024 \\times 2048$ pixels in ERP.", + "bbox": [ + 212, + 544, + 787, + 710 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 Inference", + "text_level": 1, + "bbox": [ + 215, + 736, + 339, + 750 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For synthesizing an omni-directional image in the inference, the low-resolution codes are first generated using the sampling strategy proposed in MaskGIT [4] at the first stage from the conditional image where an input NFOV image is embedded at the center in ERP, as shown in Fig. 3(a). In MaskGIT, the generation is started with 'Masked low resolution codes' which is filled with the [MASK] code,", + "bbox": [ + 212, + 763, + 787, + 840 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "A. Nakata and T. Yamanaka", + "bbox": [ + 271, + 114, + 464, + 126 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "the VQGAN code which indicates that it is masked, for all locations. Then, the low resolution model predicts the probabilities for all the [MASK] locations in parallel, and samples a VQGAN code based on its predicted probabilities over all possible VQGAN codes for each location. 
The locations with low predicted probability are replaced with the [MASK] code again, and the VQGAN codes are resampled by predicting the probabilities using the low resolution model. This process is repeated in $T$ steps. At each iteration, the model predicts all VQGAN codes simultaneously but only keeps the most confident ones. The remaining VQGAN codes are replaced with the [MASK] code and re-predicted in the next iteration. The mask ratio during the iterations is determined by $\cos\left(\frac{\pi}{2}\frac{t}{T}\right)$, where $t$ indicates the current iteration out of the total $T$ steps. This mask ratio decreases monotonically from 1 to 0 with respect to $t$, which ensures that most of the locations are masked during the early stage of the iterations to prevent producing inconsistent codes.", "bbox": [ 212, 146, 787, 356 ], "page_idx": 6 }, { "type": "text", "text": "At the second stage, the high-resolution codes are generated using the high-resolution model, where the generation process is almost the same as in the low-resolution model. The difference from the low-resolution model is that the model accepts the low-resolution image generated at the first stage as an additional conditional image, and generates NFoV images, as shown in Fig. 3(a). To integrate the generated NFoV images into an omni-directional image, the overlapping regions are merged with weights depending on the distance from the centers of the NFoV images. Specifically, let $x_{i}$ and $x_{j}$ be the two overlapping pixel values, and let $d_{i}$ and $d_{j}$ be the distances from the centers of the NFoV images at each position. The integrated pixel value $y$ is given by", "bbox": [ 212, 358, 787, 510 ], "page_idx": 6 }, { "type": "equation", "text": "\n$$\ny = \\frac{w_{i}}{w_{i} + w_{j}} x_{i} + \\frac{w_{j}}{w_{i} + w_{j}} x_{j} \\tag{1}\n$$\n", "text_format": "latex", "bbox": [ 388, 521, 784, 551 ], "page_idx": 6 }, { "type": "text", "text": "where $w_{i} = 1 - \frac{d_{i}}{\max_{k}(d_{k})}$, $w_{j} = 1 - \frac{d_{j}}{\max_{k}(d_{k})}$.", "bbox": [ 214, 558, 535, 578 ], "page_idx": 6 }, { "type": "text", "text": "The network architecture used in the low-resolution and high-resolution models is shown in Fig. 4. The layer structure was adapted from MaxViT [16], although Transformer has been used in MaskGIT [4] and Muse [3]. The 8-layer MaxViT models were used in both the low-resolution model and the high-resolution model. In the low-resolution model, the padding in MBConv was replaced with circular padding to encourage continuity at the edges in ERP, whereas zero padding was retained in the high-resolution model. The block attention was applied within each divided region in the low-resolution model and within each NFoV image in the high-resolution model, as shown in Fig. 4. The grid attention was also applied globally in a sparse pattern in the low-resolution model as in the original MaxViT model, whereas it was applied among the same locations over the NFoV images in the high-resolution model.", "bbox": [ 212, 578, 787, 758 ], "page_idx": 6 }, { "type": "text", "text": "3.3 Training", "text_level": 1, "bbox": [ 214, 782, 333, 797 ], "page_idx": 6 }, { "type": "text", "text": "The low-resolution and high-resolution models are independently trained, as shown in Fig. 3(b).
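As a concrete reference for the sampling procedure of Sec. 3.2 above (start fully masked, predict every position in parallel, keep only the most confident codes, and re-mask the rest on the cosine schedule), a generic MaskGIT-style decoding loop is sketched below. This is our illustration, not the authors' released code; `model(tokens, cond)` is assumed to return per-position logits over the VQGAN codebook.

```python
import math
import torch

@torch.no_grad()
def maskgit_decode(model, cond, num_pos, mask_id, T=16):
    """Parallel iterative decoding: fill all positions over T refinement steps."""
    tokens = torch.full((1, num_pos), mask_id, dtype=torch.long)   # start fully masked
    for t in range(T):
        logits = model(tokens, cond)                  # (1, num_pos, num_codes), in parallel
        probs = logits.softmax(-1)
        sampled = torch.distributions.Categorical(probs=probs).sample()
        conf = probs.gather(-1, sampled.unsqueeze(-1)).squeeze(-1)
        conf[tokens != mask_id] = float("inf")        # codes kept earlier are never re-masked
        tokens = torch.where(tokens == mask_id, sampled, tokens)
        n_mask = int(num_pos * math.cos(math.pi / 2 * (t + 1) / T))  # cosine schedule -> 0
        if n_mask > 0:
            remask = conf.topk(n_mask, largest=False, dim=-1).indices
            tokens.scatter_(1, remask, mask_id)       # least confident codes are re-predicted
    return tokens
```

Evaluating the schedule at (t + 1)/T drives the mask ratio to 0 on the final step, so every position holds a committed code when the loop ends.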
The objective of the training is to make the low-resolution", + "bbox": [ + 212, + 809, + 785, + 839 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis", + "bbox": [ + 361, + 114, + 732, + 128 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1390be00070395268b019261887f10e2bcb71880da6d1c8b734167f526b2f4f7.jpg", + "image_caption": [ + "Fig. 5: Examples of conditional image in training. These images are generated from omni-directional images in ERP by randomly masking." + ], + "image_footnote": [], + "bbox": [ + 217, + 143, + 785, + 200 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "and high-resolution models predict plausible VQGAN codes at [MASK]-code locations for each inference step. For the low-resolution model, the inputs are 'randomly masked low-resolution codes' and a conditional image which emulates the NFoV image embedded at the center in ERP (Fig. 3 a). The mask ratio in the randomly masked low-resolution codes is set to $\\cos\\left(\\frac{\\pi}{2}r\\right)$ , where $r$ is sampled from a uniform distribution [0,1), since this emulates the single iteration in the inference. Examples of the conditional image in the training are shown in Fig. 5. They are prepared by randomly masking an original omni-directional image in ERP to emulate the conditional image in the inference. Since they are not limited to the single NFoV image embedded in ERP, the trained model can be applied to various in-painting and out-painting tasks, as described in 5.2. The low-resolution model is trained to predict the original VQGAN codes in the real omni-directional image at [MASK]-code locations, so that the cross entropy is used as the loss function to train the model.", + "bbox": [ + 212, + 268, + 787, + 479 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "For the high-resolution model, the inputs are 'randomly masked high-resolution codes' for multiple NFoV images (26 NFoV images in our implementation), conditional NFoV images converted from the conditional image in ERP, and the reconstructed low-resolution NFoV images converted from the low-resolution omni-directional image reconstructed using the pre-trained VQGAN encoder and decoder. The 'randomly masked high-resolution code' and the conditional image in ERP are prepared in the same manner for the low-resolution model. The low-resolution omni-directional image is required in the inputs of the high-resolution model to emulate the low-resolution image generated at the first stage. The high-resolution model is trained to predict the original VQGAN codes in the NFoV images converted from the real omni-directional image at [MASK]-code locations based on the cross-entropy loss function.", + "bbox": [ + 212, + 479, + 789, + 661 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Although the first and second stages are sequentially processed in the inference, they are independently trained in parallel, as shown in Fig. 3(b). 
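The training objective just described admits an equally short sketch: draw one mask ratio cos(π r/2) with r ~ U[0, 1) per sample, replace the selected positions with the [MASK] code, and apply cross-entropy only at those positions. Shapes and the model interface below are assumptions; both the low-resolution and the high-resolution model are trained with this same recipe, independently of each other.

```python
import math
import torch
import torch.nn.functional as F

def masked_training_step(model, cond, gt_codes, mask_id):
    """One step: hide a random cos-scheduled fraction of codes, predict them back."""
    B, N = gt_codes.shape
    r = torch.rand(B, 1)                                    # r ~ U[0, 1), one ratio per sample
    mask = torch.rand(B, N) < torch.cos(math.pi / 2 * r)    # emulates one inference iteration
    inputs = gt_codes.masked_fill(mask, mask_id)
    logits = model(inputs, cond)                            # (B, N, num_codes)
    return F.cross_entropy(logits[mask], gt_codes[mask])    # loss only at [MASK] locations
```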
This property is advantageous to shorten the required training time if multiple GPUs (Graphics Processing Units) can be used, although a single GPU was used in our implementation.", + "bbox": [ + 212, + 662, + 787, + 738 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 215, + 762, + 375, + 779 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The omni-directional image dataset, SUN360 [19], was used in the experiments. The 5,000 outdoor images were used for test, while the remaining 47,938 outdoor images were used for training. The size of the images in the dataset is $512 \\times 1024$", + "bbox": [ + 212, + 794, + 787, + 839 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "A. Nakata and T. Yamanaka", + "bbox": [ + 271, + 114, + 464, + 126 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "pixels. Although the proposed method generates images in $1024 \\times 2048$ pixels, it was resized to $512 \\times 1024$ pixels for the evaluation. For comparison, several conventional methods were also evaluated, including GAN-based methods with CNN (Convolutional Neural Networks) [10] or MLP-Mixer [13], a VQGAN-based method (OmniDreamer) [2], and a GAN-based in-painting method (LAMA) [15]. The models were implemented in PyTorch, and were trained in a single GPU (NVIDIA RTX3090). The code for the network architecture in the proposed method is provided in the supplementary material.", + "bbox": [ + 212, + 146, + 782, + 263 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The pre-trained VQGAN was obtained from [1], which is the model with 1024 codebooks trained on ImageNet. Eight MaxViT layers in Fig. 4 were used in both the low-resolution and high-resolution models, with 256 internal dimensions. The sizes of the VQGAN codes at the first and second stages were $16 \\times 32$ patches and $16 \\times 16$ patches $\\times 26$ NFoV images, respectively. These VQGAN codes were converted into trainable feature vectors in the 256 dimensions. The conditional image was also down-sampled into the same size as the VQGAN codes with 256 dimensions using CNN. At each iteration of MaskGIT in the first stage, these two feature vectors were added with trainable positional encoding vectors, and then were inputted into the low-resolution model composed of the 8 MaxViT layers. At each iteration in the second stage, the low-resolution image generated at the first stage (inference) or reconstructed using the pre-trained VQGAN (training) was converted into NFoV images. These NFoV images were down-sampled with CNN to be added with the feature vectors of the input VQGAN codes, the conditional NFoV images, and the positional encoding, and then were inputted into the high-resolution model. The total steps $T$ in MaskGIT was set to 16 at both stages.", + "bbox": [ + 212, + 266, + 785, + 522 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The optimizer for training the models was AdamW with the learning rate of 0.001, the weight decay of 1e-5, Amsgrad, and the learning-rate scheduling of ExponentialLR reducing it by 0.95 every 5,000 iterations. OmniDreamer [2] was trained for 14 days (30 epochs in all training stages), while the proposed method was trained over 4 days (2 days with the batch size 16 at the first stage, and 2 days with the batch size 8 in the second stage, for 180,000 iterations at each stage). 
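For reference, the stated optimization recipe corresponds roughly to the following PyTorch setup, reusing the `masked_training_step` sketch above; `model`, `loader`, and `MASK_ID` are placeholders. Since PyTorch's ExponentialLR decays once per `.step()` call, invoking it every 5,000 iterations reproduces the "reduce by 0.95 every 5,000 iterations" schedule.

```python
import torch

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3,
                              weight_decay=1e-5, amsgrad=True)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)

for it, (cond, gt_codes) in enumerate(loader):
    loss = masked_training_step(model, cond, gt_codes, MASK_ID)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if (it + 1) % 5000 == 0:          # decay the learning rate by 0.95 every 5,000 iterations
        scheduler.step()
```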
The other conventional methods were trained for 4 days, using their default batch sizes and hyper-parameters. FID (Frechet Inception Distance) [9], IS (Inception Score) [14], and LPIPS (Learned Perceptual Image Patch Similarity) [20] were used for evaluating the synthesized omni-directional images in ERP.", "bbox": [ 212, 523, 787, 676 ], "page_idx": 8 }, { "type": "text", "text": "5 Results", "text_level": 1, "bbox": [ 215, 696, 323, 710 ], "page_idx": 8 }, { "type": "text", "text": "5.1 Evaluation of Proposed Method", "text_level": 1, "bbox": [ 215, 726, 524, 739 ], "page_idx": 8 }, { "type": "text", "text": "The proposed method, 2S-ODIS, was quantitatively evaluated and compared with the conventional methods, OmniDreamer [2], CNN-based cGAN [10], MLPMixer-based cGAN [13], and LAMA [15], as shown in Table 1. The models in the proposed method were trained for 4 days (2 days for the low-resolution model and 2 days for the high-resolution model). For comparison, the result with the models trained for 2 days (1 day + 1 day for the low-resolution and high-resolution models) was also", "bbox": [ 212, 750, 784, 839 ], "page_idx": 8 }, { "type": "header", "text": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis", "bbox": [ 361, 114, 730, 128 ], "page_idx": 8 }, { "type": "page_number", "text": "9", "bbox": [ 774, 116, 784, 126 ], "page_idx": 8 }, { "type": "table", "img_path": "images/cb50e6897925890b81a5fb2d5ebd67331c07ae97c8d817ac483f7c2debecbb3b.jpg", "table_caption": [ "Table 1: Quantitative comparison with conventional methods" ], "table_footnote": [], "table_body": "
<table><tr><td>Method</td><td>IS (↑)</td><td>FID (↓)</td><td>LPIPS (↑)</td></tr>
<tr><td>2S-ODIS (Proposed method)</td><td>5.969</td><td>18.263</td><td>0.662</td></tr>
<tr><td>2S-ODIS (2 days)</td><td>5.857</td><td>18.656</td><td>0.668</td></tr>
<tr><td>OmniDreamer [2]</td><td>4.458</td><td>23.101</td><td>0.655</td></tr>
<tr><td>CNN-based cGAN [10]</td><td>4.684</td><td>40.049</td><td>0.633</td></tr>
<tr><td>MLPMixer-based cGAN [13]</td><td>4.402</td><td>47.690</td><td>0.634</td></tr>
<tr><td>LAMA [15]</td><td>5.784</td><td>69.485</td><td>0.478</td></tr></table>
", + "bbox": [ + 313, + 178, + 684, + 292 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/303cd8eb4fe23c27145e2f4ab83a369b2da445cc961a7670a0d712954597d719.jpg", + "image_caption": [ + "Fig. 6: Evaluation metrics during training of proposed method compared with conventional method, OmniDreamer [2]" + ], + "image_footnote": [], + "bbox": [ + 223, + 316, + 406, + 458 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/fe8a2604ae7f5e89220dc4646fffa79bcdd48b49ff9040f9e0ba9a654192f25c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 413, + 318, + 593, + 458 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/b62a8dade0ea8abf54a771c6b5758917568e241446f4ae43c62688a425453cfd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 594, + 318, + 779, + 458 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/c004153388a3cc274c64b3b1a0e3a66d2f26da9d4bc2855eb9f0f1bbd6a15e8d.jpg", + "image_caption": [ + "Fig. 7: Examples of synthesized omni-directional images compared with conventional method, OmniDreamer [2]" + ], + "image_footnote": [], + "bbox": [ + 215, + 532, + 787, + 790 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 218, + 116, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "A. Nakata and T. Yamanaka", + "bbox": [ + 271, + 114, + 464, + 127 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/f604a9917878d9dd6a75815e812ecf3d8db3abc95ebbebccf42e8421538d4483.jpg", + "image_caption": [ + "2S-ODIS First Stage" + ], + "image_footnote": [], + "bbox": [ + 330, + 172, + 441, + 258 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/5748ed2919106ec279857fe8fe288cbf51d97f69eca418d12ae8b8a96d938016.jpg", + "image_caption": [ + "2S-ODIS Second Stage (Ours)" + ], + "image_footnote": [], + "bbox": [ + 444, + 172, + 557, + 258 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/e9016060434ad93abc2e84105f3bcfacda21d62ecf9419773d1f850001a1b82c.jpg", + "image_caption": [ + "OmniDreamer [2]" + ], + "image_footnote": [], + "bbox": [ + 560, + 172, + 671, + 258 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/43900a8c98b5536adadba112377939af5444e4e2ce19b874b7173256f6b16cda.jpg", + "image_caption": [ + "Fig. 8: Examples of NFoV images toward ground extracted from synthesized omnidirectional images in proposed and conventional methods" + ], + "image_footnote": [], + "bbox": [ + 330, + 261, + 441, + 347 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/35855581dd9bfaecf20726e99ce969ce9a3d1b3a3f6e2ccb3f9d3d207e268f06.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 444, + 261, + 555, + 347 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/455b066d64b5efeafbd02ca4f559a1447a9bd1e5ec0178d5406bb8786eadb383.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 560, + 261, + 671, + 347 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "shown in the table. The field of view for a NFoV image embedded in an input conditional image was set to 126.87 and 112.62 degrees for width and height in the experiments, respectively. It can be seen from the table that the proposed method achieved higher performance than the other conventional methods. 
Although the highest performance was achieved by the proposed method trained for 4 days, it already outperformed the other methods even with the 2-day training. To see this more clearly, the evaluation metrics during the training of the proposed method are shown in Fig. 6, where the performance exceeded that of OmniDreamer within 2 days and converged in around 4 days. Thus, the proposed method drastically shortened the training to 2-4 days from the 14 days in OmniDreamer including the fine-tuning of the VQGAN model. In addition, the inference in the proposed method was much faster than in OmniDreamer: 1.54 seconds versus 39.33 seconds for synthesizing each omni-directional image in the proposed method and OmniDreamer, respectively. This is because the proposed method is based on simultaneous VQGAN-code prediction instead of sequential auto-regressive prediction.", "bbox": [ 212, 416, 787, 657 ], "page_idx": 10 }, { "type": "text", "text": "For the qualitative comparison, examples of the synthesized omni-directional images are shown in Fig. 7. It is clear that the proposed method generated globally plausible and locally detailed omni-directional images, while OmniDreamer [2] sometimes failed to generate continuous images, especially along the edges of the input conditional images. To see whether the proposed method can generate NFoV images without distortion, the NFoV images toward the ground were extracted from the synthesized omni-directional images, since the geometric distortion is large at the poles in ERP. Examples of the NFoV images toward the ground are shown in Fig. 8. The left column shows the NFoV images at the first stage, while the middle column shows the NFoV images at the second stage. Since the pre-trained VQGAN codes cannot represent the geometric distortion in ERP, straight lines were not appropriately reproduced at the first stage.", "bbox": [ 212, 659, 803, 840 ], "page_idx": 10 }, { "type": "header", "text": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis", "bbox": [ 361, 114, 730, 128 ], "page_idx": 10 }, { "type": "page_number", "text": "11", "bbox": [ 767, 116, 782, 126 ], "page_idx": 10 }, { "type": "table", "img_path": "images/c2aede004e59ac636a127c15fb53be9120f9ad545d1fc37c03cb408dd13a3392.jpg", "table_caption": [ "Table 2: Quantitative comparison in various conditional images" ], "table_footnote": [], "table_body": "
<table><tr><td>Mask Setting</td><td>Method</td><td>IS (↑)</td><td>FID (↓)</td><td>LPIPS (↑)</td></tr>
<tr><td>Inpainting</td><td>2S-ODIS (Proposed)</td><td>5.582</td><td>15.044</td><td>0.685</td></tr>
<tr><td>Inpainting</td><td>OmniDreamer [2]</td><td>4.672</td><td>41.209</td><td>0.708</td></tr>
<tr><td>Inpainting of Ground Region</td><td>2S-ODIS (Proposed)</td><td>6.084</td><td>13.038</td><td>0.680</td></tr>
<tr><td>Inpainting of Ground Region</td><td>OmniDreamer [2]</td><td>5.474</td><td>15.303</td><td>0.699</td></tr>
<tr><td>Outpainting from Two Images</td><td>2S-ODIS (Proposed)</td><td>5.722</td><td>19.437</td><td>0.663</td></tr>
<tr><td>Outpainting from Two Images</td><td>OmniDreamer [2]</td><td>3.952</td><td>33.403</td><td>0.672</td></tr></table>
", + "bbox": [ + 302, + 171, + 697, + 286 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/0ef10b74b93361baf4c7cc3f4d35787fce6e77d2e7014c8ab942d1eeafa85bb4.jpg", + "image_caption": [ + "Fig. 9: Examples of synthesized omni-directional images in various input conditions." + ], + "image_footnote": [], + "bbox": [ + 217, + 297, + 787, + 512 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "However, they were compensated at the second stage, which can be clearly seen in the sample images. On the other hand, OmniDreamer cannot appropriately reproduce the NFoV images toward the ground. Thus, it was confirmed that the proposed method synthesized omni-directional images without geometric distortion.", + "bbox": [ + 212, + 568, + 787, + 643 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5.2 Evaluation in Various Input Conditions", + "text_level": 1, + "bbox": [ + 215, + 664, + 586, + 680 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "As explained in 3.3, the models in the proposed method were trained with various conditional images in Fig. 5. They were not limited to the single NFoV image embedded in ERP, so that the proposed model can be applied various inpainting and out-painting tasks. For example, the model can be applied to the inpainting task to remove objects or people in the omni-directional image taken by a 360-degree camera, as shown in the top row in Fig. 9. Another example is the task to fill in the ground region of an omni-directional image as shown in the middle row in Fig. 9, since the omni-directional image often includes a hand or a camera stand at the bottom region in ERP. The last example in Fig. 9 is to synthesize an omni-directional image which includes two NFoV images such as", + "bbox": [ + 212, + 689, + 787, + 840 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "A. Nakata and T. Yamanaka", + "bbox": [ + 271, + 114, + 464, + 127 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/1317c201c282b927125799a8201b66ccf0c983cd579a4f57d7d1819944a42824.jpg", + "table_caption": [ + "Table 3: Ablation study in propose method" + ], + "table_footnote": [], + "table_body": "
<table><tr><td></td><td>IS (↑)</td><td>FID (↓)</td><td>LPIPS (↑)</td></tr>
<tr><td>(1) Proposed</td><td>5.969</td><td>18.263</td><td>0.662</td></tr>
<tr><td>(2) 1 Stage: Low Resolution Model</td><td>5.798</td><td>28.329</td><td>0.670</td></tr>
<tr><td>(3) 1 Stage: High Resolution Model</td><td>4.821</td><td>52.453</td><td>0.638</td></tr>
<tr><td>(4) Direct use of low-resolution VQGAN codes</td><td>5.837</td><td>21.820</td><td>0.663</td></tr></table>
", + "bbox": [ + 254, + 170, + 743, + 258 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "front and rear cameras of a smartphone. Although OmniDreamer failed in synthesizing the omni-directional images in these situations, the proposed method generated high-quality omni-directional images. The quantitative results shown in Table 2 also indicate that the proposed method achieved higher performance than OmniDreamer. Although the diversity of the synthesized images was higher in OmniDreamer than the proposed method, it may be due to generating random images as shown in Fig. 9.", + "bbox": [ + 212, + 287, + 787, + 393 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5.3 Ablation Study", + "text_level": 1, + "bbox": [ + 214, + 417, + 388, + 434 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Ablation studies were conducted to investigate the effectiveness of each component in the proposed method: the low-resolution and high-resolution models in the 2-stage structure. The results are shown in Table 3. (1) is the proposed method with the 2-stage structure, while (2) and (3) are the results with 1-stage structure only using the low-resolution model and the high-resolution model, respectively. As can be seen from the table, the 2-stage structure was indispensable for the high-quality image synthesis.", + "bbox": [ + 212, + 446, + 787, + 551 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Moreover, it was examined that the low-resolution VQGAN codes generated at the first stage were directly used at the second stage instead of the low-resolution image generated at the first stage, since the 2-stage structure in Muse [3] uses the low-resolution VQGAN codes directly. The result is shown in Table 3 (4). It was confirmed from the result that the low-resolution image was better to use at the second stage than the low-resolution VQGAN codes directly. This may be because the low-resolution image is compressed by CNN to properly extract the global information generated at the first stage.", + "bbox": [ + 212, + 551, + 787, + 674 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "6 Limitations and Future Prospects", + "text_level": 1, + "bbox": [ + 214, + 699, + 578, + 717 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The proposed method synthesizes an omni-directional image by merging multiple NFoV images with weights depending on the distance from the edges of the NFoV images. Although the generation of the NFoV images are conditioned by the global low-resolution image generated at the first stage, it may be possible to generate discontinuous NFoV images. One possible solution would be to add an additional network to refine the generated omni-directional images to improve the continuity between NFoV images. Another issue is that it takes 1-2 days to", + "bbox": [ + 212, + 733, + 787, + 840 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis", + "bbox": [ + 359, + 113, + 730, + 128 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 784, + 126 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "convert omni-directional images in the dataset to VQGAN codes using the pretrained VQGAN encoder. 
This may be alleviated by constructing a light-weight encoder through model distillation of the VQGAN encoder.", "bbox": [ 215, 146, 784, 191 ], "page_idx": 13 }, { "type": "text", "text": "Currently, the inputs to the proposed method are limited to conditional images such as a single or several NFoV images embedded in ERP, or masked omni-directional images for in-painting. However, the proposed architecture can be applied to any conditional information, such as text information, a class label, or guidance information for style modulation, using an additional module such as cross attention, as in stable diffusion. In addition, the hyper-parameters have not been thoroughly explored in the evaluation of the proposed method. For example, the directions of NFoV images at the second stage were fixed to the 26 directions corresponding to the faces of a rhombicuboctahedron, and the field of view was fixed to 60 degrees; both may be optimized in future work. Furthermore, the network structure, such as MaxViT, might be replaced with an architecture better optimized for omni-directional image synthesis. Although this paper focused on the tasks of omni-directional image synthesis, the proposed architecture would be useful for other omni-directional image tasks, such as semantic segmentation and object detection.", "bbox": [ 215, 191, 785, 417 ], "page_idx": 13 }, { "type": "text", "text": "7 Conclusion", "text_level": 1, "bbox": [ 217, 439, 356, 455 ], "page_idx": 13 }, { "type": "text", "text": "A novel method for omni-directional image synthesis is proposed in this paper. By using the pre-trained VQGAN encoder and decoder without fine-tuning, the training of the model was drastically shortened. To manage the distortion in an omni-directional image in ERP, a two-stage structure was adopted. At the first stage, an omni-directional image is generated in ERP without geometric distortion correction, so it cannot reproduce straight lines at the poles of the sphere. This is corrected at the second stage by synthesizing an omni-directional image from multiple NFoV images based on geometric distortion correction. To realize fast inference, the sampling strategy in MaskGIT was adopted to predict VQGAN codes simultaneously. As a result, the proposed method achieved high-quality omni-directional image synthesis with low computational costs in both training and inference.", "bbox": [ 215, 470, 785, 651 ], "page_idx": 13 }, { "type": "text", "text": "Acknowledgement", "text_level": 1, "bbox": [ 217, 672, 392, 690 ], "page_idx": 13 }, { "type": "text", "text": "This work was supported by JSPS KAKENHI Grant Number JP21K11943.", "bbox": [ 217, 703, 751, 718 ], "page_idx": 13 }, { "type": "text", "text": "References", "text_level": 1, "bbox": [ 217, 739, 321, 757 ], "page_idx": 13 }, { "type": "list", "sub_type": "ref_text", "list_items": [ "1. Compvis/taming-transformers. https://github.com/CompVis/taming-transformers", "2. Akimoto, N., Matsuo, Y., Aoki, Y.: Diverse plausible 360-degree image outpainting for efficient 3dcg background creation.
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" ], "bbox": [ 222, 770, 782, 838 ], "page_idx": 13 }, { "type": "page_number", "text": "14", "bbox": [ 217, 114, 235, 126 ], "page_idx": 13 }, { "type": "header", "text": "A. Nakata and T. Yamanaka", "bbox": [ 271, 114, 464, 127 ], "page_idx": 13 }, { "type": "list", "sub_type": "ref_text", "list_items": [ "3. Chang, H., Zhang, H., Barber, J., Maschinot, A., Lezama, J., Jiang, L., Yang, M.H., Murphy, K.P., Freeman, W.T., Rubinstein, M., Li, Y., Krishnan, D.: Muse: Text-to-image generation via masked generative transformers. In: Proceedings of the 40th International Conference on Machine Learning (ICML) (2023)", "4. Chang, H., Zhang, H., Jiang, L., Liu, C., Freeman, W.T.: Maskgit: Masked generative image transformer. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (June 2022)", "5. Chen, Z., Wang, G., Liu, Z.: Text2light: Zero-shot text-driven hdr panorama generation. ACM Transactions on Graphics (TOG) 41(6), 1-16 (2022)", "6. Dastjerdi, M.R.K., Hold-Geoffroy, Y., Eisenmann, J., Khodadadeh, S., Lalonde, J.F.: Guided co-modulated gan for $360^{\circ}$ field of view extrapolation. In: 2022 International Conference on 3D Vision (3DV). pp. 475-485 (2022)", "7. Esser, P., Rombach, R., Ommer, B.: Taming transformers for high-resolution image synthesis. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 12873-12883 (2021)", "8. Hara, T., Mukuta, Y., Harada, T.: Spherical image generation from a single image by considering scene symmetry. In: Thirty-Fifth AAAI Conference on Artificial Intelligence. pp. 1513-1521 (2021)", "9. Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: Gans trained by a two time-scale update rule converge to a local nash equilibrium. In: Advances in Neural Information Processing Systems (NeurIPS) (2017)", "10. Okubo, K., Yamanaka, T.: Omni-directional image generation from single snapshot image. In: IEEE International Conference on Systems, Man, and Cybernetics (SMC) (2020)", "11. Kingma, D.P., Welling, M.: Auto-encoding variational bayes. In: International Conference on Learning Representations (ICLR) (2014)", "12. Lu, Z., Hu, K., Wang, C., Bai, L., Wang, Z.: Autoregressive omni-aware outpainting for open-vocabulary 360-degree image generation. In: arXiv preprint arXiv:2309.03467 (2023)", "13. Nakata, A., Miyazaki, R., Yamanaka, T.: Increasing diversity of omni-directional images generated from single image using cgan based on mlpmixer. In: Asian Conference on Pattern Recognition (ACPR)", "14. Salimans, T., Goodfellow, I., Zaremba, W., Cheung, V., Radford, A., Chen, X., Chen, X.: Improved techniques for training gans. In: Advances in Neural Information Processing Systems (NeurIPS) (2016)", "15. Suvorov, R., Logacheva, E., Mashikhin, A., Remizova, A., Ashukha, A., Silvestrov, A., Kong, N., Goka, H., Park, K., Lempitsky, V.: Resolution-robust large mask inpainting with fourier convolutions. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV) (2022)", "16. Tu, Z., Talebi, H., Zhang, H., Yang, F., Milanfar, P., Bovik, A., Li, Y.: Maxvit: Multi-axis vision transformer. In: European Conference on Computer Vision (ECCV) (2022)", "17. Van Den Oord, A., Vinyals, O., et al.: Neural discrete representation learning.
In: Advances in Neural Information Processing Systems (NeurIPS). vol. 30 (2017)", + "18. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: Advances in Neural Information Processing Systems (NeurIPS). vol. 30 (2017)", + "19. Xiao, J., Ehinger, K.A., Oliva, A., Torralba, A.: Recognizing scene viewpoint using panoramic place representation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2012)" + ], + "bbox": [ + 217, + 146, + 787, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis", + "bbox": [ + 361, + 114, + 730, + 128 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 14 + }, + { + "type": "ref_text", + "text": "20. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2018)", + "bbox": [ + 215, + 146, + 785, + 189 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "A. Nakata and T. Yamanaka", + "bbox": [ + 271, + 114, + 464, + 126 + ], + "page_idx": 15 + } +] \ No newline at end of file diff --git a/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction/47960505-1990-491c-a65e-6611419353a0_model.json b/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction/47960505-1990-491c-a65e-6611419353a0_model.json new file mode 100644 index 0000000000000000000000000000000000000000..73edf700c6dc7e3ebd5ab5d6fa34665dcf39516d --- /dev/null +++ b/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction/47960505-1990-491c-a65e-6611419353a0_model.json @@ -0,0 +1,1728 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.24, + 0.142, + 0.765, + 0.187 + ], + "angle": 0, + "content": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction" + }, + { + "type": "text", + "bbox": [ + 0.356, + 0.213, + 0.648, + 0.228 + ], + "angle": 0, + "content": "Atsuya Nakata and Takao Yamanaka" + }, + { + "type": "text", + "bbox": [ + 0.294, + 0.24, + 0.709, + 0.27 + ], + "angle": 0, + "content": "Sophia University, Tokyo, Japan \na-nakata-7r0@eagle.sophia.ac.jp, takao-y@sophia.ac.jp" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.293, + 0.744, + 0.626 + ], + "angle": 0, + "content": "Abstract. Omni-directional images have been increasingly used in various applications, including virtual reality and SNS (Social Networking Services). However, their availability is comparatively limited in contrast to normal field of view (NFOV) images, since specialized cameras are required to take omni-directional images. Consequently, several methods have been proposed based on generative adversarial networks (GAN) to synthesize omni-directional images, but these approaches have shown difficulties in training of the models, due to instability and/or significant time consumption in the training. To address these problems, this paper proposes a novel omni-directional image synthesis method, 2S-ODIS (Two-Stage Omni-Directional Image Synthesis), which generated high-quality omni-directional images but drastically reduced the training time. 
This was realized by utilizing the VQGAN (Vector Quantized GAN) model pre-trained on a large-scale NFOV image database such as ImageNet without fine-tuning. Since this pre-trained model does not represent distortions of omni-directional images in the equi-rectangular projection (ERP), it cannot be applied directly to the omni-directional image synthesis in ERP. Therefore, two-stage structure was adopted to first create a global coarse image in ERP and then refine the image by integrating multiple local NFOV images in the higher resolution to compensate the distortions in ERP, both of which are based on the pre-trained VQGAN model. As a result, the proposed method, 2S-ODIS, achieved the reduction of the training time from 14 days in OmniDreamer to four days in higher image quality." + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.668, + 0.789, + 0.743 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.754, + 0.789, + 0.811 + ], + "angle": 0, + "content": "Fig. 1: Overview of advantages of proposed method, 2S-ODIS. OmniDreamer [2] requires 14 days for training of the model, including 1-week training of the VQGAN model. In contrast, the proposed method only required 4 days for the training of the model since no training of VQGAN model was required." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.465, + 0.128 + ], + "angle": 0, + "content": "A. Nakata and T. Yamanaka" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.145, + 0.375, + 0.161 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.179, + 0.787, + 0.314 + ], + "angle": 0, + "content": "An omni-directional image is taken by an omni-directional camera, also known as a 360-degree camera, which captures lights from all directions falling onto the focal point to cover a full sphere, and is usually represented in the equi-rectangular projection (ERP) to represent it in a 2-dimensional plane. These images have been applied to various applications such as virtual reality, social networking services, and map tools such as Google Street View. However, the availability of the omni-directional images are still limited compared with Normal Field of View (NFoV) images captured by a standard camera, since the specialized camera is required to capture the omni-directional images." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.315, + 0.787, + 0.42 + ], + "angle": 0, + "content": "Recently, several methods have been proposed to synthesize omni-directional images from a NFoV image [2,6,8,10,13], a text condition [5], or both [12]. These methods have been based on Generative Adversarial Networks (GAN) [6,8,10, 13], Vector Quantized GAN (VQGAN) [2,5], or auto-regressive outpainting using stable diffusion [12]. However, the GAN-based methods have faced challenges of instability in training, while the VQGAN-based methods and the auto-regression with stable diffusion require long training and inference time, respectively." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.422, + 0.787, + 0.768 + ], + "angle": 0, + "content": "In this paper, we propose a novel method to synthesize omni-directional images from a NFoV image based on pre-trained VQGAN, trained on a large-scale NFoV-image dataset. 
The previous method with VQGAN [2] has required to train the VQGAN encoder and decoder to represent geometric distortions in omni-directional images in ERP, especially at poles (top and bottom regions in ERP). This training has taken long time, such as 1 week in the method [2]. In the proposed method, the pre-trained VQGAN encoder and decoder were applied without fine-tuning by synthesizing multiple NFoV images to integrate them into an omni-directional image based on geometric distortion correction. Since no training of VQGAN was required, the training of the model was shortened by removing the step of the fine-tuning of VQGAN, as shown in Fig. 1. Furthermore, a two-stage structure was adopted in the proposed method. At the first stage, a global coarse image in ERP is created using the pre-trained VQGAN encoder and decoder without the geometric distortion correction. Therefore, the generated omni-directional image at the first stage includes distortions. For example, a straight line in NFoV images at poles can not be reproduced at the first stage. At the second stage, this global coarse image is refined by synthesizing an omni-directional image from multiple NFoV images generated using the pre-trained VQGAN encoder and decoder. This second stage compensates the geometric distortions at the first stage, in addition to representing local detailed textures in a higher resolution. By using the two-stage structure, the model can produce globally plausible yet locally detailed omni-directional images without the geometric distortions." + }, + { + "type": "text", + "bbox": [ + 0.24, + 0.769, + 0.529, + 0.784 + ], + "angle": 0, + "content": "The contributions of this paper include:" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.795, + 0.787, + 0.84 + ], + "angle": 0, + "content": "- A novel method to synthesize omni-directional images from a NFoV image was proposed using pre-trained VQGAN. Since no training of VQGAN was required, the training time was drastically reduced." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.362, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.224, + 0.147, + 0.793, + 0.191 + ], + "angle": 0, + "content": "- A two-stage structure was adopted to generate a global coarse omni-directional image at the first stage, and then generate a locally detailed image with geometric distortion correction at the second stage." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.193, + 0.786, + 0.238 + ], + "angle": 0, + "content": "- Experimental results demonstrated that the proposed method synthesized higher quality omni-directional images in shortened training and inference time than the previous methods such as OmniDreamer [2]." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.262, + 0.396, + 0.278 + ], + "angle": 0, + "content": "2 Related Works" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.295, + 0.411, + 0.31 + ], + "angle": 0, + "content": "2.1 Image Generation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.322, + 0.788, + 0.579 + ], + "angle": 0, + "content": "VQVAE (Vector Quantized Variational AutoEncoder) [17] has been proposed to improve the generated image blurriness in VAE [11] by representing image patches with quantized latent vectors based on vector quantization. 
Furthermore, the adversarial loss has been introduced in VQVAE to make the generated images clearer, called VQGAN [7]. In this method, Transformer [18] has been used to sequentially predict image patches from neighbor patches based on auto-regressive prediction. The patches are represented with the quantized latent vectors called VQGAN codes to generate clear images with low computational cost. To improve the slow inference in VQGAN due to the sequential predictions of patches, MaskGIT [4] has been proposed by predicting multiple patches simultaneously. Although MaskGIT has succeeded in improving the inference speed, it has been difficult to generate high quality images in the high resolution. To solve this problem, Muse [3] has been proposed using a two-stage structure, where a low-resolution image is generated at the first stage, and then is refined to generate a higher-resolution image at the second stage. In our proposed method, this two-stage structure was adopted for the omni-directional image synthesis in the high resolution." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.603, + 0.555, + 0.618 + ], + "angle": 0, + "content": "2.2 Omni-directional Image Generation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.629, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Several methods have been proposed for synthesizing omni-directional images from NFoV images. Okubo and Yamanaka [10] have proposed a method of generating omni-directional images based on conditional GAN from a single NFoV image with the class label. Hara et al. [8] have also proposed a method based on the symmetric property in the omni-directional images using GAN and VAE. Another work to synthesize omni-directional images is Guided ImmerseGAN [6], which generates omni-directional images from a NFoV image with the modulation guided by a given class label, which does not have to be the true class of the input NFoV image. In the work of OmniDreamer [2], VQGAN has been applied to the omni-directional image synthesis by using Transformer for auto-regressive prediction. In this method, VQGAN encoder and decoder have to be fine-tuned on an omni-directional image dataset since the geometric distortion in ERP has to be represented in the latent codes of VQGAN. Text2Light [5] also uses VQGAN with auto-regressive prediction for the generation, though only text" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.465, + 0.128 + ], + "angle": 0, + "content": "A. Nakata and T. Yamanaka" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.145, + 0.788, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.297, + 0.789, + 0.382 + ], + "angle": 0, + "content": "Fig. 2: Qualitative comparison of omni-directional image reconstruction using pretrained VQGAN encoder and decoder. Omni-directional Image: original omnidirectional image, Reconstructed in ERP: reconstructed in equirectangular projection, Reconstructed in Extracted Images: reconstructed by integrating multiple NFoV images in different directions. By extracting NFoV images, an omni-directional image can be correctly reconstructed without distortions." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.411, + 0.789, + 0.544 + ], + "angle": 0, + "content": "information is taken as the input instead of the NFoV image. Nakata et al. 
[13] have proposed a method to increase the diversity of generated omni-directional images based on MLP-Mixer by efficiently propagating the information of the NFoV image embedded at the center in ERP. AOGNet [12] has generated omnidirectional images by out-painting an incomplete 360-degree image progressively with NFoV and text guidances jointly or individually. This has been realized using auto-regressive prediction based on the stable-diffusion backbone model. Due to the nature of sequential auto-regressive prediction, it takes long inference time." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.547, + 0.79, + 0.684 + ], + "angle": 0, + "content": "In our proposed method, the pre-trained VQGAN model was used without fine-tuning on the omni-directional image dataset, since multiple NFoV images are synthesized based on the pre-trained VQGAN and then integrated into omni-directional images. By removing the step of VQGAN training, the overall training of the model was drastically shortened than the previous method with VQGAN such as OmniDreamer [2]. In addition, the proposed method is based on simultaneous synthesis of multiple NFoV images in different directions, whose inference was faster than auto-regressive prediction such as OmniDreamer [2] and AOGNet [12]." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.707, + 0.427, + 0.725 + ], + "angle": 0, + "content": "3 Proposed Method" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.739, + 0.434, + 0.755 + ], + "angle": 0, + "content": "3.1 Two-Stage Structure" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.789, + 0.842 + ], + "angle": 0, + "content": "The proposed method consists of the two-stage structure, where a global coarse omni-directional image in ERP is synthesized in a low resolution \\((256\\times 512\\) pixels) at the first stage without geometric distortion correction, and then is refined at the second stage by integrating the multiple synthesized NFoV images in different directions based on the geometric distortion correction, producing" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.362, + 0.115, + 0.733, + 0.13 + ], + "angle": 0, + "content": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.144, + 0.789, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.306, + 0.752, + 0.467 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.275, + 0.475, + 0.726, + 0.49 + ], + "angle": 0, + "content": "Fig. 3: Diagram of the proposed method. (a)Inference, (b)Training." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.529, + 0.785, + 0.574 + ], + "angle": 0, + "content": "a high-quality omni-directional image in ERP in a high resolution (1024×2048 pixels). At both stages, the pre-trained VQGAN was utilized without fine-tuning on the omni-directional image dataset." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.58, + 0.788, + 0.716 + ], + "angle": 0, + "content": "As a preliminary experiment, an omni-directional image was reconstructed in ERP or in multiple NFoV images using pre-trained VQGAN encoder and decoder without the fine-tuning, as shown in Fig. 2. 
It can be seen from the figure that the reconstruction in ERP cannot correctly reproduce the texture in the region toward the ground (blue frame) and the continuity in the region at both edges (yellow frame), although it can reproduce the region at center in ERP (red frame). On the contrary, all the regions can be correctly reproduced in the extracted NFoV images. This indicates that the pre-trained VQGAN model can be applied without fine-tuning if it is applied to NFoV images." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.72, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Thus, the generated omni-directional image in ERP at the first stage in the proposed method includes distortions since the pre-trained VQGAN cannot represent the texture and continuities in the omni-directional images in ERP. However, these distortions are correctly compensated at the second stage by synthesizing the multiple NFoV images which can be correctly reproduced by the pre-trained VQGAN model. If only the second stage is used in the proposed method, it is difficult to synthesize multiple NFoV images simultaneously with global compatibility. Therefore, the two-stage structure was adopted in the pro" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.465, + 0.127 + ], + "angle": 0, + "content": "A. Nakata and T. Yamanaka" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.143, + 0.478, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.481, + 0.143, + 0.78, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.217, + 0.306, + 0.787, + 0.445 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.216, + 0.454, + 0.788, + 0.484 + ], + "angle": 0, + "content": "Fig. 4: Structure of the proposed method. The range of attention differs between the high-resolution and low-resolution models." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.514, + 0.785, + 0.544 + ], + "angle": 0, + "content": "posed method to produce globally plausible coarse omni-directional image at the first stage." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.545, + 0.788, + 0.712 + ], + "angle": 0, + "content": "The structure of the proposed method is shown in Fig. 3. At the first stage, the low resolution model produces the low resolution codes, which are converted into patches of omni-directional images in ERP using the pre-trained VQGAN decoder. At the second stage, the high resolution model produces the high resolution codes, which are corresponding to the patches in the NFoV images in multiple directions (26 directions in our implementation) in an omni-directional image with overlapping. These 26 directions for the NFoV images were the same directions as the normal vectors in the faces of a rhombicuboctahedron. The field of view was set to 60 degrees for all directions. The generated NFoV images with the size of \\(256 \\times 256\\) pixels were integrated into an omni-directional image with the size of \\(1024 \\times 2048\\) pixels in ERP." 
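To make the view-extraction geometry concrete, the following is a minimal sketch (not the authors' code; the function name, camera conventions, and nearest-neighbor sampling are illustrative assumptions) of how one 60-degree NFoV view is sampled from an ERP image; repeating it for each of the 26 rhombicuboctahedron face normals yields the full set of views.

```python
import numpy as np

def nfov_from_erp(erp, yaw, pitch, fov_deg=60.0, size=256):
    """Sample one perspective (NFoV) view from an ERP image on the unit sphere."""
    h_erp, w_erp = erp.shape[:2]
    f = 0.5 * size / np.tan(np.radians(fov_deg) / 2.0)      # pinhole focal length
    u, v = np.meshgrid(np.arange(size) - size / 2 + 0.5,
                       np.arange(size) - size / 2 + 0.5)
    rays = np.stack([u, -v, np.full_like(u, f)], axis=-1)   # camera frame, z forward
    rays /= np.linalg.norm(rays, axis=-1, keepdims=True)
    cp, sp, cy, sy = np.cos(pitch), np.sin(pitch), np.cos(yaw), np.sin(yaw)
    Rx = np.array([[1, 0, 0], [0, cp, -sp], [0, sp, cp]])   # tilt (pitch)
    Ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])   # pan (yaw)
    rays = rays @ (Ry @ Rx).T                               # rotate into world frame
    lon = np.arctan2(rays[..., 0], rays[..., 2])            # longitude in [-pi, pi]
    lat = np.arcsin(np.clip(rays[..., 1], -1.0, 1.0))       # latitude in [-pi/2, pi/2]
    x = ((lon / np.pi + 1.0) / 2.0 * w_erp).astype(int) % w_erp
    y = np.clip(((0.5 - lat / np.pi) * h_erp).astype(int), 0, h_erp - 1)
    return erp[y, x]                                        # nearest-neighbor lookup
```

Warping a generated NFoV image back into ERP uses the inverse of the same spherical mapping.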
+ }, + { + "type": "title", + "bbox": [ + 0.216, + 0.737, + 0.341, + 0.751 + ], + "angle": 0, + "content": "3.2 Inference" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.788, + 0.842 + ], + "angle": 0, + "content": "For synthesizing an omni-directional image in the inference, the low-resolution codes are first generated using the sampling strategy proposed in MaskGIT [4] at the first stage from the conditional image where an input NFoV image is embedded at the center in ERP, as shown in Fig. 3(a). In MaskGIT, the generation starts with 'Masked low resolution codes', which are filled with the [MASK] code," + } + ], + [ + { + "type": "header", + "bbox": [ + 0.362, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.357 + ], + "angle": 0, + "content": "the VQGAN code which indicates that a location is masked, for all locations. Then, the low-resolution model predicts the probabilities for all the [MASK] locations in parallel, and samples a VQGAN code based on its predicted probabilities over all possible VQGAN codes for each location. The locations with low probability are replaced with the [MASK] code again, and the VQGAN codes are resampled by predicting the probabilities using the low-resolution model. This process is repeated for \( T \) steps. At each iteration, the model predicts all VQGAN codes simultaneously but only keeps the most confident ones. The remaining VQGAN codes are replaced with the [MASK] code and re-predicted in the next iteration. The mask ratio during the iterations is determined by \( \cos\left(\frac{\pi}{2}\frac{t}{T}\right) \), where \( t \) indicates the current iteration out of the total steps \( T \). This mask ratio decreases monotonically from 1 to 0 with respect to \( t \), which ensures that most of the locations are masked during the early iterations to prevent producing inconsistent codes." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.359, + 0.788, + 0.511 + ], + "angle": 0, + "content": "At the second stage, the high-resolution codes are generated using the high-resolution model, where the generation process is almost the same as in the low-resolution model. The difference from the low-resolution model is that the model accepts the low-resolution image generated at the first stage as an additional conditional image, and generates NFoV images, as shown in Fig. 3(a). To integrate the generated NFoV images into an omni-directional image, the overlapping regions are merged with weights depending on the distance from the centers of the NFoV images. Specifically, let \( x_{i} \) and \( x_{j} \) be the two overlapping pixel values, and let \( d_{i} \) and \( d_{j} \) be the distances from the centers of the NFoV images at each position. The integrated pixel value \( y \) is given by" + }, + { + "type": "equation", + "bbox": [ + 0.389, + 0.522, + 0.785, + 0.552 + ], + "angle": 0, + "content": "\[\ny = \frac{w_{i}}{w_{i} + w_{j}} x_{i} + \frac{w_{j}}{w_{i} + w_{j}} x_{j} \tag{1}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.559, + 0.536, + 0.579 + ], + "angle": 0, + "content": "where \(w_{i} = 1 - \frac{d_{i}}{\max_{k}(d_{k})}\), \(w_{j} = 1 - \frac{d_{j}}{\max_{k}(d_{k})}\)."
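As a worked example of Eq. (1), here is a minimal sketch under one stated assumption: \(\max_k(d_k)\) is read as the largest center distance occurring within an NFoV image (e.g., the corner distance), so each weight falls off linearly toward a view's border. The function name and the sample values are illustrative, not from the paper.

```python
import numpy as np

def merge_pixels(x_i, x_j, d_i, d_j, d_max):
    """Eq. (1): blend two overlapping NFoV pixels mapped to the same ERP location.

    d_i, d_j: distances of this location from the two NFoV image centers.
    d_max:    assumed largest center distance within an NFoV image.
    """
    w_i = 1.0 - d_i / d_max          # weight shrinks toward the image border
    w_j = 1.0 - d_j / d_max
    return (w_i * x_i + w_j * x_j) / (w_i + w_j)

# An RGB pixel 40 px from one view's center and 120 px from the other's
# (d_max ~ 181 px, the corner distance of a 256x256 view):
y = merge_pixels(np.array([200.0, 10.0, 10.0]),
                 np.array([180.0, 20.0, 20.0]), 40.0, 120.0, 181.0)
```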
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.579, + 0.788, + 0.76 + ], + "angle": 0, + "content": "The network architecture used in the low-resolution model and the high-resolution model is shown in Fig. 4. The layer structure was adapted from MaxViT [16], although Transformer has been used in MaskGIT [4] and Muse [3]. The 8-layer MaxViT models were used in both the low-resolution model and the high-resolution model. In the low-resolution model, the padding in MBConv was replaced with circular padding to encourage continuity at the edges in ERP, whereas zero padding was retained in the high-resolution model. The block attention was applied within each divided region in the low-resolution model and within each NFoV image in the high-resolution model, as shown in Fig. 4. The grid attention was also applied globally in a sparse manner in the low-resolution model, as in the original MaxViT model, whereas it was applied among the same locations across the NFoV images in the high-resolution model." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.783, + 0.334, + 0.799 + ], + "angle": 0, + "content": "3.3 Training" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.787, + 0.84 + ], + "angle": 0, + "content": "The low-resolution and high-resolution models are independently trained, as shown in Fig. 3(b). The objective of the training is to make the low-resolution" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.465, + 0.127 + ], + "angle": 0, + "content": "A. Nakata and T. Yamanaka" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.144, + 0.787, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.216, + 0.211, + 0.785, + 0.239 + ], + "angle": 0, + "content": "Fig. 5: Examples of conditional images in training. These images are generated from omni-directional images in ERP by random masking." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.269, + 0.788, + 0.48 + ], + "angle": 0, + "content": "and high-resolution models predict plausible VQGAN codes at [MASK]-code locations for each inference step. For the low-resolution model, the inputs are 'randomly masked low-resolution codes' and a conditional image which emulates the NFoV image embedded at the center in ERP (Fig. 3(a)). The mask ratio in the randomly masked low-resolution codes is set to \(\cos\left(\frac{\pi}{2}r\right)\), where \(r\) is sampled from a uniform distribution [0,1), since this emulates a single iteration in the inference. Examples of the conditional image in the training are shown in Fig. 5. They are prepared by randomly masking an original omni-directional image in ERP to emulate the conditional image in the inference. Since they are not limited to a single NFoV image embedded in ERP, the trained model can be applied to various in-painting and out-painting tasks, as described in 5.2. The low-resolution model is trained to predict the original VQGAN codes in the real omni-directional image at [MASK]-code locations, so the cross entropy is used as the loss function to train the model."
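A minimal PyTorch sketch of this training step is given below; the flattened token layout, the extra codebook id used as [MASK], and the `model(inputs, cond_image)` signature are assumptions for illustration, not details from the paper.

```python
import math
import torch
import torch.nn.functional as F

MASK_ID = 1024  # assumed: one id past the 1024-entry VQGAN codebook acts as [MASK]

def masked_training_step(model, codes, cond_image):
    """codes: (B, N) ground-truth VQGAN code indices; cond_image: (B, 3, H, W)."""
    B, N = codes.shape
    r = torch.rand(B, device=codes.device)              # r ~ U[0, 1)
    ratio = torch.cos(0.5 * math.pi * r)                # mask ratio cos(pi/2 * r)
    mask = torch.rand(B, N, device=codes.device) < ratio[:, None]
    inputs = codes.masked_fill(mask, MASK_ID)           # hide the selected codes
    logits = model(inputs, cond_image)                  # (B, N, vocab) predictions
    # Cross-entropy only where codes were masked, as described above.
    return F.cross_entropy(logits[mask], codes[mask])
```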
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.481, + 0.79, + 0.662 + ], + "angle": 0, + "content": "For the high-resolution model, the inputs are 'randomly masked high-resolution codes' for multiple NFoV images (26 NFoV images in our implementation), conditional NFoV images converted from the conditional image in ERP, and the reconstructed low-resolution NFoV images converted from the low-resolution omni-directional image reconstructed using the pre-trained VQGAN encoder and decoder. The 'randomly masked high-resolution code' and the conditional image in ERP are prepared in the same manner for the low-resolution model. The low-resolution omni-directional image is required in the inputs of the high-resolution model to emulate the low-resolution image generated at the first stage. The high-resolution model is trained to predict the original VQGAN codes in the NFoV images converted from the real omni-directional image at [MASK]-code locations based on the cross-entropy loss function." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.663, + 0.788, + 0.739 + ], + "angle": 0, + "content": "Although the first and second stages are sequentially processed in the inference, they are independently trained in parallel, as shown in Fig. 3(b). This property is advantageous to shorten the required training time if multiple GPUs (Graphics Processing Units) can be used, although a single GPU was used in our implementation." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.763, + 0.376, + 0.78 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.788, + 0.84 + ], + "angle": 0, + "content": "The omni-directional image dataset, SUN360 [19], was used in the experiments. The 5,000 outdoor images were used for test, while the remaining 47,938 outdoor images were used for training. The size of the images in the dataset is \\(512 \\times 1024\\)" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.362, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.265 + ], + "angle": 0, + "content": "pixels. Although the proposed method generates images in \\(1024 \\times 2048\\) pixels, it was resized to \\(512 \\times 1024\\) pixels for the evaluation. For comparison, several conventional methods were also evaluated, including GAN-based methods with CNN (Convolutional Neural Networks) [10] or MLP-Mixer [13], a VQGAN-based method (OmniDreamer) [2], and a GAN-based in-painting method (LAMA) [15]. The models were implemented in PyTorch, and were trained in a single GPU (NVIDIA RTX3090). The code for the network architecture in the proposed method is provided in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.267, + 0.786, + 0.523 + ], + "angle": 0, + "content": "The pre-trained VQGAN was obtained from [1], which is the model with 1024 codebooks trained on ImageNet. Eight MaxViT layers in Fig. 4 were used in both the low-resolution and high-resolution models, with 256 internal dimensions. The sizes of the VQGAN codes at the first and second stages were \\(16 \\times 32\\) patches and \\(16 \\times 16\\) patches \\(\\times 26\\) NFoV images, respectively. These VQGAN codes were converted into trainable feature vectors in the 256 dimensions. 
The conditional image was also down-sampled into the same size as the VQGAN codes with 256 dimensions using CNN. At each iteration of MaskGIT in the first stage, these two feature vectors were added to trainable positional encoding vectors, and then were inputted into the low-resolution model composed of the 8 MaxViT layers. At each iteration in the second stage, the low-resolution image generated at the first stage (inference) or reconstructed using the pre-trained VQGAN (training) was converted into NFoV images. These NFoV images were down-sampled with CNN to be added to the feature vectors of the input VQGAN codes, the conditional NFoV images, and the positional encoding, and then were inputted into the high-resolution model. The total number of steps \(T\) in MaskGIT was set to 16 at both stages." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.525, + 0.788, + 0.678 + ], + "angle": 0, + "content": "The optimizer for training the models was AdamW with a learning rate of 0.001, a weight decay of 1e-5, Amsgrad, and ExponentialLR learning-rate scheduling reducing the rate by a factor of 0.95 every 5,000 iterations. OmniDreamer [2] was trained for 14 days (30 epochs in all training stages), while the proposed method was trained for 4 days (2 days with the batch size 16 at the first stage, and 2 days with the batch size 8 at the second stage, for 180,000 iterations at each stage). The other conventional methods were trained for 4 days, using their default batch sizes and hyper-parameters. FID (Frechet Inception Distance) [9], IS (Inception Score) [14], and LPIPS (Learned Perceptual Image Patch Similarity) [20] were used for evaluating the synthesized omni-directional images in ERP." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.697, + 0.325, + 0.712 + ], + "angle": 0, + "content": "5 Results" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.727, + 0.525, + 0.741 + ], + "angle": 0, + "content": "5.1 Evaluation of Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.785, + 0.84 + ], + "angle": 0, + "content": "The proposed method, 2S-ODIS, was quantitatively evaluated, compared with the conventional methods, OmniDreamer [2], CNN-based cGAN [10], MLPMixer-based cGAN [13], and LAMA [15], as shown in Table 1. The models in the proposed method were trained for 4 days (2 days for the low-resolution model and 2 days for the high-resolution model). For comparison, the result with the models trained for 2 days (1 day + 1 day for low-resolution and high-resolution models) was also" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.22, + 0.117, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.116, + 0.465, + 0.128 + ], + "angle": 0, + "content": "A. Nakata and T. Yamanaka" + }, + { + "type": "table_caption", + "bbox": [ + 0.292, + 0.152, + 0.712, + 0.166 + ], + "angle": 0, + "content": "Table 1: Quantitative comparison with conventional methods" + }, + { + "type": "table", + "bbox": [ + 0.314, + 0.179, + 0.686, + 0.294 + ], + "angle": 0, + "content": "
Method | IS (↑) | FID (↓) | LPIPS (↑)
2S-ODIS (Proposed method) | 5.969 | 18.263 | 0.662
2S-ODIS (2 days) | 5.857 | 18.656 | 0.668
OmniDreamer [2] | 4.458 | 23.101 | 0.655
CNN-based cGAN [10] | 4.684 | 40.049 | 0.633
MLPMixer-based cGAN [13] | 4.402 | 47.690 | 0.634
LAMA [15] | 5.784 | 69.485 | 0.478
" + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.318, + 0.407, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.414, + 0.319, + 0.594, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.595, + 0.319, + 0.781, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.217, + 0.473, + 0.785, + 0.503 + ], + "angle": 0, + "content": "Fig. 6: Evaluation metrics during training of proposed method compared with conventional method, OmniDreamer [2]" + }, + { + "type": "image", + "bbox": [ + 0.216, + 0.534, + 0.788, + 0.791 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.217, + 0.801, + 0.785, + 0.83 + ], + "angle": 0, + "content": "Fig. 7: Examples of synthesized omni-directional images compared with conventional method, OmniDreamer [2]" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.362, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "image_caption", + "bbox": [ + 0.334, + 0.149, + 0.438, + 0.161 + ], + "angle": 0, + "content": "2S-ODIS First Stage" + }, + { + "type": "image_caption", + "bbox": [ + 0.444, + 0.149, + 0.561, + 0.171 + ], + "angle": 0, + "content": "2S-ODIS Second Stage (Ours)" + }, + { + "type": "image_caption", + "bbox": [ + 0.574, + 0.149, + 0.661, + 0.161 + ], + "angle": 0, + "content": "OmniDreamer [2]" + }, + { + "type": "image", + "bbox": [ + 0.331, + 0.173, + 0.442, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.446, + 0.173, + 0.558, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.562, + 0.173, + 0.673, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.331, + 0.262, + 0.442, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.446, + 0.262, + 0.556, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.561, + 0.262, + 0.673, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.359, + 0.785, + 0.387 + ], + "angle": 0, + "content": "Fig. 8: Examples of NFoV images toward ground extracted from synthesized omnidirectional images in proposed and conventional methods" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.417, + 0.788, + 0.658 + ], + "angle": 0, + "content": "shown in the table. The field of view for a NFoV image embedded in an input conditional image was set to 126.87 and 112.62 degrees for width and height in the experiments, respectively. It can be seen from the table that the proposed method achieved higher performance than the other conventional methods. Although the highest performance was achieved in the proposed method trained for 4 days, the performance already outperformed the other methods even with the 2-day training. To see this more clearly, the evaluation metrics during the training of the proposed method is shown in Fig. 6, where the performance exceeded OmniDreamer by 2 days and converged in around 4 days. Thus, the proposed method drastically shortened the training to 2-4 days from 14 days in OmniDreamer including the fine-tuning of the VQGAN model. 
In addition, the inference in the proposed method was much faster than in OmniDreamer: 1.54 seconds versus 39.33 seconds for synthesizing each omni-directional image in the proposed method and OmniDreamer, respectively. This is because the proposed method is based on simultaneous VQGAN-code prediction instead of sequential auto-regressive prediction." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.66, + 0.804, + 0.841 + ], + "angle": 0, + "content": "For the qualitative comparison, examples of the synthesized omni-directional images are shown in Fig. 7. It is clear that the proposed method generated globally plausible and locally detailed omni-directional images, while OmniDreamer [2] sometimes failed to generate continuous images, especially along the edges of the input conditional images. To see if the proposed method can generate NFoV images without distortion, the NFoV images toward the ground were extracted from the synthesized omni-directional images, since the geometric distortion is large at the poles in ERP. The examples of the NFoV images toward the ground are shown in Fig. 8. The left column shows the NFoV images at the first stage, while the middle column shows the NFoV images at the second stage. Since the pre-trained VQGAN code cannot represent the geometric distortion in ERP, the straight lines were not appropriately reproduced at the first stage." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.465, + 0.128 + ], + "angle": 0, + "content": "A. Nakata and T. Yamanaka" + }, + { + "type": "table_caption", + "bbox": [ + 0.286, + 0.145, + 0.716, + 0.16 + ], + "angle": 0, + "content": "Table 2: Quantitative comparison in various conditional images" + }, + { + "type": "table", + "bbox": [ + 0.303, + 0.172, + 0.699, + 0.287 + ], + "angle": 0, + "content": "
Mask Setting | Method | IS (↑) | FID (↓) | LPIPS (↑)
Inpainting | 2S-ODIS (Proposed) | 5.582 | 15.044 | 0.685
Inpainting | OmniDreamer [2] | 4.672 | 41.209 | 0.708
Inpainting of Ground Region | 2S-ODIS (Proposed) | 6.084 | 13.038 | 0.680
Inpainting of Ground Region | OmniDreamer [2] | 5.474 | 15.303 | 0.699
Outpainting from Two Images | 2S-ODIS (Proposed) | 5.722 | 19.437 | 0.663
Outpainting from Two Images | OmniDreamer [2] | 3.952 | 33.403 | 0.672
" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.298, + 0.788, + 0.513 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.218, + 0.525, + 0.782, + 0.54 + ], + "angle": 0, + "content": "Fig. 9: Examples of synthesized omni-directional images in various input conditions." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.569, + 0.788, + 0.645 + ], + "angle": 0, + "content": "However, they were compensated at the second stage, which can be clearly seen in the sample images. On the other hand, OmniDreamer cannot appropriately reproduce the NFoV images toward the ground. Thus, it was confirmed that the proposed method synthesized omni-directional images without geometric distortion." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.665, + 0.588, + 0.681 + ], + "angle": 0, + "content": "5.2 Evaluation in Various Input Conditions" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.69, + 0.789, + 0.841 + ], + "angle": 0, + "content": "As explained in 3.3, the models in the proposed method were trained with various conditional images in Fig. 5. They were not limited to the single NFoV image embedded in ERP, so that the proposed model can be applied various inpainting and out-painting tasks. For example, the model can be applied to the inpainting task to remove objects or people in the omni-directional image taken by a 360-degree camera, as shown in the top row in Fig. 9. Another example is the task to fill in the ground region of an omni-directional image as shown in the middle row in Fig. 9, since the omni-directional image often includes a hand or a camera stand at the bottom region in ERP. The last example in Fig. 9 is to synthesize an omni-directional image which includes two NFoV images such as" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.361, + 0.114, + 0.732, + 0.129 + ], + "angle": 0, + "content": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "table_caption", + "bbox": [ + 0.352, + 0.145, + 0.651, + 0.159 + ], + "angle": 0, + "content": "Table 3: Ablation study in propose method" + }, + { + "type": "table", + "bbox": [ + 0.256, + 0.171, + 0.744, + 0.259 + ], + "angle": 0, + "content": "
Configuration | IS (↑) | FID (↓) | LPIPS (↑)
(1) Proposed | 5.969 | 18.263 | 0.662
(2) 1 Stage: Low Resolution Model | 5.798 | 28.329 | 0.670
(3) 1 Stage: High Resolution Model | 4.821 | 52.453 | 0.638
(4) Direct use of low-resolution VQGAN codes | 5.837 | 21.820 | 0.663
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.288, + 0.788, + 0.395 + ], + "angle": 0, + "content": "front and rear cameras of a smartphone. Although OmniDreamer failed in synthesizing the omni-directional images in these situations, the proposed method generated high-quality omni-directional images. The quantitative results shown in Table 2 also indicate that the proposed method achieved higher performance than OmniDreamer. Although the diversity of the synthesized images was higher in OmniDreamer than the proposed method, it may be due to generating random images as shown in Fig. 9." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.419, + 0.389, + 0.435 + ], + "angle": 0, + "content": "5.3 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.447, + 0.788, + 0.552 + ], + "angle": 0, + "content": "Ablation studies were conducted to investigate the effectiveness of each component in the proposed method: the low-resolution and high-resolution models in the 2-stage structure. The results are shown in Table 3. (1) is the proposed method with the 2-stage structure, while (2) and (3) are the results with 1-stage structure only using the low-resolution model and the high-resolution model, respectively. As can be seen from the table, the 2-stage structure was indispensable for the high-quality image synthesis." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.553, + 0.788, + 0.675 + ], + "angle": 0, + "content": "Moreover, it was examined that the low-resolution VQGAN codes generated at the first stage were directly used at the second stage instead of the low-resolution image generated at the first stage, since the 2-stage structure in Muse [3] uses the low-resolution VQGAN codes directly. The result is shown in Table 3 (4). It was confirmed from the result that the low-resolution image was better to use at the second stage than the low-resolution VQGAN codes directly. This may be because the low-resolution image is compressed by CNN to properly extract the global information generated at the first stage." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.7, + 0.579, + 0.718 + ], + "angle": 0, + "content": "6 Limitations and Future Prospects" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.734, + 0.788, + 0.841 + ], + "angle": 0, + "content": "The proposed method synthesizes an omni-directional image by merging multiple NFoV images with weights depending on the distance from the edges of the NFoV images. Although the generation of the NFoV images are conditioned by the global low-resolution image generated at the first stage, it may be possible to generate discontinuous NFoV images. One possible solution would be to add an additional network to refine the generated omni-directional images to improve the continuity between NFoV images. Another issue is that it takes 1-2 days to" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.465, + 0.128 + ], + "angle": 0, + "content": "A. Nakata and T. Yamanaka" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.147, + 0.785, + 0.192 + ], + "angle": 0, + "content": "convert omni-directional images in the dataset to VQGAN codes using the pretrained VQGAN encoder. This may be alleviated by constructing a light-weight encoder using model distillation of the VQGAN encoder." 
+ }, + { + "type": "text", + "bbox": [ + 0.217, + 0.193, + 0.786, + 0.418 + ], + "angle": 0, + "content": "Currently, the inputs to the proposed method were limited to conditional images such as a single or several NFoV images embedded in ERP, or masked omni-directional images for in-painting. However, the proposed architecture can be applied to any conditional information, such as text, a class label, or guidance information for style modulation, by using an additional module such as cross attention, similar to stable diffusion. In addition, the hyper-parameters have not been thoroughly explored in the evaluation of the proposed method. For example, the directions of the NFoV images at the second stage were fixed to the 26 directions corresponding to the faces of a rhombicuboctahedron, and the field of view was fixed to 60 degrees; both may be optimized in future work. Furthermore, the network structure, such as MaxViT, might be replaced with an architecture better optimized for omni-directional image synthesis. Although this paper focused on the task of omni-directional image synthesis, the proposed architecture would also be useful for other omni-directional image tasks, such as semantic segmentation and object detection." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.44, + 0.357, + 0.456 + ], + "angle": 0, + "content": "7 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.471, + 0.786, + 0.652 + ], + "angle": 0, + "content": "A novel method for omni-directional image synthesis was proposed in this paper. By using the pre-trained VQGAN encoder and decoder without fine-tuning, the training of the model was drastically shortened. To manage the distortion of an omni-directional image in ERP, a two-stage structure was adopted. At the first stage, an omni-directional image was generated in ERP without geometric distortion correction, so straight lines at the poles of the sphere could not be reproduced. This was corrected at the second stage by synthesizing an omni-directional image from multiple NFoV images based on geometric distortion correction. To realize fast inference, the sampling strategy of MaskGIT was adopted to predict VQGAN codes simultaneously. As a result, the proposed method achieved high-quality omni-directional image synthesis with low computational costs in both training and inference." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.674, + 0.393, + 0.691 + ], + "angle": 0, + "content": "Acknowledgement" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.704, + 0.753, + 0.719 + ], + "angle": 0, + "content": "This work was supported by JSPS KAKENHI Grant Number JP21K11943." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.741, + 0.322, + 0.758 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.771, + 0.783, + 0.796 + ], + "angle": 0, + "content": "1. Compvis/taming-transformers. https://github.com/CompVis/taming-transformers" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.799, + 0.784, + 0.839 + ], + "angle": 0, + "content": "2. Akimoto, N., Matsuo, Y., Aoki, Y.: Diverse plausible 360-degree image outpainting for efficient 3dcg background creation.
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + }, + { + "type": "list", + "bbox": [ + 0.223, + 0.771, + 0.784, + 0.839 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.362, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.147, + 0.788, + 0.204 + ], + "angle": 0, + "content": "3. Chang, H., Zhang, H., Barber, J., Maschinot, A., Lezama, J., Jiang, L., Yang, M.H., Murphy, K.P., Freeman, W.T., Rubinstein, M., Li, Y., Krishnan, D.: Muse: Text-to-image generation via masked generative transformers. In: Proceedings of the 40th International Conference on Machine Learning (ICML) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.205, + 0.787, + 0.245 + ], + "angle": 0, + "content": "4. Chang, H., Zhang, H., Jiang, L., Liu, C., Freeman, W.T.: Maskgit: Masked generative image transformer. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (June 2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.246, + 0.786, + 0.273 + ], + "angle": 0, + "content": "5. Chen, Z., Wang, G., Liu, Z.: Text2light: Zero-shot text-driven hdr panorama generation. ACM Transactions on Graphics (TOG) 41(6), 1-16 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.273, + 0.787, + 0.314 + ], + "angle": 0, + "content": "6. Dastjerdi, M.R.K., Hold-Geoffroy, Y., Eisenmann, J., Khodadadeh, S., Lalonde, J.F.: Guided co-modulated gan for \\(360^{\\circ}\\) field of view extrapolation. In: 2022 International Conference on 3D Vision (3DV). pp. 475–485 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.315, + 0.787, + 0.357 + ], + "angle": 0, + "content": "7. Esser, P., Rombach, R., Ommer, B.: Taming transformers for high-resolution image synthesis. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 12873-12883 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.357, + 0.787, + 0.398 + ], + "angle": 0, + "content": "8. Hara, T., Mukuta, Y., Harada, T.: Spherical image generation from a single image by considering scene symmetry. In: Thirty-Fifth AAAI Conference on Artificial Intelligence. pp. 1513-1521 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.398, + 0.787, + 0.439 + ], + "angle": 0, + "content": "9. Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: Gans trained by a two time-scale update rule converge to a local nash equilibrium. In: Advances in Neural Information Processing Systems (NeurIPS) (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.22, + 0.44, + 0.785, + 0.481 + ], + "angle": 0, + "content": "10. Keisuke, O., Takao, Y.: Omni-directional image generation from single snapshot image. In: IEEE International Conference on Systems, Man, and Cybernetics (SMC) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.481, + 0.785, + 0.509 + ], + "angle": 0, + "content": "11. Kingma, D.P., Welling, M.: Auto-encoding variational bayes. In: International Conference on Learning Representations (ICLR) (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.509, + 0.785, + 0.55 + ], + "angle": 0, + "content": "12. 
Lu, Z., Hu, K., Wang, C., Bai, L., Wang, Z.: Autoregressive omni-aware outpainting for open-vocabulary 360-degree image generation. In: arXiv preprint arXiv:2309.03467 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.55, + 0.785, + 0.592 + ], + "angle": 0, + "content": "13. Nakata, A., Miyazaki, R., Yamanaka, T.: Increasing diversity of omni-directional images generated from single image using cgan based on mlpmixer. In: Asian Conference on Pattern Recognition (ACPR)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.592, + 0.785, + 0.633 + ], + "angle": 0, + "content": "14. Salimans, T., Goodfellow, I., Zaremba, W., Cheung, V., Radford, A., Chen, X., Chen, X.: Improved techniques for training gans. In: Advances in Neural Information Processing Systems (NeurIPS) (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.633, + 0.785, + 0.688 + ], + "angle": 0, + "content": "15. Suvorov, R., Logacheva, E., Mashikhin, A., Remizova, A., Ashukha, A., Silvestrov, A., Kong, N., Goka, H., Park, K., Lempitsky, V.: Resolution-robust large mask inpainting with fourier convolutions. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.689, + 0.785, + 0.731 + ], + "angle": 0, + "content": "16. Tu, Z., Talebi, H., Zhang, H., Yang, F., Milanfar, P., Bovik, A., Li, Y.: Maxvit: Multi-axis vision transformer. In: European Conference on Computer Vision (ECCV) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.731, + 0.785, + 0.758 + ], + "angle": 0, + "content": "17. Van Den Oord, A., Vinyals, O., et al.: Neural discrete representation learning. In: Advances in Neural Information Processing Systems (NeurIPS). vol. 30 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.759, + 0.785, + 0.799 + ], + "angle": 0, + "content": "18. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: Advances in Neural Information Processing Systems (NeurIPS). vol. 30 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.8, + 0.785, + 0.84 + ], + "angle": 0, + "content": "19. Xiao, J., Ehinger, K.A., Oliva, A., Torralba, A.: Recognizing scene viewpoint using panoramic place representation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2012)" + }, + { + "type": "list", + "bbox": [ + 0.218, + 0.147, + 0.788, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.465, + 0.127 + ], + "angle": 0, + "content": "A. Nakata and T. Yamanaka" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.19 + ], + "angle": 0, + "content": "20. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. 
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2018)" + } + ] +] \ No newline at end of file diff --git a/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction/47960505-1990-491c-a65e-6611419353a0_origin.pdf b/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction/47960505-1990-491c-a65e-6611419353a0_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d48918be39b1c49b253646be8eedef97f01b904e --- /dev/null +++ b/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction/47960505-1990-491c-a65e-6611419353a0_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5bf89bccd45ec62ebd26e7a24e54daf74253c1a31a360c1d4c58edb09f7257b +size 2469767 diff --git a/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction/full.md b/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction/full.md new file mode 100644 index 0000000000000000000000000000000000000000..bf09148506767db23b2726de71b2406804c8282d --- /dev/null +++ b/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction/full.md @@ -0,0 +1,217 @@ +# 2S-ODIS: Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction + +Atsuya Nakata and Takao Yamanaka + +Sophia University, Tokyo, Japan +a-nakata-7r0@eagle.sophia.ac.jp, takao-y@sophia.ac.jp + +Abstract. Omni-directional images have been increasingly used in various applications, including virtual reality and SNS (Social Networking Services). However, their availability is comparatively limited in contrast to normal field of view (NFOV) images, since specialized cameras are required to take omni-directional images. Consequently, several methods have been proposed based on generative adversarial networks (GAN) to synthesize omni-directional images, but these approaches have shown difficulties in training of the models, due to instability and/or significant time consumption in the training. To address these problems, this paper proposes a novel omni-directional image synthesis method, 2S-ODIS (Two-Stage Omni-Directional Image Synthesis), which generated high-quality omni-directional images but drastically reduced the training time. This was realized by utilizing the VQGAN (Vector Quantized GAN) model pre-trained on a large-scale NFOV image database such as ImageNet without fine-tuning. Since this pre-trained model does not represent distortions of omni-directional images in the equi-rectangular projection (ERP), it cannot be applied directly to the omni-directional image synthesis in ERP. Therefore, two-stage structure was adopted to first create a global coarse image in ERP and then refine the image by integrating multiple local NFOV images in the higher resolution to compensate the distortions in ERP, both of which are based on the pre-trained VQGAN model. As a result, the proposed method, 2S-ODIS, achieved the reduction of the training time from 14 days in OmniDreamer to four days in higher image quality. + +![](images/c48c7689ffa9052a87c7d9a0584498d5c5ef344e33dd9c361f13d46c9f3bed18.jpg) +Fig. 1: Overview of advantages of proposed method, 2S-ODIS. OmniDreamer [2] requires 14 days for training of the model, including 1-week training of the VQGAN model. 
In contrast, the proposed method required only four days to train the model, since no training of the VQGAN model was needed.

# 1 Introduction

An omni-directional image is taken by an omni-directional camera, also known as a 360-degree camera, which captures the light arriving at the focal point from all directions to cover a full sphere, and is usually represented in the equi-rectangular projection (ERP) to map it onto a 2-dimensional plane. These images have been applied to various applications such as virtual reality, social networking services, and map tools such as Google Street View. However, the availability of omni-directional images is still limited compared with Normal Field of View (NFoV) images captured by a standard camera, since a specialized camera is required to capture them.

Recently, several methods have been proposed to synthesize omni-directional images from an NFoV image [2,6,8,10,13], a text condition [5], or both [12]. These methods have been based on Generative Adversarial Networks (GAN) [6,8,10,13], Vector Quantized GAN (VQGAN) [2,5], or auto-regressive outpainting using stable diffusion [12]. However, the GAN-based methods have faced instability in training, while the VQGAN-based methods and the auto-regression with stable diffusion require long training and inference times, respectively.

In this paper, we propose a novel method to synthesize omni-directional images from an NFoV image based on a pre-trained VQGAN, trained on a large-scale NFoV-image dataset. The previous method with VQGAN [2] has had to train the VQGAN encoder and decoder to represent the geometric distortions of omni-directional images in ERP, especially at the poles (the top and bottom regions in ERP). This training has taken a long time, such as one week in that method [2]. In the proposed method, the pre-trained VQGAN encoder and decoder are applied without fine-tuning by synthesizing multiple NFoV images and integrating them into an omni-directional image based on geometric distortion correction. Since no training of VQGAN is required, the overall training is shortened by removing the fine-tuning step of VQGAN, as shown in Fig. 1. Furthermore, a two-stage structure was adopted in the proposed method. At the first stage, a global coarse image in ERP is created using the pre-trained VQGAN encoder and decoder without geometric distortion correction. Therefore, the omni-directional image generated at the first stage includes distortions; for example, a straight line in NFoV images at the poles cannot be reproduced at the first stage. At the second stage, this global coarse image is refined by synthesizing an omni-directional image from multiple NFoV images generated using the pre-trained VQGAN encoder and decoder. The second stage compensates for the geometric distortions of the first stage, in addition to representing local detailed textures at a higher resolution. With this two-stage structure, the model can produce globally plausible yet locally detailed omni-directional images without geometric distortions.

The contributions of this paper include:

- A novel method to synthesize omni-directional images from an NFoV image was proposed using a pre-trained VQGAN. Since no training of VQGAN was required, the training time was drastically reduced.
- A two-stage structure was adopted to generate a global coarse omni-directional image at the first stage, and then a locally detailed image with geometric distortion correction at the second stage.

- Experimental results demonstrated that the proposed method synthesized higher-quality omni-directional images with shorter training and inference times than previous methods such as OmniDreamer [2].

# 2 Related Works

# 2.1 Image Generation

VQVAE (Vector Quantized Variational AutoEncoder) [17] has been proposed to reduce the blurriness of images generated by VAE [11] by representing image patches with quantized latent vectors based on vector quantization. Furthermore, an adversarial loss has been introduced into VQVAE to make the generated images sharper, in a model called VQGAN [7]. In this method, a Transformer [18] is used to sequentially predict image patches from neighboring patches based on auto-regressive prediction. The patches are represented with quantized latent vectors called VQGAN codes to generate clear images at low computational cost. To improve the slow inference of VQGAN caused by the sequential prediction of patches, MaskGIT [4] has been proposed, which predicts multiple patches simultaneously. Although MaskGIT succeeded in improving the inference speed, it has had difficulty generating high-quality images at high resolution. To solve this problem, Muse [3] has been proposed using a two-stage structure, where a low-resolution image is generated at the first stage and then refined into a higher-resolution image at the second stage. In our proposed method, this two-stage structure was adopted for omni-directional image synthesis at high resolution.

# 2.2 Omni-directional Image Generation

Several methods have been proposed for synthesizing omni-directional images from NFoV images. Okubo and Yamanaka [10] have proposed a method of generating omni-directional images based on a conditional GAN from a single NFoV image with a class label. Hara et al. [8] have also proposed a method based on the symmetric properties of omni-directional images using GAN and VAE. Another work is Guided ImmerseGAN [6], which generates omni-directional images from an NFoV image with modulation guided by a given class label, which does not have to be the true class of the input NFoV image. In OmniDreamer [2], VQGAN has been applied to omni-directional image synthesis by using a Transformer for auto-regressive prediction. In this method, the VQGAN encoder and decoder have to be fine-tuned on an omni-directional image dataset, since the geometric distortion in ERP has to be represented in the latent codes of VQGAN. Text2Light [5] also uses VQGAN with auto-regressive prediction for the generation, though only text information is taken as the input instead of an NFoV image.

![](images/407fb1a0dae65374d1d25b47d8d445944ded0cc981c3bfecf5fa21ff99113475.jpg)
Fig. 2: Qualitative comparison of omni-directional image reconstruction using the pre-trained VQGAN encoder and decoder. Omni-directional Image: original omni-directional image; Reconstructed in ERP: reconstructed in the equi-rectangular projection; Reconstructed in Extracted Images: reconstructed by integrating multiple NFoV images in different directions. By extracting NFoV images, an omni-directional image can be correctly reconstructed without distortions.
Nakata et al. [13] have proposed a method to increase the diversity of omni-directional images generated from a single NFoV image, based on MLP-Mixer, by efficiently propagating the information of the NFoV image embedded at the center in ERP. AOGNet [12] generates omni-directional images by out-painting an incomplete 360-degree image progressively with NFoV and text guidance, jointly or individually. This is realized using auto-regressive prediction based on the stable-diffusion backbone model. Due to the nature of sequential auto-regressive prediction, its inference takes a long time.

In our proposed method, the pre-trained VQGAN model is used without fine-tuning on the omni-directional image dataset, since multiple NFoV images are synthesized based on the pre-trained VQGAN and then integrated into omni-directional images. By removing the step of VQGAN training, the overall training of the model is drastically shorter than in previous VQGAN-based methods such as OmniDreamer [2]. In addition, the proposed method synthesizes the multiple NFoV images in different directions simultaneously, so its inference is faster than auto-regressive prediction as used in OmniDreamer [2] and AOGNet [12].

# 3 Proposed Method

# 3.1 Two-Stage Structure

The proposed method consists of a two-stage structure, where a global coarse omni-directional image in ERP is synthesized at a low resolution ($256\times 512$ pixels) at the first stage without geometric distortion correction, and is then refined at the second stage by integrating multiple synthesized NFoV images in different directions based on geometric distortion correction, producing a high-quality omni-directional image in ERP at a high resolution ($1024\times 2048$ pixels). At both stages, the pre-trained VQGAN is utilized without fine-tuning on the omni-directional image dataset.

![](images/6c79fc6bfdc27e7c17b4b54547a191508566b01fe547c6c47c7a74a1a1f595fd.jpg)

![](images/970736200ab83ab8dca4e0dde01b63a366d4facbec65e00012a9144392de1ec5.jpg)
Fig. 3: Diagram of the proposed method. (a) Inference, (b) Training.

As a preliminary experiment, an omni-directional image was reconstructed in ERP or in multiple NFoV images using the pre-trained VQGAN encoder and decoder without fine-tuning, as shown in Fig. 2. It can be seen from the figure that the reconstruction in ERP cannot correctly reproduce the texture in the region toward the ground (blue frame) or the continuity in the region at both edges (yellow frame), although it can reproduce the region at the center in ERP (red frame). On the contrary, all regions can be correctly reproduced in the extracted NFoV images. This indicates that the pre-trained VQGAN model can be applied without fine-tuning if it is applied to NFoV images.

Thus, the omni-directional image generated in ERP at the first stage includes distortions, since the pre-trained VQGAN cannot represent the textures and continuities of omni-directional images in ERP. However, these distortions are correctly compensated at the second stage by synthesizing multiple NFoV images, which can be correctly reproduced by the pre-trained VQGAN model. If only the second stage were used, it would be difficult to synthesize multiple NFoV images simultaneously with global consistency. Therefore, the two-stage structure was adopted in the proposed method to produce a globally plausible coarse omni-directional image at the first stage.
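Since both the preliminary experiment and the second stage rely on extracting perspective NFoV views from an ERP image, a minimal NumPy sketch of that resampling is given below. This is not the paper's implementation: the function name `erp_to_nfov`, the camera-axis conventions (x right, y down, z forward), and the nearest-neighbour sampling are illustrative assumptions; a real pipeline would likely use bilinear interpolation.

```python
import numpy as np

def erp_to_nfov(erp, yaw, pitch, fov_deg=60.0, size=256):
    """Sample one perspective (gnomonic) NFoV view from an ERP image.

    erp: (H, W, 3) float array; yaw/pitch: viewing direction in radians;
    fov_deg: field of view of the pinhole view; size: output resolution.
    """
    H, W, _ = erp.shape
    f = 0.5 * size / np.tan(np.radians(fov_deg) / 2.0)   # focal length in pixels
    u, v = np.meshgrid(np.arange(size) - size / 2 + 0.5,
                       np.arange(size) - size / 2 + 0.5)
    # Rays through the image plane in camera coordinates (x right, y down, z forward).
    d = np.stack([u, v, np.full_like(u, f)], axis=-1)
    d /= np.linalg.norm(d, axis=-1, keepdims=True)
    # Rotate the rays by pitch (around x) and then yaw (around y).
    Rx = np.array([[1, 0, 0],
                   [0, np.cos(pitch), -np.sin(pitch)],
                   [0, np.sin(pitch), np.cos(pitch)]])
    Ry = np.array([[np.cos(yaw), 0, np.sin(yaw)],
                   [0, 1, 0],
                   [-np.sin(yaw), 0, np.cos(yaw)]])
    d = d @ (Ry @ Rx).T
    # Convert ray directions to ERP pixel coordinates (longitude/latitude).
    lon = np.arctan2(d[..., 0], d[..., 2])           # [-pi, pi]
    lat = np.arcsin(np.clip(d[..., 1], -1.0, 1.0))   # [-pi/2, pi/2]
    x = ((lon / np.pi + 1.0) / 2.0 * W).astype(int) % W
    y = np.clip(((lat / (np.pi / 2) + 1.0) / 2.0 * H).astype(int), 0, H - 1)
    return erp[y, x]  # nearest-neighbour sampling for brevity
```

Running this for each of the viewing directions used by the method would yield the set of NFoV crops that the pre-trained VQGAN can reconstruct without ERP distortion.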
![](images/c193417ba274395797d3249e91cb80049f3edf2008b360866fb54e93c907c902.jpg)

![](images/3c4f60946dbe6548b7b5925c16cc53346bbdda694916f288c0d2896f7a0da92a.jpg)

![](images/3a29181286e2a1b3da8c4beb6a803848db06ccb7db9fb1435e862e25329bf345.jpg)
Fig. 4: Structure of the proposed method. The range of attention differs between the high-resolution and low-resolution models.

The structure of the proposed method is shown in Fig. 3. At the first stage, the low-resolution model produces the low-resolution codes, which are converted into patches of an omni-directional image in ERP using the pre-trained VQGAN decoder. At the second stage, the high-resolution model produces the high-resolution codes, which correspond to the patches of NFoV images in multiple directions (26 directions in our implementation) in an omni-directional image, with overlaps. These 26 directions for the NFoV images are the directions of the normal vectors of the faces of a rhombicuboctahedron. The field of view was set to 60 degrees for all directions. The generated NFoV images, with a size of $256 \times 256$ pixels, are integrated into an omni-directional image with a size of $1024 \times 2048$ pixels in ERP.

# 3.2 Inference

For synthesizing an omni-directional image at inference time, the low-resolution codes are first generated at the first stage using the sampling strategy proposed in MaskGIT [4], from the conditional image in which an input NFoV image is embedded at the center in ERP, as shown in Fig. 3(a). In MaskGIT, the generation starts with the 'Masked low-resolution codes', in which all locations are filled with the [MASK] code, the VQGAN code indicating that a location is masked. The low-resolution model then predicts the probabilities for all the [MASK] locations in parallel, and samples a VQGAN code for each location from its predicted probabilities over all possible VQGAN codes. Locations with low probability are replaced with the [MASK] code again, and the VQGAN codes are resampled by predicting the probabilities with the low-resolution model. This process is repeated for $T$ steps. At each iteration, the model predicts all VQGAN codes simultaneously but keeps only the most confident ones; the remaining VQGAN codes are replaced with the [MASK] code and re-predicted in the next iteration. The mask ratio during the iterations is determined by $\cos\left(\frac{\pi}{2}\frac{t}{T}\right)$, where $t$ indicates the current iteration out of the total $T$ steps. This mask ratio decreases monotonically from 1 to 0 with respect to $t$, which ensures that most of the locations remain masked during the early iterations to prevent producing inconsistent codes.
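The iterative decoding just described can be summarized in code. The PyTorch sketch below implements MaskGIT-style parallel sampling with the cosine schedule $\cos(\frac{\pi}{2}\frac{t}{T})$; it is a simplified illustration, not the paper's implementation. `model`, `mask_id`, and the flat one-dimensional code layout are hypothetical placeholders (the actual low- and high-resolution models operate on 2D code grids with conditional images and positional encodings).

```python
import math
import torch

def maskgit_sample(model, cond, T=16, mask_id=-1):
    """MaskGIT-style parallel decoding sketch with the cosine mask schedule.

    Assumes `model(codes, cond)` returns logits of shape (N, vocab_size)
    for N code locations, and treats `mask_id` as the [MASK] token.
    """
    N = cond.shape[0]                        # number of code locations
    codes = torch.full((N,), mask_id)        # start fully masked
    for t in range(T):
        logits = model(codes, cond)          # predict all locations in parallel
        probs = logits.softmax(-1)
        sampled = torch.multinomial(probs, 1).squeeze(-1)
        conf = probs.gather(-1, sampled[:, None]).squeeze(-1)
        conf[codes != mask_id] = float("inf")          # never re-mask fixed codes
        codes = torch.where(codes == mask_id, sampled, codes)
        # cos(pi/2 * (t+1)/T): fraction of locations re-masked after this step,
        # decreasing monotonically from nearly 1 to exactly 0 at t = T-1.
        n_mask = int(N * math.cos(math.pi / 2 * (t + 1) / T))
        if n_mask > 0:
            remask = conf.topk(n_mask, largest=False).indices  # least confident
            codes[remask] = mask_id
    return codes
```

Because the schedule reaches zero at the final step, every location holds a committed VQGAN code after $T$ iterations, which is what makes the inference cost a fixed $T$ forward passes rather than one pass per code as in auto-regressive prediction.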
At the second stage, the high-resolution codes are generated using the high-resolution model, with a generation process that is almost the same as for the low-resolution model. The difference is that the model accepts the low-resolution image generated at the first stage as an additional conditional image, and generates NFoV images, as shown in Fig. 3(a). To integrate the generated NFoV images into an omni-directional image, the overlapping regions are merged with weights depending on the distance from the centers of the NFoV images. Specifically, let $x_{i}$ and $x_{j}$ be two overlapping pixel values, and let $d_{i}$ and $d_{j}$ be the distances from the centers of the corresponding NFoV images at that position. The integrated pixel value $y$ is given by

$$
y = \frac{w_{i}}{w_{i} + w_{j}} x_{i} + \frac{w_{j}}{w_{i} + w_{j}} x_{j} \tag{1}
$$

where $w_{i} = 1 - \frac{d_{i}}{\max_{k}(d_{k})}$ and $w_{j} = 1 - \frac{d_{j}}{\max_{k}(d_{k})}$.
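Eq. (1) extends naturally from two overlapping pixels to any number of overlapping views by accumulating weighted sums and normalizing. A minimal NumPy sketch of that accumulation follows; the data layout (per-view reprojected pixel coordinates and center distances) and the function name are illustrative assumptions, not the paper's code.

```python
import numpy as np

def blend_nfov_into_erp(views, coords, dists, erp_shape):
    """Merge reprojected NFoV views into one ERP image with Eq. (1)-style weights.

    views:  list of (size, size, 3) NFoV images
    coords: list of (size, size, 2) integer ERP pixel coordinates (y, x) per view
    dists:  list of (size, size) distances of each pixel from its view center
    """
    acc = np.zeros(erp_shape, dtype=np.float64)        # weighted color sum
    wsum = np.zeros(erp_shape[:2], dtype=np.float64)   # weight sum per ERP pixel
    d_max = max(d.max() for d in dists)
    for img, cs, d in zip(views, coords, dists):
        w = 1.0 - d / d_max                            # w_i = 1 - d_i / max_k(d_k)
        ys, xs = cs[..., 0].ravel(), cs[..., 1].ravel()
        np.add.at(acc, (ys, xs), img.reshape(-1, 3) * w.ravel()[:, None])
        np.add.at(wsum, (ys, xs), w.ravel())
    return acc / np.maximum(wsum, 1e-8)[..., None]     # normalize as in Eq. (1)
```

Dividing the accumulated colors by the accumulated weights reproduces the pairwise form of Eq. (1) wherever exactly two views overlap, and degrades gracefully where more views or a single view cover a pixel.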
The network architecture used in the low-resolution and high-resolution models is shown in Fig. 4. The layer structure was adapted from MaxViT [16], although Transformer has been used in MaskGIT [4] and Muse [3]. Eight-layer MaxViT models were used for both the low-resolution model and the high-resolution model. In the low-resolution model, the padding in MBConv was replaced with circular padding to encourage continuity at the edges in ERP, whereas zero padding was kept in the high-resolution model. Block attention was applied within each divided region in the low-resolution model and within each NFoV image in the high-resolution model, as shown in Fig. 4. Grid attention was also applied sparsely across the whole image in the low-resolution model, as in the original MaxViT model, whereas it was applied across the same locations over the NFoV images in the high-resolution model.

# 3.3 Training

The low-resolution and high-resolution models are trained independently, as shown in Fig. 3(b). The objective of the training is to make the low-resolution and high-resolution models predict plausible VQGAN codes at the [MASK]-code locations for each inference step.

![](images/1390be00070395268b019261887f10e2bcb71880da6d1c8b734167f526b2f4f7.jpg)
Fig. 5: Examples of the conditional image in training. These images are generated from omni-directional images in ERP by random masking.

For the low-resolution model, the inputs are 'randomly masked low-resolution codes' and a conditional image which emulates the NFoV image embedded at the center in ERP (Fig. 3(a)). The mask ratio of the randomly masked low-resolution codes is set to $\cos\left(\frac{\pi}{2}r\right)$, where $r$ is sampled from the uniform distribution $[0,1)$, since this emulates a single iteration of the inference. Examples of the conditional image in training are shown in Fig. 5. They are prepared by randomly masking an original omni-directional image in ERP to emulate the conditional image in the inference. Since they are not limited to a single NFoV image embedded in ERP, the trained model can be applied to various in-painting and out-painting tasks, as described in 5.2. The low-resolution model is trained to predict the original VQGAN codes of the real omni-directional image at the [MASK]-code locations, with cross entropy used as the loss function.

For the high-resolution model, the inputs are 'randomly masked high-resolution codes' for the multiple NFoV images (26 NFoV images in our implementation), conditional NFoV images converted from the conditional image in ERP, and the reconstructed low-resolution NFoV images converted from the low-resolution omni-directional image reconstructed using the pre-trained VQGAN encoder and decoder. The 'randomly masked high-resolution codes' and the conditional image in ERP are prepared in the same manner as for the low-resolution model. The low-resolution omni-directional image is required in the inputs of the high-resolution model to emulate the low-resolution image generated at the first stage. The high-resolution model is trained to predict the original VQGAN codes of the NFoV images converted from the real omni-directional image at the [MASK]-code locations, based on the cross-entropy loss function.

Although the first and second stages are processed sequentially in the inference, they are trained independently, in parallel, as shown in Fig. 3(b). This property is advantageous for shortening the required training time if multiple GPUs (Graphics Processing Units) are available, although a single GPU was used in our implementation.

# 4 Experiments

The omni-directional image dataset SUN360 [19] was used in the experiments. A total of 5,000 outdoor images were used for testing, while the remaining 47,938 outdoor images were used for training. The size of the images in the dataset is $512 \times 1024$ pixels. Although the proposed method generates images at $1024 \times 2048$ pixels, they were resized to $512 \times 1024$ pixels for the evaluation. For comparison, several conventional methods were also evaluated, including GAN-based methods with CNN (Convolutional Neural Networks) [10] or MLP-Mixer [13], a VQGAN-based method (OmniDreamer) [2], and a GAN-based in-painting method (LAMA) [15]. The models were implemented in PyTorch and were trained on a single GPU (NVIDIA RTX3090). The code for the network architecture in the proposed method is provided in the supplementary material.

The pre-trained VQGAN was obtained from [1]; it is the model with 1024 codebook entries trained on ImageNet. Eight MaxViT layers (Fig. 4) were used in both the low-resolution and high-resolution models, with 256 internal dimensions. The sizes of the VQGAN codes at the first and second stages were $16 \times 32$ patches and $16 \times 16$ patches $\times$ 26 NFoV images, respectively. These VQGAN codes were converted into trainable feature vectors of 256 dimensions. The conditional image was also down-sampled with a CNN to the same size as the VQGAN codes, with 256 dimensions. At each iteration of MaskGIT at the first stage, these two feature vectors were added to trainable positional encoding vectors and then input into the low-resolution model composed of the 8 MaxViT layers. At each iteration at the second stage, the low-resolution image generated at the first stage (inference) or reconstructed using the pre-trained VQGAN (training) was converted into NFoV images. These NFoV images were down-sampled with a CNN and added to the feature vectors of the input VQGAN codes, the conditional NFoV images, and the positional encoding, and then input into the high-resolution model. The total number of steps $T$ in MaskGIT was set to 16 at both stages.

The optimizer for training the models was AdamW with a learning rate of 0.001, a weight decay of 1e-5, Amsgrad, and ExponentialLR learning-rate scheduling, reducing the learning rate by a factor of 0.95 every 5,000 iterations. OmniDreamer [2] was trained for 14 days (30 epochs over all training stages), while the proposed method was trained for 4 days (2 days with batch size 16 at the first stage and 2 days with batch size 8 at the second stage, for 180,000 iterations at each stage). The other conventional methods were trained for 4 days, using their default batch sizes and hyper-parameters. FID (Fréchet Inception Distance) [9], IS (Inception Score) [14], and LPIPS (Learned Perceptual Image Patch Similarity) [20] were used for evaluating the synthesized omni-directional images in ERP.
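For concreteness, the optimizer and schedule described above can be set up in PyTorch as sketched below. The `model` here is a stand-in module, and the training loop body is elided; stepping `ExponentialLR` once every 5,000 iterations reproduces the "multiply by 0.95 every 5,000 iterations" schedule, since the scheduler applies its `gamma` factor at every `step()` call.

```python
import torch

# Hypothetical stand-in for the low- or high-resolution MaxViT model.
model = torch.nn.Linear(256, 1024)

optimizer = torch.optim.AdamW(
    model.parameters(),
    lr=1e-3,            # learning rate 0.001
    weight_decay=1e-5,  # weight decay 1e-5
    amsgrad=True,       # Amsgrad variant
)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)

for it in range(180_000):  # 180,000 iterations per stage
    # ... forward pass, cross-entropy loss on the [MASK]-code locations,
    #     loss.backward() ...
    optimizer.step()
    optimizer.zero_grad()
    if (it + 1) % 5_000 == 0:
        scheduler.step()   # LR *= 0.95 every 5,000 iterations
```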
# 5 Results

# 5.1 Evaluation of Proposed Method

The proposed method, 2S-ODIS, was quantitatively evaluated and compared with the conventional methods OmniDreamer [2], CNN-based cGAN [10], MLPMixer-based cGAN [13], and LAMA [15], as shown in Table 1. The models in the proposed method were trained for 4 days (2 days for the low-resolution model and 2 days for the high-resolution model). For comparison, the result with the models trained for 2 days (1 day + 1 day for the low-resolution and high-resolution models) is also shown in the table.

Table 1: Quantitative comparison with conventional methods
| Method | IS (↑) | FID (↓) | LPIPS (↑) |
| --- | --- | --- | --- |
| 2S-ODIS (Proposed method) | 5.969 | 18.263 | 0.662 |
| 2S-ODIS (2 days) | 5.857 | 18.656 | 0.668 |
| OmniDreamer [2] | 4.458 | 23.101 | 0.655 |
| CNN-based cGAN [10] | 4.684 | 40.049 | 0.633 |
| MLPMixer-based cGAN [13] | 4.402 | 47.690 | 0.634 |
| LAMA [15] | 5.784 | 69.485 | 0.478 |
![](images/303cd8eb4fe23c27145e2f4ab83a369b2da445cc961a7670a0d712954597d719.jpg)
Fig. 6: Evaluation metrics during training of the proposed method, compared with the conventional method OmniDreamer [2]

![](images/fe8a2604ae7f5e89220dc4646fffa79bcdd48b49ff9040f9e0ba9a654192f25c.jpg)

![](images/b62a8dade0ea8abf54a771c6b5758917568e241446f4ae43c62688a425453cfd.jpg)

![](images/c004153388a3cc274c64b3b1a0e3a66d2f26da9d4bc2855eb9f0f1bbd6a15e8d.jpg)
Fig. 7: Examples of synthesized omni-directional images compared with the conventional method OmniDreamer [2]

![](images/f604a9917878d9dd6a75815e812ecf3d8db3abc95ebbebccf42e8421538d4483.jpg)
2S-ODIS First Stage

![](images/5748ed2919106ec279857fe8fe288cbf51d97f69eca418d12ae8b8a96d938016.jpg)
2S-ODIS Second Stage (Ours)

![](images/e9016060434ad93abc2e84105f3bcfacda21d62ecf9419773d1f850001a1b82c.jpg)
OmniDreamer [2]

![](images/43900a8c98b5536adadba112377939af5444e4e2ce19b874b7173256f6b16cda.jpg)
Fig. 8: Examples of NFoV images toward the ground extracted from synthesized omni-directional images in the proposed and conventional methods

![](images/35855581dd9bfaecf20726e99ce969ce9a3d1b3a3f6e2ccb3f9d3d207e268f06.jpg)

![](images/455b066d64b5efeafbd02ca4f559a1447a9bd1e5ec0178d5406bb8786eadb383.jpg)

The field of view for the NFoV image embedded in an input conditional image was set to 126.87 and 112.62 degrees for width and height, respectively, in the experiments. It can be seen from the table that the proposed method achieved higher performance than the other conventional methods. Although the highest performance was achieved by the proposed method trained for 4 days, even the 2-day training already outperformed the other methods. To see this more clearly, the evaluation metrics during the training of the proposed method are shown in Fig. 6, where the performance exceeded OmniDreamer within 2 days and converged in around 4 days. Thus, the proposed method drastically shortened the training to 2-4 days from the 14 days of OmniDreamer, which includes the fine-tuning of the VQGAN model. In addition, the inference of the proposed method was much faster than that of OmniDreamer: 1.54 seconds versus 39.33 seconds for synthesizing one omni-directional image. This is because the proposed method is based on simultaneous VQGAN-code prediction instead of sequential auto-regressive prediction.

For the qualitative comparison, examples of the synthesized omni-directional images are shown in Fig. 7. It is clear that the proposed method generated globally plausible and locally detailed omni-directional images, while OmniDreamer [2] sometimes failed to generate continuous images, especially along the edges of the input conditional images. To see whether the proposed method can generate NFoV images without distortion, the NFoV images toward the ground were extracted from the synthesized omni-directional images, since the geometric distortion is large at the poles in ERP. Examples of the NFoV images toward the ground are shown in Fig. 8. The left column shows the NFoV images at the first stage, while the middle column shows the NFoV images at the second stage. Since the pre-trained VQGAN codes cannot represent the geometric distortion in ERP, straight lines were not appropriately reproduced at the first stage.

Table 2: Quantitative comparison in various conditional images
| Mask Setting | Method | IS (↑) | FID (↓) | LPIPS (↑) |
| --- | --- | --- | --- | --- |
| Inpainting | 2S-ODIS (Proposed) | 5.582 | 15.044 | 0.685 |
| Inpainting | OmniDreamer [2] | 4.672 | 41.209 | 0.708 |
| Inpainting of Ground Region | 2S-ODIS (Proposed) | 6.084 | 13.038 | 0.680 |
| Inpainting of Ground Region | OmniDreamer [2] | 5.474 | 15.303 | 0.699 |
| Outpainting from Two Images | 2S-ODIS (Proposed) | 5.722 | 19.437 | 0.663 |
| Outpainting from Two Images | OmniDreamer [2] | 3.952 | 33.403 | 0.672 |
![](images/0ef10b74b93361baf4c7cc3f4d35787fce6e77d2e7014c8ab942d1eeafa85bb4.jpg)
Fig. 9: Examples of synthesized omni-directional images in various input conditions.

However, they were compensated at the second stage, which can be clearly seen in the sample images. On the other hand, OmniDreamer could not appropriately reproduce the NFoV images toward the ground. Thus, it was confirmed that the proposed method synthesized omni-directional images without geometric distortion.

# 5.2 Evaluation in Various Input Conditions

As explained in 3.3, the models in the proposed method were trained with various conditional images, as shown in Fig. 5. These were not limited to a single NFoV image embedded in ERP, so the proposed model can be applied to various in-painting and out-painting tasks. For example, the model can be applied to the in-painting task of removing objects or people from an omni-directional image taken by a 360-degree camera, as shown in the top row of Fig. 9. Another example is the task of filling in the ground region of an omni-directional image, as shown in the middle row of Fig. 9, since an omni-directional image often includes a hand or a camera stand in the bottom region in ERP. The last example in Fig. 9 is to synthesize an omni-directional image from two NFoV images, such as those from the front and rear cameras of a smartphone. Although OmniDreamer failed to synthesize omni-directional images in these situations, the proposed method generated high-quality omni-directional images. The quantitative results in Table 2 also indicate that the proposed method achieved higher performance than OmniDreamer. Although the diversity of the synthesized images was higher for OmniDreamer than for the proposed method, this may be due to it generating random images, as shown in Fig. 9.

Table 3: Ablation study of the proposed method
| Setting | IS (↑) | FID (↓) | LPIPS (↑) |
| --- | --- | --- | --- |
| (1) Proposed | 5.969 | 18.263 | 0.662 |
| (2) 1 Stage: Low Resolution Model | 5.798 | 28.329 | 0.670 |
| (3) 1 Stage: High Resolution Model | 4.821 | 52.453 | 0.638 |
| (4) Direct use of low-resolution VQGAN codes | 5.837 | 21.820 | 0.663 |
# 5.3 Ablation Study

Ablation studies were conducted to investigate the effectiveness of each component of the proposed method: the low-resolution and high-resolution models in the two-stage structure. The results are shown in Table 3. (1) is the proposed method with the two-stage structure, while (2) and (3) are the results with a one-stage structure using only the low-resolution model and only the high-resolution model, respectively. As can be seen from the table, the two-stage structure was indispensable for high-quality image synthesis.

Moreover, we examined directly using the low-resolution VQGAN codes generated at the first stage as the input to the second stage, instead of the low-resolution image generated at the first stage, since the two-stage structure in Muse [3] uses the low-resolution VQGAN codes directly. The result is shown in Table 3 (4). It was confirmed from the result that using the low-resolution image at the second stage was better than directly using the low-resolution VQGAN codes. This may be because the low-resolution image is compressed by a CNN that properly extracts the global information generated at the first stage.

# 6 Limitations and Future Prospects

The proposed method synthesizes an omni-directional image by merging multiple NFoV images with weights depending on the distance from the centers of the NFoV images. Although the generation of the NFoV images is conditioned on the global low-resolution image generated at the first stage, it is still possible for discontinuous NFoV images to be generated. One possible solution would be to add an additional network that refines the generated omni-directional images to improve the continuity between NFoV images. Another issue is that it takes 1-2 days to convert the omni-directional images in the dataset to VQGAN codes using the pre-trained VQGAN encoder. This may be alleviated by constructing a light-weight encoder through model distillation of the VQGAN encoder.

Currently, the inputs in the proposed method were limited to conditional images such as a single NFoV image or several NFoV images embedded in ERP, or masked omni-directional images for in-painting. However, the proposed architecture can be applied to any conditional information, such as text information, a class label, or guidance information for style modulation, using an additional module such as cross attention, similar to stable diffusion. In addition, the hyper-parameters have not been thoroughly explored in the evaluation of the proposed method. For example, the directions of the NFoV images at the second stage were fixed to the 26 directions corresponding to the faces of a rhombicuboctahedron, and the field of view was fixed to 60 degrees; these may be optimized in future work. Furthermore, the network structure, such as MaxViT, might be replaced with an architecture more optimized for omni-directional image synthesis.
Although this paper focused on the task of omni-directional image synthesis, the proposed architecture would also be useful for other omni-directional image tasks, such as semantic segmentation and object detection.

# 7 Conclusion

A novel method for omni-directional image synthesis was proposed in this paper. By using the pre-trained VQGAN encoder and decoder without fine-tuning, the training of the model was drastically shortened. To manage the distortion of an omni-directional image in ERP, a two-stage structure was adopted. At the first stage, an omni-directional image is generated in ERP without geometric distortion correction, so it cannot reproduce straight lines at the poles of the sphere. This is then corrected at the second stage by synthesizing an omni-directional image from multiple NFoV images based on geometric distortion correction. To realize fast inference, the sampling strategy of MaskGIT was adopted to predict VQGAN codes simultaneously. As a result, the proposed method achieved high-quality omni-directional image synthesis with low computational costs in both training and inference.

# Acknowledgement

This work was supported by JSPS KAKENHI Grant Number JP21K11943.

# References

1. Compvis/taming-transformers. https://github.com/CompVis/taming-transformers
2. Akimoto, N., Matsuo, Y., Aoki, Y.: Diverse plausible 360-degree image outpainting for efficient 3DCG background creation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2022)
3. Chang, H., Zhang, H., Barber, J., Maschinot, A., Lezama, J., Jiang, L., Yang, M.H., Murphy, K.P., Freeman, W.T., Rubinstein, M., Li, Y., Krishnan, D.: Muse: Text-to-image generation via masked generative transformers. In: Proceedings of the 40th International Conference on Machine Learning (ICML) (2023)
4. Chang, H., Zhang, H., Jiang, L., Liu, C., Freeman, W.T.: Maskgit: Masked generative image transformer. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (June 2022)
5. Chen, Z., Wang, G., Liu, Z.: Text2light: Zero-shot text-driven hdr panorama generation. ACM Transactions on Graphics (TOG) 41(6), 1-16 (2022)
6. Dastjerdi, M.R.K., Hold-Geoffroy, Y., Eisenmann, J., Khodadadeh, S., Lalonde, J.F.: Guided co-modulated gan for $360^{\circ}$ field of view extrapolation. In: 2022 International Conference on 3D Vision (3DV). pp. 475-485 (2022)
7. Esser, P., Rombach, R., Ommer, B.: Taming transformers for high-resolution image synthesis. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 12873-12883 (2021)
8. Hara, T., Mukuta, Y., Harada, T.: Spherical image generation from a single image by considering scene symmetry. In: Thirty-Fifth AAAI Conference on Artificial Intelligence. pp. 1513-1521 (2021)
9. Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: Gans trained by a two time-scale update rule converge to a local nash equilibrium. In: Advances in Neural Information Processing Systems (NeurIPS) (2017)
10. Okubo, K., Yamanaka, T.: Omni-directional image generation from single snapshot image. In: IEEE International Conference on Systems, Man, and Cybernetics (SMC) (2020)
11. Kingma, D.P., Welling, M.: Auto-encoding variational bayes. In: International Conference on Learning Representations (ICLR) (2014)
12. Lu, Z., Hu, K., Wang, C., Bai, L., Wang, Z.: Autoregressive omni-aware outpainting for open-vocabulary 360-degree image generation.
In: arXiv preprint arXiv:2309.03467 (2023) +13. Nakata, A., Miyazaki, R., Yamanaka, T.: Increasing diversity of omni-directional images generated from single image using cgan based on mlpmixer. In: Asian Conference on Pattern Recognition (ACPR) +14. Salimans, T., Goodfellow, I., Zaremba, W., Cheung, V., Radford, A., Chen, X., Chen, X.: Improved techniques for training gans. In: Advances in Neural Information Processing Systems (NeurIPS) (2016) +15. Suvorov, R., Logacheva, E., Mashikhin, A., Remizova, A., Ashukha, A., Silvestrov, A., Kong, N., Goka, H., Park, K., Lempitsky, V.: Resolution-robust large mask inpainting with fourier convolutions. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV) (2022) +16. Tu, Z., Talebi, H., Zhang, H., Yang, F., Milanfar, P., Bovik, A., Li, Y.: Maxvit: Multi-axis vision transformer. In: European Conference on Computer Vision (ECCV) (2022) +17. Van Den Oord, A., Vinyals, O., et al.: Neural discrete representation learning. In: Advances in Neural Information Processing Systems (NeurIPS). vol. 30 (2017) +18. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: Advances in Neural Information Processing Systems (NeurIPS). vol. 30 (2017) +19. Xiao, J., Ehinger, K.A., Oliva, A., Torralba, A.: Recognizing scene viewpoint using panoramic place representation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2012) + +20. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2018) \ No newline at end of file diff --git a/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction/images.zip b/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..a52489e5c67d055aa0bcd1e06bdeffae1144c874 --- /dev/null +++ b/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e57e962a71c8a8985c4fabcca0d18bab35be41d63be32935a35827c758d0cdb +size 515839 diff --git a/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction/layout.json b/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..3323eadb56989f19c3701532d567699903446632 --- /dev/null +++ b/2024/2S-ODIS_ Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction/layout.json @@ -0,0 +1,5983 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 146, + 112, + 468, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 112, + 468, + 148 + ], + "spans": [ + { + "bbox": [ + 146, + 112, + 468, + 148 + ], + "type": "text", + "content": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis by Geometric Distortion Correction" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 217, + 168, + 396, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 168, + 396, + 180 + ], + "spans": [ + { + "bbox": [ + 217, + 168, + 396, + 180 + ], + "type": "text", + "content": "Atsuya Nakata and Takao Yamanaka" + } + ] + } 
+ ], + "index": 1 + }, + { + "bbox": [ + 179, + 190, + 433, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 190, + 433, + 213 + ], + "spans": [ + { + "bbox": [ + 179, + 190, + 433, + 213 + ], + "type": "text", + "content": "Sophia University, Tokyo, Japan \na-nakata-7r0@eagle.sophia.ac.jp, takao-y@sophia.ac.jp" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 160, + 232, + 455, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 232, + 455, + 495 + ], + "spans": [ + { + "bbox": [ + 160, + 232, + 455, + 495 + ], + "type": "text", + "content": "Abstract. Omni-directional images have been increasingly used in various applications, including virtual reality and SNS (Social Networking Services). However, their availability is comparatively limited in contrast to normal field of view (NFOV) images, since specialized cameras are required to take omni-directional images. Consequently, several methods have been proposed based on generative adversarial networks (GAN) to synthesize omni-directional images, but these approaches have shown difficulties in training of the models, due to instability and/or significant time consumption in the training. To address these problems, this paper proposes a novel omni-directional image synthesis method, 2S-ODIS (Two-Stage Omni-Directional Image Synthesis), which generated high-quality omni-directional images but drastically reduced the training time. This was realized by utilizing the VQGAN (Vector Quantized GAN) model pre-trained on a large-scale NFOV image database such as ImageNet without fine-tuning. Since this pre-trained model does not represent distortions of omni-directional images in the equi-rectangular projection (ERP), it cannot be applied directly to the omni-directional image synthesis in ERP. Therefore, two-stage structure was adopted to first create a global coarse image in ERP and then refine the image by integrating multiple local NFOV images in the higher resolution to compensate the distortions in ERP, both of which are based on the pre-trained VQGAN model. As a result, the proposed method, 2S-ODIS, achieved the reduction of the training time from 14 days in OmniDreamer to four days in higher image quality." + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 133, + 529, + 482, + 588 + ], + "blocks": [ + { + "bbox": [ + 133, + 529, + 482, + 588 + ], + "lines": [ + { + "bbox": [ + 133, + 529, + 482, + 588 + ], + "spans": [ + { + "bbox": [ + 133, + 529, + 482, + 588 + ], + "type": "image", + "image_path": "c48c7689ffa9052a87c7d9a0584498d5c5ef344e33dd9c361f13d46c9f3bed18.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 597, + 482, + 642 + ], + "lines": [ + { + "bbox": [ + 130, + 597, + 482, + 642 + ], + "spans": [ + { + "bbox": [ + 130, + 597, + 482, + 642 + ], + "type": "text", + "content": "Fig. 1: Overview of advantages of proposed method, 2S-ODIS. OmniDreamer [2] requires 14 days for training of the model, including 1-week training of the VQGAN model. In contrast, the proposed method only required 4 days for the training of the model since no training of VQGAN model was required." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 114, + 229, + 127 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 114, + 229, + 127 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 229, + 127 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 141, + 481, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 141, + 481, + 248 + ], + "spans": [ + { + "bbox": [ + 130, + 141, + 481, + 248 + ], + "type": "text", + "content": "An omni-directional image is taken by an omni-directional camera, also known as a 360-degree camera, which captures lights from all directions falling onto the focal point to cover a full sphere, and is usually represented in the equi-rectangular projection (ERP) to represent it in a 2-dimensional plane. These images have been applied to various applications such as virtual reality, social networking services, and map tools such as Google Street View. However, the availability of the omni-directional images are still limited compared with Normal Field of View (NFoV) images captured by a standard camera, since the specialized camera is required to capture the omni-directional images." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 249, + 481, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 249, + 481, + 332 + ], + "spans": [ + { + "bbox": [ + 130, + 249, + 481, + 332 + ], + "type": "text", + "content": "Recently, several methods have been proposed to synthesize omni-directional images from a NFoV image [2,6,8,10,13], a text condition [5], or both [12]. These methods have been based on Generative Adversarial Networks (GAN) [6,8,10, 13], Vector Quantized GAN (VQGAN) [2,5], or auto-regressive outpainting using stable diffusion [12]. However, the GAN-based methods have faced challenges of instability in training, while the VQGAN-based methods and the auto-regression with stable diffusion require long training and inference time, respectively." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 334, + 481, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 334, + 481, + 608 + ], + "spans": [ + { + "bbox": [ + 130, + 334, + 481, + 608 + ], + "type": "text", + "content": "In this paper, we propose a novel method to synthesize omni-directional images from a NFoV image based on pre-trained VQGAN, trained on a large-scale NFoV-image dataset. The previous method with VQGAN [2] has required to train the VQGAN encoder and decoder to represent geometric distortions in omni-directional images in ERP, especially at poles (top and bottom regions in ERP). This training has taken long time, such as 1 week in the method [2]. In the proposed method, the pre-trained VQGAN encoder and decoder were applied without fine-tuning by synthesizing multiple NFoV images to integrate them into an omni-directional image based on geometric distortion correction. Since no training of VQGAN was required, the training of the model was shortened by removing the step of the fine-tuning of VQGAN, as shown in Fig. 1. Furthermore, a two-stage structure was adopted in the proposed method. 
At the first stage, a global coarse image in ERP is created using the pre-trained VQGAN encoder and decoder without the geometric distortion correction. Therefore, the generated omni-directional image at the first stage includes distortions. For example, a straight line in NFoV images at poles can not be reproduced at the first stage. At the second stage, this global coarse image is refined by synthesizing an omni-directional image from multiple NFoV images generated using the pre-trained VQGAN encoder and decoder. This second stage compensates the geometric distortions at the first stage, in addition to representing local detailed textures in a higher resolution. By using the two-stage structure, the model can produce globally plausible yet locally detailed omni-directional images without the geometric distortions." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 146, + 609, + 323, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 609, + 323, + 620 + ], + "spans": [ + { + "bbox": [ + 146, + 609, + 323, + 620 + ], + "type": "text", + "content": "The contributions of this paper include:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 629, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 629, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 629, + 481, + 665 + ], + "type": "text", + "content": "- A novel method to synthesize omni-directional images from a NFoV image was proposed using pre-trained VQGAN. Since no training of VQGAN was required, the training time was drastically reduced." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 284, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 284, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 284, + 101 + ], + "type": "text", + "content": "A. Nakata and T. Yamanaka" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 137, + 116, + 485, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 116, + 485, + 151 + ], + "spans": [ + { + "bbox": [ + 137, + 116, + 485, + 151 + ], + "type": "text", + "content": "- A two-stage structure was adopted to generate a global coarse omni-directional image at the first stage, and then generate a locally detailed image with geometric distortion correction at the second stage." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 152, + 481, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 152, + 481, + 188 + ], + "spans": [ + { + "bbox": [ + 138, + 152, + 481, + 188 + ], + "type": "text", + "content": "- Experimental results demonstrated that the proposed method synthesized higher quality omni-directional images in shortened training and inference time than the previous methods such as OmniDreamer [2]." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 207, + 242, + 220 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 207, + 242, + 220 + ], + "spans": [ + { + "bbox": [ + 132, + 207, + 242, + 220 + ], + "type": "text", + "content": "2 Related Works" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 233, + 251, + 245 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 233, + 251, + 245 + ], + "spans": [ + { + "bbox": [ + 132, + 233, + 251, + 245 + ], + "type": "text", + "content": "2.1 Image Generation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 255, + 482, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 255, + 482, + 458 + ], + "spans": [ + { + "bbox": [ + 130, + 255, + 482, + 458 + ], + "type": "text", + "content": "VQVAE (Vector Quantized Variational AutoEncoder) [17] has been proposed to improve the generated image blurriness in VAE [11] by representing image patches with quantized latent vectors based on vector quantization. Furthermore, the adversarial loss has been introduced in VQVAE to make the generated images clearer, called VQGAN [7]. In this method, Transformer [18] has been used to sequentially predict image patches from neighbor patches based on auto-regressive prediction. The patches are represented with the quantized latent vectors called VQGAN codes to generate clear images with low computational cost. To improve the slow inference in VQGAN due to the sequential predictions of patches, MaskGIT [4] has been proposed by predicting multiple patches simultaneously. Although MaskGIT has succeeded in improving the inference speed, it has been difficult to generate high quality images in the high resolution. To solve this problem, Muse [3] has been proposed using a two-stage structure, where a low-resolution image is generated at the first stage, and then is refined to generate a higher-resolution image at the second stage. In our proposed method, this two-stage structure was adopted for the omni-directional image synthesis in the high resolution." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 477, + 339, + 489 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 477, + 339, + 489 + ], + "spans": [ + { + "bbox": [ + 132, + 477, + 339, + 489 + ], + "type": "text", + "content": "2.2 Omni-directional Image Generation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 498, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 498, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 498, + 482, + 666 + ], + "type": "text", + "content": "Several methods have been proposed for synthesizing omni-directional images from NFoV images. Okubo and Yamanaka [10] have proposed a method of generating omni-directional images based on conditional GAN from a single NFoV image with the class label. Hara et al. [8] have also proposed a method based on the symmetric property in the omni-directional images using GAN and VAE. Another work to synthesize omni-directional images is Guided ImmerseGAN [6], which generates omni-directional images from a NFoV image with the modulation guided by a given class label, which does not have to be the true class of the input NFoV image. In the work of OmniDreamer [2], VQGAN has been applied to the omni-directional image synthesis by using Transformer for auto-regressive prediction. 
In this method, VQGAN encoder and decoder have to be fine-tuned on an omni-directional image dataset since the geometric distortion in ERP has to be represented in the latent codes of VQGAN. Text2Light [5] also uses VQGAN with auto-regressive prediction for the generation, though only text" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 221, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 221, + 91, + 447, + 102 + ], + "type": "text", + "content": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 114, + 482, + 228 + ], + "blocks": [ + { + "bbox": [ + 133, + 114, + 482, + 228 + ], + "lines": [ + { + "bbox": [ + 133, + 114, + 482, + 228 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 482, + 228 + ], + "type": "image", + "image_path": "407fb1a0dae65374d1d25b47d8d445944ded0cc981c3bfecf5fa21ff99113475.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 235, + 482, + 302 + ], + "lines": [ + { + "bbox": [ + 130, + 235, + 482, + 302 + ], + "spans": [ + { + "bbox": [ + 130, + 235, + 482, + 302 + ], + "type": "text", + "content": "Fig. 2: Qualitative comparison of omni-directional image reconstruction using pretrained VQGAN encoder and decoder. Omni-directional Image: original omnidirectional image, Reconstructed in ERP: reconstructed in equirectangular projection, Reconstructed in Extracted Images: reconstructed by integrating multiple NFoV images in different directions. By extracting NFoV images, an omni-directional image can be correctly reconstructed without distortions." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 325, + 482, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 325, + 482, + 430 + ], + "spans": [ + { + "bbox": [ + 130, + 325, + 482, + 430 + ], + "type": "text", + "content": "information is taken as the input instead of the NFoV image. Nakata et al. [13] have proposed a method to increase the diversity of generated omni-directional images based on MLP-Mixer by efficiently propagating the information of the NFoV image embedded at the center in ERP. AOGNet [12] has generated omnidirectional images by out-painting an incomplete 360-degree image progressively with NFoV and text guidances jointly or individually. This has been realized using auto-regressive prediction based on the stable-diffusion backbone model. Due to the nature of sequential auto-regressive prediction, it takes long inference time." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 433, + 483, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 433, + 483, + 541 + ], + "spans": [ + { + "bbox": [ + 130, + 433, + 483, + 541 + ], + "type": "text", + "content": "In our proposed method, the pre-trained VQGAN model was used without fine-tuning on the omni-directional image dataset, since multiple NFoV images are synthesized based on the pre-trained VQGAN and then integrated into omni-directional images. By removing the step of VQGAN training, the overall training of the model was drastically shortened than the previous method with VQGAN such as OmniDreamer [2]. In addition, the proposed method is based on simultaneous synthesis of multiple NFoV images in different directions, whose inference was faster than auto-regressive prediction such as OmniDreamer [2] and AOGNet [12]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 559, + 261, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 559, + 261, + 574 + ], + "spans": [ + { + "bbox": [ + 132, + 559, + 261, + 574 + ], + "type": "text", + "content": "3 Proposed Method" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 585, + 265, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 585, + 265, + 597 + ], + "spans": [ + { + "bbox": [ + 132, + 585, + 265, + 597 + ], + "type": "text", + "content": "3.1 Two-Stage Structure" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "content": "The proposed method consists of the two-stage structure, where a global coarse omni-directional image in ERP is synthesized in a low resolution " + }, + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "inline_equation", + "content": "(256\\times 512" + }, + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "content": " pixels) at the first stage without geometric distortion correction, and then is refined at the second stage by integrating the multiple synthesized NFoV images in different directions based on the geometric distortion correction, producing" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 284, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 284, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 284, + 101 + ], + "type": "text", + "content": "A. Nakata and T. 
Yamanaka" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 114, + 482, + 236 + ], + "blocks": [ + { + "bbox": [ + 133, + 114, + 482, + 236 + ], + "lines": [ + { + "bbox": [ + 133, + 114, + 482, + 236 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 482, + 236 + ], + "type": "image", + "image_path": "6c79fc6bfdc27e7c17b4b54547a191508566b01fe547c6c47c7a74a1a1f595fd.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 133, + 242, + 460, + 369 + ], + "blocks": [ + { + "bbox": [ + 133, + 242, + 460, + 369 + ], + "lines": [ + { + "bbox": [ + 133, + 242, + 460, + 369 + ], + "spans": [ + { + "bbox": [ + 133, + 242, + 460, + 369 + ], + "type": "image", + "image_path": "970736200ab83ab8dca4e0dde01b63a366d4facbec65e00012a9144392de1ec5.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 168, + 376, + 444, + 388 + ], + "lines": [ + { + "bbox": [ + 168, + 376, + 444, + 388 + ], + "spans": [ + { + "bbox": [ + 168, + 376, + 444, + 388 + ], + "type": "text", + "content": "Fig. 3: Diagram of the proposed method. (a)Inference, (b)Training." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 418, + 480, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 418, + 480, + 454 + ], + "spans": [ + { + "bbox": [ + 130, + 418, + 480, + 454 + ], + "type": "text", + "content": "a high-quality omni-directional image in ERP in a high resolution (1024×2048 pixels). At both stages, the pre-trained VQGAN was utilized without fine-tuning on the omni-directional image dataset." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 459, + 482, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 459, + 482, + 567 + ], + "spans": [ + { + "bbox": [ + 130, + 459, + 482, + 567 + ], + "type": "text", + "content": "As a preliminary experiment, an omni-directional image was reconstructed in ERP or in multiple NFoV images using pre-trained VQGAN encoder and decoder without the fine-tuning, as shown in Fig. 2. It can be seen from the figure that the reconstruction in ERP cannot correctly reproduce the texture in the region toward the ground (blue frame) and the continuity in the region at both edges (yellow frame), although it can reproduce the region at center in ERP (red frame). On the contrary, all the regions can be correctly reproduced in the extracted NFoV images. This indicates that the pre-trained VQGAN model can be applied without fine-tuning if it is applied to NFoV images." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 570, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 570, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 570, + 482, + 666 + ], + "type": "text", + "content": "Thus, the generated omni-directional image in ERP at the first stage in the proposed method includes distortions since the pre-trained VQGAN cannot represent the texture and continuities in the omni-directional images in ERP. However, these distortions are correctly compensated at the second stage by synthesizing the multiple NFoV images which can be correctly reproduced by the pre-trained VQGAN model. 
If only the second stage is used in the proposed method, it is difficult to synthesize multiple NFoV images simultaneously with global compatibility. Therefore, the two-stage structure was adopted in the pro" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 221, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 221, + 91, + 448, + 102 + ], + "type": "text", + "content": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 113, + 292, + 232 + ], + "blocks": [ + { + "bbox": [ + 133, + 113, + 292, + 232 + ], + "lines": [ + { + "bbox": [ + 133, + 113, + 292, + 232 + ], + "spans": [ + { + "bbox": [ + 133, + 113, + 292, + 232 + ], + "type": "image", + "image_path": "c193417ba274395797d3249e91cb80049f3edf2008b360866fb54e93c907c902.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 294, + 113, + 477, + 232 + ], + "blocks": [ + { + "bbox": [ + 294, + 113, + 477, + 232 + ], + "lines": [ + { + "bbox": [ + 294, + 113, + 477, + 232 + ], + "spans": [ + { + "bbox": [ + 294, + 113, + 477, + 232 + ], + "type": "image", + "image_path": "3c4f60946dbe6548b7b5925c16cc53346bbdda694916f288c0d2896f7a0da92a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 132, + 242, + 481, + 352 + ], + "blocks": [ + { + "bbox": [ + 132, + 242, + 481, + 352 + ], + "lines": [ + { + "bbox": [ + 132, + 242, + 481, + 352 + ], + "spans": [ + { + "bbox": [ + 132, + 242, + 481, + 352 + ], + "type": "image", + "image_path": "3a29181286e2a1b3da8c4beb6a803848db06ccb7db9fb1435e862e25329bf345.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 359, + 482, + 383 + ], + "lines": [ + { + "bbox": [ + 132, + 359, + 482, + 383 + ], + "spans": [ + { + "bbox": [ + 132, + 359, + 482, + 383 + ], + "type": "text", + "content": "Fig. 4: Structure of the proposed method. The range of attention differs between the high-resolution and low-resolution models." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 407, + 480, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 407, + 480, + 430 + ], + "spans": [ + { + "bbox": [ + 130, + 407, + 480, + 430 + ], + "type": "text", + "content": "posed method to produce globally plausible coarse omni-directional image at the first stage." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 431, + 482, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 431, + 482, + 563 + ], + "spans": [ + { + "bbox": [ + 130, + 431, + 482, + 563 + ], + "type": "text", + "content": "The structure of the proposed method is shown in Fig. 3. At the first stage, the low resolution model produces the low resolution codes, which are converted into patches of omni-directional images in ERP using the pre-trained VQGAN decoder. 
At the second stage, the high resolution model produces the high resolution codes, which correspond to the patches in the NFoV images in multiple directions (26 directions in our implementation) covering an omni-directional image with overlap. These 26 directions for the NFoV images were the same as the normal vectors of the faces of a rhombicuboctahedron. The field of view was set to 60 degrees for all directions. The generated NFoV images with the size of " + }, + { + "bbox": [ + 130, + 431, + 482, + 563 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 130, + 431, + 482, + 563 + ], + "type": "text", + "content": " pixels were integrated into an omni-directional image with the size of " + }, + { + "bbox": [ + 130, + 431, + 482, + 563 + ], + "type": "inline_equation", + "content": "1024 \\times 2048" + }, + { + "bbox": [ + 130, + 431, + 482, + 563 + ], + "type": "text", + "content": " pixels in ERP." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 583, + 208, + 594 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 583, + 208, + 594 + ], + "spans": [ + { + "bbox": [ + 132, + 583, + 208, + 594 + ], + "type": "text", + "content": "3.2 Inference" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "content": "For synthesizing an omni-directional image in the inference, the low-resolution codes are first generated at the first stage, using the sampling strategy proposed in MaskGIT [4], from the conditional image in which an input NFoV image is embedded at the center in ERP, as shown in Fig. 3(a). In MaskGIT, the generation starts with 'Masked low resolution codes' in which every location is filled with the [MASK] code," + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 284, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 284, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 284, + 100 + ], + "type": "text", + "content": "A. Nakata and T. Yamanaka" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 282 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 282 + ], + "type": "text", + "content": "the special VQGAN code indicating that the location is masked. Then, the low resolution model predicts the probabilities for all the [MASK] locations in parallel, and samples a VQGAN code for each location based on its predicted probabilities over all possible VQGAN codes. Locations with low probability are replaced with the [MASK] code again, and the VQGAN codes are resampled by predicting the probabilities using the low resolution model. This process is repeated for " + }, + { + "bbox": [ + 130, + 116, + 482, + 282 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 130, + 116, + 482, + 282 + ], + "type": "text", + "content": " steps. 
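A minimal sketch of this iterative parallel decoding may help. It follows the description above, together with the cosine mask schedule detailed in the next paragraph; the `model(tokens, cond)` interface returning per-location logits, the default `T=16` (the value the experiments later report), and all names here are our own assumptions rather than the authors' or MaskGIT's actual API:

```python
import math
import torch

@torch.no_grad()
def maskgit_decode(model, cond, seq_len, mask_id, T=16):
    # Start fully masked; at each step sample every location in parallel,
    # keep the most confident samples, and re-mask the rest according to
    # the cos((pi/2) * t / T) schedule.
    tokens = torch.full((1, seq_len), mask_id, dtype=torch.long)
    for t in range(1, T + 1):
        probs = model(tokens, cond).softmax(dim=-1)      # (1, seq_len, vocab)
        sampled = torch.multinomial(probs[0], 1).T       # (1, seq_len)
        conf = probs.gather(-1, sampled.unsqueeze(-1)).squeeze(-1)
        # Locations decided in earlier steps stay fixed.
        fixed = tokens != mask_id
        sampled = torch.where(fixed, tokens, sampled)
        conf = torch.where(fixed, torch.ones_like(conf), conf)
        n_mask = int(seq_len * math.cos(math.pi / 2 * t / T))
        if n_mask == 0:
            return sampled                               # fully decoded
        # Re-mask the n_mask least confident locations for the next step.
        cutoff = conf.topk(n_mask, largest=False).values.max()
        tokens = torch.where(conf <= cutoff,
                             torch.full_like(sampled, mask_id), sampled)
    return tokens
```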
At each iteration, the model predicts all VQGAN code simultaneously but only keeps the most confident ones. The remaining VQGAN codes are replaced with the [MASK] code and re-predicted in the next iteration. The mask ratio during the iterations is determined by " + }, + { + "bbox": [ + 130, + 116, + 482, + 282 + ], + "type": "inline_equation", + "content": "\\cos\\left(\\frac{\\pi}{2}\\frac{t}{T}\\right)" + }, + { + "bbox": [ + 130, + 116, + 482, + 282 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 116, + 482, + 282 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 130, + 116, + 482, + 282 + ], + "type": "text", + "content": " indicates the current iterations in the total steps " + }, + { + "bbox": [ + 130, + 116, + 482, + 282 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 130, + 116, + 482, + 282 + ], + "type": "text", + "content": ". This mask ratio is monotonically decreasing from 1 to 0 with respect to " + }, + { + "bbox": [ + 130, + 116, + 482, + 282 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 130, + 116, + 482, + 282 + ], + "type": "text", + "content": ", which ensures that most of the locations are masked during the early stage in the iterations to prevent producing the inconsistent codes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 284, + 482, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 284, + 482, + 404 + ], + "spans": [ + { + "bbox": [ + 130, + 284, + 482, + 404 + ], + "type": "text", + "content": "At the second stage, the high-resolution codes are generated using the high resolution model, where the generation process is almost same as the low resolution model. The difference from the low-resolution model is that the model accepts the low-resolution image generated at the first stage as an additional conditional image, and generates NFoV images, as shown in Fig. 3(a). To integrate the generated NFoV images into an omni-directional image, the overlapped regions are merged with weights depending on the distance from the centers of the NFoV images. Specifically, let " + }, + { + "bbox": [ + 130, + 284, + 482, + 404 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 130, + 284, + 482, + 404 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 284, + 482, + 404 + ], + "type": "inline_equation", + "content": "x_{j}" + }, + { + "bbox": [ + 130, + 284, + 482, + 404 + ], + "type": "text", + "content": " be the two overlapping pixel values, and let " + }, + { + "bbox": [ + 130, + 284, + 482, + 404 + ], + "type": "inline_equation", + "content": "d_{i}" + }, + { + "bbox": [ + 130, + 284, + 482, + 404 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 284, + 482, + 404 + ], + "type": "inline_equation", + "content": "d_{j}" + }, + { + "bbox": [ + 130, + 284, + 482, + 404 + ], + "type": "text", + "content": " be the distances from the centers of the NFoV images at each position. 
The integrated pixel value " + }, + { + "bbox": [ + 130, + 284, + 482, + 404 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 130, + 284, + 482, + 404 + ], + "type": "text", + "content": " is given by" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 238, + 413, + 480, + 437 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 413, + 480, + 437 + ], + "spans": [ + { + "bbox": [ + 238, + 413, + 480, + 437 + ], + "type": "interline_equation", + "content": "y = \\frac{w_{i}}{w_{i} + w_{j}} x_{i} + \\frac{w_{j}}{w_{i} + w_{j}} x_{j} \\tag{1}", + "image_path": "3917d7338e0d8056ba43617b8a05deeebe2636c7e95ca427f16378482027838d.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 442, + 328, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 442, + 328, + 458 + ], + "spans": [ + { + "bbox": [ + 131, + 442, + 328, + 458 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 442, + 328, + 458 + ], + "type": "inline_equation", + "content": "w_{i} = 1 - \\frac{d_{i}}{\\max_{k}(d_{k})}" + }, + { + "bbox": [ + 131, + 442, + 328, + 458 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 131, + 442, + 328, + 458 + ], + "type": "inline_equation", + "content": "w_{j} = 1 - \\frac{d_{j}}{\\max_{k}(d_{k})}" + }, + { + "bbox": [ + 131, + 442, + 328, + 458 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 458, + 482, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 458, + 482, + 601 + ], + "spans": [ + { + "bbox": [ + 130, + 458, + 482, + 601 + ], + "type": "text", + "content": "The network architecture used in the low-resolution model and the high-resolution model is shown in Fig. 4. The layer structure was adapted from MaxViT [16], whereas the Transformer was used in MaskGIT [4] and Muse [3]. Eight-layer MaxViT models were used for both the low-resolution model and the high-resolution model. In the low-resolution model, the padding in MBConv was replaced with circular padding to encourage continuity at the edges in ERP, whereas zero padding was retained in the high-resolution model. The block attention was applied within each divided region in the low-resolution model and within each NFoV image in the high-resolution model, as shown in Fig. 4. The grid attention was applied globally but sparsely in the low-resolution model, as in the original MaxViT model, whereas it was applied among the same locations across the NFoV images in the high-resolution model." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 131, + 620, + 204, + 632 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 620, + 204, + 632 + ], + "spans": [ + { + "bbox": [ + 131, + 620, + 204, + 632 + ], + "type": "text", + "content": "3.3 Training" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "content": "The low-resolution and high-resolution models are independently trained, as shown in Fig. 3(b). 
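To make Eq. (1) above concrete, the following is a minimal sketch of the weighted merge, generalized from two views to any number of overlapping views by normalizing over all views covering a pixel; the tensor layout and names are our own assumptions, not the authors' code:

```python
import torch

def blend_views(warped, dist, valid):
    # warped: (N, C, H, W) NFoV views already projected into the ERP frame
    # dist:   (N, H, W) distance of each covered pixel from its view center
    # valid:  (N, H, W) bool mask of pixels covered by each view
    d_max = dist.amax()                                       # max_k(d_k)
    w = (1.0 - dist / d_max).clamp(min=1e-6) * valid.float()  # w_i in Eq. (1)
    w = w.unsqueeze(1)                                        # broadcast over channels
    return (w * warped).sum(0) / w.sum(0).clamp(min=1e-8)     # normalized as in Eq. (1)
```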
The objective of the training is to make the low-resolution" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 221, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 221, + 91, + 448, + 102 + ], + "type": "text", + "content": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 114, + 481, + 159 + ], + "blocks": [ + { + "bbox": [ + 133, + 114, + 481, + 159 + ], + "lines": [ + { + "bbox": [ + 133, + 114, + 481, + 159 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 481, + 159 + ], + "type": "image", + "image_path": "1390be00070395268b019261887f10e2bcb71880da6d1c8b734167f526b2f4f7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 167, + 480, + 189 + ], + "lines": [ + { + "bbox": [ + 132, + 167, + 480, + 189 + ], + "spans": [ + { + "bbox": [ + 132, + 167, + 480, + 189 + ], + "type": "text", + "content": "Fig. 5: Examples of conditional image in training. These images are generated from omni-directional images in ERP by randomly masking." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 213, + 482, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 213, + 482, + 380 + ], + "spans": [ + { + "bbox": [ + 130, + 213, + 482, + 380 + ], + "type": "text", + "content": "and high-resolution models predict plausible VQGAN codes at [MASK]-code locations for each inference step. For the low-resolution model, the inputs are 'randomly masked low-resolution codes' and a conditional image which emulates the NFoV image embedded at the center in ERP (Fig. 3 a). The mask ratio in the randomly masked low-resolution codes is set to " + }, + { + "bbox": [ + 130, + 213, + 482, + 380 + ], + "type": "inline_equation", + "content": "\\cos\\left(\\frac{\\pi}{2}r\\right)" + }, + { + "bbox": [ + 130, + 213, + 482, + 380 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 213, + 482, + 380 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 130, + 213, + 482, + 380 + ], + "type": "text", + "content": " is sampled from a uniform distribution [0,1), since this emulates the single iteration in the inference. Examples of the conditional image in the training are shown in Fig. 5. They are prepared by randomly masking an original omni-directional image in ERP to emulate the conditional image in the inference. Since they are not limited to the single NFoV image embedded in ERP, the trained model can be applied to various in-painting and out-painting tasks, as described in 5.2. The low-resolution model is trained to predict the original VQGAN codes in the real omni-directional image at [MASK]-code locations, so that the cross entropy is used as the loss function to train the model." 
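The masking and loss described above fit in a few lines. The sketch below follows the paper's recipe (mask ratio cos((π/2)r) with r sampled from U[0,1), cross entropy only at the masked locations); the shapes and the `model(tokens, cond)` interface are our own assumptions:

```python
import math
import torch
import torch.nn.functional as F

def training_step(model, codes, cond, mask_id):
    # codes: (B, L) ground-truth VQGAN code indices for one batch;
    # model(tokens, cond) is assumed to return (B, L, vocab) logits.
    B, L = codes.shape
    # Mask ratio cos((pi/2) * r), r ~ U[0, 1): emulates a single
    # iteration of the inference-time schedule.
    r = torch.rand(B, 1)
    ratio = torch.cos(math.pi / 2 * r)
    masked = torch.rand(B, L) < ratio          # True where [MASK] is placed
    tokens = torch.where(masked, torch.full_like(codes, mask_id), codes)
    logits = model(tokens, cond)
    # Cross entropy only at the masked locations, against the true codes.
    return F.cross_entropy(logits[masked], codes[masked])
```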
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 380, + 483, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 380, + 483, + 524 + ], + "spans": [ + { + "bbox": [ + 130, + 380, + 483, + 524 + ], + "type": "text", + "content": "For the high-resolution model, the inputs are 'randomly masked high-resolution codes' for multiple NFoV images (26 NFoV images in our implementation), conditional NFoV images converted from the conditional image in ERP, and the reconstructed low-resolution NFoV images converted from the low-resolution omni-directional image reconstructed using the pre-trained VQGAN encoder and decoder. The 'randomly masked high-resolution code' and the conditional image in ERP are prepared in the same manner for the low-resolution model. The low-resolution omni-directional image is required in the inputs of the high-resolution model to emulate the low-resolution image generated at the first stage. The high-resolution model is trained to predict the original VQGAN codes in the NFoV images converted from the real omni-directional image at [MASK]-code locations based on the cross-entropy loss function." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 525, + 482, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 525, + 482, + 585 + ], + "spans": [ + { + "bbox": [ + 130, + 525, + 482, + 585 + ], + "type": "text", + "content": "Although the first and second stages are sequentially processed in the inference, they are independently trained in parallel, as shown in Fig. 3(b). This property is advantageous to shorten the required training time if multiple GPUs (Graphics Processing Units) can be used, although a single GPU was used in our implementation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 604, + 230, + 617 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 604, + 230, + 617 + ], + "spans": [ + { + "bbox": [ + 132, + 604, + 230, + 617 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "type": "text", + "content": "The omni-directional image dataset, SUN360 [19], was used in the experiments. The 5,000 outdoor images were used for test, while the remaining 47,938 outdoor images were used for training. The size of the images in the dataset is " + }, + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "type": "inline_equation", + "content": "512 \\times 1024" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 284, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 284, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 284, + 100 + ], + "type": "text", + "content": "A. Nakata and T. 
Yamanaka" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 209 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 209 + ], + "type": "text", + "content": "pixels. Although the proposed method generates images in " + }, + { + "bbox": [ + 130, + 116, + 479, + 209 + ], + "type": "inline_equation", + "content": "1024 \\times 2048" + }, + { + "bbox": [ + 130, + 116, + 479, + 209 + ], + "type": "text", + "content": " pixels, it was resized to " + }, + { + "bbox": [ + 130, + 116, + 479, + 209 + ], + "type": "inline_equation", + "content": "512 \\times 1024" + }, + { + "bbox": [ + 130, + 116, + 479, + 209 + ], + "type": "text", + "content": " pixels for the evaluation. For comparison, several conventional methods were also evaluated, including GAN-based methods with CNN (Convolutional Neural Networks) [10] or MLP-Mixer [13], a VQGAN-based method (OmniDreamer) [2], and a GAN-based in-painting method (LAMA) [15]. The models were implemented in PyTorch, and were trained in a single GPU (NVIDIA RTX3090). The code for the network architecture in the proposed method is provided in the supplementary material." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 211, + 481, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 211, + 481, + 414 + ], + "spans": [ + { + "bbox": [ + 130, + 211, + 481, + 414 + ], + "type": "text", + "content": "The pre-trained VQGAN was obtained from [1], which is the model with 1024 codebooks trained on ImageNet. Eight MaxViT layers in Fig. 4 were used in both the low-resolution and high-resolution models, with 256 internal dimensions. The sizes of the VQGAN codes at the first and second stages were " + }, + { + "bbox": [ + 130, + 211, + 481, + 414 + ], + "type": "inline_equation", + "content": "16 \\times 32" + }, + { + "bbox": [ + 130, + 211, + 481, + 414 + ], + "type": "text", + "content": " patches and " + }, + { + "bbox": [ + 130, + 211, + 481, + 414 + ], + "type": "inline_equation", + "content": "16 \\times 16" + }, + { + "bbox": [ + 130, + 211, + 481, + 414 + ], + "type": "text", + "content": " patches " + }, + { + "bbox": [ + 130, + 211, + 481, + 414 + ], + "type": "inline_equation", + "content": "\\times 26" + }, + { + "bbox": [ + 130, + 211, + 481, + 414 + ], + "type": "text", + "content": " NFoV images, respectively. These VQGAN codes were converted into trainable feature vectors in the 256 dimensions. The conditional image was also down-sampled into the same size as the VQGAN codes with 256 dimensions using CNN. At each iteration of MaskGIT in the first stage, these two feature vectors were added with trainable positional encoding vectors, and then were inputted into the low-resolution model composed of the 8 MaxViT layers. At each iteration in the second stage, the low-resolution image generated at the first stage (inference) or reconstructed using the pre-trained VQGAN (training) was converted into NFoV images. These NFoV images were down-sampled with CNN to be added with the feature vectors of the input VQGAN codes, the conditional NFoV images, and the positional encoding, and then were inputted into the high-resolution model. 
The total number of steps " + }, + { + "bbox": [ + 130, + 211, + 481, + 414 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 130, + 211, + 481, + 414 + ], + "type": "text", + "content": " in MaskGIT was set to 16 at both stages." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 415, + 482, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 415, + 482, + 536 + ], + "spans": [ + { + "bbox": [ + 130, + 415, + 482, + 536 + ], + "type": "text", + "content": "The optimizer for training the models was AdamW with a learning rate of 0.001, a weight decay of 1e-5, and Amsgrad, using ExponentialLR learning-rate scheduling that multiplies the rate by 0.95 every 5,000 iterations. OmniDreamer [2] was trained for 14 days (30 epochs over all training stages), while the proposed method was trained for 4 days in total (2 days with batch size 16 at the first stage and 2 days with batch size 8 at the second stage, for 180,000 iterations at each stage). The other conventional methods were trained for 4 days, using their default batch sizes and hyper-parameters. FID (Frechet Inception Distance) [9], IS (Inception Score) [14], and LPIPS (Learned Perceptual Image Patch Similarity) [20] were used for evaluating the synthesized omni-directional images in ERP." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 552, + 198, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 552, + 198, + 563 + ], + "spans": [ + { + "bbox": [ + 132, + 552, + 198, + 563 + ], + "type": "text", + "content": "5 Results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 575, + 321, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 575, + 321, + 586 + ], + "spans": [ + { + "bbox": [ + 132, + 575, + 321, + 586 + ], + "type": "text", + "content": "5.1 Evaluation of Proposed Method" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 594, + 480, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 480, + 665 + ], + "type": "text", + "content": "The proposed method, 2S-ODIS, was quantitatively evaluated and compared with the conventional methods OmniDreamer [2], CNN-based cGAN [10], MLPMixer-based cGAN [13], and LAMA [15], as shown in Table 1. The models in the proposed method were trained for 4 days (2 days for the low-resolution model and 2 days for the high-resolution model). 
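The optimization settings reported above map directly onto PyTorch. Below is a sketch with `model`, `loader`, `training_step`, and `MASK_ID` as hypothetical stand-ins rather than the authors' code; note that ExponentialLR multiplies the learning rate by gamma on every step() call, so stepping it once per 5,000 iterations reproduces the reported decay:

```python
import torch

# `model`, `loader`, `training_step`, and `MASK_ID` are hypothetical
# stand-ins (see the training-step sketch above), not the authors' code.
optimizer = torch.optim.AdamW(
    model.parameters(), lr=1e-3, weight_decay=1e-5, amsgrad=True)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)

for it, (codes, cond) in enumerate(loader):        # 180,000 iterations per stage
    loss = training_step(model, codes, cond, MASK_ID)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if (it + 1) % 5_000 == 0:
        scheduler.step()                           # lr <- 0.95 * lr
```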
For comparison, the result with the models trained for 2 days (1 day + 1 day for low-resolution and high-resolution models) was also" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 221, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 221, + 91, + 447, + 102 + ], + "type": "text", + "content": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 192, + 141, + 419, + 232 + ], + "blocks": [ + { + "bbox": [ + 178, + 120, + 435, + 131 + ], + "lines": [ + { + "bbox": [ + 178, + 120, + 435, + 131 + ], + "spans": [ + { + "bbox": [ + 178, + 120, + 435, + 131 + ], + "type": "text", + "content": "Table 1: Quantitative comparison with conventional methods" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 192, + 141, + 419, + 232 + ], + "lines": [ + { + "bbox": [ + 192, + 141, + 419, + 232 + ], + "spans": [ + { + "bbox": [ + 192, + 141, + 419, + 232 + ], + "type": "table", + "html": "
<table><tr><th>Method</th><th>IS (↑)</th><th>FID (↓)</th><th>LPIPS (↑)</th></tr>
<tr><td>2S-ODIS (Proposed method)</td><td>5.969</td><td>18.263</td><td>0.662</td></tr>
<tr><td>2S-ODIS (2 days)</td><td>5.857</td><td>18.656</td><td>0.668</td></tr>
<tr><td>OmniDreamer [2]</td><td>4.458</td><td>23.101</td><td>0.655</td></tr>
<tr><td>CNN-based cGAN [10]</td><td>4.684</td><td>40.049</td><td>0.633</td></tr>
<tr><td>MLPMixer-based cGAN [13]</td><td>4.402</td><td>47.690</td><td>0.634</td></tr>
<tr><td>LAMA [15]</td><td>5.784</td><td>69.485</td><td>0.478</td></tr>
</table>
", + "image_path": "cb50e6897925890b81a5fb2d5ebd67331c07ae97c8d817ac483f7c2debecbb3b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 137, + 251, + 249, + 363 + ], + "blocks": [ + { + "bbox": [ + 137, + 251, + 249, + 363 + ], + "lines": [ + { + "bbox": [ + 137, + 251, + 249, + 363 + ], + "spans": [ + { + "bbox": [ + 137, + 251, + 249, + 363 + ], + "type": "image", + "image_path": "303cd8eb4fe23c27145e2f4ab83a369b2da445cc961a7670a0d712954597d719.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 374, + 480, + 398 + ], + "lines": [ + { + "bbox": [ + 132, + 374, + 480, + 398 + ], + "spans": [ + { + "bbox": [ + 132, + 374, + 480, + 398 + ], + "type": "text", + "content": "Fig. 6: Evaluation metrics during training of proposed method compared with conventional method, OmniDreamer [2]" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 253, + 252, + 363, + 363 + ], + "blocks": [ + { + "bbox": [ + 253, + 252, + 363, + 363 + ], + "lines": [ + { + "bbox": [ + 253, + 252, + 363, + 363 + ], + "spans": [ + { + "bbox": [ + 253, + 252, + 363, + 363 + ], + "type": "image", + "image_path": "fe8a2604ae7f5e89220dc4646fffa79bcdd48b49ff9040f9e0ba9a654192f25c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 364, + 252, + 477, + 363 + ], + "blocks": [ + { + "bbox": [ + 364, + 252, + 477, + 363 + ], + "lines": [ + { + "bbox": [ + 364, + 252, + 477, + 363 + ], + "spans": [ + { + "bbox": [ + 364, + 252, + 477, + 363 + ], + "type": "image", + "image_path": "b62a8dade0ea8abf54a771c6b5758917568e241446f4ae43c62688a425453cfd.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 132, + 422, + 482, + 626 + ], + "blocks": [ + { + "bbox": [ + 132, + 422, + 482, + 626 + ], + "lines": [ + { + "bbox": [ + 132, + 422, + 482, + 626 + ], + "spans": [ + { + "bbox": [ + 132, + 422, + 482, + 626 + ], + "type": "image", + "image_path": "c004153388a3cc274c64b3b1a0e3a66d2f26da9d4bc2855eb9f0f1bbd6a15e8d.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 634, + 480, + 657 + ], + "lines": [ + { + "bbox": [ + 132, + 634, + 480, + 657 + ], + "spans": [ + { + "bbox": [ + 132, + 634, + 480, + 657 + ], + "type": "text", + "content": "Fig. 7: Examples of synthesized omni-directional images compared with conventional method, OmniDreamer [2]" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 134, + 92, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 92, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 134, + 92, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 284, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 284, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 284, + 101 + ], + "type": "text", + "content": "A. Nakata and T. 
Yamanaka" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 202, + 137, + 270, + 205 + ], + "blocks": [ + { + "bbox": [ + 204, + 118, + 268, + 127 + ], + "lines": [ + { + "bbox": [ + 204, + 118, + 268, + 127 + ], + "spans": [ + { + "bbox": [ + 204, + 118, + 268, + 127 + ], + "type": "text", + "content": "2S-ODIS First Stage" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 202, + 137, + 270, + 205 + ], + "lines": [ + { + "bbox": [ + 202, + 137, + 270, + 205 + ], + "spans": [ + { + "bbox": [ + 202, + 137, + 270, + 205 + ], + "type": "image", + "image_path": "f604a9917878d9dd6a75815e812ecf3d8db3abc95ebbebccf42e8421538d4483.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 272, + 137, + 341, + 205 + ], + "blocks": [ + { + "bbox": [ + 271, + 118, + 343, + 135 + ], + "lines": [ + { + "bbox": [ + 271, + 118, + 343, + 135 + ], + "spans": [ + { + "bbox": [ + 271, + 118, + 343, + 135 + ], + "type": "text", + "content": "2S-ODIS Second Stage (Ours)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 272, + 137, + 341, + 205 + ], + "lines": [ + { + "bbox": [ + 272, + 137, + 341, + 205 + ], + "spans": [ + { + "bbox": [ + 272, + 137, + 341, + 205 + ], + "type": "image", + "image_path": "5748ed2919106ec279857fe8fe288cbf51d97f69eca418d12ae8b8a96d938016.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 343, + 137, + 411, + 205 + ], + "blocks": [ + { + "bbox": [ + 351, + 118, + 404, + 127 + ], + "lines": [ + { + "bbox": [ + 351, + 118, + 404, + 127 + ], + "spans": [ + { + "bbox": [ + 351, + 118, + 404, + 127 + ], + "type": "text", + "content": "OmniDreamer [2]" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 343, + 137, + 411, + 205 + ], + "lines": [ + { + "bbox": [ + 343, + 137, + 411, + 205 + ], + "spans": [ + { + "bbox": [ + 343, + 137, + 411, + 205 + ], + "type": "image", + "image_path": "e9016060434ad93abc2e84105f3bcfacda21d62ecf9419773d1f850001a1b82c.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 202, + 207, + 270, + 275 + ], + "blocks": [ + { + "bbox": [ + 202, + 207, + 270, + 275 + ], + "lines": [ + { + "bbox": [ + 202, + 207, + 270, + 275 + ], + "spans": [ + { + "bbox": [ + 202, + 207, + 270, + 275 + ], + "type": "image", + "image_path": "43900a8c98b5536adadba112377939af5444e4e2ce19b874b7173256f6b16cda.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 284, + 480, + 306 + ], + "lines": [ + { + "bbox": [ + 131, + 284, + 480, + 306 + ], + "spans": [ + { + "bbox": [ + 131, + 284, + 480, + 306 + ], + "type": "text", + "content": "Fig. 
8: Examples of NFoV images toward the ground extracted from synthesized omni-directional images in the proposed and conventional methods" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 272, + 207, + 340, + 275 + ], + "blocks": [ + { + "bbox": [ + 272, + 207, + 340, + 275 + ], + "lines": [ + { + "bbox": [ + 272, + 207, + 340, + 275 + ], + "spans": [ + { + "bbox": [ + 272, + 207, + 340, + 275 + ], + "type": "image", + "image_path": "35855581dd9bfaecf20726e99ce969ce9a3d1b3a3f6e2ccb3f9d3d207e268f06.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 343, + 207, + 411, + 275 + ], + "blocks": [ + { + "bbox": [ + 343, + 207, + 411, + 275 + ], + "lines": [ + { + "bbox": [ + 343, + 207, + 411, + 275 + ], + "spans": [ + { + "bbox": [ + 343, + 207, + 411, + 275 + ], + "type": "image", + "image_path": "455b066d64b5efeafbd02ca4f559a1447a9bd1e5ec0178d5406bb8786eadb383.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 330, + 482, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 330, + 482, + 521 + ], + "spans": [ + { + "bbox": [ + 130, + 330, + 482, + 521 + ], + "type": "text", + "content": "shown in the table. The field of view for an NFoV image embedded in an input conditional image was set to 126.87 and 112.62 degrees for width and height, respectively, in the experiments. It can be seen from the table that the proposed method achieved higher performance than the other conventional methods. Although the highest performance was achieved by the proposed method trained for 4 days, it already outperformed the other methods even with the 2-day training. To see this more clearly, the evaluation metrics during the training of the proposed method are shown in Fig. 6, where the performance exceeded that of OmniDreamer within 2 days and converged in around 4 days. Thus, the proposed method drastically shortened the training to 2-4 days, from the 14 days required by OmniDreamer including the fine-tuning of the VQGAN model. In addition, the inference in the proposed method was much faster than in OmniDreamer: 1.54 seconds versus 39.33 seconds for synthesizing each omni-directional image in the proposed method and OmniDreamer, respectively. This is because the proposed method is based on simultaneous VQGAN-code prediction instead of sequential auto-regressive prediction." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 522, + 492, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 522, + 492, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 522, + 492, + 666 + ], + "type": "text", + "content": "For the qualitative comparison, examples of the synthesized omni-directional images are shown in Fig. 7. It is clear that the proposed method generated globally plausible and locally detailed omni-directional images, while OmniDreamer [2] sometimes failed to generate continuous images, especially along the edges of the input conditional images. To see whether the proposed method can generate NFoV images without distortion, the NFoV images toward the ground were extracted from the synthesized omni-directional images, since the geometric distortion is large at the poles in ERP. Examples of the NFoV images toward the ground are shown in Fig. 8. 
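For reference, the three reported metrics can be computed with off-the-shelf implementations. Below is a sketch assuming the torchmetrics package (our choice of library, not necessarily the authors'), with images resized to 512×1024 before scoring as in the experiments; since the paper reads LPIPS as a diversity score, pairing two samples generated from the same condition is our assumption:

```python
import torch
import torch.nn.functional as F
from torchmetrics.image.fid import FrechetInceptionDistance
from torchmetrics.image.inception import InceptionScore
from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity

fid = FrechetInceptionDistance()
inception = InceptionScore()
lpips = LearnedPerceptualImagePatchSimilarity(net_type="alex", normalize=True)

def update_metrics(fake_a, fake_b, real):
    # fake_a/fake_b: two samples synthesized from the same condition;
    # real: ground-truth images. All (B, 3, 1024, 2048) floats in [0, 1].
    resize = lambda x: F.interpolate(x, size=(512, 1024), mode="bilinear",
                                     align_corners=False)
    fake_a, fake_b, real = resize(fake_a), resize(fake_b), resize(real)
    fid.update((real * 255).to(torch.uint8), real=True)
    fid.update((fake_a * 255).to(torch.uint8), real=False)
    inception.update((fake_a * 255).to(torch.uint8))
    lpips.update(fake_a, fake_b)   # higher = more diverse outputs

# after all batches: fid.compute(), inception.compute(), lpips.compute()
```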
The left column shows the NFoV images at the first stage, while the middle column shows the NFoV images at the second stage. Since the pre-trained VQGAN code cannot represent the geometric distortion in ERP, the straight lines were not appropriately reproduced at the first stage." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 221, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 221, + 91, + 447, + 102 + ], + "type": "text", + "content": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 185, + 136, + 427, + 227 + ], + "blocks": [ + { + "bbox": [ + 175, + 114, + 438, + 126 + ], + "lines": [ + { + "bbox": [ + 175, + 114, + 438, + 126 + ], + "spans": [ + { + "bbox": [ + 175, + 114, + 438, + 126 + ], + "type": "text", + "content": "Table 2: Quantitative comparison in various conditional images" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 185, + 136, + 427, + 227 + ], + "lines": [ + { + "bbox": [ + 185, + 136, + 427, + 227 + ], + "spans": [ + { + "bbox": [ + 185, + 136, + 427, + 227 + ], + "type": "table", + "html": "
<table><tr><th>Mask Setting</th><th>Method</th><th>IS (↑)</th><th>FID (↓)</th><th>LPIPS (↑)</th></tr>
<tr><td rowspan="2">Inpainting</td><td>2S-ODIS (Proposed)</td><td>5.582</td><td>15.044</td><td>0.685</td></tr>
<tr><td>OmniDreamer [2]</td><td>4.672</td><td>41.209</td><td>0.708</td></tr>
<tr><td rowspan="2">Inpainting of Ground Region</td><td>2S-ODIS (Proposed)</td><td>6.084</td><td>13.038</td><td>0.680</td></tr>
<tr><td>OmniDreamer [2]</td><td>5.474</td><td>15.303</td><td>0.699</td></tr>
<tr><td rowspan="2">Outpainting from Two Images</td><td>2S-ODIS (Proposed)</td><td>5.722</td><td>19.437</td><td>0.663</td></tr>
<tr><td>OmniDreamer [2]</td><td>3.952</td><td>33.403</td><td>0.672</td></tr>
</table>
", + "image_path": "c2aede004e59ac636a127c15fb53be9120f9ad545d1fc37c03cb408dd13a3392.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 133, + 236, + 482, + 406 + ], + "blocks": [ + { + "bbox": [ + 133, + 236, + 482, + 406 + ], + "lines": [ + { + "bbox": [ + 133, + 236, + 482, + 406 + ], + "spans": [ + { + "bbox": [ + 133, + 236, + 482, + 406 + ], + "type": "image", + "image_path": "0ef10b74b93361baf4c7cc3f4d35787fce6e77d2e7014c8ab942d1eeafa85bb4.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 133, + 415, + 478, + 427 + ], + "lines": [ + { + "bbox": [ + 133, + 415, + 478, + 427 + ], + "spans": [ + { + "bbox": [ + 133, + 415, + 478, + 427 + ], + "type": "text", + "content": "Fig. 9: Examples of synthesized omni-directional images in various input conditions." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 450, + 482, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 450, + 482, + 510 + ], + "spans": [ + { + "bbox": [ + 130, + 450, + 482, + 510 + ], + "type": "text", + "content": "However, they were compensated at the second stage, which can be clearly seen in the sample images. On the other hand, OmniDreamer cannot appropriately reproduce the NFoV images toward the ground. Thus, it was confirmed that the proposed method synthesized omni-directional images without geometric distortion." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 526, + 359, + 539 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 526, + 359, + 539 + ], + "spans": [ + { + "bbox": [ + 132, + 526, + 359, + 539 + ], + "type": "text", + "content": "5.2 Evaluation in Various Input Conditions" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "type": "text", + "content": "As explained in 3.3, the models in the proposed method were trained with various conditional images in Fig. 5. They were not limited to the single NFoV image embedded in ERP, so that the proposed model can be applied various inpainting and out-painting tasks. For example, the model can be applied to the inpainting task to remove objects or people in the omni-directional image taken by a 360-degree camera, as shown in the top row in Fig. 9. Another example is the task to fill in the ground region of an omni-directional image as shown in the middle row in Fig. 9, since the omni-directional image often includes a hand or a camera stand at the bottom region in ERP. The last example in Fig. 9 is to synthesize an omni-directional image which includes two NFoV images such as" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 284, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 284, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 284, + 101 + ], + "type": "text", + "content": "A. Nakata and T. 
Yamanaka" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 156, + 135, + 455, + 205 + ], + "blocks": [ + { + "bbox": [ + 215, + 114, + 398, + 125 + ], + "lines": [ + { + "bbox": [ + 215, + 114, + 398, + 125 + ], + "spans": [ + { + "bbox": [ + 215, + 114, + 398, + 125 + ], + "type": "text", + "content": "Table 3: Ablation study of the proposed method" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 156, + 135, + 455, + 205 + ], + "lines": [ + { + "bbox": [ + 156, + 135, + 455, + 205 + ], + "spans": [ + { + "bbox": [ + 156, + 135, + 455, + 205 + ], + "type": "table", + "html": "
<table><tr><th></th><th>IS (↑)</th><th>FID (↓)</th><th>LPIPS (↑)</th></tr>
<tr><td>(1) Proposed</td><td>5.969</td><td>18.263</td><td>0.662</td></tr>
<tr><td>(2) 1 Stage: Low Resolution Model</td><td>5.798</td><td>28.329</td><td>0.670</td></tr>
<tr><td>(3) 1 Stage: High Resolution Model</td><td>4.821</td><td>52.453</td><td>0.638</td></tr>
<tr><td>(4) Direct use of low-resolution VQGAN codes</td><td>5.837</td><td>21.820</td><td>0.663</td></tr>
</table>
", + "image_path": "1317c201c282b927125799a8201b66ccf0c983cd579a4f57d7d1819944a42824.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 228, + 482, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 228, + 482, + 312 + ], + "spans": [ + { + "bbox": [ + 130, + 228, + 482, + 312 + ], + "type": "text", + "content": "front and rear cameras of a smartphone. Although OmniDreamer failed in synthesizing the omni-directional images in these situations, the proposed method generated high-quality omni-directional images. The quantitative results shown in Table 2 also indicate that the proposed method achieved higher performance than OmniDreamer. Although the diversity of the synthesized images was higher in OmniDreamer than the proposed method, it may be due to generating random images as shown in Fig. 9." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 331, + 238, + 344 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 331, + 238, + 344 + ], + "spans": [ + { + "bbox": [ + 131, + 331, + 238, + 344 + ], + "type": "text", + "content": "5.3 Ablation Study" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 354, + 482, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 354, + 482, + 437 + ], + "spans": [ + { + "bbox": [ + 130, + 354, + 482, + 437 + ], + "type": "text", + "content": "Ablation studies were conducted to investigate the effectiveness of each component in the proposed method: the low-resolution and high-resolution models in the 2-stage structure. The results are shown in Table 3. (1) is the proposed method with the 2-stage structure, while (2) and (3) are the results with 1-stage structure only using the low-resolution model and the high-resolution model, respectively. As can be seen from the table, the 2-stage structure was indispensable for the high-quality image synthesis." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 437, + 482, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 437, + 482, + 534 + ], + "spans": [ + { + "bbox": [ + 130, + 437, + 482, + 534 + ], + "type": "text", + "content": "Moreover, it was examined that the low-resolution VQGAN codes generated at the first stage were directly used at the second stage instead of the low-resolution image generated at the first stage, since the 2-stage structure in Muse [3] uses the low-resolution VQGAN codes directly. The result is shown in Table 3 (4). It was confirmed from the result that the low-resolution image was better to use at the second stage than the low-resolution VQGAN codes directly. This may be because the low-resolution image is compressed by CNN to properly extract the global information generated at the first stage." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 131, + 554, + 354, + 568 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 554, + 354, + 568 + ], + "spans": [ + { + "bbox": [ + 131, + 554, + 354, + 568 + ], + "type": "text", + "content": "6 Limitations and Future Prospects" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 581, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 581, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 581, + 482, + 666 + ], + "type": "text", + "content": "The proposed method synthesizes an omni-directional image by merging multiple NFoV images with weights that decrease with the distance from the centers of the NFoV images. Although the generation of the NFoV images is conditioned on the global low-resolution image generated at the first stage, the method may still generate discontinuous NFoV images. One possible solution would be to add an additional network that refines the generated omni-directional images to improve the continuity between NFoV images. Another issue is that it takes 1-2 days to" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 220, + 90, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 90, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 220, + 90, + 447, + 102 + ], + "type": "text", + "content": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 480, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 480, + 152 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 480, + 152 + ], + "type": "text", + "content": "convert omni-directional images in the dataset to VQGAN codes using the pre-trained VQGAN encoder. This may be alleviated by constructing a light-weight encoder by model distillation of the VQGAN encoder." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 152, + 481, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 152, + 481, + 331 + ], + "spans": [ + { + "bbox": [ + 132, + 152, + 481, + 331 + ], + "type": "text", + "content": "Currently, the inputs to the proposed method were limited to conditional images such as a single or several NFoV images embedded in ERP, or masked omni-directional images for in-painting. However, the proposed architecture can be conditioned on other information, such as text, a class label, or guidance information for style modulation, by adding a module such as cross attention, similar to Stable Diffusion. In addition, the hyper-parameters have not been thoroughly explored in the evaluation of the proposed method. For example, the directions of the NFoV images at the second stage were fixed to the 26 directions corresponding to the faces of a rhombicuboctahedron, and the field of view was fixed to 60 degrees; these settings may be optimized in future work. Furthermore, the MaxViT-based network structure might be replaced with an architecture more optimized for omni-directional image synthesis. 
Although this paper focused on the tasks of omni-directional image synthesis, the proposed architecture would be useful for other omni-directional image tasks, such as semantic segmentation and object detection." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 348, + 218, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 348, + 218, + 361 + ], + "spans": [ + { + "bbox": [ + 133, + 348, + 218, + 361 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 373, + 481, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 373, + 481, + 516 + ], + "spans": [ + { + "bbox": [ + 132, + 373, + 481, + 516 + ], + "type": "text", + "content": "A novel method for omni-directional image synthesis is proposed in this paper. By using the pre-trained VQGAN encoder and decoder without fine-tuning, the training of the model was drastically shortened. To manage the distortion in an omni-directional image in ERP, a two-stage structure was adopted. At the first stage, an omni-directional image was generated in ERP without geometric distortion correction, so that it could not reproduce straight lines near the poles of the sphere. Therefore, the image was corrected at the second stage by synthesizing an omni-directional image from multiple NFoV images based on geometric distortion correction. To realize fast inference, the sampling strategy in MaskGIT was adopted to predict VQGAN codes simultaneously. As a result, the proposed method achieved high-quality omni-directional image synthesis with low computational cost in both training and inference." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 133, + 533, + 240, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 533, + 240, + 547 + ], + "spans": [ + { + "bbox": [ + 133, + 533, + 240, + 547 + ], + "type": "text", + "content": "Acknowledgement" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 133, + 557, + 460, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 557, + 460, + 569 + ], + "spans": [ + { + "bbox": [ + 133, + 557, + 460, + 569 + ], + "type": "text", + "content": "This work was supported by JSPS KAKENHI Grant Number JP21K11943." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 133, + 586, + 197, + 600 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 586, + 197, + 600 + ], + "spans": [ + { + "bbox": [ + 133, + 586, + 197, + 600 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 136, + 610, + 479, + 664 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 136, + 610, + 479, + 630 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 610, + 479, + 630 + ], + "spans": [ + { + "bbox": [ + 136, + 610, + 479, + 630 + ], + "type": "text", + "content": "1. CompVis/taming-transformers. https://github.com/CompVis/taming-transformers" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 136, + 632, + 479, + 664 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 632, + 479, + 664 + ], + "spans": [ + { + "bbox": [ + 136, + 632, + 479, + 664 + ], + "type": "text", + "content": "2. Akimoto, N., Matsuo, Y., Aoki, Y.: Diverse plausible 360-degree image outpainting for efficient 3dcg background creation. 
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 284, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 284, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 284, + 101 + ], + "type": "text", + "content": "A. Nakata and T. Yamanaka" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 116, + 482, + 665 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 136, + 116, + 482, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 116, + 482, + 161 + ], + "spans": [ + { + "bbox": [ + 136, + 116, + 482, + 161 + ], + "type": "text", + "content": "3. Chang, H., Zhang, H., Barber, J., Maschinot, A., Lezama, J., Jiang, L., Yang, M.H., Murphy, K.P., Freeman, W.T., Rubinstein, M., Li, Y., Krishnan, D.: Muse: Text-to-image generation via masked generative transformers. In: Proceedings of the 40th International Conference on Machine Learning (ICML) (2023)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 162, + 481, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 162, + 481, + 194 + ], + "spans": [ + { + "bbox": [ + 138, + 162, + 481, + 194 + ], + "type": "text", + "content": "4. Chang, H., Zhang, H., Jiang, L., Liu, C., Freeman, W.T.: Maskgit: Masked generative image transformer. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (June 2022)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 194, + 481, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 194, + 481, + 216 + ], + "spans": [ + { + "bbox": [ + 138, + 194, + 481, + 216 + ], + "type": "text", + "content": "5. Chen, Z., Wang, G., Liu, Z.: Text2light: Zero-shot text-driven hdr panorama generation. ACM Transactions on Graphics (TOG) 41(6), 1-16 (2022)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 216, + 481, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 216, + 481, + 248 + ], + "spans": [ + { + "bbox": [ + 138, + 216, + 481, + 248 + ], + "type": "text", + "content": "6. Dastjerdi, M.R.K., Hold-Geoffroy, Y., Eisenmann, J., Khodadadeh, S., Lalonde, J.F.: Guided co-modulated gan for " + }, + { + "bbox": [ + 138, + 216, + 481, + 248 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 138, + 216, + 481, + 248 + ], + "type": "text", + "content": " field of view extrapolation. In: 2022 International Conference on 3D Vision (3DV). pp. 475–485 (2022)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 249, + 481, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 249, + 481, + 282 + ], + "spans": [ + { + "bbox": [ + 138, + 249, + 481, + 282 + ], + "type": "text", + "content": "7. Esser, P., Rombach, R., Ommer, B.: Taming transformers for high-resolution image synthesis. 
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 12873-12883 (2021)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 282, + 481, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 282, + 481, + 315 + ], + "spans": [ + { + "bbox": [ + 138, + 282, + 481, + 315 + ], + "type": "text", + "content": "8. Hara, T., Mukuta, Y., Harada, T.: Spherical image generation from a single image by considering scene symmetry. In: Thirty-Fifth AAAI Conference on Artificial Intelligence. pp. 1513-1521 (2021)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 315, + 481, + 347 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 315, + 481, + 347 + ], + "spans": [ + { + "bbox": [ + 138, + 315, + 481, + 347 + ], + "type": "text", + "content": "9. Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: Gans trained by a two time-scale update rule converge to a local nash equilibrium. In: Advances in Neural Information Processing Systems (NeurIPS) (2017)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 134, + 348, + 480, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 348, + 480, + 380 + ], + "spans": [ + { + "bbox": [ + 134, + 348, + 480, + 380 + ], + "type": "text", + "content": "10. Keisuke, O., Takao, Y.: Omni-directional image generation from single snapshot image. In: IEEE International Conference on Systems, Man, and Cybernetics (SMC) (2020)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 133, + 380, + 480, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 380, + 480, + 403 + ], + "spans": [ + { + "bbox": [ + 133, + 380, + 480, + 403 + ], + "type": "text", + "content": "11. Kingma, D.P., Welling, M.: Auto-encoding variational bayes. In: International Conference on Learning Representations (ICLR) (2014)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 133, + 403, + 480, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 403, + 480, + 435 + ], + "spans": [ + { + "bbox": [ + 133, + 403, + 480, + 435 + ], + "type": "text", + "content": "12. Lu, Z., Hu, K., Wang, C., Bai, L., Wang, Z.: Autoregressive omni-aware outpainting for open-vocabulary 360-degree image generation. In: arXiv preprint arXiv:2309.03467 (2023)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 133, + 435, + 480, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 435, + 480, + 468 + ], + "spans": [ + { + "bbox": [ + 133, + 435, + 480, + 468 + ], + "type": "text", + "content": "13. Nakata, A., Miyazaki, R., Yamanaka, T.: Increasing diversity of omni-directional images generated from single image using cgan based on mlpmixer. In: Asian Conference on Pattern Recognition (ACPR)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 133, + 468, + 480, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 468, + 480, + 501 + ], + "spans": [ + { + "bbox": [ + 133, + 468, + 480, + 501 + ], + "type": "text", + "content": "14. Salimans, T., Goodfellow, I., Zaremba, W., Cheung, V., Radford, A., Chen, X., Chen, X.: Improved techniques for training gans. 
In: Advances in Neural Information Processing Systems (NeurIPS) (2016)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 133, + 501, + 480, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 501, + 480, + 544 + ], + "spans": [ + { + "bbox": [ + 133, + 501, + 480, + 544 + ], + "type": "text", + "content": "15. Suvorov, R., Logacheva, E., Mashikhin, A., Remizova, A., Ashukha, A., Silvestrov, A., Kong, N., Goka, H., Park, K., Lempitsky, V.: Resolution-robust large mask inpainting with fourier convolutions. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV) (2022)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 133, + 545, + 480, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 545, + 480, + 578 + ], + "spans": [ + { + "bbox": [ + 133, + 545, + 480, + 578 + ], + "type": "text", + "content": "16. Tu, Z., Talebi, H., Zhang, H., Yang, F., Milanfar, P., Bovik, A., Li, Y.: Maxvit: Multi-axis vision transformer. In: European Conference on Computer Vision (ECCV) (2022)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 133, + 578, + 480, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 578, + 480, + 600 + ], + "spans": [ + { + "bbox": [ + 133, + 578, + 480, + 600 + ], + "type": "text", + "content": "17. Van Den Oord, A., Vinyals, O., et al.: Neural discrete representation learning. In: Advances in Neural Information Processing Systems (NeurIPS). vol. 30 (2017)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 133, + 601, + 480, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 601, + 480, + 632 + ], + "spans": [ + { + "bbox": [ + 133, + 601, + 480, + 632 + ], + "type": "text", + "content": "18. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: Advances in Neural Information Processing Systems (NeurIPS). vol. 30 (2017)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 133, + 633, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 633, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 133, + 633, + 480, + 665 + ], + "type": "text", + "content": "19. Xiao, J., Ehinger, K.A., Oliva, A., Torralba, A.: Recognizing scene viewpoint using panoramic place representation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2012)" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 221, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 221, + 91, + 447, + 102 + ], + "type": "text", + "content": "2S-ODIS: Two-Stage Omni-Directional Image Synthesis" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 481, + 150 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 481, + 150 + ], + "type": "text", + "content": "20. 
Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2018)" + } + ] + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 284, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 284, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 284, + 100 + ], + "type": "text", + "content": "A. Nakata and T. Yamanaka" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/757b034b-7105-4846-a434-665e5b237ea6_content_list.json b/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/757b034b-7105-4846-a434-665e5b237ea6_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..4e41ac17b643d2077259acadce1138931ac85885 --- /dev/null +++ b/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/757b034b-7105-4846-a434-665e5b237ea6_content_list.json @@ -0,0 +1,1760 @@ +[ + { + "type": "text", + "text": "3D Congealing: 3D-Aware Image Alignment in the Wild", + "text_level": 1, + "bbox": [ + 331, + 141, + 671, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yunzhi Zhang $^{1}$ , Zizhang Li $^{1}$ , Amit Raj $^{2}$ , Andreas Engelhardt $^{3}$ , Yuanzhen Li $^{2}$ , Tingbo Hou $^{4}$ , Jiajun Wu $^{1}$ , and Varun Jampani $^{5}$", + "bbox": [ + 215, + 210, + 785, + 244 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 Stanford University", + "2 Google DeepMind", + "3 University of Tübingen", + "4 Meta GenAI", + "5 Stability AI" + ], + "bbox": [ + 416, + 253, + 584, + 323 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. We propose 3D Congealing, a novel problem of 3D-aware alignment for 2D images capturing semantically similar objects. Given a collection of unlabeled Internet images, our goal is to associate the shared semantic parts from the inputs and aggregate the knowledge from 2D images to a shared 3D canonical space. We introduce a general framework that tackles the task without assuming shape templates, poses, or any camera parameters. At its core is a canonical 3D representation that encapsulates geometric and semantic information. The framework optimizes for the canonical representation together with the pose for each input image, and a per-image coordinate map that warps 2D pixel coordinates to the 3D canonical frame to account for the shape matching. The optimization procedure fuses prior knowledge from a pre-trained image generative model and semantic information from input images. The former provides strong knowledge guidance for this under-constrained task, while the latter provides the necessary information to mitigate the training data bias from the pre-trained model. 
Our framework can be used for various tasks such as pose estimation and image editing, achieving strong results on real-world image datasets under challenging illumination conditions and on in-the-wild online image collections. Project page at https://ai.stanford.edu/~yzzhang/projects/3d-congealing/.", + "bbox": [ + 259, + 361, + 743, + 641 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 686, + 375, + 704 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We propose the task of 3D Congealing, where the goal is to align a collection of images containing semantically similar objects into a shared 3D space. Specifically, we aim to obtain a canonical 3D representation together with the pose and a dense map of 2D-3D correspondence for each image in the collection. The input images may contain object instances belonging to a similar category with varying shapes and textures, and are captured under distinct camera viewpoints and illumination conditions, which all contribute to the pixel-level difference as shown in Figure 1. Despite such inter-image differences, humans excel at aligning such", + "bbox": [ + 212, + 718, + 787, + 843 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/326fa3f4c2133cca056625ab6933f5a81979e4d9cd0e440db6d346aa227594ec.jpg", + "image_caption": [ + "Fig. 1: Objects with different shapes and appearances, such as these sculptures, may share similar semantic parts and a similar geometric structure. We study 3D Congealing, inferring and aligning such a shared structure from an unlabeled image collection. Such alignment can be used for tasks such as pose estimation and image editing. See Appendix A for full results." + ], + "image_footnote": [], + "bbox": [ + 225, + 143, + 781, + 258 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "images with one another in a geometrically and semantically consistent manner based on their 3D-aware understanding.", + "bbox": [ + 212, + 371, + 785, + 402 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Obtaining a canonical 3D representation and grounding input images to the 3D canonical space enable several downstream tasks, such as 6-DoF object pose estimation, pose-aware image filtering, and image editing. Unlike the task of 2D congealing [11, 29, 31], where the aim is to align the 2D pixels across the images, 3D Congealing requires aggregating the information from the image collection altogether and forming the association among images in 3D. The task is also closely related to 3D reconstruction from multiview images, with a key distinction in the problem setting, as inputs here do not necessarily contain identical objects but rather semantically similar ones. Such a difference opens up the possibility of image alignment from readily available image collections on the Internet, e.g., online search results, landmark images, and personal photo collections.", + "bbox": [ + 212, + 406, + 787, + 571 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D Congealing represents a challenging problem, particularly for arbitrary images without camera pose or lighting annotations, even when the input images contain identical objects [1,4,20,44], because the solutions for pose and shape are generally entangled. On the one hand, the definition of poses is specific to the coordinate frame of the shape; on the other hand, the shape optimization is typically guided by the pixel-wise supervision of images under the estimated poses. 
To overcome the ambiguity in jointly estimating poses and shapes, prior works mostly start from noisy pose initializations [20], data-specific initial pose distributions [25,44], or rough pose annotations such as pose quadrants [1]. They then perform joint optimization for a 3D representation using an objective of reconstructing input image pixels [1,20,44] or distribution matching [25].", + "bbox": [ + 212, + 578, + 787, + 744 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, instead of relying on initial poses as starting points for shape reconstruction, we propose to tackle the joint optimization problem from a different perspective. We first obtain a plausible 3D shape that is compliant with the input image observations using pre-trained generative models, and then use semantic-aware visual features, e.g., pre-trained features from DINO [2,30] and Stable-Diffusion [36], to register input images to the 3D shape. Compared to", + "bbox": [ + 212, + 750, + 787, + 840 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Y. Zhang et al.", + "bbox": [ + 271, + 114, + 374, + 128 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "photometric reconstruction losses, these features are more tolerant of variance in object identities among image inputs.", + "bbox": [ + 212, + 146, + 782, + 176 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We make deliberate design choices to instantiate such a framework that fuses the knowledge from pre-trained text-to-image (T2I) generative models with real image inputs. First, to utilize the prior knowledge from generative models, we opt to apply a T2I personalization method, Textual Inversion [7], which aims to find the most suitable text embedding to reconstruct the input images via the pre-trained model. Furthermore, a semantic-aware distance is proposed to mitigate the appearance discrepancy between the rendered image and the input photo collection. Finally, a canonical coordinate mapping is learned to find the correspondence between 3D canonical representation and 2D input images.", + "bbox": [ + 212, + 176, + 785, + 311 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To prove the effectiveness of the proposed framework, we compare the proposed method against several baselines on the task of pose estimation on a dataset with varying illuminations and show that our method surpasses all the baselines significantly. We also demonstrate several applications of the proposed method, including image editing and object alignment on web image data.", + "bbox": [ + 212, + 313, + 785, + 387 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In summary, our contributions are:", + "bbox": [ + 238, + 388, + 493, + 402 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. We propose a novel task of 3D Congealing that involves aligning images of semantically similar objects in a shared 3D space.", + "2. We develop a framework tackling the proposed task and demonstrate several applications using the obtained 2D-3D correspondence, such as pose estimation and image editing.", + "3. We show the effectiveness and applicability of the proposed method on a diverse range of in-the-wild Internet images." 
+ ], + "bbox": [ + 220, + 406, + 784, + 512 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Works", + "text_level": 1, + "bbox": [ + 215, + 535, + 395, + 551 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Image Alignment and Congealing. The task of image alignment for a single instance, possibly under varying illuminations, has been relatively well-studied [24, 47]. To align images containing different instances from the same category with small deformations, one line of approach is known as image congealing [12, 13, 18, 27, 29, 31]. In particular, Neural Congealing [29] learns atlases to capture common semantic features from input images and recovers a dense mapping between input images and the atlases. GANgealing [31] uses a spatial transformer to map a randomly generated image from a GAN [8] to a jointly aligned space. These 2D-warping-based methods are typically applied to source and target image pairs with no or small camera rotation, and work best on in-plane transformation, while our proposed framework handles a larger variation of viewpoints due to 3D reasoning. On the other hand, DIFNet [6] exemplifies an approach of joint optimization of shape template and deformation, provided with the 3D shape. In comparison, we propose a template-followed-by-implicit-deformation approach and assume a single 2D observation for each instance instead of 3D inputs. The proposed approach exploits the fact that a \"good\" template, i.e., one that captures common geometric structure of inputs, is not unique and a solution can be effectively found before knowing input image poses. Compared to joint", + "bbox": [ + 212, + 566, + 787, + 840 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "3D Congealing: 3D-Aware Image Alignment in the Wild", + "bbox": [ + 357, + 114, + 730, + 128 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "optimization methods, it reduces task complexity by providing such an anchoring template to make later image registration easier. Finally, this work provides qualitative results on aligning images cross instances with large deformation. The output global alignment of input instances and articulation-free templates can be useful for downstream reconstruction with image-specific articulation, which is beyond the scope of this work.", + "bbox": [ + 212, + 146, + 787, + 238 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Object Pose Estimation. Object pose estimation aims to estimate the pose of an object instance with respect to the coordinate frame of its 3D shape. Classical methods for pose estimation recover poses from multi-view images using pixel- or feature-level matching to find the alignment between different images [38]. These methods are less suitable in the in-the-wild setting due to the increasing appearance variance. Recent methods tackle this task by supervised learning with pose annotations [19,42,48], but it remains challenging for these methods to generalize beyond the training distribution. Another class of methods uses an analysis-by-synthesis framework to estimate pose given category-specific templates [3] or a pre-trained 3D representation [46]; these assumptions make it challenging to apply these methods to generic objects in the real world. ID-Pose [5] leverages Zero-1-to-3 [21], a view synthesis model, and optimizes for the relative pose given a source and a target image. Goodwin et al. 
[9] use pre-trained self-supervised features for matching, instead of doing it at the pixel level, but require both RGB and depth inputs.", + "bbox": [ + 212, + 271, + 789, + 500 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Shape Reconstruction from Image Collections. Neural rendering approaches [26, 43, 45] use images with known poses to reconstruct the 3D shape and appearance from a collection of multiview images. The assumptions of known poses and consistent illumination prevent these methods from being applied in the wild. Several works have extended these approaches to relax the pose assumption, proposing to handle noisy or unknown camera poses of input images through joint optimization of poses and 3D representation [4, 20, 44]. SAMURAI [1] further handles scenes under various illuminations, but requires access to coarse initial poses in the form of pose quadrant annotations.", + "bbox": [ + 212, + 532, + 789, + 670 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3D Distillation from 2D Diffusion Models. Recently, text-to-image diffusion models have shown great advancement in 2D image generation and are used for 3D asset distillation with conditions such as texts [32,39], single image [21], and image collections [33]. DreamFusion [32] has proposed to apply gradients computed from pre-trained text-to-image models to the optimized 3D representations. DreamBooth3D [33] proposed to utilize fine-tuned diffusion model [37] for the image-conditioned 3D reconstruction task. These works provide a viable solution for 3D reconstruction from image collections but without grounding the inputs to the 3D space as in ours.", + "bbox": [ + 212, + 703, + 789, + 840 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Y. Zhang et al.", + "bbox": [ + 271, + 114, + 372, + 128 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/29133ff7e713ee4a80c5650915b474bc3ca0e87797780a9fef76cd115e896d26.jpg", + "image_caption": [ + "Fig. 2: Pipeline. Given a collection of in-the-wild images capturing similar objects as inputs, we develop a framework that \"congeals\" these images in 3D. The core representation consists of a canonical 3D shape that captures the geometric structure shared among the inputs, together with a set of coordinate mappings that register the input images to the canonical shape. The framework utilizes the prior knowledge of plausible 3D shapes from a generative model, and aligns images in the semantic space using pre-trained semantic feature extractors." + ], + "image_footnote": [], + "bbox": [ + 222, + 147, + 787, + 335 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 215, + 467, + 330, + 483 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We formulate the problem of 3D Congealing as follows. Given a set of $N$ object-centric images $\\mathcal{D} = \\{x_{n}\\}_{n = 1}^{N}$ that captures objects sharing semantic components, e.g., objects from one category, we seek to align the object instances in these images into a canonical 3D representation, e.g., NeRF [26], parameterized by $\\theta$ . We refer to the coordinate frame of this 3D representation as the canonical frame. 
We also recover the camera pose of each observation $x\\in \\mathcal{D}$ in the canonical frame, denoted using a pose function $\\pi :x\\mapsto (\\xi ,\\kappa)$ where $\\xi$ represents the object pose in SE(3) and $\\kappa$ is the camera intrinsic parameters. We assume access to instance masks, which can be obtained using an off-the-shelf segmentation method [16].", + "bbox": [ + 212, + 502, + 787, + 638 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The 3D representation should be consistent with the physical prior of objects in the natural world, and with input observations both geometrically and semantically. These constraints can be translated into an optimization problem:", + "bbox": [ + 212, + 638, + 787, + 685 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\max _ {\\pi , \\theta} p _ {\\Theta} (\\theta), \\text {s . t .} x = \\mathcal {R} (\\pi (x), \\theta), \\forall x \\in \\mathcal {D}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 696, + 787, + 720 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $p_{\\Theta}$ is a prior distribution for the 3D representation parameter $\\theta$ that encourages physically plausible solutions, $\\mathcal{R}$ is a predefined rendering function that enforces geometric consistency, and the equality constraint on image reconstruction enforces compliance with input observations.", + "bbox": [ + 212, + 733, + 787, + 792 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We will now describe an instantiation of the 3D prior $p_{\\Theta}$ (Sec. 3.1), an image distance function that helps enforce the equality constraint (Sec. 3.2), followed by the 3D Congealing optimization (Sec. 3.3) to estimate input image poses $\\pi$ .", + "bbox": [ + 212, + 794, + 787, + 840 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "3D Congealing: 3D-Aware Image Alignment in the Wild", + "bbox": [ + 357, + 114, + 732, + 128 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 3D Guidance from Generative Models", + "text_level": 1, + "bbox": [ + 215, + 146, + 576, + 161 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As illustrated in the left part of Figure 2, we extract the prior knowledge for 3D representations $p_{\\Theta}(\\cdot)$ from a pre-trained text-to-image (T2I) model such as Stable-Diffusion [36]. DreamFusion [32] proposes to turn a text prompt $y$ into a 3D representation $\\theta$ using the following Score Distillation Sampling (SDS) objective, leveraging a T2I diffusion model with frozen parameters $\\phi$ ,", + "bbox": [ + 212, + 169, + 787, + 247 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\theta} \\mathbb {E} _ {x \\in \\mathcal {D} (\\theta)} \\mathcal {L} _ {\\text {d i f f}} ^ {\\phi} (x, y). 
\\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 421, + 256, + 787, + 277 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Here $\\mathcal{D}(\\theta) \\coloneqq \\{\\mathcal{R}(\\pi, \\theta) \\mid \\pi \\sim p_{\\Pi}(\\cdot)\\}$ contains images rendered from the 3D representation $\\theta$ under a prior camera distribution $p_{\\Pi}(\\cdot)$ , and $\\mathcal{L}_{\\mathrm{diff}}^{\\phi}$ is the training objective of image diffusion models specified as follows:", + "bbox": [ + 214, + 286, + 787, + 335 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {d i f f}} ^ {\\phi} (x, y) := \\mathbb {E} _ {t \\sim \\mathcal {U} ([ 0, 1 ]), \\epsilon \\sim \\mathcal {N} (\\mathbf {0}, I)} \\left[ \\omega (t) \\| \\epsilon_ {\\phi} (\\alpha_ {t} x + \\sigma_ {t} \\epsilon , y, t) - \\epsilon \\| _ {2} ^ {2} \\right], \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 276, + 344, + 787, + 363 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\epsilon_{\\phi}$ is the pre-trained denoising network, $\\omega(\\cdot)$ is the timestep-dependent weighting function, $t$ is the diffusion timestep and $\\alpha_{t}, \\sigma_{t}$ are timestep-dependent coefficients from the diffusion model schedule.", + "bbox": [ + 212, + 369, + 787, + 414 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The above loss can be used to guide the optimization of a 3D representation $\\theta$ , whose gradient is approximated by", + "bbox": [ + 214, + 415, + 787, + 446 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {\\theta} \\mathcal {L} _ {\\mathrm {d i f f}} ^ {\\phi} (x = \\mathcal {R} (\\xi , \\kappa , \\theta), y) \\approx \\mathbb {E} _ {t, \\epsilon} \\left[ \\omega (t) (\\epsilon_ {\\phi} (\\alpha_ {t} x + \\sigma_ {t} \\epsilon , y, t) - \\epsilon) \\frac {\\partial x}{\\partial \\theta} \\right], \\qquad (4)\n$$\n", + "text_format": "latex", + "bbox": [ + 267, + 455, + 787, + 488 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\xi$ and $\\kappa$ are the extrinsic and intrinsic camera parameters, respectively. The derived gradient approximation is adopted by later works such as MVDream [39], which we use as the backbone.", + "bbox": [ + 212, + 496, + 787, + 541 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The original SDS objective is optimizing for a text-conditioned 3D shape with a user-specified text prompt $y$ and does not consider image inputs. Here, we use the technique from Textual Inversion [7] to recover the most suitable text prompt $y^{*}$ that explains input images, defined as follows:", + "bbox": [ + 212, + 541, + 787, + 602 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\ny ^ {*} = \\arg \\min _ {y} \\mathbb {E} _ {x \\in \\mathcal {D}} \\mathcal {L} _ {\\text {d i f f}} ^ {\\phi} (x, y). \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 398, + 612, + 787, + 635 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Eq. (2) and Eq. (5) differ in that both the sources of the observations $x$ (an infinite dataset of rendered images $\\mathcal{D}(\\theta)$ for the former, and real data $\\mathcal{D}$ for the latter) and the parameters being optimized over $(\\theta$ and $y$ , respectively). In our framework, we incorporate the real image information to the SDS guidance via first solving for $y^{*}$ (Eq. (5)) and keep it frozen when optimizing for $\\theta$ (Eq. (2)). 
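For illustration, the two-step recipe just described (fit the text embedding on real images, then freeze it for guidance) can be sketched in a few lines of PyTorch. This is a minimal, self-contained sketch, not the paper's code: `eps_phi` is a toy stand-in for the frozen pre-trained denoiser, the noise schedule is illustrative, and random tensors stand in for the image set; only the optimizer settings (AdamW, lr 0.02, weight decay 0.01, 1,000 iterations) follow the implementation details stated later in the paper.

```python
import torch

# Toy stand-in for the frozen denoiser eps_phi(x_t, y, t); NOT the real
# Stable Diffusion / MVDream backbone assumed by the paper.
def eps_phi(x_t, y, t):
    return x_t * (1.0 + y.mean()) - t

def l_diff(x, y):
    """One Monte-Carlo sample of L_diff (Eq. 3) with omega(t) = 1."""
    t = torch.rand(())                           # t ~ U([0, 1])
    eps = torch.randn_like(x)                    # eps ~ N(0, I)
    alpha_t, sigma_t = (1 - t).sqrt(), t.sqrt()  # illustrative schedule
    return ((eps_phi(alpha_t * x + sigma_t * eps, y, t) - eps) ** 2).mean()

images = [torch.rand(3, 64, 64) for _ in range(4)]  # stand-in for dataset D

# Step 1 (Eq. 5): textual inversion -- fit the embedding y* on real images.
y = torch.zeros(16, requires_grad=True)
opt = torch.optim.AdamW([y], lr=0.02, weight_decay=0.01)
for _ in range(1000):
    opt.zero_grad()
    l_diff(images[torch.randint(len(images), ()).item()], y).backward()
    opt.step()
# Step 2 (Eq. 2): freeze y* and optimize the 3D representation theta so that
# renderings R(pi, theta) minimize the same loss (SDS gradient, Eq. 4).
```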
The diffusion model parameter $\\phi$ is frozen throughout the process, requiring significantly less memory compared to the alternative of integrating input image information via finetuning $\\phi$ as in DreamBooth3D [33].", + "bbox": [ + 212, + 643, + 787, + 767 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 Semantic Consistency from Deep Features", + "text_level": 1, + "bbox": [ + 214, + 785, + 609, + 801 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The generative model prior from Sec. 3.1 effectively constrains the search space for the solutions. However, the objectives from Eqs. (2) and (5) use the input image", + "bbox": [ + 212, + 809, + 787, + 840 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Y. Zhang et al.", + "bbox": [ + 271, + 114, + 374, + 128 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "information only indirectly, via a text embedding $y^{*}$ . To explain the relative geometric relation among input images, we explicitly recover the pose of each input image w.r.t. $\\theta$ , as illustrated in Figure 2 (middle) and as explained below", + "bbox": [ + 212, + 146, + 782, + 191 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To align input images, we use an image distance metric defined by semantic feature dissimilarity. In particular, pre-trained deep models such as DINO [2,30] have been shown to be effective semantic feature extractors. Denote such a model as $f$ parameterized by $\\zeta$ . The similarity of two pixel locations $u_{1}$ and $u_{2}$ from two images $x_{1}$ and $x_{2}$ , respectively, can be measured with", + "bbox": [ + 212, + 191, + 784, + 267 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nd _ {\\zeta} ^ {u _ {1}, u _ {2}} \\left(x _ {1}, x _ {2}\\right) := 1 - \\frac {\\langle \\left[ f _ {\\zeta} \\left(x _ {1}\\right) \\right] _ {u _ {1}} , \\left[ f _ {\\zeta} \\left(x _ {2}\\right) \\right] _ {u _ {2}} \\rangle}{\\| \\left[ f _ {\\zeta} \\left(x _ {1}\\right) \\right] _ {u _ {1}} \\| _ {2} \\| \\left[ f _ {\\zeta} \\left(x _ {2}\\right) \\right] _ {u _ {2}} \\| _ {2}}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 323, + 277, + 785, + 314 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $[\\cdot ]$ is an indexing operator. 
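As a minimal illustration of this per-pixel semantic distance, and of the image-level average it induces (Eq. (7), defined next), consider the following sketch. The random tensors stand in for DINO-style per-pixel feature maps [f_zeta(x)] of shape (C, H, W); they are not the paper's actual features.

```python
import torch
import torch.nn.functional as F

def semantic_distance(feats1, feats2):
    """Average of d_zeta^{u,u}(x1, x2) over all pixels u (Eqs. 6-7).

    feats1, feats2: per-pixel feature maps [f_zeta(x)], shape (C, H, W).
    """
    f1 = F.normalize(feats1, dim=0)      # unit-normalize each channel vector
    f2 = F.normalize(feats2, dim=0)
    cos = (f1 * f2).sum(dim=0)           # cosine similarity at each pixel u
    return (1.0 - cos).mean()            # 0 = identical, 2 = opposite

# Stand-in features; in the pipeline these come from a frozen extractor.
d = semantic_distance(torch.randn(384, 32, 32), torch.randn(384, 32, 32))
print(float(d))   # ~1.0 for random features
```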
It thereafter defines an image distance function", + "bbox": [ + 214, + 323, + 779, + 339 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| x _ {1} - x _ {2} \\right\\| _ {d _ {\\zeta}} := \\frac {1}{H W} \\sum_ {u} d _ {\\zeta} ^ {u, u} \\left(x _ {1}, x _ {2}\\right), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 349, + 785, + 383 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $x_{1}$ and $x_{2}$ have resolution $H\\times W$ , and the sum is over all image coordinates.", + "bbox": [ + 212, + 393, + 785, + 407 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The choice of semantic-aware image distance, instead of photometric differences as in the classical problem setting of multiview 3D reconstruction [38,43,45], leads to solutions that maximally align input images to the 3D representation with more tolerance towards variance in object shape, texture, and environmental illuminations among input images, which is crucial in our problem setting.", + "bbox": [ + 212, + 409, + 784, + 484 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3 Optimization", + "text_level": 1, + "bbox": [ + 214, + 506, + 370, + 522 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The Canonical Shape and Image Poses. Combining Secs. 3.1 and 3.2, we convert the original problem in Eq. (1) into", + "bbox": [ + 214, + 531, + 782, + 561 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {\\pi , \\theta} \\underbrace {\\mathbb {E} _ {x \\in \\mathcal {D} (\\theta)} \\mathcal {L} _ {\\text {diff}} ^ {\\phi} \\left(x , y ^ {*}\\right)} _ {\\text {generative model guidance}} + \\lambda \\underbrace {\\mathbb {E} _ {x \\in \\mathcal {D}} \\| \\mathcal {R} (\\pi (x) , \\theta) - x \\| _ {d}} _ {\\text {data reconstruction}}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 312, + 574, + 785, + 609 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $y^{*}$ comes from Eq. (5) and $\\lambda$ is a loss weight. Compared to Eq. (1), here the first term instantiates the generative modeling prior and the second term is a soft constraint of reconstructing input observations. Specifically, $d = \\lambda_{\\zeta}d_{\\zeta} + \\lambda_{\\mathrm{IoU}}d_{\\mathrm{IoU}}$ , where $d_{\\zeta}$ is the semantic-space distance metric from Sec. 3.2, and $d_{\\mathrm{IoU}}$ is the Intersection-over-Union (IoU) loss for masks, $\\| m_1 - m_2 \\|_{d_{\\mathrm{IoU}}} \\coloneqq 1 - (\\| m_1 \\odot m_2 \\|_1) / (\\| m_1 \\|_1 + \\| m_2 \\|_1 - \\| m_1 \\odot m_2 \\|_1)$ , where $m_1$ and $m_2$ are image masks, which in Eq. (8) are set to be the mask rendering and the instance mask for $x$ . The use of both $d_{\\zeta}$ and $d_{\\mathrm{IoU}}$ tolerates shape variance among input instances.", + "bbox": [ + 212, + 621, + 785, + 741 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For the shape representation, we follow NeRF [26] and use neural networks $\\sigma_{\\theta}:\\mathbb{R}^{3}\\to \\mathbb{R}$ and $c_{\\theta}:\\mathbb{R}^{3}\\rightarrow \\mathbb{R}^{3}$ to map a 3D spatial coordinate to a density and an RGB value, respectively. 
The rendering operation $\\mathcal{R}$ is the volumetric rendering operation specified as follows:", + "bbox": [ + 212, + 742, + 785, + 801 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {R} (r, \\xi , \\theta ; c _ {\\theta}) = \\int T (t) \\sigma_ {\\theta} (\\xi r (t)) c _ {\\theta} (\\xi r (t)) \\mathrm {d} t, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 349, + 813, + 785, + 844 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "3D Congealing: 3D-Aware Image Alignment in the Wild", + "bbox": [ + 357, + 114, + 730, + 128 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $T(t) = \\exp \\left(-\\int \\sigma_{\\theta}(r(t'))\\mathrm{d}t'\\right)$ , $r: \\mathbb{R} \\to \\mathbb{R}^3$ is a ray shooting from the camera center to the image plane, parameterized by the camera location and the ray's direction, and $\\xi$ is the relative pose that transforms the ray from the camera frame to the canonical frame.", + "bbox": [ + 212, + 145, + 787, + 205 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Forward Canonical Coordinate Mappings. After the above optimization, each image $x$ from the input image collection can be \"congealed\" to the shape $\\theta$ via a canonical coordinate mapping, i.e., a forward warping operation $\\varPhi_x^{\\mathrm{fwd}}: \\mathbb{R}^2 \\to \\mathbb{R}^3$ that maps a 2D image coordinate to a 3D coordinate in the canonical frame of reference as illustrated in Figure 2. $\\varPhi_x^{\\mathrm{fwd}}$ consists of the following two operations.", + "bbox": [ + 212, + 219, + 787, + 294 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "First, we warp a coordinate $u$ from the real image $x$ to the rendering of the canonical shape under its pose $\\pi(x)$ , denoted as $\\tilde{x} \\coloneqq \\mathcal{R}(\\pi(x), \\theta)$ . Specifically,", + "bbox": [ + 212, + 295, + 787, + 325 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\varPhi_ {\\tilde {x} \\leftarrow x} ^ {\\mathrm {2 D} \\leftarrow 2 \\mathrm {D}} (u) := \\arg \\min _ {\\tilde {u}} d _ {\\zeta} ^ {\\tilde {u}, u} (\\tilde {x}, x) + \\lambda_ {\\ell_ {2}} \\| \\tilde {u} - u \\| _ {2} ^ {2} + \\lambda_ {\\text {s m o o t h}} \\mathcal {L} _ {\\text {s m o o t h}} (\\tilde {u}, u), \\quad (1 0)\n$$\n", + "text_format": "latex", + "bbox": [ + 236, + 333, + 787, + 357 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $d_{\\zeta}$ follows Eq. (6), the 2D coordinates $u$ and $\\tilde{u}$ are normalized into range [0,1] before computing the $\\ell_2$ norm, the smoothness term $\\mathcal{L}_{\\mathrm{smooth}}$ is specified in Appendix B, and $\\lambda_{\\ell_2}$ and $\\lambda_{\\mathrm{smooth}}$ are scalar weights. This objective searches for a new image coordinate $\\tilde{u}$ (from the rendering $\\tilde{x}$ ) that shares a semantic feature similar to $u$ (from the real image $x$ ), and ensures that $\\tilde{u}$ stays in the local neighborhood of $u$ via a soft constraint of the coordinate distance. 
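A minimal sketch of this warp objective is given below, omitting the smoothness term and using grid_sample for a differentiable feature lookup at the warped coordinates. The feature maps are stand-ins for DINO-style features of the real image x and the rendering x~, and coordinates live in [-1, 1] (grid_sample's convention) rather than the [0, 1] normalization used in Eq. (10); this is an illustration under those assumptions, not the paper's implementation.

```python
import torch
import torch.nn.functional as F

def warp_to_rendering(feat_real, feat_rend, lam_l2=0.1, steps=400):
    """Sketch of the 2D-to-2D warp objective (Eq. 10), without L_smooth.

    feat_real / feat_rend: (C, H, W) feature maps of the real image x and
    the rendering x~; one warped coordinate u~ is optimized per pixel u.
    """
    C, H, W = feat_real.shape
    ys, xs = torch.meshgrid(torch.linspace(-1, 1, H),
                            torch.linspace(-1, 1, W), indexing="ij")
    u = torch.stack([xs, ys], dim=-1)            # source coordinates u
    u_tilde = u.clone().requires_grad_(True)     # warped coordinates u~
    opt = torch.optim.Adam([u_tilde], lr=0.01)
    f_real = F.normalize(feat_real, dim=0)       # fixed per-pixel targets
    for _ in range(steps):
        opt.zero_grad()
        f_samp = F.grid_sample(feat_rend[None], u_tilde[None],
                               align_corners=True)[0]     # features at u~
        f_samp = F.normalize(f_samp, dim=0)
        d_feat = (1 - (f_samp * f_real).sum(0)).mean()    # d_zeta^{u~,u}
        d_reg = lam_l2 * ((u_tilde - u) ** 2).sum(-1).mean()
        (d_feat + d_reg).backward()
        opt.step()
    return u_tilde.detach()
```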
Afterward, a 2D-to-3D operation takes in the warped coordinate from above and outputs its 3D location in the normalized object coordinate space (NOCS) [41] of $\\theta$ :", + "bbox": [ + 212, + 364, + 787, + 487 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\Phi_ {x} ^ {\\mathrm {3 D} \\leftarrow 2 \\mathrm {D}} (\\tilde {u}) := \\left[ \\mathcal {R} _ {\\text {N O C S}} (\\pi (x), \\theta) \\right] _ {\\tilde {u}}, \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 494, + 787, + 512 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\mathcal{R}_{\\mathrm{NOCS}}$ is identical to $\\mathcal{R}$ from Eq. (9), but replacing the color field $c_{\\theta}$ with a canonical object coordinate field, $c_{\\mathrm{NOCS}}: \\mathbb{R}^3 \\to \\mathbb{R}^3$ , $p \\mapsto (p - p_{\\mathrm{min}}) / (p_{\\mathrm{max}} - p_{\\mathrm{min}})$ , where $p_{\\mathrm{min}}$ and $p_{\\mathrm{max}}$ are the two opposite corners of the canonical shape's bounding box. These bounding boxes are determined by the mesh extracted from the density neural field $\\sigma_{\\theta}$ using the Marching Cube [22] algorithm.", + "bbox": [ + 212, + 520, + 787, + 595 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Combining the above, given an input image coordinate $u$ , $\\varPhi_x^{\\mathrm{fwd}}(u) := \\varPhi_x^{3\\mathrm{D} \\leftarrow 2\\mathrm{D}} \\circ \\varPhi_{\\tilde{x} \\leftarrow x}^{2\\mathrm{D} \\leftarrow 2\\mathrm{D}}(u)$ identifies a 3D location in the canonical frame corresponding to $u$ .", + "bbox": [ + 212, + 595, + 787, + 628 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Reverse Canonical Coordinate Mappings. Each image can be \"uncongealed\" from the canonical shape using $\\varPhi_x^{\\mathrm{rev}}:\\mathbb{R}^3\\to \\mathbb{R}^2$ , which is the reverse operation of $\\varPhi_x^{\\mathrm{fwd}}(u)$ and is approximately computed via nearest-neighbor inversion as explained below.", + "bbox": [ + 212, + 638, + 787, + 696 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Given a 3D location within a unit cube, $p \\in [0,1]^3$ , $\\varPhi_x^{\\mathrm{rev}}(p) := \\varPhi_{x \\leftarrow \\tilde{x}}^{2\\mathrm{D} \\leftarrow 2\\mathrm{D}} \\circ \\varPhi_x^{2\\mathrm{D} \\leftarrow 3\\mathrm{D}}(p)$ . In particular,", + "bbox": [ + 212, + 696, + 787, + 729 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\Phi_ {x} ^ {\\mathrm {2 D} \\leftarrow 3 \\mathrm {D}} (p) := \\arg \\min _ {\\tilde {u}} \\| p - \\Phi_ {x} ^ {\\mathrm {3 D} \\leftarrow 2 \\mathrm {D}} (\\tilde {u}) \\| _ {2} \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 354, + 737, + 787, + 760 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "is an operation that takes in a 3D coordinate $p$ in the canonical frame and searches for a 2D image coordinate whose NOCS value is the closest to $p$ , and $\\varPhi_{x\\leftarrow \\bar{x}}^{2\\mathrm{D}\\leftarrow 2\\mathrm{D}}$ is computed via inverting $\\varPhi_{\\bar{x}\\leftarrow x}^{2\\mathrm{D}\\leftarrow 2\\mathrm{D}}$ from Eq. (10),", + "bbox": [ + 212, + 767, + 787, + 814 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\Phi_ {x \\leftarrow \\tilde {x}} ^ {2 \\mathrm {D} \\leftarrow 2 \\mathrm {D}} (\\tilde {u}) := \\arg \\min _ {u} \\| \\tilde {u} - \\Phi_ {\\tilde {x} \\leftarrow x} ^ {2 \\mathrm {D} \\leftarrow 2 \\mathrm {D}} (u) \\| _ {2}. 
\\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 351, + 820, + 787, + 843 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Y. Zhang et al.", + "bbox": [ + 271, + 114, + 374, + 128 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/908f2fa958d582a64e1292a8d8b01de6df5745ce15abca255ca4e99fd26785d0.jpg", + "image_caption": [ + "Fig. 3: Pose Estimation from Multi-Illumination Captures. The figure shows 4 example scenes from the NAVI dataset, displaying the real image inputs, canonical shapes under estimated poses, and the canonical coordinate maps." + ], + "image_footnote": [], + "bbox": [ + 228, + 141, + 781, + 412 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In summary, the above procedure establishes the 2D-3D correspondence between an input image $x$ and the canonical shape via $\\varPhi_x^{\\mathrm{fwd}}$ , and defines the dense 2D-2D correspondences between two images $x_1, x_2$ via $\\varPhi_{x_2}^{\\mathrm{rev}} \\circ \\varPhi_{x_1}^{\\mathrm{fwd}}$ which enables image editing (Figure 8). The full framework is described in Algorithm 1.", + "bbox": [ + 212, + 489, + 787, + 551 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "3.4 Implementation Details", + "text_level": 1, + "bbox": [ + 214, + 573, + 455, + 589 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Input images are cropped with the tightest bounding box around the foreground masks. The masks come from dataset annotations, if available, or from Grounded-SAM [16, 35], an off-the-shelf segmentation model.", + "bbox": [ + 212, + 598, + 488, + 686 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Across all experiments, we optimize for $y^{*}$ (Algorithm 1, line 2) for 1,000 iterations using an AdamW [23] optimizer with learning rate 0.02 and weight decay 0.01. We optimize for $\\theta$", + "bbox": [ + 212, + 689, + 488, + 763 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "(line 3) with $\\lambda = 0$ for 10,000 iterations, with AdamW and learning rate 0.001. The NeRF model $\\theta$ has 12.6M parameters. It is frozen afterwards and defines the coordinate frame for poses.", + "bbox": [ + 212, + 763, + 787, + 809 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Since directly optimizing poses and camera parameters with gradient descents easily falls into local minima [20], we initialize $\\pi$ using an analysis-by-synthesis", + "bbox": [ + 212, + 810, + 787, + 840 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1: procedure RUN $(\\mathcal{D} = \\{x_{n}\\}_{n = 1}^{N})$", + "2: $y^{*}\\gets$ Solution to Eq. (5)", + "3: Optimize $\\theta$ with Eq. (8)", + "4: Sample pose candidates $\\{\\xi_i\\}_i$", + "5: for $n\\gets 1$ to $N$ do $\\triangleright$ Pose initialization", + "6: $\\pi (x_{n})\\gets \\arg \\min_{\\xi_{i}}\\| \\mathcal{R}(\\xi ,\\theta) - x_{n}\\|_{d_{\\zeta}}$", + "7: end for", + "8: Optimize $\\pi (x_{n})$ with Eq. 
(8) for all $n$", + "9: Determine $\\Phi_{x_n}^{\\mathrm{fwd}}$ and $\\Phi_{x_n}^{\\mathrm{rev}}$ for all $n$", + "10: return $\\theta, \\pi, \\{\\Phi_{x_n}^{\\mathrm{fwd}}\\}_{n=1}^N, \\{\\Phi_{x_n}^{\\mathrm{rev}}\\}_{n=1}^N$", + "11: end procedure" + ], + "bbox": [ + 500, + 598, + 784, + 722 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Algorithm 1: Overview.", + "text_level": 1, + "bbox": [ + 550, + 729, + 732, + 742 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "3D Congealing: 3D-Aware Image Alignment in the Wild", + "bbox": [ + 357, + 114, + 732, + 128 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/a0c33ae171a9626877cd83e9d57774c70e40d7b3f5e7709eb72556b296df8897.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><thead><tr><td rowspan="2">Labels</td><td rowspan="2">Methods</td><td colspan="2">Rotation°↓</td><td colspan="2">Translation↓</td></tr>
<tr><td>S_C</td><td>~S_C</td><td>S_C</td><td>~S_C</td></tr></thead><tbody>
<tr><td rowspan="3">Pose</td><td>NeROIC [17]</td><td>42.11</td><td>-</td><td>0.09</td><td>-</td></tr>
<tr><td>NeRS [47]</td><td>122.41</td><td>123.63</td><td>0.49</td><td>0.52</td></tr>
<tr><td>SAMURAI [1]</td><td>26.16</td><td>36.59</td><td>0.24</td><td>0.35</td></tr>
<tr><td rowspan="5">None</td><td>GNeRF [25]</td><td>93.15</td><td>80.22</td><td>1.02</td><td>1.04</td></tr>
<tr><td>PoseDiffusion [42]</td><td>46.79</td><td>46.34</td><td>0.81</td><td>0.90</td></tr>
<tr><td>Ours (3 seeds)</td><td>26.97±2.24</td><td>32.56±2.90</td><td>0.40±0.01</td><td>0.41±0.04</td></tr>
<tr><td>Ours (No Pose Init)</td><td>53.45</td><td>57.87</td><td>0.97</td><td>0.96</td></tr>
<tr><td>Ours (No IoU Loss)</td><td>31.29</td><td>31.15</td><td>0.87</td><td>0.85</td></tr></tbody></table>
", + "bbox": [ + 254, + 143, + 743, + 277 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/1917e09c2daf0df41490f0531d7a4b07f898fa3d3828b91f2e552026427899da.jpg", + "table_caption": [ + "Table 1: Pose Estimation from Multi-Illumination Image Captures. Our method performs better than both GNeRF and PoseDiffusion with the same input information, and on par with SAMURAI which additionally assumes camera pose direction as inputs. Different random seeds lead to different canonical shapes, but our method is robust to such variations. $\\pm$ denotes means followed by standard deviations." + ], + "table_footnote": [], + "table_body": "
<table><thead><tr><td rowspan="2">Methods</td><td colspan="2">Bed</td><td colspan="2">Bookcase</td><td colspan="2">Chair</td><td colspan="2">Desk</td><td colspan="2">Sofa</td><td colspan="2">Table</td><td colspan="2">Wardrobe</td><td colspan="2">Overall</td></tr>
<tr><td>R°↓</td><td>T↓</td><td>R°↓</td><td>T↓</td><td>R°↓</td><td>T↓</td><td>R°↓</td><td>T↓</td><td>R°↓</td><td>T↓</td><td>R°↓</td><td>T↓</td><td>R°↓</td><td>T↓</td><td>R°↓</td><td>T↓</td></tr></thead><tbody>
<tr><td>[42]</td><td>45.74</td><td>0.99</td><td>22.83</td><td>0.33</td><td>46.80</td><td>1.04</td><td>23.89</td><td>0.49</td><td>33.99</td><td>0.69</td><td>43.53</td><td>1.22</td><td>31.54</td><td>1.80</td><td>35.47±10.0</td><td>0.94±0.49</td></tr>
<tr><td>Ours</td><td>37.00</td><td>0.40</td><td>36.47</td><td>0.45</td><td>34.58</td><td>0.76</td><td>26.53</td><td>0.36</td><td>26.49</td><td>0.27</td><td>49.44</td><td>0.67</td><td>27.41</td><td>0.39</td><td>33.99±8.26</td><td>0.47±0.18</td></tr></tbody></table>
", + "bbox": [ + 215, + 361, + 785, + 414 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 2: Pose Estimation from Cross-Instance Image Collections. Our method achieves overall better performance than PoseDiffusion on Pix3D. \"R\" stands for rotation and \"T\" for translation. $\\pm$ denotes cross-category means followed by standard deviations.", + "bbox": [ + 212, + 417, + 785, + 460 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "approach (line 5-7). Specifically, we parameterize the camera intrinsics using a pinhole camera model with a scalar Field-of-View (FoV) value, and sample the camera parameter $(\\xi, \\kappa)$ from a set of candidates determined by an exhaustive combination of 3 FoV, 16 azimuth, and 16 elevation values uniformly sampled from $[15^{\\circ}, 60^{\\circ}]$ , $[-180^{\\circ}, 180^{\\circ}]$ , and $[-90^{\\circ}, 90^{\\circ}]$ , respectively. In this pose initialization stage, all renderings use a fixed camera radius and are cropped with the tightest bounding boxes of rendered foreground masks before being compared with the real image inputs. Line 6 is effectively Eq. (8) with $\\lambda_{\\zeta} = 1$ and $\\lambda_{\\mathrm{IoU}} = 0$ .", + "bbox": [ + 212, + 479, + 785, + 601 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "After pose initialization, we use the $\\mathfrak{se}(3)$ Lie algebra for camera extrinsics parameterization following BARF [20], and optimize for the extrinsics and intrinsics of each input image (Algorithm 1, line 8), with $\\lambda_{\\zeta} = 0$ and $\\lambda_{\\mathrm{IoU}} = 1$ , for 1,000 iterations with the Adam [15] optimizer and learning rate 0.001. Since $\\theta$ is frozen, the optimization effectively only considers the second term from Eq. (8). Finally, to optimize for the canonical coordinate mappings (Algorithm 1, line 9), for each input image, we run 4,000 iterations for Eq. (10) with AdamW and learning rate 0.01. All experiments are run on a single 24GB A5000 GPU.", + "bbox": [ + 212, + 602, + 787, + 724 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 215, + 753, + 375, + 772 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this section, we first benchmark the pose estimation performance of our method on in-the-wild image captures (Sec. 4.1), and then show qualitative results on diverse input data and demonstrate applications such as image editing (Sec. 4.2).", + "bbox": [ + 212, + 794, + 785, + 840 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Y. Zhang et al.", + "bbox": [ + 271, + 114, + 374, + 128 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/36334097b008a9f2ca11dbb40b3addfe05a182038a294b0561a9a00a3edd8e18.jpg", + "image_caption": [ + "Fig. 4: Pose Estimation for Tourist Landmarks. This is a challenging problem setting due to the varying viewpoints and lighting conditions, and the proposed method can successfully align online tourist photos taken at different times and possibly at different geographical locations, into one canonical representation. The top rows show input images and the bottom rows show shape templates under aligned poses." 
+ ], + "image_footnote": [], + "bbox": [ + 223, + 142, + 781, + 335 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.1 Pose Estimation", + "text_level": 1, + "bbox": [ + 215, + 441, + 395, + 455 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Dataset. We benchmark pose estimation performance under two settings. First, for a single-instance, varying illumination setting, we use the in-the-wild split of the NAVI [14] dataset, which contains 35 object-centric image collections in its official release. Each image collection contains an average of around 60 casual image captures of an object instance placed under different illumination conditions, backgrounds, and cameras. Second, for a single-category, cross-instance setting, we use Pix3D [40], a dataset of natural in-the-wild images grouped into 9 categories, each containing multiple shape models of IKEA objects. We use 20 randomly selected images from each category except for \"tool\" and \"misc\" as they involve shapes visually and semantically far apart.", + "bbox": [ + 212, + 464, + 787, + 614 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We use identical hyperparameters for all scenes. We use a generic text prompt, \"a photo of sks object\", for initialization for all scenes. The text embeddings corresponding to the tokens for \"sks object\" are being optimized using Eq. (5) with the rest frozen. For each scene, it takes around 1 hr to optimize for NeRF, 15 min for pose initialization, and 45 min for pose optimization.", + "bbox": [ + 212, + 616, + 787, + 691 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Baselines. We compare with several multiview reconstruction baselines. In particular, NeROIC [17] uses the poses from COLMAP, and NeRS [47] and SAMURAI [1] require initial camera directions. GNeRF [25] is a pose-free multiview 3D reconstruction method that is originally designed for single-illumination scenes, and is adapted as a baseline using the same input assumption as ours. PoseDiffusion [42] is a learning-based framework that predicts relative object poses, using ground truth pose annotations as training supervision. The original paper takes a model pre-trained on CO3D [34] and evaluates the pose prediction performance in the wild, and we use the same checkpoint for evaluation.", + "bbox": [ + 212, + 703, + 787, + 839 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "3D Congealing: 3D-Aware Image Alignment in the Wild", + "bbox": [ + 357, + 114, + 732, + 128 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 116, + 782, + 126 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/2640969c6ec9657cbdd4d97f071d9713a98aa5c547da988caa613e7c46aabeec.jpg", + "image_caption": [ + "Fig. 5: Object Alignment from Internet Images. Results of an online image search may contain various appearances, identities, and articulated poses of the object. Our method can successfully associate these in-the-wild images with one shared 3D space." + ], + "image_footnote": [], + "bbox": [ + 225, + 142, + 781, + 431 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Metrics. The varying illuminations pose challenges to classical pose estimation methods such as COLMAP [38]. We use the official split of the data which partitions the 35 scenes into 19 scenes where COLMAP converges ( $S_{C}$ in Table 1), and 16 scenes where COLMAP fails to converge ( $\\sim S_{C}$ ). 
Following [14], we report the absolute rotation and translation errors using Procrustes analysis [10], where for each scene, the predicted camera poses are aligned with the ground truth pose annotations using a global transformation before computing the pose metrics.", + "bbox": [ + 212, + 512, + 787, + 619 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Results. Handling different illumination conditions is challenging for all baselines using photometric-reconstruction-based optimization [1,17,47] even with additional information for pose initialization. As shown in Table 1, our approach significantly outperforms both GNeRF and PoseDiffusion and works on par with SAMURAI which requires additional pose initialization. We run our full pipeline with 3 random seeds and observe a consistent performance across seeds. Qualitative results of aligned templates and learned canonical coordinate maps are shown in Figure 3. Failure modes are discussed in Appendix F. In a cross-instance setting from Table 2, our method achieves a better overall performance compared to the best-performing baseline from Table 1.", + "bbox": [ + 212, + 638, + 787, + 789 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Ablations. Table 1 also shows ablation for the pose fitting objectives. The initialization is critical (\"No Pose Init\"), which is expected as pose optimization", + "bbox": [ + 212, + 809, + 785, + 840 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Y. Zhang et al.", + "bbox": [ + 271, + 114, + 374, + 128 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/46ac92b264d02545207cac6b17cfc59f2675186a671276c526b78bc586a9f0a8.jpg", + "image_caption": [ + "Fig. 6: Cross-Category Results. The method can associate images from different categories, such as cats and dogs, by leveraging a learned average shape." + ], + "image_footnote": [], + "bbox": [ + 225, + 143, + 779, + 247 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "is susceptible to local optima [20]. \"No IoU Loss\", which is equivalent to using the initialized poses as final predictions, also negatively affects the performance.", + "bbox": [ + 212, + 295, + 787, + 325 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.2 Applications", + "text_level": 1, + "bbox": [ + 215, + 348, + 366, + 363 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We show qualitative results on various in-the-wild image data. Inputs for Figures 4 and 5 are crawled with standard online image search engines and are CC-licensed, each consisting of 50 to 100 images. Inputs for Figures 6 and 7 come from the SPair-71k dataset [28]. We use identical hyperparameters for all datasets, except for text prompt initialization where we use a generic description of the object, e.g., \"a photo of sks sculpture\", or \"a photo of cats plus dogs\" for Figure 6.", + "bbox": [ + 212, + 373, + 787, + 464 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Single-Instance. Figure 4 shows the result on Internet photos of tourist landmarks, which may contain a large diversity in illuminations and styles. The proposed method can handle the variations and align these photos and art pieces to the same canonical 3D space and recover the relative camera poses.", + "bbox": [ + 212, + 478, + 787, + 540 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Cross-Instance, Single-Category. 
Internet images from generic objects may contain more shape and texture variations compared to landmarks. Figure 5 shows results for various objects, where the framework infers a canonical shape from the inputs to capture the shared semantic components being observed.", + "bbox": [ + 212, + 553, + 787, + 613 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Cross-Category. The method leverages semantic features to establish alignment and does not strictly assume that inputs are of the same category. In Figure 6, the method infers an average shape as an anchor to further reason about the relative relation among images from different categories.", + "bbox": [ + 212, + 628, + 787, + 689 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Inputs with Deformable Shapes. To test the robustness of the method, we run the pipeline on images of humans with highly diverse poses. Figures 1 and 7 show that the method assigns plausible poses to the inputs despite the large diversity of shapes and articulated poses contained in the inputs.", + "bbox": [ + 212, + 704, + 787, + 765 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Image Editing. The proposed method finds image correspondence and can be applied to image editing, as shown in Figure 8. Figure 8 (c) shows that our method obtains more visually plausible results compared to the Nearest-Neighbor (NN) baseline using the same DINO features. The baseline matches features in 2D", + "bbox": [ + 212, + 779, + 787, + 840 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "3D Congealing: 3D-Aware Image Alignment in the Wild", + "bbox": [ + 357, + 114, + 730, + 128 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/7af03c3af46925888c033ffb75d3a45e2061e556eec3b1343e71aa76783e4a20.jpg", + "image_caption": [ + "Fig. 7: Results on Deformable Objects. The method can be applied to images with highly diverse articulated poses and shapes as shown in the examples above." + ], + "image_footnote": [], + "bbox": [ + 227, + 143, + 779, + 246 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/fbf7042de2cf9b45bfd3d816dcd50e2a2ad49d33715e037590b2e71f4bf7d996.jpg", + "image_caption": [ + "(a) Texture Propagation" + ], + "image_footnote": [], + "bbox": [ + 217, + 305, + 787, + 373 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/57852ca76fad81fafab7972c2d5c18d657810d57aff50291248ef72539e24171.jpg", + "image_caption": [ + "(b) Editing Propagation", + "(c) Baseline Comparisons", + "Fig. 8: Image Editing. Our method propagates texture in (a) and (c) and regional editing in (b) to real images. As shown in (c), it achieves smoother results compared to the nearest-neighbor (NN) baseline thanks to the 3D geometric reasoning." + ], + "image_footnote": [], + "bbox": [ + 217, + 391, + 787, + 518 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "for each pixel individually and produces noisy results, as discussed in Appendix C. 
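For reference, the Nearest-Neighbor baseline discussed above reduces to independent per-pixel matching in feature space. Below is a minimal illustrative sketch, assuming precomputed (H, W, C) DINO feature maps; it is our reconstruction of a generic NN-matching baseline, not the exact evaluation code.

```python
import torch

def nn_match(feats_src: torch.Tensor, feats_tgt: torch.Tensor) -> torch.Tensor:
    """Per-pixel nearest-neighbor matching in feature space.

    feats_src, feats_tgt: (H, W, C) feature maps (e.g., from DINO).
    Returns (H, W, 2) integer coordinates into the target image, matching
    each source pixel independently.
    """
    H, W, C = feats_src.shape
    src = torch.nn.functional.normalize(feats_src.reshape(-1, C), dim=-1)
    tgt = torch.nn.functional.normalize(feats_tgt.reshape(-1, C), dim=-1)
    sim = src @ tgt.T                       # cosine similarity, (H*W, H*W)
    idx = sim.argmax(dim=-1)                # best target pixel per source pixel
    coords = torch.stack([idx // W, idx % W], dim=-1)
    return coords.reshape(H, W, 2)

# Toy usage with random stand-in features:
matches = nn_match(torch.randn(32, 32, 384), torch.randn(32, 32, 384))
```

Since every pixel is matched independently, nothing enforces spatial coherence, which is the source of the noise noted above; the 3D-aware mapping instead routes correspondences through the shared canonical frame.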
Quantitative evaluation of correspondence matching and additional qualitative results for editing are included in Appendix D and E.", + "bbox": [ + 214, + 627, + 787, + 674 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 215, + 699, + 359, + 715 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We have introduced 3D Congealing, 3D-aware alignment for 2D images capturing semantically similar objects. Our proposed framework leverages a canonical 3D representation that encapsulates geometric and semantic information and, through optimization, fuses prior knowledge from a pre-trained image generative model and semantic information from input images. We show that our model achieves strong results on real-world image datasets under challenging identity, illumination, and background conditions.", + "bbox": [ + 212, + 733, + 787, + 840 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Y. Zhang et al.", + "bbox": [ + 271, + 114, + 374, + 128 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgments. We thank Chen Geng and Sharon Lee for their help in reviewing the manuscript. This work is in part supported by NSF RI #2211258, #2338203, and ONR MURI N00014-22-1-2740.", + "bbox": [ + 215, + 146, + 787, + 191 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 217, + 217, + 321, + 233 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Boss, M., Engelhardt, A., Kar, A., Li, Y., Sun, D., Barron, J., Lensch, H., Jampani, V.: Samurai: Shape and material from unconstrained real-world arbitrary image collections. Advances in Neural Information Processing Systems 35, 26389-26403 (2022)", + "2. Caron, M., Touvron, H., Misra, I., Jégou, H., Mairal, J., Bojanowski, P., Joulin, A.: Emerging properties in self-supervised vision transformers. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 9650-9660 (2021)", + "3. Chen, X., Dong, Z., Song, J., Geiger, A., Hilliges, O.: Category level object pose estimation via neural analysis-by-synthesis. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXVI 16. pp. 139-156. Springer (2020)", + "4. Chen, Y., Chen, X., Wang, X., Zhang, Q., Guo, Y., Shan, Y., Wang, F.: Local-to-global registration for bundle-adjusting neural radiance fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8264-8273 (2023)", + "5. Cheng, W., Cao, Y.P., Shan, Y.: Id-pose: Sparse-view camera pose estimation by inverting diffusion models. arXiv preprint arXiv:2306.17140 (2023)", + "6. Deng, Y., Yang, J., Tong, X.: Deformed implicit field: Modeling 3D shapes with learned dense correspondence. In: CVPR (2021)", + "7. Gal, R., Alaluf, Y., Atzmon, Y., Patashnik, O., Bermano, A.H., Chechik, G., Cohen-Or, D.: An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618 (2022)", + "8. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial networks. Communications of the ACM 63(11), 139–144 (2020)", + "9. Goodwin, W., Vaze, S., Havoutis, I., Posner, I.: Zero-shot category-level object pose estimation. 
In: European Conference on Computer Vision. pp. 516-532. Springer (2022)", + "10. Gower, J.C., Dijksterhuis, G.B.: Procrustes problems, vol. 30. OUP Oxford (2004)", + "11. Gupta, K., Jampani, V., Esteves, C., Shrivastava, A., Makadia, A., Snavely, N., Kar, A.: ASIC: Aligning sparse in-the-wild image collections. arXiv preprint arXiv:2303.16201 (2023)", + "12. Huang, G., Mattar, M., Lee, H., Learned-Miller, E.: Learning to align from scratch. Advances in Neural Information Processing Systems 25 (2012)", + "13. Huang, G.B., Jain, V., Learned-Miller, E.: Unsupervised joint alignment of complex images. In: ICCV. pp. 1-8. IEEE (2007)", + "14. Jampani, V., Maninis, K.K., Engelhardt, A., Karpur, A., Truong, K., Sargent, K., Popov, S., Araujo, A., Martin-Brualla, R., Patel, K., et al.: Navi: Category-agnostic image collections with high-quality 3d shape and pose annotations. arXiv preprint arXiv:2306.09109 (2023)", + "15. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. In: International Conference on Learning Representations (2015)" + ], + "bbox": [ + 225, + 250, + 787, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "3D Congealing: 3D-Aware Image Alignment in the Wild", + "bbox": [ + 357, + 114, + 730, + 128 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "16. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., et al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023)", + "17. Kuang, Z., Olszewski, K., Chai, M., Huang, Z., Achlioptas, P., Tulyakov, S.: Neroic: Neural rendering of objects from online image collections. ACM Transactions on Graphics (TOG) 41(4), 1-12 (2022)", + "18. Learned-Miller, E.G.: Data driven image models through continuous joint alignment. IEEE TPAMI 28(2), 236-250 (2005)", + "19. Lin, A., Zhang, J.Y., Ramanan, D., Tulsiani, S.: Relpose++: Recovering 6d poses from sparse-view observations. arXiv preprint arXiv:2305.04926 (2023)", + "20. Lin, C.H., Ma, W.C., Torralba, A., Lucey, S.: Barf: Bundle-adjusting neural radiance fields. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5741-5751 (2021)", + "21. Liu, R., Wu, R., Van Hoorick, B., Tokmakov, P., Zakharov, S., Vondrick, C.: Zero-1-to-3: Zero-shot one image to 3d object. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9298-9309 (2023)", + "22. Lorensen, W.E., Cline, H.E.: Marching cubes: A high resolution 3d surface construction algorithm. ACM SIGGRAPH Computer Graphics 21(4), 163-169 (1987)", + "23. Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: International Conference on Learning Representations (2018)", + "24. Martin-Brualla, R., Radwan, N., Sajjadi, M.S.M., Barron, J.T., Dosovitskiy, A., Duckworth, D.: NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections. In: CVPR (2021)", + "25. Meng, Q., Chen, A., Luo, H., Wu, M., Su, H., Xu, L., He, X., Yu, J.: Gnerf: Gan-based neural radiance field without posed camera. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 6351-6361 (2021)", + "26. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM 65(1), 99-106 (2021)", + "27. 
Miller, E.G., Matsakis, N.E., Viola, P.A.: Learning from one example through shared densities on transforms. In: CVPR. vol. 1, pp. 464-471. IEEE (2000)", + "28. Min, J., Lee, J., Ponce, J., Cho, M.: Spair-71k: A large-scale benchmark for semantic correspondence. arXiv preprint arXiv:1908.10543 (2019)", + "29. Ofri-Amar, D., Geyer, M., Kasten, Y., Dekel, T.: Neural congealing: Aligning images to a joint semantic atlas. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 19403-19412 (2023)", + "30. Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., et al.: Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193 (2023)", + "31. Peebles, W., Zhu, J.Y., Zhang, R., Torralba, A., Efros, A.A., Shechtman, E.: Gansupervised dense visual alignment. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13470-13481 (2022)", + "32. Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988 (2022)", + "33. Raj, A., Kaza, S., Poole, B., Niemeyer, M., Ruiz, N., Mildenhall, B., Zada, S., Aberman, K., Rubinstein, M., Barron, J., et al.: Dreambooth3d: Subject-driven text-to-3d generation. arXiv preprint arXiv:2303.13508 (2023)", + "34. Reizenstein, J., Shapovalov, R., Henzler, P., Sbordone, L., Labatut, P., Novotny, D.: Common objects in 3d: Large-scale learning and evaluation of real-life 3d category reconstruction. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10901-10911 (2021)" + ], + "bbox": [ + 215, + 146, + 785, + 839 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Y. Zhang et al.", + "bbox": [ + 271, + 114, + 374, + 128 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "35. Ren, T., Liu, S., Zeng, A., Lin, J., Li, K., Cao, H., Chen, J., Huang, X., Chen, Y., Yan, F., et al.: Grounded sam: Assembling open-world models for diverse visual tasks. arXiv preprint arXiv:2401.14159 (2024)", + "36. Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 10684-10695 (2022)", + "37. Ruiz, N., Li, Y., Jampani, V., Pritch, Y., Rubinstein, M., Aberman, K.: Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 22500-22510 (2023)", + "38. Schonberger, J.L., Frahm, J.M.: Structure-from-motion revisited. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 4104-4113 (2016)", + "39. Shi, Y., Wang, P., Ye, J., Long, M., Li, K., Yang, X.: Mvdream: Multi-view diffusion for 3d generation. arXiv preprint arXiv:2308.16512 (2023)", + "40. Sun, X., Wu, J., Zhang, X., Zhang, Z., Zhang, C., Xue, T., Tenenbaum, J.B., Freeman, W.T.: Pix3d: Dataset and methods for single-image 3d shape modeling. In: CVPR (2018)", + "41. Wang, H., Sridhar, S., Huang, J., Valentin, J., Song, S., Guibas, L.J.: Normalized object coordinate space for category-level 6d object pose and size estimation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
2642-2651 (2019)", + "42. Wang, J., Rupprecht, C., Novotny, D.: Posediffusion: Solving pose estimation via diffusion-aided bundle adjustment. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9773-9783 (2023)", + "43. Wang, P., Liu, L., Liu, Y., Theobalt, C., Komura, T., Wang, W.: Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689 (2021)", + "44. Wang, Z., Wu, S., Xie, W., Chen, M., Prisacariu, V.A.: Nerf-: Neural radiance fields without known camera parameters. arXiv preprint arXiv:2102.07064 (2021)", + "45. Yariv, L., Gu, J., Kasten, Y., Lipman, Y.: Volume rendering of neural implicit surfaces. Advances in Neural Information Processing Systems 34, 4805-4815 (2021)", + "46. Yen-Chen, L., Florence, P., Barron, J.T., Rodriguez, A., Isola, P., Lin, T.Y.: inerf: Inverting neural radiance fields for pose estimation. In: 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 1323-1330. IEEE (2021)", + "47. Zhang, J., Yang, G., Tulsiani, S., Ramanan, D.: Ners: Neural reflectance surfaces for sparse-view 3d reconstruction in the wild. In: Advances in Neural Information Processing Systems. vol. 34, pp. 29835-29847 (2021)", + "48. Zhang, J.Y., Ramanan, D., Tulsiani, S.: Relpose: Predicting probabilistic relative rotation for single objects in the wild. In: European Conference on Computer Vision. pp. 592-611. Springer (2022)" + ], + "bbox": [ + 212, + 146, + 787, + 715 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "3D Congealing: 3D-Aware Image Alignment in the Wild", + "bbox": [ + 357, + 114, + 730, + 128 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 16 + } +] \ No newline at end of file diff --git a/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/757b034b-7105-4846-a434-665e5b237ea6_model.json b/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/757b034b-7105-4846-a434-665e5b237ea6_model.json new file mode 100644 index 0000000000000000000000000000000000000000..ebcb0a7d6909469a35dc7d0d930c5590492f29e6 --- /dev/null +++ b/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/757b034b-7105-4846-a434-665e5b237ea6_model.json @@ -0,0 +1,2511 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.333, + 0.142, + 0.672, + 0.187 + ], + "angle": 0, + "content": "3D Congealing: 3D-Aware Image Alignment in the Wild" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.212, + 0.787, + 0.245 + ], + "angle": 0, + "content": "Yunzhi Zhang\\(^{1}\\), Zizhang Li\\(^{1}\\), Amit Raj\\(^{2}\\), Andreas Engelhardt\\(^{3}\\), Yuanzhen Li\\(^{2}\\), Tingbo Hou\\(^{4}\\), Jiajun Wu\\(^{1}\\), and Varun Jampani\\(^{5}\\)" + }, + { + "type": "text", + "bbox": [ + 0.428, + 0.255, + 0.576, + 0.269 + ], + "angle": 0, + "content": "1 Stanford University" + }, + { + "type": "text", + "bbox": [ + 0.434, + 0.27, + 0.57, + 0.283 + ], + "angle": 0, + "content": "2 Google DeepMind" + }, + { + "type": "text", + "bbox": [ + 0.418, + 0.283, + 0.586, + 0.297 + ], + "angle": 0, + "content": "3 University of Tübingen" + }, + { + "type": "text", + "bbox": [ + 0.452, + 0.297, + 0.551, + 0.309 + ], + "angle": 0, + "content": "4 Meta GenAI" + }, + { + "type": "text", + "bbox": [ + 0.455, + 0.31, + 0.55, + 0.324 + ], + "angle": 0, + "content": "Stability AI" + }, + { + "type": "list", + "bbox": [ + 0.418, + 0.255, + 0.586, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ 
+ 0.261, + 0.362, + 0.744, + 0.642 + ], + "angle": 0, + "content": "Abstract. We propose 3D Congealing, a novel problem of 3D-aware alignment for 2D images capturing semantically similar objects. Given a collection of unlabeled Internet images, our goal is to associate the shared semantic parts from the inputs and aggregate the knowledge from 2D images to a shared 3D canonical space. We introduce a general framework that tackles the task without assuming shape templates, poses, or any camera parameters. At its core is a canonical 3D representation that encapsulates geometric and semantic information. The framework optimizes for the canonical representation together with the pose for each input image, and a per-image coordinate map that warps 2D pixel coordinates to the 3D canonical frame to account for the shape matching. The optimization procedure fuses prior knowledge from a pre-trained image generative model and semantic information from input images. The former provides strong knowledge guidance for this under-constraint task, while the latter provides the necessary information to mitigate the training data bias from the pre-trained model. Our framework can be used for various tasks such as pose estimation and image editing, achieving strong results on real-world image datasets under challenging illumination conditions and on in-the-wild online image collections. Project page at https://ai.stanford.edu/~yzzhang/projects/3d-congealing/." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.688, + 0.377, + 0.705 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.719, + 0.789, + 0.844 + ], + "angle": 0, + "content": "We propose the task of 3D Congealing, where the goal is to align a collection of images containing semantically similar objects into a shared 3D space. Specifically, we aim to obtain a canonical 3D representation together with the pose and a dense map of 2D-3D correspondence for each image in the collection. The input images may contain object instances belonging to a similar category with varying shapes and textures, and are captured under distinct camera viewpoints and illumination conditions, which all contribute to the pixel-level difference as shown in Figure 1. Despite such inter-image differences, humans excel at aligning such" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.375, + 0.129 + ], + "angle": 0, + "content": "Y. Zhang et al." + }, + { + "type": "image", + "bbox": [ + 0.226, + 0.144, + 0.782, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.273, + 0.788, + 0.343 + ], + "angle": 0, + "content": "Fig. 1: Objects with different shapes and appearances, such as these sculptures, may share similar semantic parts and a similar geometric structure. We study 3D Congealing, inferring and aligning such a shared structure from an unlabeled image collection. Such alignment can be used for tasks such as pose estimation and image editing. See Appendix A for full results." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.372, + 0.786, + 0.403 + ], + "angle": 0, + "content": "images with one another in a geometrically and semantically consistent manner based on their 3D-aware understanding." 
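Concretely, the task outputs described above can be summarized as one record per image collection. The sketch below is purely illustrative (hypothetical names) and simply restates the task definition as a data structure:

```python
from dataclasses import dataclass
from typing import Callable, Sequence
import numpy as np

@dataclass
class CongealingResult:
    """What 3D Congealing recovers for an input collection (illustrative)."""
    theta: np.ndarray            # parameters of the canonical 3D representation
    poses: Sequence[np.ndarray]  # per-image camera pose in the canonical frame
    coord_maps: Sequence[Callable[[np.ndarray], np.ndarray]]  # dense 2D-3D maps
```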
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.407, + 0.789, + 0.573 + ], + "angle": 0, + "content": "Obtaining a canonical 3D representation and grounding input images to the 3D canonical space enable several downstream tasks, such as 6-DoF object pose estimation, pose-aware image filtering, and image editing. Unlike the task of 2D congealing [11, 29, 31], where the aim is to align the 2D pixels across the images, 3D Congealing requires aggregating the information from the image collection altogether and forming the association among images in 3D. The task is also closely related to 3D reconstruction from multiview images, with a key distinction in the problem setting, as inputs here do not necessarily contain identical objects but rather semantically similar ones. Such a difference opens up the possibility of image alignment from readily available image collections on the Internet, e.g., online search results, landmark images, and personal photo collections." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.579, + 0.789, + 0.746 + ], + "angle": 0, + "content": "3D Congealing represents a challenging problem, particularly for arbitrary images without camera pose or lighting annotations, even when the input images contain identical objects [1,4,20,44], because the solutions for pose and shape are generally entangled. On the one hand, the definition of poses is specific to the coordinate frame of the shape; on the other hand, the shape optimization is typically guided by the pixel-wise supervision of images under the estimated poses. To overcome the ambiguity in jointly estimating poses and shapes, prior works mostly start from noisy pose initializations [20], data-specific initial pose distributions [25,44], or rough pose annotations such as pose quadrants [1]. They then perform joint optimization for a 3D representation using an objective of reconstructing input image pixels [1,20,44] or distribution matching [25]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.789, + 0.842 + ], + "angle": 0, + "content": "In this work, instead of relying on initial poses as starting points for shape reconstruction, we propose to tackle the joint optimization problem from a different perspective. We first obtain a plausible 3D shape that is compliant with the input image observations using pre-trained generative models, and then use semantic-aware visual features, e.g., pre-trained features from DINO [2,30] and Stable-Diffusion [36], to register input images to the 3D shape. Compared to" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.358, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D Congealing: 3D-Aware Image Alignment in the Wild" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.177 + ], + "angle": 0, + "content": "photometric reconstruction losses, these features are more tolerant of variance in object identities among image inputs." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.178, + 0.787, + 0.313 + ], + "angle": 0, + "content": "We make deliberate design choices to instantiate such a framework that fuses the knowledge from pre-trained text-to-image (T2I) generative models with real image inputs. 
First, to utilize the prior knowledge from generative models, we opt to apply a T2I personalization method, Textual Inversion [7], which aims to find the most suitable text embedding to reconstruct the input images via the pre-trained model. Furthermore, a semantic-aware distance is proposed to mitigate the appearance discrepancy between the rendered image and the input photo collection. Finally, a canonical coordinate mapping is learned to find the correspondence between 3D canonical representation and 2D input images." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.314, + 0.787, + 0.388 + ], + "angle": 0, + "content": "To prove the effectiveness of the proposed framework, we compare the proposed method against several baselines on the task of pose estimation on a dataset with varying illuminations and show that our method surpasses all the baselines significantly. We also demonstrate several applications of the proposed method, including image editing and object alignment on web image data." + }, + { + "type": "text", + "bbox": [ + 0.24, + 0.389, + 0.495, + 0.404 + ], + "angle": 0, + "content": "In summary, our contributions are:" + }, + { + "type": "text", + "bbox": [ + 0.223, + 0.407, + 0.785, + 0.437 + ], + "angle": 0, + "content": "1. We propose a novel task of 3D Congealing that involves aligning images of semantically similar objects in a shared 3D space." + }, + { + "type": "text", + "bbox": [ + 0.222, + 0.438, + 0.785, + 0.482 + ], + "angle": 0, + "content": "2. We develop a framework tackling the proposed task and demonstrate several applications using the obtained 2D-3D correspondence, such as pose estimation and image editing." + }, + { + "type": "text", + "bbox": [ + 0.222, + 0.483, + 0.785, + 0.513 + ], + "angle": 0, + "content": "3. We show the effectiveness and applicability of the proposed method on a diverse range of in-the-wild Internet images." + }, + { + "type": "list", + "bbox": [ + 0.222, + 0.407, + 0.785, + 0.513 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.536, + 0.396, + 0.552 + ], + "angle": 0, + "content": "2 Related Works" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.568, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Image Alignment and Congealing. The task of image alignment for a single instance, possibly under varying illuminations, has been relatively well-studied [24, 47]. To align images containing different instances from the same category with small deformations, one line of approach is known as imageCongealing [12, 13, 18, 27, 29, 31]. In particular, Neural Congealing [29] learns atlases to capture common semantic features from input images and recovers a dense mapping between input images and the atlases. GANgealing [31] uses a spatial transformer to map a randomly generated image from a GAN [8] to a jointly aligned space. These 2D-warping-based methods are typically applied to source and target image pairs with no or small camera rotation, and work best on in-plane transformation, while our proposed framework handles a larger variation of viewpoints due to 3D reasoning. On the other hand, DIFNet [6] exemplifies an approach of joint optimization of shape template and deformation, provided with the 3D shape. In comparison, we propose a template-followed-by-implicit-deformation approach and assume a single 2D observation for each instance instead of 3D inputs. 
The proposed approach exploits the fact that a \"good\" template, i.e., one that captures common geometric structure of inputs, is not unique and a solution can be effectively found before knowing input image poses. Compared to joint" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.374, + 0.129 + ], + "angle": 0, + "content": "Y. Zhang et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.239 + ], + "angle": 0, + "content": "optimization methods, it reduces task complexity by providing such an anchoring template to make later image registration easier. Finally, this work provides qualitative results on aligning images cross instances with large deformation. The output global alignment of input instances and articulation-free templates can be useful for downstream reconstruction with image-specific articulation, which is beyond the scope of this work." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.272, + 0.79, + 0.501 + ], + "angle": 0, + "content": "Object Pose Estimation. Object pose estimation aims to estimate the pose of an object instance with respect to the coordinate frame of its 3D shape. Classical methods for pose estimation recover poses from multi-view images using pixel- or feature-level matching to find the alignment between different images [38]. These methods are less suitable in the in-the-wild setting due to the increasing appearance variance. Recent methods tackle this task by supervised learning wht pose annotations [19,42,48], but it remains challenging for these methods to generalize beyond the training distribution. Another class of methods uses an analysis-by-synthesis framework to estimate pose given category-specific templates [3] or a pre-trained 3D representation [46]; these assumptions make it challenging to apply these methods to generic objects in the real world. ID-Pose [5] leverages Zero-1-to-3 [21], a view synthesis model, and optimizes for the relative pose given a source and a target image. Goodwin et al. [9] use pre-trained self-supervised features for matching, instead of doing it at the pixel level, but require both RGB and depth inputs." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.534, + 0.79, + 0.671 + ], + "angle": 0, + "content": "Shape Reconstruction from Image Collections. Neural rendering approaches [26, 43, 45] use images with known poses to reconstruct the 3D shape and appearance from a collection of multiview images. The assumptions of known poses and consistent illumination prevent these methods from being applied in the wild. Several works have extended these approaches to relax the pose assumption, proposing to handle noisy or unknown camera poses of input images through joint optimization of poses and 3D representation [4, 20, 44]. SAMURAI [1] further handles scenes under various illuminations, but requires access to coarse initial poses in the form of pose quadrant annotations." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.704, + 0.79, + 0.841 + ], + "angle": 0, + "content": "3D Distillation from 2D Diffusion Models. Recently, text-to-image diffusion models have shown great advancement in 2D image generation and are used for 3D asset distillation with conditions such as texts [32,39], single image [21], and image collections [33]. 
DreamFusion [32] has proposed to apply gradients computed from pre-trained text-to-image models to the optimized 3D representations. DreamBooth3D [33] proposed to utilize fine-tuned diffusion model [37] for the image-conditioned 3D reconstruction task. These works provide a viable solution for 3D reconstruction from image collections but without grounding the inputs to the 3D space as in ours." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.358, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "3D Congealing: 3D-Aware Image Alignment in the Wild" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.148, + 0.788, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.352, + 0.788, + 0.45 + ], + "angle": 0, + "content": "Fig. 2: Pipeline. Given a collection of in-the-wild images capturing similar objects as inputs, we develop a framework that \"congeals\" these images in 3D. The core representation consists of a canonical 3D shape that captures the geometric structure shared among the inputs, together with a set of coordinate mappings that register the input images to the canonical shape. The framework utilizes the prior knowledge of plausible 3D shapes from a generative model, and aligns images in the semantic space using pre-trained semantic feature extractors." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.468, + 0.331, + 0.484 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.503, + 0.789, + 0.639 + ], + "angle": 0, + "content": "We formulate the problem of 3D Congealing as follows. Given a set of \\(N\\) object-centric images \\(\\mathcal{D} = \\{x_{n}\\}_{n = 1}^{N}\\) that captures objects sharing semantic components, e.g., objects from one category, we seek to align the object instances in these images into a canonical 3D representation, e.g., NeRF [26], parameterized by \\(\\theta\\). We refer to the coordinate frame of this 3D representation as the canonical frame. We also recover the camera pose of each observation \\(x\\in \\mathcal{D}\\) in the canonical frame, denoted using a pose function \\(\\pi :x\\mapsto (\\xi ,\\kappa)\\) where \\(\\xi\\) represents the object pose in SE(3) and \\(\\kappa\\) is the camera intrinsic parameters. We assume access to instance masks, which can be obtained using an off-the-shelf segmentation method [16]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.64, + 0.788, + 0.686 + ], + "angle": 0, + "content": "The 3D representation should be consistent with the physical prior of objects in the natural world, and with input observations both geometrically and semantically. These constraints can be translated into an optimization problem:" + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.698, + 0.788, + 0.722 + ], + "angle": 0, + "content": "\\[\n\\max _ {\\pi , \\theta} p _ {\\Theta} (\\theta), \\text {s . 
t.} x = \\mathcal{R}(\\pi(x), \\theta), \\forall x \\in \\mathcal{D}, \\tag{1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.734, + 0.788, + 0.794 + ], + "angle": 0, + "content": "where \\( p_{\\Theta} \\) is a prior distribution for the 3D representation parameter \\( \\theta \\) that encourages physically plausible solutions, \\( \\mathcal{R} \\) is a predefined rendering function that enforces geometric consistency, and the equality constraint on image reconstruction enforces compliance with input observations." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.788, + 0.842 + ], + "angle": 0, + "content": "We will now describe an instantiation of the 3D prior \\( p_{\\Theta} \\) (Sec. 3.1), an image distance function that helps enforce the equality constraint (Sec. 3.2), followed by the 3D Congealing optimization (Sec. 3.3) to estimate input image poses \\( \\pi \\)." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.375, + 0.129 + ], + "angle": 0, + "content": "Y. Zhang et al." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.147, + 0.578, + 0.162 + ], + "angle": 0, + "content": "3.1 3D Guidance from Generative Models" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.17, + 0.788, + 0.248 + ], + "angle": 0, + "content": "As illustrated in the left part of Figure 2, we extract the prior knowledge for 3D representations \\( p_{\\Theta}(\\cdot) \\) from a pre-trained text-to-image (T2I) model such as Stable-Diffusion [36]. DreamFusion [32] proposes to turn a text prompt \\( y \\) into a 3D representation \\( \\theta \\) using the following Score Distillation Sampling (SDS) objective, leveraging a T2I diffusion model with frozen parameters \\( \\phi \\)," + }, + { + "type": "equation", + "bbox": [ + 0.423, + 0.257, + 0.788, + 0.279 + ], + "angle": 0, + "content": "\\[\n\\min_{\\theta} \\mathbb{E}_{x \\in \\mathcal{D}(\\theta)} \\mathcal{L}_{\\text{diff}}^{\\phi}(x, y). \\tag{2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.287, + 0.789, + 0.336 + ], + "angle": 0, + "content": "Here \\(\\mathcal{D}(\\theta) \\coloneqq \\{\\mathcal{R}(\\pi, \\theta) \\mid \\pi \\sim p_{\\Pi}(\\cdot)\\}\\) contains images rendered from the 3D representation \\(\\theta\\) under a prior camera distribution \\(p_{\\Pi}(\\cdot)\\), and \\(\\mathcal{L}_{\\mathrm{diff}}^{\\phi}\\) is the training objective of image diffusion models specified as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.277, + 0.345, + 0.788, + 0.364 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\mathrm{diff}}^{\\phi}(x, y) := \\mathbb{E}_{t \\sim \\mathcal{U}([0, 1]), \\epsilon \\sim \\mathcal{N}(\\mathbf{0}, I)} \\left[ \\omega(t) \\| \\epsilon_{\\phi}(\\alpha_{t} x + \\sigma_{t} \\epsilon, y, t) - \\epsilon \\|_{2}^{2} \\right], \\tag{3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.371, + 0.789, + 0.415 + ], + "angle": 0, + "content": "where \\(\\epsilon_{\\phi}\\) is the pre-trained denoising network, \\(\\omega(\\cdot)\\) is the timestep-dependent weighting function, \\(t\\) is the diffusion timestep, and \\(\\alpha_{t}, \\sigma_{t}\\) are timestep-dependent coefficients from the diffusion model schedule."
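In practice, Eq. (2) is optimized with the score-distillation trick of DreamFusion [32]: the noise residual is treated as a constant and multiplied against the rendering, so that the parameter gradient matches Eq. (4) below. A self-contained toy sketch, with render and eps_phi as placeholders and the weighting omega(t) set to 1:

```python
import torch

theta = torch.randn(4096, requires_grad=True)   # toy 3D representation parameters
opt = torch.optim.Adam([theta], lr=1e-3)
y_star = torch.zeros(768)                       # frozen inverted prompt embedding

def render(pose, th):                           # placeholder for R(pose, theta)
    return th.reshape(1, 64, 64).expand(3, -1, -1) * pose

def eps_phi(x_t, y, t):                         # placeholder frozen denoiser
    return x_t * 0.0

for step in range(10):
    pose = torch.rand(()) + 0.5                 # pose sampled from a prior p_Pi
    x = render(pose, theta)                     # x in D(theta), as in Eq. (2)
    t, eps = torch.rand(()), torch.randn_like(x)
    x_t = (1 - t).sqrt() * x.detach() + t.sqrt() * eps  # noised rendering
    residual = eps_phi(x_t, y_star, t) - eps    # no gradient flows through eps_phi
    loss = (residual * x).sum()                 # d loss / d theta matches Eq. (4)
    opt.zero_grad(); loss.backward(); opt.step()
```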
+ }, + { + "type": "text", + "bbox": [ + 0.215, + 0.416, + 0.788, + 0.448 + ], + "angle": 0, + "content": "The above loss can be used to guide the optimization of a 3D representation \\(\\theta\\), whose gradient is approximated by" + }, + { + "type": "equation", + "bbox": [ + 0.269, + 0.456, + 0.788, + 0.489 + ], + "angle": 0, + "content": "\\[\n\\nabla_{\\theta} \\mathcal{L}_{\\mathrm{diff}}^{\\phi}(x = \\mathcal{R}(\\xi, \\kappa, \\theta), y) \\approx \\mathbb{E}_{t, \\epsilon} \\left[ \\omega(t) (\\epsilon_{\\phi}(\\alpha_{t} x + \\sigma_{t} \\epsilon, y, t) - \\epsilon) \\frac{\\partial x}{\\partial \\theta} \\right], \\tag{4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.497, + 0.789, + 0.542 + ], + "angle": 0, + "content": "where \\(\\xi\\) and \\(\\kappa\\) are the extrinsic and intrinsic camera parameters, respectively. The derived gradient approximation is adopted by later works such as MVDream [39], which we use as the backbone." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.542, + 0.789, + 0.603 + ], + "angle": 0, + "content": "The original SDS objective optimizes for a text-conditioned 3D shape with a user-specified text prompt \\( y \\) and does not consider image inputs. Here, we use the technique from Textual Inversion [7] to recover the most suitable text prompt \\( y^{*} \\) that explains the input images, defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.4, + 0.613, + 0.788, + 0.636 + ], + "angle": 0, + "content": "\\[\ny^{*} = \\arg \\min_{y} \\mathbb{E}_{x \\in \\mathcal{D}} \\mathcal{L}_{\\text{diff}}^{\\phi}(x, y). \\tag{5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.644, + 0.789, + 0.768 + ], + "angle": 0, + "content": "Eq. (2) and Eq. (5) differ in both the sources of the observations \\( x \\) (an infinite dataset of rendered images \\( \\mathcal{D}(\\theta) \\) for the former, and real data \\( \\mathcal{D} \\) for the latter) and the parameters being optimized over (\\( \\theta \\) and \\( y \\), respectively). In our framework, we incorporate the real image information into the SDS guidance by first solving for \\( y^{*} \\) (Eq. (5)) and keeping it frozen when optimizing for \\( \\theta \\) (Eq. (2)). The diffusion model parameter \\( \\phi \\) is frozen throughout the process, requiring significantly less memory than the alternative of integrating input image information by finetuning \\( \\phi \\) as in DreamBooth3D [33]." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.786, + 0.611, + 0.803 + ], + "angle": 0, + "content": "3.2 Semantic Consistency from Deep Features" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.788, + 0.842 + ], + "angle": 0, + "content": "The generative model prior from Sec. 3.1 effectively constrains the search space for the solutions. However, the objectives from Eqs. (2) and (5) use the input image" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.358, + 0.115, + 0.731, + 0.129 + ], + "angle": 0, + "content": "3D Congealing: 3D-Aware Image Alignment in the Wild" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.192 + ], + "angle": 0, + "content": "information only indirectly, via a text embedding \\( y^{*} \\). To explain the relative geometric relation among input images, we explicitly recover the pose of each input image w.r.t. 
\\( \\theta \\), as illustrated in Figure 2 (middle) and explained below." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.193, + 0.785, + 0.268 + ], + "angle": 0, + "content": "To align input images, we use an image distance metric defined by semantic feature dissimilarity. In particular, pre-trained deep models such as DINO [2,30] have been shown to be effective semantic feature extractors. Denote such a model as \\( f \\) parameterized by \\( \\zeta \\). The similarity of two pixel locations \\( u_{1} \\) and \\( u_{2} \\) from two images \\( x_{1} \\) and \\( x_{2} \\), respectively, can be measured with" + }, + { + "type": "equation", + "bbox": [ + 0.325, + 0.279, + 0.786, + 0.315 + ], + "angle": 0, + "content": "\\[\nd_{\\zeta}^{u_{1}, u_{2}}(x_{1}, x_{2}) := 1 - \\frac{\\langle \\left[ f_{\\zeta}(x_{1}) \\right]_{u_{1}}, \\left[ f_{\\zeta}(x_{2}) \\right]_{u_{2}} \\rangle}{\\| \\left[ f_{\\zeta}(x_{1}) \\right]_{u_{1}} \\|_{2} \\| \\left[ f_{\\zeta}(x_{2}) \\right]_{u_{2}} \\|_{2}}, \\tag{6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.324, + 0.78, + 0.34 + ], + "angle": 0, + "content": "where \\([\\cdot]\\) is an indexing operator. It thereafter defines an image distance function" + }, + { + "type": "equation", + "bbox": [ + 0.373, + 0.351, + 0.786, + 0.385 + ], + "angle": 0, + "content": "\\[\n\\left\\| x_{1} - x_{2} \\right\\|_{d_{\\zeta}} := \\frac{1}{HW} \\sum_{u} d_{\\zeta}^{u, u}(x_{1}, x_{2}), \\tag{7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.395, + 0.786, + 0.408 + ], + "angle": 0, + "content": "where \\( x_{1} \\) and \\( x_{2} \\) have resolution \\( H\\times W \\), and the sum is over all image coordinates." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.41, + 0.785, + 0.485 + ], + "angle": 0, + "content": "The choice of semantic-aware image distance, instead of photometric differences as in the classical problem setting of multiview 3D reconstruction [38,43,45], leads to solutions that maximally align input images to the 3D representation with more tolerance towards variance in object shape, texture, and environmental illuminations among input images, which is crucial in our problem setting." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.507, + 0.371, + 0.523 + ], + "angle": 0, + "content": "3.3 Optimization" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.532, + 0.784, + 0.562 + ], + "angle": 0, + "content": "The Canonical Shape and Image Poses. Combining Secs. 3.1 and 3.2, we convert the original problem in Eq. (1) into" + }, + { + "type": "equation", + "bbox": [ + 0.313, + 0.575, + 0.786, + 0.611 + ], + "angle": 0, + "content": "\\[\n\\min_{\\pi, \\theta} \\underbrace{\\mathbb{E}_{x \\in \\mathcal{D}(\\theta)} \\mathcal{L}_{\\text{diff}}^{\\phi}(x, y^{*})}_{\\text{generative model guidance}} + \\lambda \\underbrace{\\mathbb{E}_{x \\in \\mathcal{D}} \\| \\mathcal{R}(\\pi(x), \\theta) - x \\|_{d}}_{\\text{data reconstruction}}, \\tag{8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.622, + 0.786, + 0.742 + ], + "angle": 0, + "content": "where \\( y^{*} \\) comes from Eq. (5) and \\( \\lambda \\) is a loss weight. Compared to Eq. 
(5), here the first term instantiates the generative modeling prior and the second term is a soft constraint of reconstructing input observations. Specifically, \\( d = \\lambda_{\\zeta}d_{\\zeta} + \\lambda_{\\mathrm{IoU}}d_{\\mathrm{IoU}} \\), where \\( d_{\\zeta} \\) is the semantic-space distance metric from Sec. 3.2, and \\( d_{\\mathrm{IoU}} \\) is the Intersection-over-Union (IoU) loss for masks, \\( \\| m_1 - m_2 \\|_{d_{\\mathrm{IoU}}} \\coloneqq 1 - (\\| m_1 \\odot m_2 \\|_1) / (\\| m_1 \\|_1 + \\| m_2 \\|_1 - \\| m_1 \\odot m_2 \\|_1) \\), where \\( m_1 \\) and \\( m_2 \\) are image masks, which in Eq. (8) are set to be the mask rendering and the instance mask for \\( x \\). The use of both \\( d_{\\zeta} \\) and \\( d_{\\mathrm{IoU}} \\) tolerates shape variance among input instances." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.743, + 0.786, + 0.803 + ], + "angle": 0, + "content": "For the shape representation, we follow NeRF [26] and use neural networks \\(\\sigma_{\\theta}:\\mathbb{R}^{3}\\to \\mathbb{R}\\) and \\(c_{\\theta}:\\mathbb{R}^{3}\\rightarrow \\mathbb{R}^{3}\\) to map a 3D spatial coordinate to a density and an RGB value, respectively. The rendering operation \\(\\mathcal{R}\\) is the volumetric rendering operation specified as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.35, + 0.814, + 0.786, + 0.845 + ], + "angle": 0, + "content": "\\[\n\\mathcal{R}(r, \\xi, \\theta; c_{\\theta}) = \\int T(t) \\sigma_{\\theta}(\\xi r(t)) c_{\\theta}(\\xi r(t)) \\mathrm{d}t, \\tag{9}\n\\]" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.375, + 0.129 + ], + "angle": 0, + "content": "Y. Zhang et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.146, + 0.788, + 0.207 + ], + "angle": 0, + "content": "where \\( T(t) = \\exp \\left(-\\int \\sigma_{\\theta}(r(t'))\\mathrm{d}t'\\right) \\), \\( r: \\mathbb{R} \\to \\mathbb{R}^3 \\) is a ray shooting from the camera center to the image plane, parameterized by the camera location and the ray's direction, and \\( \\xi \\) is the relative pose that transforms the ray from the camera frame to the canonical frame." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.22, + 0.788, + 0.295 + ], + "angle": 0, + "content": "Forward Canonical Coordinate Mappings. After the above optimization, each image \\(x\\) from the input image collection can be \"congealed\" to the shape \\(\\theta\\) via a canonical coordinate mapping, i.e., a forward warping operation \\(\\varPhi_x^{\\mathrm{fwd}}: \\mathbb{R}^2 \\to \\mathbb{R}^3\\) that maps a 2D image coordinate to a 3D coordinate in the canonical frame of reference as illustrated in Figure 2. \\(\\varPhi_x^{\\mathrm{fwd}}\\) consists of the following two operations." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.296, + 0.788, + 0.327 + ], + "angle": 0, + "content": "First, we warp a coordinate \\( u \\) from the real image \\( x \\) to the rendering of the canonical shape under its pose \\( \\pi(x) \\), denoted as \\( \\tilde{x} \\coloneqq \\mathcal{R}(\\pi(x), \\theta) \\). 
Specifically," + }, + { + "type": "equation", + "bbox": [ + 0.238, + 0.334, + 0.788, + 0.358 + ], + "angle": 0, + "content": "\\[\n\\varPhi_{\\tilde{x} \\leftarrow x}^{\\mathrm{2D} \\leftarrow \\mathrm{2D}}(u) := \\arg \\min_{\\tilde{u}} d_{\\zeta}^{\\tilde{u}, u}(\\tilde{x}, x) + \\lambda_{\\ell_{2}} \\| \\tilde{u} - u \\|_{2}^{2} + \\lambda_{\\text{smooth}} \\mathcal{L}_{\\text{smooth}}(\\tilde{u}, u), \\tag{10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.366, + 0.788, + 0.488 + ], + "angle": 0, + "content": "where \\(d_{\\zeta}\\) follows Eq. (6), the 2D coordinates \\(u\\) and \\(\\tilde{u}\\) are normalized into range [0,1] before computing the \\(\\ell_2\\) norm, the smoothness term \\(\\mathcal{L}_{\\mathrm{smooth}}\\) is specified in Appendix B, and \\(\\lambda_{\\ell_2}\\) and \\(\\lambda_{\\mathrm{smooth}}\\) are scalar weights. This objective searches for a new image coordinate \\(\\tilde{u}\\) (from the rendering \\(\\tilde{x}\\)) that shares a semantic feature similar to \\(u\\) (from the real image \\(x\\)), and ensures that \\(\\tilde{u}\\) stays in the local neighborhood of \\(u\\) via a soft constraint of the coordinate distance. Afterward, a 2D-to-3D operation takes in the warped coordinate from above and outputs its 3D location in the normalized object coordinate space (NOCS) [41] of \\(\\theta\\):" + }, + { + "type": "equation", + "bbox": [ + 0.381, + 0.495, + 0.788, + 0.513 + ], + "angle": 0, + "content": "\\[\n\\Phi_{x}^{\\mathrm{3D} \\leftarrow \\mathrm{2D}}(\\tilde{u}) := \\left[ \\mathcal{R}_{\\text{NOCS}}(\\pi(x), \\theta) \\right]_{\\tilde{u}}, \\tag{11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.521, + 0.788, + 0.596 + ], + "angle": 0, + "content": "where \\(\\mathcal{R}_{\\mathrm{NOCS}}\\) is identical to \\(\\mathcal{R}\\) from Eq. (9), but replacing the color field \\(c_{\\theta}\\) with a canonical object coordinate field, \\(c_{\\mathrm{NOCS}}: \\mathbb{R}^3 \\to \\mathbb{R}^3\\), \\(p \\mapsto (p - p_{\\mathrm{min}}) / (p_{\\mathrm{max}} - p_{\\mathrm{min}})\\), where \\(p_{\\mathrm{min}}\\) and \\(p_{\\mathrm{max}}\\) are the two opposite corners of the canonical shape's bounding box. These bounding boxes are determined by the mesh extracted from the density neural field \\(\\sigma_{\\theta}\\) using the Marching Cubes [22] algorithm." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.596, + 0.788, + 0.629 + ], + "angle": 0, + "content": "Combining the above, given an input image coordinate \\( u \\), \\( \\varPhi_x^{\\mathrm{fwd}}(u) := \\varPhi_x^{\\mathrm{3D} \\leftarrow \\mathrm{2D}} \\circ \\varPhi_{\\tilde{x} \\leftarrow x}^{\\mathrm{2D} \\leftarrow \\mathrm{2D}}(u) \\) identifies a 3D location in the canonical frame corresponding to \\( u \\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.639, + 0.788, + 0.698 + ], + "angle": 0, + "content": "Reverse Canonical Coordinate Mappings. Each image can be \"uncongealed\" from the canonical shape using \\(\\varPhi_x^{\\mathrm{rev}}:\\mathbb{R}^3\\to \\mathbb{R}^2\\), which is the reverse operation of \\(\\varPhi_x^{\\mathrm{fwd}}\\) and is approximately computed via nearest-neighbor inversion as explained below."
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.698, + 0.788, + 0.731 + ], + "angle": 0, + "content": "Given a 3D location within a unit cube, \\( p \\in [0,1]^3 \\), \\( \\varPhi_x^{\\mathrm{rev}}(p) := \\varPhi_{x \\leftarrow \\tilde{x}}^{\\mathrm{2D} \\leftarrow \\mathrm{2D}} \\circ \\varPhi_x^{\\mathrm{2D} \\leftarrow \\mathrm{3D}}(p) \\). In particular," + }, + { + "type": "equation", + "bbox": [ + 0.355, + 0.738, + 0.788, + 0.761 + ], + "angle": 0, + "content": "\\[\n\\Phi_{x}^{\\mathrm{2D} \\leftarrow \\mathrm{3D}}(p) := \\arg \\min_{\\tilde{u}} \\| p - \\Phi_{x}^{\\mathrm{3D} \\leftarrow \\mathrm{2D}}(\\tilde{u}) \\|_{2} \\tag{12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.768, + 0.788, + 0.815 + ], + "angle": 0, + "content": "is an operation that takes in a 3D coordinate \\( p \\) in the canonical frame and searches for a 2D image coordinate whose NOCS value is the closest to \\( p \\), and \\( \\varPhi_{x\\leftarrow \\tilde{x}}^{\\mathrm{2D}\\leftarrow \\mathrm{2D}} \\) is computed via inverting \\( \\varPhi_{\\tilde{x}\\leftarrow x}^{\\mathrm{2D}\\leftarrow \\mathrm{2D}} \\) from Eq. (10)," + }, + { + "type": "equation", + "bbox": [ + 0.352, + 0.821, + 0.788, + 0.844 + ], + "angle": 0, + "content": "\\[\n\\Phi_{x \\leftarrow \\tilde{x}}^{\\mathrm{2D} \\leftarrow \\mathrm{2D}}(\\tilde{u}) := \\arg \\min_{u} \\| \\tilde{u} - \\Phi_{\\tilde{x} \\leftarrow x}^{\\mathrm{2D} \\leftarrow \\mathrm{2D}}(u) \\|_{2}. \\tag{13}\n\\]" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.358, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "3D Congealing: 3D-Aware Image Alignment in the Wild" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "image", + "bbox": [ + 0.23, + 0.142, + 0.782, + 0.414 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.431, + 0.788, + 0.474 + ], + "angle": 0, + "content": "Fig. 3: Pose Estimation from Multi-Illumination Captures. The figure shows 4 example scenes from the NAVI dataset, displaying the real image inputs, canonical shapes under estimated poses, and the canonical coordinate maps." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.491, + 0.789, + 0.553 + ], + "angle": 0, + "content": "In summary, the above procedure establishes the 2D-3D correspondence between an input image \\( x \\) and the canonical shape via \\( \\varPhi_x^{\\mathrm{fwd}} \\), and defines the dense 2D-2D correspondences between two images \\( x_1, x_2 \\) via \\( \\varPhi_{x_2}^{\\mathrm{rev}} \\circ \\varPhi_{x_1}^{\\mathrm{fwd}} \\), which enables image editing (Figure 8). The full framework is described in Algorithm 1." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.574, + 0.457, + 0.59 + ], + "angle": 0, + "content": "3.4 Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.599, + 0.49, + 0.688 + ], + "angle": 0, + "content": "Input images are cropped with the tightest bounding box around the foreground masks. The masks come from dataset annotations, if available, or from Grounded-SAM [16, 35], an off-the-shelf segmentation model." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.69, + 0.49, + 0.764 + ], + "angle": 0, + "content": "Across all experiments, we optimize for \\( y^{*} \\) (Algorithm 1, line 2) for 1,000 iterations using an AdamW [23] optimizer with learning rate 0.02 and weight decay 0.01. 
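The nearest-neighbor inversion of Eqs. (12)-(13) above is, in effect, a lookup against the rendered NOCS image. A small illustrative NumPy sketch, assuming the per-pixel NOCS rendering of Eq. (11) is available as an (H, W, 3) array:

```python
import numpy as np

def phi_2d_from_3d(p: np.ndarray, nocs: np.ndarray) -> tuple[int, int]:
    """Eq. (12): nearest-neighbor inversion against a rendered NOCS image.

    p: query 3D point in the canonical unit cube, shape (3,).
    nocs: (H, W, 3) NOCS rendering of the canonical shape under pose pi(x),
          i.e., Phi^{3D<-2D} evaluated at every pixel.
    Returns the pixel whose canonical coordinate is closest to p.
    """
    H, W, _ = nocs.shape
    d2 = ((nocs.reshape(-1, 3) - p) ** 2).sum(axis=-1)  # squared distances
    idx = int(d2.argmin())
    return idx // W, idx % W

# Toy usage with a random stand-in NOCS rendering:
uv = phi_2d_from_3d(np.array([0.5, 0.5, 0.5]), np.random.rand(64, 64, 3))
```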
We optimize for \\( \\theta \\)" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.789, + 0.81 + ], + "angle": 0, + "content": "(line 3) with \\(\\lambda = 0\\) for 10,000 iterations, with AdamW and learning rate 0.001. The NeRF model \\(\\theta\\) has 12.6M parameters. It is frozen afterwards and defines the coordinate frame for poses." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.811, + 0.789, + 0.842 + ], + "angle": 0, + "content": "Since directly optimizing poses and camera parameters with gradient descents easily falls into local minima [20], we initialize \\(\\pi\\) using an analysis-by-synthesis" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.599, + 0.72, + 0.613 + ], + "angle": 0, + "content": "1: procedure RUN \\((\\mathcal{D} = \\{x_{n}\\}_{n = 1}^{N})\\)" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.612, + 0.693, + 0.622 + ], + "angle": 0, + "content": "2: \\(y^{*}\\gets\\) Solution to Eq. (5)" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.622, + 0.687, + 0.632 + ], + "angle": 0, + "content": "3: Optimize \\(\\theta\\) with Eq. (8)" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.632, + 0.717, + 0.642 + ], + "angle": 0, + "content": "4: Sample pose candidates \\(\\{\\xi_i\\}_i\\)" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.642, + 0.785, + 0.652 + ], + "angle": 0, + "content": "5: for \\(n\\gets 1\\) to \\(N\\) do \\(\\triangleright\\) Pose initialization" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.652, + 0.776, + 0.665 + ], + "angle": 0, + "content": "6: \\(\\pi (x_{n})\\gets \\arg \\min_{\\xi_{i}}\\| \\mathcal{R}(\\xi ,\\theta) - x_{n}\\|_{d_{\\zeta}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.665, + 0.596, + 0.674 + ], + "angle": 0, + "content": "7: end for" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.674, + 0.765, + 0.687 + ], + "angle": 0, + "content": "8: Optimize \\(\\pi (x_{n})\\) with Eq. (8) for all \\(n\\)" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.687, + 0.746, + 0.698 + ], + "angle": 0, + "content": "9: Determine \\(\\Phi_{x_n}^{\\mathrm{fwd}}\\) and \\(\\Phi_{x_n}^{\\mathrm{rev}}\\) for all \\(n\\)" + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.698, + 0.751, + 0.712 + ], + "angle": 0, + "content": "10: return \\(\\theta, \\pi, \\{\\Phi_{x_n}^{\\mathrm{fwd}}\\}_{n=1}^N, \\{\\Phi_{x_n}^{\\mathrm{rev}}\\}_{n=1}^N\\)" + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.712, + 0.624, + 0.723 + ], + "angle": 0, + "content": "11: end procedure" + }, + { + "type": "list", + "bbox": [ + 0.501, + 0.599, + 0.785, + 0.723 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.551, + 0.73, + 0.733, + 0.743 + ], + "angle": 0, + "content": "Algorithm 1: Overview." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.375, + 0.129 + ], + "angle": 0, + "content": "Y. Zhang et al." + }, + { + "type": "table", + "bbox": [ + 0.256, + 0.144, + 0.744, + 0.279 + ], + "angle": 0, + "content": "
<table><tr><th>Labels</th><th>Methods</th><th colspan="2">Rotation°↓</th><th colspan="2">Translation↓</th></tr>
<tr><th></th><th></th><th>S_C</th><th>~S_C</th><th>S_C</th><th>~S_C</th></tr>
<tr><td rowspan="3">Pose</td><td>NeROIC [17]</td><td>42.11</td><td>-</td><td>0.09</td><td>-</td></tr>
<tr><td>NeRS [47]</td><td>122.41</td><td>123.63</td><td>0.49</td><td>0.52</td></tr>
<tr><td>SAMURAI [1]</td><td>26.16</td><td>36.59</td><td>0.24</td><td>0.35</td></tr>
<tr><td rowspan="5">None</td><td>GNeRF [25]</td><td>93.15</td><td>80.22</td><td>1.02</td><td>1.04</td></tr>
<tr><td>PoseDiffusion [42]</td><td>46.79</td><td>46.34</td><td>0.81</td><td>0.90</td></tr>
<tr><td>Ours (3 seeds)</td><td>26.97±2.24</td><td>32.56±2.90</td><td>0.40±0.01</td><td>0.41±0.04</td></tr>
<tr><td>Ours (No Pose Init)</td><td>53.45</td><td>57.87</td><td>0.97</td><td>0.96</td></tr>
<tr><td>Ours (No IoU Loss)</td><td>31.29</td><td>31.15</td><td>0.87</td><td>0.85</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.283, + 0.788, + 0.354 + ], + "angle": 0, + "content": "Table 1: Pose Estimation from Multi-Illumination Image Captures. Our method performs better than both GNeRF and PoseDiffusion with the same input information, and on par with SAMURAI which additionally assumes camera pose direction as inputs. Different random seeds lead to different canonical shapes, but our method is robust to such variations. \\(\\pm\\) denotes means followed by standard deviations." + }, + { + "type": "table", + "bbox": [ + 0.217, + 0.362, + 0.787, + 0.415 + ], + "angle": 0, + "content": "
<table><tr><th>Methods</th><th colspan="2">Bed</th><th colspan="2">Bookcase</th><th colspan="2">Chair</th><th colspan="2">Desk</th><th colspan="2">Sofa</th><th colspan="2">Table</th><th colspan="2">Wardrobe</th><th colspan="2">Overall</th></tr>
<tr><th></th><th>R°↓</th><th>T↓</th><th>R°↓</th><th>T↓</th><th>R°↓</th><th>T↓</th><th>R°↓</th><th>T↓</th><th>R°↓</th><th>T↓</th><th>R°↓</th><th>T↓</th><th>R°↓</th><th>T↓</th><th>R°↓</th><th>T↓</th></tr>
<tr><td>[42]</td><td>45.74</td><td>0.99</td><td>22.83</td><td>0.33</td><td>46.80</td><td>1.04</td><td>23.89</td><td>0.49</td><td>33.99</td><td>0.69</td><td>43.53</td><td>1.22</td><td>31.54</td><td>1.80</td><td>35.47±10.0</td><td>0.94±0.49</td></tr>
<tr><td>Ours</td><td>37.00</td><td>0.40</td><td>36.47</td><td>0.45</td><td>34.58</td><td>0.76</td><td>26.53</td><td>0.36</td><td>26.49</td><td>0.27</td><td>49.44</td><td>0.67</td><td>27.41</td><td>0.39</td><td>33.99±8.26</td><td>0.47±0.18</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.419, + 0.786, + 0.461 + ], + "angle": 0, + "content": "Table 2: Pose Estimation from Cross-Instance Image Collections. Our method achieves overall better performance than PoseDiffusion on Pix3D. \"R\" stands for rotation and \"T\" for translation. \\(\\pm\\) denotes cross-category means followed by standard deviations." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.481, + 0.786, + 0.602 + ], + "angle": 0, + "content": "approach (line 5-7). Specifically, we parameterize the camera intrinsics using a pinhole camera model with a scalar Field-of-View (FoV) value, and sample the camera parameter \\((\\xi, \\kappa)\\) from a set of candidates determined by an exhaustive combination of 3 FoV, 16 azimuth, and 16 elevation values uniformly sampled from \\([15^{\\circ}, 60^{\\circ}]\\), \\([-180^{\\circ}, 180^{\\circ}]\\), and \\([-90^{\\circ}, 90^{\\circ}]\\), respectively. In this pose initialization stage, all renderings use a fixed camera radius and are cropped with the tightest bounding boxes of rendered foreground masks before being compared with the real image inputs. Line 6 is effectively Eq. (8) with \\(\\lambda_{\\zeta} = 1\\) and \\(\\lambda_{\\mathrm{IoU}} = 0\\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.603, + 0.788, + 0.725 + ], + "angle": 0, + "content": "After pose initialization, we use the \\(\\mathfrak{se}(3)\\) Lie algebra for camera extrinsics parameterization following BARF [20], and optimize for the extrinsics and intrinsics of each input image (Algorithm 1, line 8), with \\(\\lambda_{\\zeta} = 0\\) and \\(\\lambda_{\\mathrm{IoU}} = 1\\), for 1,000 iterations with the Adam [15] optimizer and learning rate 0.001. Since \\(\\theta\\) is frozen, the optimization effectively only considers the second term from Eq. (8). Finally, to optimize for the canonical coordinate mappings (Algorithm 1, line 9), for each input image, we run 4,000 iterations for Eq. (10) with AdamW and learning rate 0.01. All experiments are run on a single 24GB A5000 GPU." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.755, + 0.377, + 0.773 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.787, + 0.842 + ], + "angle": 0, + "content": "In this section, we first benchmark the pose estimation performance of our method on in-the-wild image captures (Sec. 4.1), and then show qualitative results on diverse input data and demonstrate applications such as image editing (Sec. 4.2)." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.358, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "3D Congealing: 3D-Aware Image Alignment in the Wild" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.143, + 0.782, + 0.337 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.356, + 0.788, + 0.427 + ], + "angle": 0, + "content": "Fig. 4: Pose Estimation for Tourist Landmarks. This is a challenging problem setting due to the varying viewpoints and lighting conditions, and the proposed method can successfully align online tourist photos taken at different times and possibly at different geographical locations, into one canonical representation. The top rows show input images and the bottom rows show shape templates under aligned poses." 
+ }, + { + "type": "title", + "bbox": [ + 0.216, + 0.442, + 0.396, + 0.456 + ], + "angle": 0, + "content": "4.1 Pose Estimation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.465, + 0.788, + 0.616 + ], + "angle": 0, + "content": "Dataset. We benchmark pose estimation performance under two settings. First, for a single-instance, varying illumination setting, we use the in-the-wild split of the NAVI [14] dataset, which contains 35 object-centric image collections in its official release. Each image collection contains an average of around 60 casual image captures of an object instance placed under different illumination conditions, backgrounds, and cameras. Second, for a single-category, cross-instance setting, we use Pix3D [40], a dataset of natural in-the-wild images grouped into 9 categories, each containing multiple shape models of IKEA objects. We use 20 randomly selected images from each category except for \"tool\" and \"misc\" as they involve shapes visually and semantically far apart." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.617, + 0.788, + 0.692 + ], + "angle": 0, + "content": "We use identical hyperparameters for all scenes. We use a generic text prompt, \"a photo of sks object\", for initialization for all scenes. The text embeddings corresponding to the tokens for \"sks object\" are being optimized using Eq. (5) with the rest frozen. For each scene, it takes around 1 hr to optimize for NeRF, 15 min for pose initialization, and 45 min for pose optimization." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.704, + 0.788, + 0.84 + ], + "angle": 0, + "content": "Baselines. We compare with several multiview reconstruction baselines. In particular, NeROIC [17] uses the poses from COLMAP, and NeRS [47] and SAMURAI [1] require initial camera directions. GNeRF [25] is a pose-free multiview 3D reconstruction method that is originally designed for single-illumination scenes, and is adapted as a baseline using the same input assumption as ours. PoseDiffusion [42] is a learning-based framework that predicts relative object poses, using ground truth pose annotations as training supervision. The original paper takes a model pre-trained on CO3D [34] and evaluates the pose prediction performance in the wild, and we use the same checkpoint for evaluation." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.375, + 0.129 + ], + "angle": 0, + "content": "Y. Zhang et al." + }, + { + "type": "image", + "bbox": [ + 0.226, + 0.143, + 0.782, + 0.432 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.451, + 0.788, + 0.493 + ], + "angle": 0, + "content": "Fig. 5: Object Alignment from Internet Images. Results of an online image search may contain various appearances, identities, and articulated poses of the object. Our method can successfully associate these in-the-wild images with one shared 3D space." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.513, + 0.788, + 0.621 + ], + "angle": 0, + "content": "Metrics. The varying illuminations pose challenges to classical pose estimation methods such as COLMAP [38]. We use the official split of the data which partitions the 35 scenes into 19 scenes where COLMAP converges (\\( S_{C} \\) in Table 1), and 16 scenes where COLMAP fails to converge (\\( \\sim S_{C} \\)). 
Following [14], we report the absolute rotation and translation errors using Procrustes analysis [10], where for each scene, the predicted camera poses are aligned with the ground truth pose annotations using a global transformation before computing the pose metrics." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.639, + 0.788, + 0.79 + ], + "angle": 0, + "content": "Results. Handling different illumination conditions is challenging for all baselines using photometric-reconstruction-based optimization [1,17,47] even with additional information for pose initialization. As shown in Table 1, our approach significantly outperforms both GNeRF and PoseDiffusion and works on par with SAMURAI which requires additional pose initialization. We run our full pipeline with 3 random seeds and observe a consistent performance across seeds. Qualitative results of aligned templates and learned canonical coordinate maps are shown in Figure 3. Failure modes are discussed in Appendix F. In a cross-instance setting from Table 2, our method achieves a better overall performance compared to the best-performing baseline from Table 1." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.787, + 0.842 + ], + "angle": 0, + "content": "Ablations. Table 1 also shows ablation for the pose fitting objectives. The initialization is critical (\"No Pose Init\"), which is expected as pose optimization" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.358, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D Congealing: 3D-Aware Image Alignment in the Wild" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "image", + "bbox": [ + 0.227, + 0.145, + 0.78, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.261, + 0.788, + 0.29 + ], + "angle": 0, + "content": "Fig. 6: Cross-Category Results. The method can associate images from different categories, such as cats and dogs, by leveraging a learned average shape." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.296, + 0.789, + 0.327 + ], + "angle": 0, + "content": "is susceptible to local optima [20]. \"No IoU Loss\", which is equivalent to using the initialized poses as final predictions, also negatively affects the performance." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.349, + 0.367, + 0.364 + ], + "angle": 0, + "content": "4.2 Applications" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.374, + 0.789, + 0.465 + ], + "angle": 0, + "content": "We show qualitative results on various in-the-wild image data. Inputs for Figures 4 and 5 are crawled with standard online image search engines and are CC-licensed, each consisting of 50 to 100 images. Inputs for Figures 6 and 7 come from the SPair-71k dataset [28]. We use identical hyperparameters for all datasets, except for text prompt initialization where we use a generic description of the object, e.g., \"a photo of sks sculpture\", or \"a photo of cats plus dogs\" for Figure 6." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.479, + 0.789, + 0.541 + ], + "angle": 0, + "content": "Single-Instance. Figure 4 shows the result on Internet photos of tourist landmarks, which may contain a large diversity in illuminations and styles. The proposed method can handle the variations and align these photos and art pieces to the same canonical 3D space and recover the relative camera poses." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.554, + 0.789, + 0.614 + ], + "angle": 0, + "content": "Cross-Instance, Single-Category. Internet images from generic objects may contain more shape and texture variations compared to landmarks. Figure 5 shows results for various objects, where the framework infers a canonical shape from the inputs to capture the shared semantic components being observed." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.629, + 0.789, + 0.69 + ], + "angle": 0, + "content": "Cross-Category. The method leverages semantic features to establish alignment and does not strictly assume that inputs are of the same category. In Figure 6, the method infers an average shape as an anchor to further reason about the relative relation among images from different categories." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.705, + 0.789, + 0.766 + ], + "angle": 0, + "content": "Inputs with Deformable Shapes. To test the robustness of the method, we run the pipeline on images of humans with highly diverse poses. Figures 1 and 7 show that the method assigns plausible poses to the inputs despite the large diversity of shapes and articulated poses contained in the inputs." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.78, + 0.789, + 0.841 + ], + "angle": 0, + "content": "Image Editing. The proposed method finds image correspondence and can be applied to image editing, as shown in Figure 8. Figure 8 (c) shows that our method obtains more visually plausible results compared to the Nearest-Neighbor (NN) baseline using the same DINO features. The baseline matches features in 2D" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.375, + 0.129 + ], + "angle": 0, + "content": "Y. Zhang et al." + }, + { + "type": "image", + "bbox": [ + 0.228, + 0.145, + 0.78, + 0.247 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.263, + 0.788, + 0.293 + ], + "angle": 0, + "content": "Fig. 7: Results on Deformable Objects. The method can be applied to images with highly diverse articulated poses and shapes as shown in the examples above." + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.306, + 0.789, + 0.374 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.436, + 0.375, + 0.568, + 0.387 + ], + "angle": 0, + "content": "(a) Texture Propagation" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.392, + 0.789, + 0.52 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.233, + 0.522, + 0.366, + 0.535 + ], + "angle": 0, + "content": "(b) Editing Propagation" + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.523, + 0.658, + 0.535 + ], + "angle": 0, + "content": "(c) Baseline Comparisons" + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.567, + 0.788, + 0.61 + ], + "angle": 0, + "content": "Fig. 8: Image Editing. Our method propagates texture in (a) and (c) and regional editing in (b) to real images. As shown in (c), it achieves smoother results compared to the nearest-neighbor (NN) baseline thanks to the 3D geometric reasoning." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.628, + 0.788, + 0.675 + ], + "angle": 0, + "content": "for each pixel individually and produces noisy results, as discussed in Appendix C. 
Quantitative evaluation of correspondence matching and additional qualitative results for editing are included in Appendix D and E." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.7, + 0.36, + 0.716 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.734, + 0.789, + 0.841 + ], + "angle": 0, + "content": "We have introduced 3D Congealing, 3D-aware alignment for 2D images capturing semantically similar objects. Our proposed framework leverages a canonical 3D representation that encapsulates geometric and semantic information and, through optimization, fuses prior knowledge from a pre-trained image generative model and semantic information from input images. We show that our model achieves strong results on real-world image datasets under challenging identity, illumination, and background conditions." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.358, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D Congealing: 3D-Aware Image Alignment in the Wild" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.147, + 0.788, + 0.193 + ], + "angle": 0, + "content": "Acknowledgments. We thank Chen Geng and Sharon Lee for their help in reviewing the manuscript. This work is in part supported by NSF RI #2211258, #2338203, and ONR MURI N00014-22-1-2740." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.218, + 0.323, + 0.234 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.251, + 0.788, + 0.308 + ], + "angle": 0, + "content": "1. Boss, M., Engelhardt, A., Kar, A., Li, Y., Sun, D., Barron, J., Lensch, H., Jampani, V.: Samurai: Shape and material from unconstrained real-world arbitrary image collections. Advances in Neural Information Processing Systems 35, 26389-26403 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.309, + 0.788, + 0.35 + ], + "angle": 0, + "content": "2. Caron, M., Touvron, H., Misra, I., Jégou, H., Mairal, J., Bojanowski, P., Joulin, A.: Emerging properties in self-supervised vision transformers. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 9650-9660 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.351, + 0.788, + 0.405 + ], + "angle": 0, + "content": "3. Chen, X., Dong, Z., Song, J., Geiger, A., Hilliges, O.: Category level object pose estimation via neural analysis-by-synthesis. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXVI 16. pp. 139-156. Springer (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.406, + 0.788, + 0.462 + ], + "angle": 0, + "content": "4. Chen, Y., Chen, X., Wang, X., Zhang, Q., Guo, Y., Shan, Y., Wang, F.: Local-to-global registration for bundle-adjusting neural radiance fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8264-8273 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.463, + 0.788, + 0.49 + ], + "angle": 0, + "content": "5. Cheng, W., Cao, Y.P., Shan, Y.: Id-pose: Sparse-view camera pose estimation by inverting diffusion models. arXiv preprint arXiv:2306.17140 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.491, + 0.788, + 0.518 + ], + "angle": 0, + "content": "6. Deng, Y., Yang, J., Tong, X.: Deformed implicit field: Modeling 3D shapes with learned dense correspondence. 
In: CVPR (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.519, + 0.788, + 0.56 + ], + "angle": 0, + "content": "7. Gal, R., Alaluf, Y., Atzmon, Y., Patashnik, O., Bermano, A.H., Chechik, G., Cohen-Or, D.: An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.561, + 0.788, + 0.602 + ], + "angle": 0, + "content": "8. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial networks. Communications of the ACM 63(11), 139–144 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.603, + 0.788, + 0.644 + ], + "angle": 0, + "content": "9. Goodwin, W., Vaze, S., Havoutis, I., Posner, I.: Zero-shot category-level object pose estimation. In: European Conference on Computer Vision. pp. 516-532. Springer (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.645, + 0.788, + 0.658 + ], + "angle": 0, + "content": "0. Gower, J.C., Dijksterhuis, G.B.: Procrustes problems, vol. 30. OUP Oxford (2004)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.659, + 0.788, + 0.7 + ], + "angle": 0, + "content": "1. Gupta, K., Jampani, V., Esteves, C., Shrivastava, A., Makadia, A., Snavely, N., Kar, A.: ASIC: Aligning sparse in-the-wild image collections. arXiv preprint arXiv:2303.16201 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.701, + 0.788, + 0.728 + ], + "angle": 0, + "content": "2. Huang, G., Mattar, M., Lee, H., Learned-Miller, E.: Learning to align from scratch. Advances in neural information processing systems 25 (2012)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.729, + 0.788, + 0.756 + ], + "angle": 0, + "content": "3. Huang, G.B., Jain, V., Learned-Miller, E.: Unsupervised joint alignment of complex images. In: ICCV. pp. 1-8. IEEE (2007)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.757, + 0.788, + 0.812 + ], + "angle": 0, + "content": "4. Jampani, V., Maninis, K.K., Engelhardt, A., Karpur, A., Truong, K., Sargent, K., Popov, S., Araujo, A., Martin-Brualla, R., Patel, K., et al.: Navi: Category-agnostic image collections with high-quality 3d shape and pose annotations. arXiv preprint arXiv:2306.09109 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.813, + 0.788, + 0.84 + ], + "angle": 0, + "content": "5. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. In: International Conference on Learning Representations (2015)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.251, + 0.788, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.375, + 0.129 + ], + "angle": 0, + "content": "Y. Zhang et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.147, + 0.787, + 0.189 + ], + "angle": 0, + "content": "16. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., et al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.189, + 0.787, + 0.23 + ], + "angle": 0, + "content": "17. Kuang, Z., Olszewski, K., Chai, M., Huang, Z., Achlioptas, P., Tulyakov, S.: Neroic: Neural rendering of objects from online image collections. 
ACM Transactions on Graphics (TOG) 41(4), 1-12 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.23, + 0.787, + 0.256 + ], + "angle": 0, + "content": "18. Learned-Miller, E.G.: Data driven image models through continuous joint alignment. IEEE TPAMI 28(2), 236-250 (2005)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.257, + 0.787, + 0.284 + ], + "angle": 0, + "content": "19. Lin, A., Zhang, J.Y., Ramanan, D., Tulsiani, S.: Relpose++: Recovering 6d poses from sparse-view observations. arXiv preprint arXiv:2305.04926 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.284, + 0.787, + 0.325 + ], + "angle": 0, + "content": "20. Lin, C.H., Ma, W.C., Torralba, A., Lucey, S.: Barf: Bundle-adjusting neural radiance fields. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5741-5751 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.325, + 0.787, + 0.366 + ], + "angle": 0, + "content": "21. Liu, R., Wu, R., Van Hoorick, B., Tokmakov, P., Zakharov, S., Vondrick, C.: Zero-1-to-3: Zero-shot one image to 3d object. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9298-9309 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.366, + 0.787, + 0.393 + ], + "angle": 0, + "content": "22. Lorensen, W.E., Cline, H.E.: Marching cubes: A high resolution 3d surface construction algorithm. ACM SIGGRAPH Computer Graphics 21(4), 163-169 (1987)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.393, + 0.787, + 0.419 + ], + "angle": 0, + "content": "23. Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: International Conference on Learning Representations (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.42, + 0.787, + 0.46 + ], + "angle": 0, + "content": "24. Martin-Brualla, R., Radwan, N., Sajjadi, M.S.M., Barron, J.T., Dosovitskiy, A., Duckworth, D.: NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections. In: CVPR (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.46, + 0.787, + 0.501 + ], + "angle": 0, + "content": "25. Meng, Q., Chen, A., Luo, H., Wu, M., Su, H., Xu, L., He, X., Yu, J.: Gnerf: Gan-based neural radiance field without posed camera. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 6351-6361 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.501, + 0.787, + 0.542 + ], + "angle": 0, + "content": "26. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM 65(1), 99-106 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.542, + 0.787, + 0.569 + ], + "angle": 0, + "content": "27. Miller, E.G., Matsakis, N.E., Viola, P.A.: Learning from one example through shared densities on transforms. In: CVPR. vol. 1, pp. 464-471. IEEE (2000)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.569, + 0.787, + 0.596 + ], + "angle": 0, + "content": "28. Min, J., Lee, J., Ponce, J., Cho, M.: Spair-71k: A large-scale benchmark for semantic correspondence. arXiv preprint arXiv:1908.10543 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.596, + 0.787, + 0.637 + ], + "angle": 0, + "content": "29. Ofri-Amar, D., Geyer, M., Kasten, Y., Dekel, T.: Neural congealing: Aligning images to a joint semantic atlas. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
19403-19412 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.637, + 0.787, + 0.677 + ], + "angle": 0, + "content": "30. Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., et al.: Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.677, + 0.787, + 0.718 + ], + "angle": 0, + "content": "31. Peebles, W., Zhu, J.Y., Zhang, R., Torralba, A., Efros, A.A., Shechtman, E.: Gansupervised dense visual alignment. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13470-13481 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.718, + 0.787, + 0.744 + ], + "angle": 0, + "content": "32. Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.744, + 0.787, + 0.786 + ], + "angle": 0, + "content": "33. Raj, A., Kaza, S., Poole, B., Niemeyer, M., Ruiz, N., Mildenhall, B., Zada, S., Aberman, K., Rubinstein, M., Barron, J., et al.: Dreambooth3d: Subject-driven text-to-3d generation. arXiv preprint arXiv:2303.13508 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.786, + 0.787, + 0.84 + ], + "angle": 0, + "content": "34. Reizenstein, J., Shapovalov, R., Henzler, P., Sbordone, L., Labatut, P., Novotny, D.: Common objects in 3d: Large-scale learning and evaluation of real-life 3d category reconstruction. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10901-10911 (2021)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.358, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D Congealing: 3D-Aware Image Alignment in the Wild" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.189 + ], + "angle": 0, + "content": "35. Ren, T., Liu, S., Zeng, A., Lin, J., Li, K., Cao, H., Chen, J., Huang, X., Chen, Y., Yan, F., et al.: Grounded sam: Assembling open-world models for diverse visual tasks. arXiv preprint arXiv:2401.14159 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.19, + 0.788, + 0.232 + ], + "angle": 0, + "content": "36. Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 10684-10695 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.232, + 0.788, + 0.286 + ], + "angle": 0, + "content": "37. Ruiz, N., Li, Y., Jampani, V., Pritch, Y., Rubinstein, M., Aberman, K.: Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 22500-22510 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.287, + 0.788, + 0.327 + ], + "angle": 0, + "content": "38. Schonberger, J.L., Frahm, J.M.: Structure-from-motion revisited. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 
4104-4113 (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.328, + 0.788, + 0.356 + ], + "angle": 0, + "content": "39. Shi, Y., Wang, P., Ye, J., Long, M., Li, K., Yang, X.: Mvdream: Multi-view diffusion for 3d generation. arXiv preprint arXiv:2308.16512 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.356, + 0.788, + 0.397 + ], + "angle": 0, + "content": "40. Sun, X., Wu, J., Zhang, X., Zhang, Z., Zhang, C., Xue, T., Tenenbaum, J.B., Freeman, W.T.: Pix3d: Dataset and methods for single-image 3d shape modeling. In: CVPR (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.398, + 0.788, + 0.453 + ], + "angle": 0, + "content": "41. Wang, H., Sridhar, S., Huang, J., Valentin, J., Song, S., Guibas, L.J.: Normalized object coordinate space for category-level 6d object pose and size estimation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2642-2651 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.453, + 0.788, + 0.493 + ], + "angle": 0, + "content": "42. Wang, J., Rupprecht, C., Novotny, D.: Posediffusion: Solving pose estimation via diffusion-aided bundle adjustment. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9773-9783 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.494, + 0.788, + 0.535 + ], + "angle": 0, + "content": "43. Wang, P., Liu, L., Liu, Y., Theobalt, C., Komura, T., Wang, W.: Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.536, + 0.788, + 0.563 + ], + "angle": 0, + "content": "44. Wang, Z., Wu, S., Xie, W., Chen, M., Prisacariu, V.A.: Nerf-: Neural radiance fields without known camera parameters. arXiv preprint arXiv:2102.07064 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.564, + 0.788, + 0.592 + ], + "angle": 0, + "content": "45. Yariv, L., Gu, J., Kasten, Y., Lipman, Y.: Volume rendering of neural implicit surfaces. Advances in Neural Information Processing Systems 34, 4805-4815 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.592, + 0.788, + 0.633 + ], + "angle": 0, + "content": "46. Yen-Chen, L., Florence, P., Barron, J.T., Rodriguez, A., Isola, P., Lin, T.Y.: inerf: Inverting neural radiance fields for pose estimation. In: 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 1323-1330. IEEE (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.633, + 0.788, + 0.674 + ], + "angle": 0, + "content": "47. Zhang, J., Yang, G., Tulsiani, S., Ramanan, D.: Ners: Neural reflectance surfaces for sparse-view 3d reconstruction in the wild. In: Advances in Neural Information Processing Systems. vol. 34, pp. 29835-29847 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.674, + 0.788, + 0.716 + ], + "angle": 0, + "content": "48. Zhang, J.Y., Ramanan, D., Tulsiani, S.: Relpose: Predicting probabilistic relative rotation for single objects in the wild. In: European Conference on Computer Vision. pp. 592-611. 
Springer (2022)" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.716 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/757b034b-7105-4846-a434-665e5b237ea6_origin.pdf b/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/757b034b-7105-4846-a434-665e5b237ea6_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0ea53d7556917ec9170b0baa440ff14be25c6524 --- /dev/null +++ b/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/757b034b-7105-4846-a434-665e5b237ea6_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:495d541ca75826cfdb952235770e6cd7f44c513263fbc17a6827f65574afa7b6 +size 6344496 diff --git a/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/full.md b/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8e1974098687438d54cb9473040ddfe6a21eacdb --- /dev/null +++ b/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/full.md @@ -0,0 +1,327 @@ +# 3D Congealing: 3D-Aware Image Alignment in the Wild + +Yunzhi Zhang $^{1}$ , Zizhang Li $^{1}$ , Amit Raj $^{2}$ , Andreas Engelhardt $^{3}$ , Yuanzhen Li $^{2}$ , Tingbo Hou $^{4}$ , Jiajun Wu $^{1}$ , and Varun Jampani $^{5}$ + +1 Stanford University +2 Google DeepMind +3 University of Tübingen +4 Meta GenAI +Stability AI + +Abstract. We propose 3D Congealing, a novel problem of 3D-aware alignment for 2D images capturing semantically similar objects. Given a collection of unlabeled Internet images, our goal is to associate the shared semantic parts from the inputs and aggregate the knowledge from 2D images to a shared 3D canonical space. We introduce a general framework that tackles the task without assuming shape templates, poses, or any camera parameters. At its core is a canonical 3D representation that encapsulates geometric and semantic information. The framework optimizes for the canonical representation together with the pose for each input image, and a per-image coordinate map that warps 2D pixel coordinates to the 3D canonical frame to account for the shape matching. The optimization procedure fuses prior knowledge from a pre-trained image generative model and semantic information from input images. The former provides strong knowledge guidance for this under-constraint task, while the latter provides the necessary information to mitigate the training data bias from the pre-trained model. Our framework can be used for various tasks such as pose estimation and image editing, achieving strong results on real-world image datasets under challenging illumination conditions and on in-the-wild online image collections. Project page at https://ai.stanford.edu/~yzzhang/projects/3d-congealing/. + +# 1 Introduction + +We propose the task of 3D Congealing, where the goal is to align a collection of images containing semantically similar objects into a shared 3D space. Specifically, we aim to obtain a canonical 3D representation together with the pose and a dense map of 2D-3D correspondence for each image in the collection. The input images may contain object instances belonging to a similar category with varying shapes and textures, and are captured under distinct camera viewpoints and illumination conditions, which all contribute to the pixel-level difference as shown in Figure 1. 
Despite such inter-image differences, humans excel at aligning such images with one another in a geometrically and semantically consistent manner based on their 3D-aware understanding. + +![](images/326fa3f4c2133cca056625ab6933f5a81979e4d9cd0e440db6d346aa227594ec.jpg) +Fig. 1: Objects with different shapes and appearances, such as these sculptures, may share similar semantic parts and a similar geometric structure. We study 3D Congealing, inferring and aligning such a shared structure from an unlabeled image collection. Such alignment can be used for tasks such as pose estimation and image editing. See Appendix A for full results. + +Obtaining a canonical 3D representation and grounding input images to the 3D canonical space enable several downstream tasks, such as 6-DoF object pose estimation, pose-aware image filtering, and image editing. Unlike the task of 2D congealing [11, 29, 31], where the aim is to align the 2D pixels across the images, 3D Congealing requires aggregating the information from the image collection altogether and forming the association among images in 3D. The task is also closely related to 3D reconstruction from multiview images, with a key distinction in the problem setting, as inputs here do not necessarily contain identical objects but rather semantically similar ones. Such a difference opens up the possibility of image alignment from readily available image collections on the Internet, e.g., online search results, landmark images, and personal photo collections. + +3D Congealing represents a challenging problem, particularly for arbitrary images without camera pose or lighting annotations, even when the input images contain identical objects [1,4,20,44], because the solutions for pose and shape are generally entangled. On the one hand, the definition of poses is specific to the coordinate frame of the shape; on the other hand, the shape optimization is typically guided by the pixel-wise supervision of images under the estimated poses. To overcome the ambiguity in jointly estimating poses and shapes, prior works mostly start from noisy pose initializations [20], data-specific initial pose distributions [25,44], or rough pose annotations such as pose quadrants [1]. They then perform joint optimization for a 3D representation using an objective of reconstructing input image pixels [1,20,44] or distribution matching [25]. + +In this work, instead of relying on initial poses as starting points for shape reconstruction, we propose to tackle the joint optimization problem from a different perspective. We first obtain a plausible 3D shape that is compliant with the input image observations using pre-trained generative models, and then use semantic-aware visual features, e.g., pre-trained features from DINO [2,30] and Stable-Diffusion [36], to register input images to the 3D shape. Compared to photometric reconstruction losses, these features are more tolerant of variance in object identities among image inputs. + +We make deliberate design choices to instantiate such a framework that fuses the knowledge from pre-trained text-to-image (T2I) generative models with real image inputs. First, to utilize the prior knowledge from generative models, we opt to apply a T2I personalization method, Textual Inversion [7], which aims to find the most suitable text embedding to reconstruct the input images via the pre-trained model. Furthermore, a semantic-aware distance is proposed to mitigate the appearance discrepancy between the rendered image and the input photo collection.
Finally, a canonical coordinate mapping is learned to find the correspondence between the 3D canonical representation and 2D input images. + +To prove the effectiveness of the proposed framework, we compare the proposed method against several baselines on the task of pose estimation on a dataset with varying illuminations and show that our method surpasses all the baselines significantly. We also demonstrate several applications of the proposed method, including image editing and object alignment on web image data. + +In summary, our contributions are: + +1. We propose a novel task of 3D Congealing that involves aligning images of semantically similar objects in a shared 3D space. +2. We develop a framework tackling the proposed task and demonstrate several applications using the obtained 2D-3D correspondence, such as pose estimation and image editing. +3. We show the effectiveness and applicability of the proposed method on a diverse range of in-the-wild Internet images. + +# 2 Related Works + +Image Alignment and Congealing. The task of image alignment for a single instance, possibly under varying illuminations, has been relatively well-studied [24, 47]. To align images containing different instances from the same category with small deformations, one line of approach is known as image congealing [12, 13, 18, 27, 29, 31]. In particular, Neural Congealing [29] learns atlases to capture common semantic features from input images and recovers a dense mapping between input images and the atlases. GANgealing [31] uses a spatial transformer to map a randomly generated image from a GAN [8] to a jointly aligned space. These 2D-warping-based methods are typically applied to source and target image pairs with no or small camera rotation, and work best on in-plane transformation, while our proposed framework handles a larger variation of viewpoints due to 3D reasoning. On the other hand, DIFNet [6] exemplifies an approach of joint optimization of shape template and deformation, provided with the 3D shape. In comparison, we propose a template-followed-by-implicit-deformation approach and assume a single 2D observation for each instance instead of 3D inputs. The proposed approach exploits the fact that a "good" template, i.e., one that captures the common geometric structure of the inputs, is not unique and a solution can be effectively found before knowing the input image poses. Compared to joint optimization methods, it reduces task complexity by providing such an anchoring template to make later image registration easier. Finally, this work provides qualitative results on aligning images across instances with large deformation. The output global alignment of input instances and articulation-free templates can be useful for downstream reconstruction with image-specific articulation, which is beyond the scope of this work. + +Object Pose Estimation. Object pose estimation aims to estimate the pose of an object instance with respect to the coordinate frame of its 3D shape. Classical methods for pose estimation recover poses from multi-view images using pixel- or feature-level matching to find the alignment between different images [38]. These methods are less suitable in the in-the-wild setting due to the increased appearance variance. Recent methods tackle this task by supervised learning with pose annotations [19,42,48], but it remains challenging for these methods to generalize beyond the training distribution.
Another class of methods uses an analysis-by-synthesis framework to estimate pose given category-specific templates [3] or a pre-trained 3D representation [46]; these assumptions make it challenging to apply these methods to generic objects in the real world. ID-Pose [5] leverages Zero-1-to-3 [21], a view synthesis model, and optimizes for the relative pose given a source and a target image. Goodwin et al. [9] use pre-trained self-supervised features for matching, instead of doing it at the pixel level, but require both RGB and depth inputs. + +Shape Reconstruction from Image Collections. Neural rendering approaches [26, 43, 45] use images with known poses to reconstruct the 3D shape and appearance from a collection of multiview images. The assumptions of known poses and consistent illumination prevent these methods from being applied in the wild. Several works have extended these approaches to relax the pose assumption, proposing to handle noisy or unknown camera poses of input images through joint optimization of poses and 3D representation [4, 20, 44]. SAMURAI [1] further handles scenes under various illuminations, but requires access to coarse initial poses in the form of pose quadrant annotations. + +3D Distillation from 2D Diffusion Models. Recently, text-to-image diffusion models have shown great advancement in 2D image generation and are used for 3D asset distillation with conditions such as texts [32,39], single image [21], and image collections [33]. DreamFusion [32] has proposed to apply gradients computed from pre-trained text-to-image models to the optimized 3D representations. DreamBooth3D [33] proposed to utilize fine-tuned diffusion model [37] for the image-conditioned 3D reconstruction task. These works provide a viable solution for 3D reconstruction from image collections but without grounding the inputs to the 3D space as in ours. + +![](images/29133ff7e713ee4a80c5650915b474bc3ca0e87797780a9fef76cd115e896d26.jpg) +Fig. 2: Pipeline. Given a collection of in-the-wild images capturing similar objects as inputs, we develop a framework that "congeals" these images in 3D. The core representation consists of a canonical 3D shape that captures the geometric structure shared among the inputs, together with a set of coordinate mappings that register the input images to the canonical shape. The framework utilizes the prior knowledge of plausible 3D shapes from a generative model, and aligns images in the semantic space using pre-trained semantic feature extractors. + +# 3 Method + +We formulate the problem of 3D Congealing as follows. Given a set of $N$ object-centric images $\mathcal{D} = \{x_{n}\}_{n = 1}^{N}$ that captures objects sharing semantic components, e.g., objects from one category, we seek to align the object instances in these images into a canonical 3D representation, e.g., NeRF [26], parameterized by $\theta$ . We refer to the coordinate frame of this 3D representation as the canonical frame. We also recover the camera pose of each observation $x\in \mathcal{D}$ in the canonical frame, denoted using a pose function $\pi :x\mapsto (\xi ,\kappa)$ where $\xi$ represents the object pose in SE(3) and $\kappa$ is the camera intrinsic parameters. We assume access to instance masks, which can be obtained using an off-the-shelf segmentation method [16]. + +The 3D representation should be consistent with the physical prior of objects in the natural world, and with input observations both geometrically and semantically. 
These constraints can be translated into an optimization problem: + +$$ +\max _ {\pi , \theta} p _ {\Theta} (\theta), \text { s.t. } x = \mathcal {R} (\pi (x), \theta), \forall x \in \mathcal {D}, \tag {1} +$$ + +where $p_{\Theta}$ is a prior distribution for the 3D representation parameter $\theta$ that encourages physically plausible solutions, $\mathcal{R}$ is a predefined rendering function that enforces geometric consistency, and the equality constraint on image reconstruction enforces compliance with input observations. + +We will now describe an instantiation of the 3D prior $p_{\Theta}$ (Sec. 3.1), an image distance function that helps enforce the equality constraint (Sec. 3.2), followed by the 3D Congealing optimization (Sec. 3.3) to estimate input image poses $\pi$ . + +# 3.1 3D Guidance from Generative Models + +As illustrated in the left part of Figure 2, we extract the prior knowledge for 3D representations $p_{\Theta}(\cdot)$ from a pre-trained text-to-image (T2I) model such as Stable-Diffusion [36]. DreamFusion [32] proposes to turn a text prompt $y$ into a 3D representation $\theta$ using the following Score Distillation Sampling (SDS) objective, leveraging a T2I diffusion model with frozen parameters $\phi$ , + +$$ +\min _ {\theta} \mathbb {E} _ {x \in \mathcal {D} (\theta)} \mathcal {L} _ {\mathrm {diff}} ^ {\phi} (x, y). \tag {2} +$$ + +Here $\mathcal{D}(\theta) \coloneqq \{\mathcal{R}(\pi, \theta) \mid \pi \sim p_{\Pi}(\cdot)\}$ contains images rendered from the 3D representation $\theta$ under a prior camera distribution $p_{\Pi}(\cdot)$ , and $\mathcal{L}_{\mathrm{diff}}^{\phi}$ is the training objective of image diffusion models specified as follows: + +$$ +\mathcal {L} _ {\mathrm {diff}} ^ {\phi} (x, y) := \mathbb {E} _ {t \sim \mathcal {U} ([ 0, 1 ]), \epsilon \sim \mathcal {N} (\mathbf {0}, I)} \left[ \omega (t) \| \epsilon_ {\phi} (\alpha_ {t} x + \sigma_ {t} \epsilon , y, t) - \epsilon \| _ {2} ^ {2} \right], \tag {3} +$$ + +where $\epsilon_{\phi}$ is the pre-trained denoising network, $\omega(\cdot)$ is the timestep-dependent weighting function, $t$ is the diffusion timestep and $\alpha_{t}, \sigma_{t}$ are timestep-dependent coefficients from the diffusion model schedule. + +The above loss can be used to guide the optimization of a 3D representation $\theta$ , whose gradient is approximated by + +$$ +\nabla_ {\theta} \mathcal {L} _ {\mathrm {diff}} ^ {\phi} (x = \mathcal {R} (\xi , \kappa , \theta), y) \approx \mathbb {E} _ {t, \epsilon} \left[ \omega (t) (\epsilon_ {\phi} (\alpha_ {t} x + \sigma_ {t} \epsilon , y, t) - \epsilon) \frac {\partial x}{\partial \theta} \right], \tag {4} +$$ + +where $\xi$ and $\kappa$ are the extrinsic and intrinsic camera parameters, respectively. The derived gradient approximation is adopted by later works such as MVDream [39], which we use as the backbone. + +The original SDS objective optimizes for a text-conditioned 3D shape with a user-specified text prompt $y$ and does not consider image inputs. Here, we use the technique from Textual Inversion [7] to recover the most suitable text prompt $y^{*}$ that explains input images, defined as follows: + +$$ +y ^ {*} = \arg \min _ {y} \mathbb {E} _ {x \in \mathcal {D}} \mathcal {L} _ {\mathrm {diff}} ^ {\phi} (x, y). \tag {5} +$$ + +Eq. (2) and Eq.
(5) differ in both the sources of the observations $x$ (an infinite dataset of rendered images $\mathcal{D}(\theta)$ for the former, and real data $\mathcal{D}$ for the latter) and the parameters being optimized over ($\theta$ and $y$ , respectively). In our framework, we incorporate the real image information into the SDS guidance via first solving for $y^{*}$ (Eq. (5)) and keep it frozen when optimizing for $\theta$ (Eq. (2)). The diffusion model parameter $\phi$ is frozen throughout the process, requiring significantly less memory compared to the alternative of integrating input image information via finetuning $\phi$ as in DreamBooth3D [33]. + +# 3.2 Semantic Consistency from Deep Features + +The generative model prior from Sec. 3.1 effectively constrains the search space for the solutions. However, the objectives from Eqs. (2) and (5) use the input image information only indirectly, via a text embedding $y^{*}$ . To explain the relative geometric relation among input images, we explicitly recover the pose of each input image w.r.t. $\theta$ , as illustrated in Figure 2 (middle) and as explained below. + +To align input images, we use an image distance metric defined by semantic feature dissimilarity. In particular, pre-trained deep models such as DINO [2,30] have been shown to be effective semantic feature extractors. Denote such a model as $f$ parameterized by $\zeta$ . The similarity of two pixel locations $u_{1}$ and $u_{2}$ from two images $x_{1}$ and $x_{2}$ , respectively, can be measured with + +$$ +d _ {\zeta} ^ {u _ {1}, u _ {2}} \left(x _ {1}, x _ {2}\right) := 1 - \frac {\langle \left[ f _ {\zeta} \left(x _ {1}\right) \right] _ {u _ {1}} , \left[ f _ {\zeta} \left(x _ {2}\right) \right] _ {u _ {2}} \rangle}{\| \left[ f _ {\zeta} \left(x _ {1}\right) \right] _ {u _ {1}} \| _ {2} \| \left[ f _ {\zeta} \left(x _ {2}\right) \right] _ {u _ {2}} \| _ {2}}, \tag {6} +$$ + +where $[\cdot ]$ is an indexing operator. It thereafter defines an image distance function + +$$ +\left\| x _ {1} - x _ {2} \right\| _ {d _ {\zeta}} := \frac {1}{H W} \sum_ {u} d _ {\zeta} ^ {u, u} \left(x _ {1}, x _ {2}\right), \tag {7} +$$ + +where $x_{1}$ and $x_{2}$ have resolution $H\times W$ , and the sum is over all image coordinates. + +The choice of semantic-aware image distance, instead of photometric differences as in the classical problem setting of multiview 3D reconstruction [38,43,45], leads to solutions that maximally align input images to the 3D representation with more tolerance towards variance in object shape, texture, and environmental illuminations among input images, which is crucial in our problem setting. + +# 3.3 Optimization + +The Canonical Shape and Image Poses. Combining Secs. 3.1 and 3.2, we convert the original problem in Eq. (1) into + +$$ +\min _ {\pi , \theta} \underbrace {\mathbb {E} _ {x \in \mathcal {D} (\theta)} \mathcal {L} _ {\mathrm {diff}} ^ {\phi} \left(x , y ^ {*}\right)} _ {\text {generative model guidance}} + \lambda \underbrace {\mathbb {E} _ {x \in \mathcal {D}} \| \mathcal {R} (\pi (x) , \theta) - x \| _ {d}} _ {\text {data reconstruction}}, \tag {8} +$$ + +where $y^{*}$ comes from Eq. (5) and $\lambda$ is a loss weight. Compared to Eq. (5), here the first term instantiates the generative modeling prior and the second term is a soft constraint of reconstructing input observations.
Specifically, $d = \lambda_{\zeta}d_{\zeta} + \lambda_{\mathrm{IoU}}d_{\mathrm{IoU}}$ , where $d_{\zeta}$ is the semantic-space distance metric from Sec. 3.2, and $d_{\mathrm{IoU}}$ is the Intersection-over-Union (IoU) loss for masks, $\| m_1 - m_2 \|_{d_{\mathrm{IoU}}} \coloneqq 1 - (\| m_1 \odot m_2 \|_1) / (\| m_1 \|_1 + \| m_2 \|_1 - \| m_1 \odot m_2 \|_1)$ , where $m_1$ and $m_2$ are image masks, which in Eq. (8) are set to be the mask rendering and the instance mask for $x$ . The use of both $d_{\zeta}$ and $d_{\mathrm{IoU}}$ tolerates shape variance among input instances. + +For the shape representation, we follow NeRF [26] and use neural networks $\sigma_{\theta}:\mathbb{R}^{3}\to \mathbb{R}$ and $c_{\theta}:\mathbb{R}^{3}\rightarrow \mathbb{R}^{3}$ to map a 3D spatial coordinate to a density and an RGB value, respectively. The rendering operation $\mathcal{R}$ is the volumetric rendering operation specified as follows: + +$$ +\mathcal {R} (r, \xi , \theta ; c _ {\theta}) = \int T (t) \sigma_ {\theta} (\xi r (t)) c _ {\theta} (\xi r (t)) \mathrm {d} t, \tag {9} +$$ + +where $T(t) = \exp \left(-\int \sigma_{\theta}(r(t'))\mathrm{d}t'\right)$ , $r: \mathbb{R} \to \mathbb{R}^3$ is a ray shooting from the camera center to the image plane, parameterized by the camera location and the ray's direction, and $\xi$ is the relative pose that transforms the ray from the camera frame to the canonical frame. + +Forward Canonical Coordinate Mappings. After the above optimization, each image $x$ from the input image collection can be "congealed" to the shape $\theta$ via a canonical coordinate mapping, i.e., a forward warping operation $\varPhi_x^{\mathrm{fwd}}: \mathbb{R}^2 \to \mathbb{R}^3$ that maps a 2D image coordinate to a 3D coordinate in the canonical frame of reference as illustrated in Figure 2. $\varPhi_x^{\mathrm{fwd}}$ consists of the following two operations. + +First, we warp a coordinate $u$ from the real image $x$ to the rendering of the canonical shape under its pose $\pi(x)$ , denoted as $\tilde{x} \coloneqq \mathcal{R}(\pi(x), \theta)$ . Specifically, + +$$ +\varPhi_ {\tilde {x} \leftarrow x} ^ {\mathrm {2 D} \leftarrow 2 \mathrm {D}} (u) := \arg \min _ {\tilde {u}} d _ {\zeta} ^ {\tilde {u}, u} (\tilde {x}, x) + \lambda_ {\ell_ {2}} \| \tilde {u} - u \| _ {2} ^ {2} + \lambda_ {\mathrm {smooth}} \mathcal {L} _ {\mathrm {smooth}} (\tilde {u}, u), \tag {10} +$$ + +where $d_{\zeta}$ follows Eq. (6), the 2D coordinates $u$ and $\tilde{u}$ are normalized into range [0,1] before computing the $\ell_2$ norm, the smoothness term $\mathcal{L}_{\mathrm{smooth}}$ is specified in Appendix B, and $\lambda_{\ell_2}$ and $\lambda_{\mathrm{smooth}}$ are scalar weights. This objective searches for a new image coordinate $\tilde{u}$ (from the rendering $\tilde{x}$ ) that shares a semantic feature similar to $u$ (from the real image $x$ ), and ensures that $\tilde{u}$ stays in the local neighborhood of $u$ via a soft constraint of the coordinate distance. Afterward, a 2D-to-3D operation takes in the warped coordinate from above and outputs its 3D location in the normalized object coordinate space (NOCS) [41] of $\theta$ : + +$$ +\Phi_ {x} ^ {\mathrm {3 D} \leftarrow 2 \mathrm {D}} (\tilde {u}) := \left[ \mathcal {R} _ {\mathrm {NOCS}} (\pi (x), \theta) \right] _ {\tilde {u}}, \tag {11} +$$ + +where $\mathcal{R}_{\mathrm{NOCS}}$ is identical to $\mathcal{R}$ from Eq.
(9), but replacing the color field $c_{\theta}$ with a canonical object coordinate field, $c_{\mathrm{NOCS}}: \mathbb{R}^3 \to \mathbb{R}^3$ , $p \mapsto (p - p_{\mathrm{min}}) / (p_{\mathrm{max}} - p_{\mathrm{min}})$ , where $p_{\mathrm{min}}$ and $p_{\mathrm{max}}$ are the two opposite corners of the canonical shape's bounding box. These bounding boxes are determined by the mesh extracted from the density neural field $\sigma_{\theta}$ using the Marching Cubes [22] algorithm. + +Combining the above, given an input image coordinate $u$ , $\varPhi_x^{\mathrm{fwd}}(u) := \varPhi_x^{3\mathrm{D} \leftarrow 2\mathrm{D}} \circ \varPhi_{\tilde{x} \leftarrow x}^{2\mathrm{D} \leftarrow 2\mathrm{D}}(u)$ identifies a 3D location in the canonical frame corresponding to $u$ . + +Reverse Canonical Coordinate Mappings. Each image can be "uncongealed" from the canonical shape using $\varPhi_x^{\mathrm{rev}}:\mathbb{R}^3\to \mathbb{R}^2$ , which is the reverse operation of $\varPhi_x^{\mathrm{fwd}}(u)$ and is approximately computed via nearest-neighbor inversion as explained below. + +Given a 3D location within a unit cube, $p \in [0,1]^3$ , $\varPhi_x^{\mathrm{rev}}(p) := \varPhi_{x \leftarrow \tilde{x}}^{2\mathrm{D} \leftarrow 2\mathrm{D}} \circ \varPhi_x^{2\mathrm{D} \leftarrow 3\mathrm{D}}(p)$ . In particular, + +$$ +\Phi_ {x} ^ {\mathrm {2 D} \leftarrow 3 \mathrm {D}} (p) := \arg \min _ {\tilde {u}} \| p - \Phi_ {x} ^ {\mathrm {3 D} \leftarrow 2 \mathrm {D}} (\tilde {u}) \| _ {2} \tag {12} +$$ + +is an operation that takes in a 3D coordinate $p$ in the canonical frame and searches for a 2D image coordinate whose NOCS value is the closest to $p$ , and $\varPhi_{x\leftarrow \tilde{x}}^{2\mathrm{D}\leftarrow 2\mathrm{D}}$ is computed via inverting $\varPhi_{\tilde{x}\leftarrow x}^{2\mathrm{D}\leftarrow 2\mathrm{D}}$ from Eq. (10), + +$$ +\Phi_ {x \leftarrow \tilde {x}} ^ {2 \mathrm {D} \leftarrow 2 \mathrm {D}} (\tilde {u}) := \arg \min _ {u} \| \tilde {u} - \Phi_ {\tilde {x} \leftarrow x} ^ {2 \mathrm {D} \leftarrow 2 \mathrm {D}} (u) \| _ {2}. \tag {13} +$$ + +![](images/908f2fa958d582a64e1292a8d8b01de6df5745ce15abca255ca4e99fd26785d0.jpg) +Fig. 3: Pose Estimation from Multi-Illumination Captures. The figure shows 4 example scenes from the NAVI dataset, displaying the real image inputs, canonical shapes under estimated poses, and the canonical coordinate maps. + +In summary, the above procedure establishes the 2D-3D correspondence between an input image $x$ and the canonical shape via $\varPhi_x^{\mathrm{fwd}}$ , and defines the dense 2D-2D correspondences between two images $x_1, x_2$ via $\varPhi_{x_2}^{\mathrm{rev}} \circ \varPhi_{x_1}^{\mathrm{fwd}}$ , which enables image editing (Figure 8). The full framework is described in Algorithm 1. + +# 3.4 Implementation Details + +Input images are cropped with the tightest bounding box around the foreground masks. The masks come from dataset annotations, if available, or from Grounded-SAM [16, 35], an off-the-shelf segmentation model. + +Across all experiments, we optimize for $y^{*}$ (Algorithm 1, line 2) for 1,000 iterations using an AdamW [23] optimizer with learning rate 0.02 and weight decay 0.01.
Since directly optimizing poses and camera parameters with gradient descent easily falls into local minima [20], we initialize $\pi$ using an analysis-by-synthesis approach (Algorithm 1, lines 5-7).

1: procedure RUN$(\mathcal{D} = \{x_{n}\}_{n = 1}^{N})$
2: $y^{*}\gets$ Solution to Eq. (5)
3: Optimize $\theta$ with Eq. (8)
4: Sample pose candidates $\{\xi_i\}_i$
5: for $n\gets 1$ to $N$ do $\triangleright$ Pose initialization
6: $\pi (x_{n})\gets \arg \min_{\xi_{i}}\| \mathcal{R}(\xi_i ,\theta) - x_{n}\|_{d_{\zeta}}$
7: end for
8: Optimize $\pi (x_{n})$ with Eq. (8) for all $n$
9: Determine $\Phi_{x_n}^{\mathrm{fwd}}$ and $\Phi_{x_n}^{\mathrm{rev}}$ for all $n$
10: return $\theta, \pi, \{\Phi_{x_n}^{\mathrm{fwd}}\}_{n=1}^N, \{\Phi_{x_n}^{\mathrm{rev}}\}_{n=1}^N$
11: end procedure

# Algorithm 1: Overview.
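To make the control flow concrete, below is a minimal, runnable sketch of the pose-initialization stage (Algorithm 1, lines 4-7). Everything here is a toy stand-in of our own: in the actual method, the renderer is $\mathcal{R}$, the distance is $d_{\zeta}$ from Sec. 3.2, and the candidates come from the FoV/azimuth/elevation grid described after the tables.

```python
import numpy as np

rng = np.random.default_rng(0)

def render(pose, theta):
    # Toy stand-in for the volumetric renderer R(xi, theta).
    return np.tanh(theta @ pose)

def semantic_distance(a, b):
    # Toy stand-in for the semantic-space metric ||.||_{d_zeta}.
    return float(np.linalg.norm(a - b))

def initialize_poses(images, theta, num_candidates=8):
    """Sketch of Algorithm 1, lines 4-7: analysis-by-synthesis initialization."""
    # Line 4: candidate poses; the paper uses an exhaustive 3 FoV x 16 azimuth
    # x 16 elevation grid (768 candidates), here a small random set.
    candidates = [rng.standard_normal(4) for _ in range(num_candidates)]
    # Lines 5-7: pick, per image, the candidate whose rendering is closest
    # to the image under the semantic distance.
    return [min(candidates,
                key=lambda xi: semantic_distance(render(xi, theta), x))
            for x in images]

theta = rng.standard_normal((5, 4))          # stand-in for the frozen NeRF
images = [rng.standard_normal(5) for _ in range(3)]
poses = initialize_poses(images, theta)
```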
| Labels | Methods | Rotation (°) ↓ ($S_C$) | Rotation (°) ↓ ($\sim S_C$) | Translation ↓ ($S_C$) | Translation ↓ ($\sim S_C$) |
|---|---|---|---|---|---|
| Pose | NeROIC [17] | 42.11 | - | 0.09 | - |
| Pose | NeRS [47] | 122.41 | 123.63 | 0.49 | 0.52 |
| Pose | SAMURAI [1] | 26.16 | 36.59 | 0.24 | 0.35 |
| None | GNeRF [25] | 93.15 | 80.22 | 1.02 | 1.04 |
| None | PoseDiffusion [42] | 46.79 | 46.34 | 0.81 | 0.90 |
| None | Ours (3 seeds) | 26.97±2.24 | 32.56±2.90 | 0.40±0.01 | 0.41±0.04 |
| None | Ours (No Pose Init) | 53.45 | 57.87 | 0.97 | 0.96 |
| None | Ours (No IoU Loss) | 31.29 | 31.15 | 0.87 | 0.85 |
Table 1: Pose Estimation from Multi-Illumination Image Captures. Our method performs better than both GNeRF and PoseDiffusion given the same input information, and on par with SAMURAI, which additionally assumes camera pose directions as input. Different random seeds lead to different canonical shapes, but our method is robust to such variations. $\pm$ denotes means followed by standard deviations.
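The errors in Tables 1 and 2 are computed after globally aligning predicted poses with the ground truth via Procrustes analysis [10] (see Metrics below). As a reference, here is our own illustrative sketch of such a similarity alignment on camera centers; the paper's exact evaluation protocol may differ.

```python
import numpy as np

def align_similarity(pred, gt):
    """Fit gt ~ s * R @ pred + t (orthogonal Procrustes / Umeyama).

    pred, gt: (N, 3) arrays of camera centers. Returns (s, R, t).
    """
    mu_p, mu_g = pred.mean(0), gt.mean(0)
    P, G = pred - mu_p, gt - mu_g
    U, S, Vt = np.linalg.svd(G.T @ P)                        # 3x3 covariance SVD
    D = np.diag([1.0, 1.0, np.sign(np.linalg.det(U @ Vt))])  # avoid reflections
    R = U @ D @ Vt
    s = (S * np.diag(D)).sum() / (P ** 2).sum()              # optimal scale
    t = mu_g - s * R @ mu_p
    return s, R, t

# Sanity check: aligning a point set to itself recovers the identity transform.
pred = np.random.randn(10, 3)
s, R, t = align_similarity(pred, pred)
assert np.isclose(s, 1.0) and np.allclose(R, np.eye(3), atol=1e-6)
```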
| Methods | Bed | Bookcase | Chair | Desk | Sofa | Table | Wardrobe | Overall |
|---|---|---|---|---|---|---|---|---|
| PoseDiffusion [42] | 45.74 / 0.99 | 22.83 / 0.33 | 46.80 / 1.04 | 23.89 / 0.49 | 33.99 / 0.69 | 43.53 / 1.22 | 31.54 / 1.80 | 35.47±10.0 / 0.94±0.49 |
| Ours | 37.00 / 0.40 | 36.47 / 0.45 | 34.58 / 0.76 | 26.53 / 0.36 | 26.49 / 0.27 | 49.44 / 0.67 | 27.41 / 0.39 | 33.99±8.26 / 0.47±0.18 |
Table 2: Pose Estimation from Cross-Instance Image Collections. Our method achieves overall better performance than PoseDiffusion on Pix3D. "R" stands for rotation and "T" for translation; each cell reports R° ↓ / T ↓. $\pm$ denotes cross-category means followed by standard deviations.

Specifically, we parameterize the camera intrinsics using a pinhole camera model with a scalar Field-of-View (FoV) value, and sample the camera parameters $(\xi, \kappa)$ from a set of candidates determined by an exhaustive combination of 3 FoV, 16 azimuth, and 16 elevation values uniformly sampled from $[15^{\circ}, 60^{\circ}]$, $[-180^{\circ}, 180^{\circ}]$, and $[-90^{\circ}, 90^{\circ}]$, respectively. In this pose initialization stage, all renderings use a fixed camera radius and are cropped with the tightest bounding boxes of the rendered foreground masks before being compared with the real image inputs. Line 6 is effectively Eq. (8) with $\lambda_{\zeta} = 1$ and $\lambda_{\mathrm{IoU}} = 0$.

After pose initialization, we use the $\mathfrak{se}(3)$ Lie algebra to parameterize camera extrinsics following BARF [20], and optimize the extrinsics and intrinsics of each input image (Algorithm 1, line 8), with $\lambda_{\zeta} = 0$ and $\lambda_{\mathrm{IoU}} = 1$, for 1,000 iterations with the Adam [15] optimizer and learning rate 0.001. Since $\theta$ is frozen, this optimization effectively considers only the second term of Eq. (8). Finally, to optimize the canonical coordinate mappings (Algorithm 1, line 9), we run 4,000 iterations of Eq. (10) per input image with AdamW and learning rate 0.01. All experiments run on a single 24GB A5000 GPU.

# 4 Experiments

In this section, we first benchmark the pose estimation performance of our method on in-the-wild image captures (Sec. 4.1), and then show qualitative results on diverse input data and demonstrate applications such as image editing (Sec. 4.2).

![](images/36334097b008a9f2ca11dbb40b3addfe05a182038a294b0561a9a00a3edd8e18.jpg)
Fig. 4: Pose Estimation for Tourist Landmarks. This is a challenging setting due to the varying viewpoints and lighting conditions, yet the proposed method successfully aligns online tourist photos, taken at different times and possibly at different geographical locations, into one canonical representation. The top rows show input images and the bottom rows show shape templates under aligned poses.

# 4.1 Pose Estimation

Dataset. We benchmark pose estimation performance under two settings. First, for a single-instance, varying-illumination setting, we use the in-the-wild split of the NAVI [14] dataset, which contains 35 object-centric image collections in its official release. Each image collection contains an average of around 60 casual image captures of an object instance placed under different illumination conditions, backgrounds, and cameras. Second, for a single-category, cross-instance setting, we use Pix3D [40], a dataset of natural in-the-wild images grouped into 9 categories, each containing multiple shape models of IKEA objects. We use 20 randomly selected images from each category except for "tool" and "misc", as they involve shapes visually and semantically far apart.

We use identical hyperparameters for all scenes, and a generic text prompt, "a photo of sks object", for initialization. The text embeddings corresponding to the tokens for "sks object" are optimized using Eq. (5) while the rest are kept frozen, as sketched below.
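Below is a minimal sketch of this selective optimization, with a toy embedding table and a placeholder loss standing in for $\mathcal{L}_{\mathrm{diff}}^{\phi}$; sizes and token ids are illustrative, not from the actual tokenizer.

```python
import torch

# Toy sketch (ours): optimize only new embedding rows for the placeholder
# tokens ("sks object"), keeping the pre-trained table frozen, as in Eq. (5).
dim = 768                                   # CLIP-like width, illustrative
frozen_table = torch.randn(1000, dim)       # small stand-in for the vocabulary
learned = torch.nn.Parameter(0.01 * torch.randn(2, dim))  # "sks", "object"

opt = torch.optim.AdamW([learned], lr=0.02, weight_decay=0.01)
for _ in range(3):                          # stand-in for the 1,000 iterations
    fixed = frozen_table[torch.tensor([3, 11, 5])]   # hypothetical ids: "a photo of"
    prompt_emb = torch.cat([fixed, learned], dim=0)  # "a photo of sks object"
    loss = prompt_emb.square().mean()       # placeholder for L_diff^phi(x, y)
    opt.zero_grad()
    loss.backward()
    opt.step()
```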
For each scene, optimizing the NeRF takes around 1 hour, pose initialization around 15 minutes, and pose optimization around 45 minutes.

Baselines. We compare with several multiview reconstruction baselines. In particular, NeROIC [17] uses poses from COLMAP, and NeRS [47] and SAMURAI [1] require initial camera directions. GNeRF [25] is a pose-free multiview 3D reconstruction method originally designed for single-illumination scenes, adapted as a baseline using the same input assumptions as ours. PoseDiffusion [42] is a learning-based framework that predicts relative object poses, using ground-truth pose annotations as training supervision. The original paper takes a model pre-trained on CO3D [34] and evaluates its pose prediction performance in the wild, and we use the same checkpoint for evaluation.

![](images/2640969c6ec9657cbdd4d97f071d9713a98aa5c547da988caa613e7c46aabeec.jpg)
Fig. 5: Object Alignment from Internet Images. Results of an online image search may contain various appearances, identities, and articulated poses of the object. Our method can successfully associate these in-the-wild images within one shared 3D space.

Metrics. The varying illuminations pose challenges to classical pose estimation methods such as COLMAP [38]. We use the official split of the data, which partitions the 35 scenes into 19 scenes where COLMAP converges ($S_{C}$ in Table 1) and 16 scenes where COLMAP fails to converge ($\sim S_{C}$). Following [14], we report the absolute rotation and translation errors using Procrustes analysis [10]: for each scene, the predicted camera poses are aligned with the ground-truth pose annotations using a global transformation before computing the pose metrics.

Results. Handling different illumination conditions is challenging for all baselines that use photometric-reconstruction-based optimization [1, 17, 47], even with additional information for pose initialization. As shown in Table 1, our approach significantly outperforms both GNeRF and PoseDiffusion and works on par with SAMURAI, which requires additional pose initialization. We run our full pipeline with 3 random seeds and observe consistent performance across seeds. Qualitative results of aligned templates and learned canonical coordinate maps are shown in Figure 3. Failure modes are discussed in Appendix F. In the cross-instance setting of Table 2, our method achieves better overall performance than PoseDiffusion, the best-performing baseline from Table 1.

Ablations. Table 1 also shows ablations of the pose-fitting objectives. The initialization is critical ("No Pose Init"), which is expected, as pose optimization is susceptible to local optima [20]. "No IoU Loss", which is equivalent to using the initialized poses as final predictions, also negatively affects performance.

![](images/46ac92b264d02545207cac6b17cfc59f2675186a671276c526b78bc586a9f0a8.jpg)
Fig. 6: Cross-Category Results. The method can associate images from different categories, such as cats and dogs, by leveraging a learned average shape.

# 4.2 Applications

We show qualitative results on various in-the-wild image data. Inputs for Figures 4 and 5 are crawled with standard online image search engines and are CC-licensed, each collection consisting of 50 to 100 images. Inputs for Figures 6 and 7 come from the SPair-71k dataset [28].
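Several of the applications below rely on the dense 2D-2D correspondences $\Phi_{x_2}^{\mathrm{rev}} \circ \Phi_{x_1}^{\mathrm{fwd}}$ from Sec. 3.3. The following schematic sketch (ours) composes the two mappings, with random arrays standing in for the optimized per-pixel canonical coordinate maps.

```python
import numpy as np

# Schematic sketch (ours): dense 2D-2D correspondence by composing the
# forward map of x1 with a nearest-neighbor reverse map of x2 (Eq. (12)).
H = W = 64
fwd1 = np.random.rand(H, W, 3)   # stand-in for per-pixel NOCS of image x1
fwd2 = np.random.rand(H, W, 3)   # stand-in for per-pixel NOCS of image x2

def correspond(u, fwd_src, fwd_tgt):
    """Map pixel u = (row, col) in the source image to a pixel in the target."""
    p = fwd_src[u]                                  # 3D canonical point, Phi^fwd
    d = np.linalg.norm(fwd_tgt.reshape(-1, 3) - p, axis=1)
    return np.unravel_index(d.argmin(), (H, W))     # nearest-neighbor inversion

print(correspond((10, 20), fwd1, fwd2))
```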
We use identical hyperparameters for all datasets, except for the text prompt initialization, where we use a generic description of the object, e.g., "a photo of sks sculpture", or "a photo of cats plus dogs" for Figure 6.

Single-Instance. Figure 4 shows results on Internet photos of tourist landmarks, which may contain a large diversity of illuminations and styles. The proposed method handles these variations, aligning the photos and art pieces to the same canonical 3D space and recovering the relative camera poses.

Cross-Instance, Single-Category. Internet images of generic objects may contain more shape and texture variation than landmarks. Figure 5 shows results for various objects, where the framework infers a canonical shape from the inputs to capture the shared semantic components being observed.

Cross-Category. The method leverages semantic features to establish alignment and does not strictly assume that inputs are of the same category. In Figure 6, the method infers an average shape as an anchor to reason about the relative relations among images from different categories.

Inputs with Deformable Shapes. To test the robustness of the method, we run the pipeline on images of humans with highly diverse poses. Figures 1 and 7 show that the method assigns plausible poses despite the large diversity of shapes and articulated poses in the inputs.

Image Editing. The proposed method finds image correspondences and can be applied to image editing, as shown in Figure 8. Figure 8 (c) shows that our method obtains more visually plausible results than the Nearest-Neighbor (NN) baseline using the same DINO features. The baseline matches features in 2D for each pixel individually and produces noisy results, as discussed in Appendix C. Quantitative evaluation of correspondence matching and additional qualitative results for editing are included in Appendices D and E.

![](images/7af03c3af46925888c033ffb75d3a45e2061e556eec3b1343e71aa76783e4a20.jpg)
Fig. 7: Results on Deformable Objects. The method can be applied to images with highly diverse articulated poses and shapes, as shown in the examples above.

![](images/fbf7042de2cf9b45bfd3d816dcd50e2a2ad49d33715e037590b2e71f4bf7d996.jpg)
(a) Texture Propagation

![](images/57852ca76fad81fafab7972c2d5c18d657810d57aff50291248ef72539e24171.jpg)
(b) Editing Propagation
(c) Baseline Comparisons
Fig. 8: Image Editing. Our method propagates texture in (a) and (c) and regional editing in (b) to real images. As shown in (c), it achieves smoother results than the nearest-neighbor (NN) baseline thanks to its 3D geometric reasoning.

# 5 Conclusion

We have introduced 3D Congealing, 3D-aware alignment of 2D images capturing semantically similar objects. Our framework leverages a canonical 3D representation that encapsulates geometric and semantic information and, through optimization, fuses prior knowledge from a pre-trained image generative model with semantic information from the input images. We show that our model achieves strong results on real-world image datasets under challenging identity, illumination, and background conditions.

Acknowledgments. We thank Chen Geng and Sharon Lee for their help in reviewing the manuscript. This work is in part supported by NSF RI #2211258, #2338203, and ONR MURI N00014-22-1-2740.

# References
1. Boss, M., Engelhardt, A., Kar, A., Li, Y., Sun, D., Barron, J., Lensch, H., Jampani, V.: Samurai: Shape and material from unconstrained real-world arbitrary image collections. Advances in Neural Information Processing Systems 35, 26389-26403 (2022)
2. Caron, M., Touvron, H., Misra, I., Jégou, H., Mairal, J., Bojanowski, P., Joulin, A.: Emerging properties in self-supervised vision transformers. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9650-9660 (2021)
3. Chen, X., Dong, Z., Song, J., Geiger, A., Hilliges, O.: Category level object pose estimation via neural analysis-by-synthesis. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXVI 16. pp. 139-156. Springer (2020)
4. Chen, Y., Chen, X., Wang, X., Zhang, Q., Guo, Y., Shan, Y., Wang, F.: Local-to-global registration for bundle-adjusting neural radiance fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8264-8273 (2023)
5. Cheng, W., Cao, Y.P., Shan, Y.: Id-pose: Sparse-view camera pose estimation by inverting diffusion models. arXiv preprint arXiv:2306.17140 (2023)
6. Deng, Y., Yang, J., Tong, X.: Deformed implicit field: Modeling 3D shapes with learned dense correspondence. In: CVPR (2021)
7. Gal, R., Alaluf, Y., Atzmon, Y., Patashnik, O., Bermano, A.H., Chechik, G., Cohen-Or, D.: An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618 (2022)
8. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial networks. Communications of the ACM 63(11), 139-144 (2020)
9. Goodwin, W., Vaze, S., Havoutis, I., Posner, I.: Zero-shot category-level object pose estimation. In: European Conference on Computer Vision. pp. 516-532. Springer (2022)
10. Gower, J.C., Dijksterhuis, G.B.: Procrustes problems, vol. 30. OUP Oxford (2004)
11. Gupta, K., Jampani, V., Esteves, C., Shrivastava, A., Makadia, A., Snavely, N., Kar, A.: ASIC: Aligning sparse in-the-wild image collections. arXiv preprint arXiv:2303.16201 (2023)
12. Huang, G., Mattar, M., Lee, H., Learned-Miller, E.: Learning to align from scratch. Advances in Neural Information Processing Systems 25 (2012)
13. Huang, G.B., Jain, V., Learned-Miller, E.: Unsupervised joint alignment of complex images. In: ICCV. pp. 1-8. IEEE (2007)
14. Jampani, V., Maninis, K.K., Engelhardt, A., Karpur, A., Truong, K., Sargent, K., Popov, S., Araujo, A., Martin-Brualla, R., Patel, K., et al.: Navi: Category-agnostic image collections with high-quality 3d shape and pose annotations. arXiv preprint arXiv:2306.09109 (2023)
15. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. In: International Conference on Learning Representations (2015)
16. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., et al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023)
17. Kuang, Z., Olszewski, K., Chai, M., Huang, Z., Achlioptas, P., Tulyakov, S.: Neroic: Neural rendering of objects from online image collections. ACM Transactions on Graphics (TOG) 41(4), 1-12 (2022)
18. Learned-Miller, E.G.: Data driven image models through continuous joint alignment. IEEE TPAMI 28(2), 236-250 (2005)
19. Lin, A., Zhang, J.Y., Ramanan, D., Tulsiani, S.: Relpose++: Recovering 6d poses from sparse-view observations. arXiv preprint arXiv:2305.04926 (2023)
20. Lin, C.H., Ma, W.C., Torralba, A., Lucey, S.: Barf: Bundle-adjusting neural radiance fields. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5741-5751 (2021)
21. Liu, R., Wu, R., Van Hoorick, B., Tokmakov, P., Zakharov, S., Vondrick, C.: Zero-1-to-3: Zero-shot one image to 3d object. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9298-9309 (2023)
22. Lorensen, W.E., Cline, H.E.: Marching cubes: A high resolution 3d surface construction algorithm. ACM SIGGRAPH Computer Graphics 21(4), 163-169 (1987)
23. Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: International Conference on Learning Representations (2018)
24. Martin-Brualla, R., Radwan, N., Sajjadi, M.S.M., Barron, J.T., Dosovitskiy, A., Duckworth, D.: NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections. In: CVPR (2021)
25. Meng, Q., Chen, A., Luo, H., Wu, M., Su, H., Xu, L., He, X., Yu, J.: Gnerf: Gan-based neural radiance field without posed camera. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 6351-6361 (2021)
26. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM 65(1), 99-106 (2021)
27. Miller, E.G., Matsakis, N.E., Viola, P.A.: Learning from one example through shared densities on transforms. In: CVPR. vol. 1, pp. 464-471. IEEE (2000)
28. Min, J., Lee, J., Ponce, J., Cho, M.: Spair-71k: A large-scale benchmark for semantic correspondence. arXiv preprint arXiv:1908.10543 (2019)
29. Ofri-Amar, D., Geyer, M., Kasten, Y., Dekel, T.: Neural congealing: Aligning images to a joint semantic atlas. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 19403-19412 (2023)
30. Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., et al.: Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193 (2023)
31. Peebles, W., Zhu, J.Y., Zhang, R., Torralba, A., Efros, A.A., Shechtman, E.: GAN-supervised dense visual alignment. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13470-13481 (2022)
32. Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988 (2022)
33. Raj, A., Kaza, S., Poole, B., Niemeyer, M., Ruiz, N., Mildenhall, B., Zada, S., Aberman, K., Rubinstein, M., Barron, J., et al.: Dreambooth3d: Subject-driven text-to-3d generation. arXiv preprint arXiv:2303.13508 (2023)
34. Reizenstein, J., Shapovalov, R., Henzler, P., Sbordone, L., Labatut, P., Novotny, D.: Common objects in 3d: Large-scale learning and evaluation of real-life 3d category reconstruction. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10901-10911 (2021)
35. Ren, T., Liu, S., Zeng, A., Lin, J., Li, K., Cao, H., Chen, J., Huang, X., Chen, Y., Yan, F., et al.: Grounded sam: Assembling open-world models for diverse visual tasks. arXiv preprint arXiv:2401.14159 (2024)
36. Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 10684-10695 (2022)
37. Ruiz, N., Li, Y., Jampani, V., Pritch, Y., Rubinstein, M., Aberman, K.: Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 22500-22510 (2023)
38. Schonberger, J.L., Frahm, J.M.: Structure-from-motion revisited. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 4104-4113 (2016)
39. Shi, Y., Wang, P., Ye, J., Long, M., Li, K., Yang, X.: Mvdream: Multi-view diffusion for 3d generation. arXiv preprint arXiv:2308.16512 (2023)
40. Sun, X., Wu, J., Zhang, X., Zhang, Z., Zhang, C., Xue, T., Tenenbaum, J.B., Freeman, W.T.: Pix3d: Dataset and methods for single-image 3d shape modeling. In: CVPR (2018)
41. Wang, H., Sridhar, S., Huang, J., Valentin, J., Song, S., Guibas, L.J.: Normalized object coordinate space for category-level 6d object pose and size estimation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2642-2651 (2019)
42. Wang, J., Rupprecht, C., Novotny, D.: Posediffusion: Solving pose estimation via diffusion-aided bundle adjustment. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9773-9783 (2023)
43. Wang, P., Liu, L., Liu, Y., Theobalt, C., Komura, T., Wang, W.: Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689 (2021)
44. Wang, Z., Wu, S., Xie, W., Chen, M., Prisacariu, V.A.: Nerf--: Neural radiance fields without known camera parameters. arXiv preprint arXiv:2102.07064 (2021)
45. Yariv, L., Gu, J., Kasten, Y., Lipman, Y.: Volume rendering of neural implicit surfaces. Advances in Neural Information Processing Systems 34, 4805-4815 (2021)
46. Yen-Chen, L., Florence, P., Barron, J.T., Rodriguez, A., Isola, P., Lin, T.Y.: inerf: Inverting neural radiance fields for pose estimation. In: 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 1323-1330. IEEE (2021)
47. Zhang, J., Yang, G., Tulsiani, S., Ramanan, D.: Ners: Neural reflectance surfaces for sparse-view 3d reconstruction in the wild. In: Advances in Neural Information Processing Systems. vol. 34, pp. 29835-29847 (2021)
48. Zhang, J.Y., Ramanan, D., Tulsiani, S.: Relpose: Predicting probabilistic relative rotation for single objects in the wild. In: European Conference on Computer Vision. pp. 592-611. Springer (2022)
Springer (2022) \ No newline at end of file diff --git a/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/images.zip b/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..1b97d36f27d2eecbac722c17df6a1f362e7f17bc --- /dev/null +++ b/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e304add3f282cb4b4ec2bc55240eb0c86cd72eeb7122530737c00a27e2e45ef6 +size 736179 diff --git a/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/layout.json b/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..9fd63773b4e7637162c309d812717b2ff594c652 --- /dev/null +++ b/2024/3D Congealing_ 3D-Aware Image Alignment in the Wild/layout.json @@ -0,0 +1,10630 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 203, + 112, + 411, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 112, + 411, + 148 + ], + "spans": [ + { + "bbox": [ + 203, + 112, + 411, + 148 + ], + "type": "text", + "content": "3D Congealing: 3D-Aware Image Alignment in the Wild" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "spans": [ + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "type": "text", + "content": "Yunzhi Zhang" + }, + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "type": "text", + "content": ", Zizhang Li" + }, + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "type": "text", + "content": ", Amit Raj" + }, + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "type": "text", + "content": ", Andreas Engelhardt" + }, + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "type": "text", + "content": ", Yuanzhen Li" + }, + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "type": "text", + "content": ", Tingbo Hou" + }, + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "type": "text", + "content": ", Jiajun Wu" + }, + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "type": "text", + "content": ", and Varun Jampani" + }, + { + "bbox": [ + 132, + 167, + 481, + 194 + ], + "type": "inline_equation", + "content": "^{5}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 255, + 201, + 358, + 256 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 261, + 201, + 352, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 201, + 352, + 213 + ], + "spans": [ + { + "bbox": [ + 261, + 201, + 352, + 213 + ], + "type": "text", + "content": "1 Stanford University" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 265, + 213, + 348, + 224 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 265, + 213, + 348, + 224 + ], + "spans": [ + { + "bbox": [ + 265, + 213, + 348, + 224 + ], + "type": "text", + "content": "2 Google DeepMind" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 255, + 224, + 358, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 224, + 358, + 235 + ], + "spans": [ + { + "bbox": [ + 255, + 224, + 358, + 235 + ], + "type": "text", + "content": "3 University of Tübingen" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 276, + 235, + 337, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 235, + 337, + 244 + ], + "spans": [ + { + "bbox": [ + 276, + 235, + 337, + 244 + ], + "type": "text", + "content": "4 Meta GenAI" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 278, + 245, + 336, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 278, + 245, + 336, + 256 + ], + "spans": [ + { + "bbox": [ + 278, + 245, + 336, + 256 + ], + "type": "text", + "content": "Stability AI" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 159, + 286, + 455, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 286, + 455, + 508 + ], + "spans": [ + { + "bbox": [ + 159, + 286, + 455, + 508 + ], + "type": "text", + "content": "Abstract. We propose 3D Congealing, a novel problem of 3D-aware alignment for 2D images capturing semantically similar objects. Given a collection of unlabeled Internet images, our goal is to associate the shared semantic parts from the inputs and aggregate the knowledge from 2D images to a shared 3D canonical space. We introduce a general framework that tackles the task without assuming shape templates, poses, or any camera parameters. At its core is a canonical 3D representation that encapsulates geometric and semantic information. The framework optimizes for the canonical representation together with the pose for each input image, and a per-image coordinate map that warps 2D pixel coordinates to the 3D canonical frame to account for the shape matching. The optimization procedure fuses prior knowledge from a pre-trained image generative model and semantic information from input images. The former provides strong knowledge guidance for this under-constraint task, while the latter provides the necessary information to mitigate the training data bias from the pre-trained model. Our framework can be used for various tasks such as pose estimation and image editing, achieving strong results on real-world image datasets under challenging illumination conditions and on in-the-wild online image collections. Project page at https://ai.stanford.edu/~yzzhang/projects/3d-congealing/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 544, + 230, + 558 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 544, + 230, + 558 + ], + "spans": [ + { + "bbox": [ + 132, + 544, + 230, + 558 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 569, + 482, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 569, + 482, + 668 + ], + "spans": [ + { + "bbox": [ + 130, + 569, + 482, + 668 + ], + "type": "text", + "content": "We propose the task of 3D Congealing, where the goal is to align a collection of images containing semantically similar objects into a shared 3D space. 
Specifically, we aim to obtain a canonical 3D representation together with the pose and a dense map of 2D-3D correspondence for each image in the collection. The input images may contain object instances belonging to a similar category with varying shapes and textures, and are captured under distinct camera viewpoints and illumination conditions, which all contribute to the pixel-level difference as shown in Figure 1. Despite such inter-image differences, humans excel at aligning such" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 138, + 114, + 478, + 205 + ], + "blocks": [ + { + "bbox": [ + 138, + 114, + 478, + 205 + ], + "lines": [ + { + "bbox": [ + 138, + 114, + 478, + 205 + ], + "spans": [ + { + "bbox": [ + 138, + 114, + 478, + 205 + ], + "type": "image", + "image_path": "326fa3f4c2133cca056625ab6933f5a81979e4d9cd0e440db6d346aa227594ec.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 216, + 482, + 271 + ], + "lines": [ + { + "bbox": [ + 130, + 216, + 482, + 271 + ], + "spans": [ + { + "bbox": [ + 130, + 216, + 482, + 271 + ], + "type": "text", + "content": "Fig. 1: Objects with different shapes and appearances, such as these sculptures, may share similar semantic parts and a similar geometric structure. We study 3D Congealing, inferring and aligning such a shared structure from an unlabeled image collection. Such alignment can be used for tasks such as pose estimation and image editing. See Appendix A for full results." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 294, + 481, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 294, + 481, + 319 + ], + "spans": [ + { + "bbox": [ + 130, + 294, + 481, + 319 + ], + "type": "text", + "content": "images with one another in a geometrically and semantically consistent manner based on their 3D-aware understanding." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 322, + 482, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 322, + 482, + 453 + ], + "spans": [ + { + "bbox": [ + 130, + 322, + 482, + 453 + ], + "type": "text", + "content": "Obtaining a canonical 3D representation and grounding input images to the 3D canonical space enable several downstream tasks, such as 6-DoF object pose estimation, pose-aware image filtering, and image editing. Unlike the task of 2D congealing [11, 29, 31], where the aim is to align the 2D pixels across the images, 3D Congealing requires aggregating the information from the image collection altogether and forming the association among images in 3D. The task is also closely related to 3D reconstruction from multiview images, with a key distinction in the problem setting, as inputs here do not necessarily contain identical objects but rather semantically similar ones. Such a difference opens up the possibility of image alignment from readily available image collections on the Internet, e.g., online search results, landmark images, and personal photo collections." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 458, + 482, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 458, + 482, + 590 + ], + "spans": [ + { + "bbox": [ + 130, + 458, + 482, + 590 + ], + "type": "text", + "content": "3D Congealing represents a challenging problem, particularly for arbitrary images without camera pose or lighting annotations, even when the input images contain identical objects [1,4,20,44], because the solutions for pose and shape are generally entangled. On the one hand, the definition of poses is specific to the coordinate frame of the shape; on the other hand, the shape optimization is typically guided by the pixel-wise supervision of images under the estimated poses. To overcome the ambiguity in jointly estimating poses and shapes, prior works mostly start from noisy pose initializations [20], data-specific initial pose distributions [25,44], or rough pose annotations such as pose quadrants [1]. They then perform joint optimization for a 3D representation using an objective of reconstructing input image pixels [1,20,44] or distribution matching [25]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": "In this work, instead of relying on initial poses as starting points for shape reconstruction, we propose to tackle the joint optimization problem from a different perspective. We first obtain a plausible 3D shape that is compliant with the input image observations using pre-trained generative models, and then use semantic-aware visual features, e.g., pre-trained features from DINO [2,30] and Stable-Diffusion [36], to register input images to the 3D shape. Compared to" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "type": "text", + "content": "Y. Zhang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": "photometric reconstruction losses, these features are more tolerant of variance in object identities among image inputs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 140, + 481, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 140, + 481, + 247 + ], + "spans": [ + { + "bbox": [ + 130, + 140, + 481, + 247 + ], + "type": "text", + "content": "We make deliberate design choices to instantiate such a framework that fuses the knowledge from pre-trained text-to-image (T2I) generative models with real image inputs. 
First, to utilize the prior knowledge from generative models, we opt to apply a T2I personalization method, Textual Inversion [7], which aims to find the most suitable text embedding to reconstruct the input images via the pre-trained model. Furthermore, a semantic-aware distance is proposed to mitigate the appearance discrepancy between the rendered image and the input photo collection. Finally, a canonical coordinate mapping is learned to find the correspondence between 3D canonical representation and 2D input images." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 248, + 481, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 248, + 481, + 307 + ], + "spans": [ + { + "bbox": [ + 130, + 248, + 481, + 307 + ], + "type": "text", + "content": "To prove the effectiveness of the proposed framework, we compare the proposed method against several baselines on the task of pose estimation on a dataset with varying illuminations and show that our method surpasses all the baselines significantly. We also demonstrate several applications of the proposed method, including image editing and object alignment on web image data." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 146, + 308, + 302, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 308, + 302, + 319 + ], + "spans": [ + { + "bbox": [ + 146, + 308, + 302, + 319 + ], + "type": "text", + "content": "In summary, our contributions are:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 135, + 322, + 480, + 406 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 136, + 322, + 480, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 322, + 480, + 346 + ], + "spans": [ + { + "bbox": [ + 136, + 322, + 480, + 346 + ], + "type": "text", + "content": "1. We propose a novel task of 3D Congealing that involves aligning images of semantically similar objects in a shared 3D space." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 135, + 346, + 480, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 346, + 480, + 381 + ], + "spans": [ + { + "bbox": [ + 135, + 346, + 480, + 381 + ], + "type": "text", + "content": "2. We develop a framework tackling the proposed task and demonstrate several applications using the obtained 2D-3D correspondence, such as pose estimation and image editing." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 135, + 382, + 480, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 382, + 480, + 406 + ], + "spans": [ + { + "bbox": [ + 135, + 382, + 480, + 406 + ], + "type": "text", + "content": "3. We show the effectiveness and applicability of the proposed method on a diverse range of in-the-wild Internet images." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 424, + 242, + 437 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 424, + 242, + 437 + ], + "spans": [ + { + "bbox": [ + 132, + 424, + 242, + 437 + ], + "type": "text", + "content": "2 Related Works" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 449, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 449, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 449, + 482, + 666 + ], + "type": "text", + "content": "Image Alignment and Congealing. 
The task of image alignment for a single instance, possibly under varying illuminations, has been relatively well-studied [24, 47]. To align images containing different instances from the same category with small deformations, one line of approach is known as imageCongealing [12, 13, 18, 27, 29, 31]. In particular, Neural Congealing [29] learns atlases to capture common semantic features from input images and recovers a dense mapping between input images and the atlases. GANgealing [31] uses a spatial transformer to map a randomly generated image from a GAN [8] to a jointly aligned space. These 2D-warping-based methods are typically applied to source and target image pairs with no or small camera rotation, and work best on in-plane transformation, while our proposed framework handles a larger variation of viewpoints due to 3D reasoning. On the other hand, DIFNet [6] exemplifies an approach of joint optimization of shape template and deformation, provided with the 3D shape. In comparison, we propose a template-followed-by-implicit-deformation approach and assume a single 2D observation for each instance instead of 3D inputs. The proposed approach exploits the fact that a \"good\" template, i.e., one that captures common geometric structure of inputs, is not unique and a solution can be effectively found before knowing input image poses. Compared to joint" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 219, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 219, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Congealing: 3D-Aware Image Alignment in the Wild" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 189 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 189 + ], + "type": "text", + "content": "optimization methods, it reduces task complexity by providing such an anchoring template to make later image registration easier. Finally, this work provides qualitative results on aligning images cross instances with large deformation. The output global alignment of input instances and articulation-free templates can be useful for downstream reconstruction with image-specific articulation, which is beyond the scope of this work." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 215, + 483, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 215, + 483, + 396 + ], + "spans": [ + { + "bbox": [ + 130, + 215, + 483, + 396 + ], + "type": "text", + "content": "Object Pose Estimation. Object pose estimation aims to estimate the pose of an object instance with respect to the coordinate frame of its 3D shape. Classical methods for pose estimation recover poses from multi-view images using pixel- or feature-level matching to find the alignment between different images [38]. These methods are less suitable in the in-the-wild setting due to the increasing appearance variance. 
Recent methods tackle this task by supervised learning wht pose annotations [19,42,48], but it remains challenging for these methods to generalize beyond the training distribution. Another class of methods uses an analysis-by-synthesis framework to estimate pose given category-specific templates [3] or a pre-trained 3D representation [46]; these assumptions make it challenging to apply these methods to generic objects in the real world. ID-Pose [5] leverages Zero-1-to-3 [21], a view synthesis model, and optimizes for the relative pose given a source and a target image. Goodwin et al. [9] use pre-trained self-supervised features for matching, instead of doing it at the pixel level, but require both RGB and depth inputs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 422, + 483, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 422, + 483, + 531 + ], + "spans": [ + { + "bbox": [ + 130, + 422, + 483, + 531 + ], + "type": "text", + "content": "Shape Reconstruction from Image Collections. Neural rendering approaches [26, 43, 45] use images with known poses to reconstruct the 3D shape and appearance from a collection of multiview images. The assumptions of known poses and consistent illumination prevent these methods from being applied in the wild. Several works have extended these approaches to relax the pose assumption, proposing to handle noisy or unknown camera poses of input images through joint optimization of poses and 3D representation [4, 20, 44]. SAMURAI [1] further handles scenes under various illuminations, but requires access to coarse initial poses in the form of pose quadrant annotations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 557, + 483, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 557, + 483, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 557, + 483, + 666 + ], + "type": "text", + "content": "3D Distillation from 2D Diffusion Models. Recently, text-to-image diffusion models have shown great advancement in 2D image generation and are used for 3D asset distillation with conditions such as texts [32,39], single image [21], and image collections [33]. DreamFusion [32] has proposed to apply gradients computed from pre-trained text-to-image models to the optimized 3D representations. DreamBooth3D [33] proposed to utilize fine-tuned diffusion model [37] for the image-conditioned 3D reconstruction task. These works provide a viable solution for 3D reconstruction from image collections but without grounding the inputs to the 3D space as in ours." + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 228, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 228, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 228, + 102 + ], + "type": "text", + "content": "Y. Zhang et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 136, + 117, + 482, + 266 + ], + "blocks": [ + { + "bbox": [ + 136, + 117, + 482, + 266 + ], + "lines": [ + { + "bbox": [ + 136, + 117, + 482, + 266 + ], + "spans": [ + { + "bbox": [ + 136, + 117, + 482, + 266 + ], + "type": "image", + "image_path": "29133ff7e713ee4a80c5650915b474bc3ca0e87797780a9fef76cd115e896d26.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 278, + 482, + 356 + ], + "lines": [ + { + "bbox": [ + 130, + 278, + 482, + 356 + ], + "spans": [ + { + "bbox": [ + 130, + 278, + 482, + 356 + ], + "type": "text", + "content": "Fig. 2: Pipeline. Given a collection of in-the-wild images capturing similar objects as inputs, we develop a framework that \"congeals\" these images in 3D. The core representation consists of a canonical 3D shape that captures the geometric structure shared among the inputs, together with a set of coordinate mappings that register the input images to the canonical shape. The framework utilizes the prior knowledge of plausible 3D shapes from a generative model, and aligns images in the semantic space using pre-trained semantic feature extractors." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 370, + 202, + 383 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 370, + 202, + 383 + ], + "spans": [ + { + "bbox": [ + 132, + 370, + 202, + 383 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "spans": [ + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "text", + "content": "We formulate the problem of 3D Congealing as follows. Given a set of " + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "text", + "content": " object-centric images " + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{x_{n}\\}_{n = 1}^{N}" + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "text", + "content": " that captures objects sharing semantic components, e.g., objects from one category, we seek to align the object instances in these images into a canonical 3D representation, e.g., NeRF [26], parameterized by " + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "text", + "content": ". We refer to the coordinate frame of this 3D representation as the canonical frame. 
We also recover the camera pose of each observation " + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "inline_equation", + "content": "x\\in \\mathcal{D}" + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "text", + "content": " in the canonical frame, denoted using a pose function " + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "inline_equation", + "content": "\\pi :x\\mapsto (\\xi ,\\kappa)" + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "inline_equation", + "content": "\\xi" + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "text", + "content": " represents the object pose in SE(3) and " + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "text", + "content": " is the camera intrinsic parameters. We assume access to instance masks, which can be obtained using an off-the-shelf segmentation method [16]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 506, + 482, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 506, + 482, + 543 + ], + "spans": [ + { + "bbox": [ + 130, + 506, + 482, + 543 + ], + "type": "text", + "content": "The 3D representation should be consistent with the physical prior of objects in the natural world, and with input observations both geometrically and semantically. These constraints can be translated into an optimization problem:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 220, + 552, + 482, + 571 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 552, + 482, + 571 + ], + "spans": [ + { + "bbox": [ + 220, + 552, + 482, + 571 + ], + "type": "interline_equation", + "content": "\\max _ {\\pi , \\theta} p _ {\\Theta} (\\theta), \\text {s . t .} x = \\mathcal {R} (\\pi (x), \\theta), \\forall x \\in \\mathcal {D}, \\tag {1}", + "image_path": "973a0433cb7887caab4acf03438aeb0acbd7994c0b912faf3ab425c3eb55350d.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 581, + 482, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 581, + 482, + 628 + ], + "spans": [ + { + "bbox": [ + 130, + 581, + 482, + 628 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 581, + 482, + 628 + ], + "type": "inline_equation", + "content": "p_{\\Theta}" + }, + { + "bbox": [ + 130, + 581, + 482, + 628 + ], + "type": "text", + "content": " is a prior distribution for the 3D representation parameter " + }, + { + "bbox": [ + 130, + 581, + 482, + 628 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 130, + 581, + 482, + 628 + ], + "type": "text", + "content": " that encourages physically plausible solutions, " + }, + { + "bbox": [ + 130, + 581, + 482, + 628 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 130, + 581, + 482, + 628 + ], + "type": "text", + "content": " is a predefined rendering function that enforces geometric consistency, and the equality constraint on image reconstruction enforces compliance with input observations." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "content": "We will now describe an instantiation of the 3D prior " + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "inline_equation", + "content": "p_{\\Theta}" + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "content": " (Sec. 3.1), an image distance function that helps enforce the equality constraint (Sec. 3.2), followed by the 3D Congealing optimization (Sec. 3.3) to estimate input image poses " + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 219, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 219, + 91, + 448, + 102 + ], + "type": "text", + "content": "3D Congealing: 3D-Aware Image Alignment in the Wild" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 353, + 128 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 353, + 128 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 353, + 128 + ], + "type": "text", + "content": "3.1 3D Guidance from Generative Models" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 134, + 482, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 134, + 482, + 196 + ], + "spans": [ + { + "bbox": [ + 130, + 134, + 482, + 196 + ], + "type": "text", + "content": "As illustrated in the left part of Figure 2, we extract the prior knowledge for 3D representations " + }, + { + "bbox": [ + 130, + 134, + 482, + 196 + ], + "type": "inline_equation", + "content": "p_{\\Theta}(\\cdot)" + }, + { + "bbox": [ + 130, + 134, + 482, + 196 + ], + "type": "text", + "content": " from a pre-trained text-to-image (T2I) model such as Stable-Diffusion [36]. 
DreamFusion [32] proposes to turn a text prompt $y$ into a 3D representation $\theta$ using the following Score Distillation Sampling (SDS) objective, leveraging a T2I diffusion model with frozen parameters $\phi$,

$$\min_{\theta} \mathbb{E}_{x \in \mathcal{D}(\theta)} \mathcal{L}_{\mathrm{diff}}^{\phi}(x, y). \tag{2}$$

Here $\mathcal{D}(\theta) \coloneqq \{\mathcal{R}(\pi, \theta) \mid \pi \sim p_{\Pi}(\cdot)\}$ contains images rendered from the 3D representation $\theta$ under a prior camera distribution $p_{\Pi}(\cdot)$, and $\mathcal{L}_{\mathrm{diff}}^{\phi}$ is the training objective of image diffusion models, specified as follows:

$$\mathcal{L}_{\mathrm{diff}}^{\phi}(x, y) := \mathbb{E}_{t \sim \mathcal{U}([0,1]),\, \epsilon \sim \mathcal{N}(\mathbf{0}, I)} \left[ \omega(t)\, \|\epsilon_{\phi}(\alpha_{t} x + \sigma_{t} \epsilon, y, t) - \epsilon\|_{2}^{2} \right], \tag{3}$$

where $\epsilon_{\phi}$ is the pre-trained denoising network, $\omega(\cdot)$ is a timestep-dependent weighting function, $t$ is the diffusion timestep, and $\alpha_{t}, \sigma_{t}$ are timestep-dependent coefficients from the diffusion model schedule.

The above loss can be used to guide the optimization of a 3D representation $\theta$, whose gradient is approximated by

$$\nabla_{\theta} \mathcal{L}_{\mathrm{diff}}^{\phi}(x = \mathcal{R}(\xi, \kappa, \theta), y) \approx \mathbb{E}_{t, \epsilon} \left[ \omega(t)\, (\epsilon_{\phi}(\alpha_{t} x + \sigma_{t} \epsilon, y, t) - \epsilon)\, \frac{\partial x}{\partial \theta} \right], \tag{4}$$

where $\xi$ and $\kappa$ are the extrinsic and intrinsic camera parameters, respectively. This gradient approximation is adopted by later works such as MVDream [39], which we use as the backbone.
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 429, + 482, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 429, + 482, + 477 + ], + "spans": [ + { + "bbox": [ + 130, + 429, + 482, + 477 + ], + "type": "text", + "content": "The original SDS objective is optimizing for a text-conditioned 3D shape with a user-specified text prompt " + }, + { + "bbox": [ + 130, + 429, + 482, + 477 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 130, + 429, + 482, + 477 + ], + "type": "text", + "content": " and does not consider image inputs. Here, we use the technique from Textual Inversion [7] to recover the most suitable text prompt " + }, + { + "bbox": [ + 130, + 429, + 482, + 477 + ], + "type": "inline_equation", + "content": "y^{*}" + }, + { + "bbox": [ + 130, + 429, + 482, + 477 + ], + "type": "text", + "content": " that explains input images, defined as follows:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 244, + 485, + 482, + 503 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 485, + 482, + 503 + ], + "spans": [ + { + "bbox": [ + 244, + 485, + 482, + 503 + ], + "type": "interline_equation", + "content": "y ^ {*} = \\arg \\min _ {y} \\mathbb {E} _ {x \\in \\mathcal {D}} \\mathcal {L} _ {\\text {d i f f}} ^ {\\phi} (x, y). \\tag {5}", + "image_path": "3f017114a703f29cef107fc5a390f2cc37198cd0cde27de7871e5c9c1996850f.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 510, + 482, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 482, + 608 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 482, + 608 + ], + "type": "text", + "content": "Eq. (2) and Eq. (5) differ in that both the sources of the observations " + }, + { + "bbox": [ + 130, + 510, + 482, + 608 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 130, + 510, + 482, + 608 + ], + "type": "text", + "content": " (an infinite dataset of rendered images " + }, + { + "bbox": [ + 130, + 510, + 482, + 608 + ], + "type": "inline_equation", + "content": "\\mathcal{D}(\\theta)" + }, + { + "bbox": [ + 130, + 510, + 482, + 608 + ], + "type": "text", + "content": " for the former, and real data " + }, + { + "bbox": [ + 130, + 510, + 482, + 608 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 130, + 510, + 482, + 608 + ], + "type": "text", + "content": " for the latter) and the parameters being optimized over " + }, + { + "bbox": [ + 130, + 510, + 482, + 608 + ], + "type": "inline_equation", + "content": "(\\theta" + }, + { + "bbox": [ + 130, + 510, + 482, + 608 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 510, + 482, + 608 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 130, + 510, + 482, + 608 + ], + "type": "text", + "content": ", respectively). In our framework, we incorporate the real image information to the SDS guidance via first solving for " + }, + { + "bbox": [ + 130, + 510, + 482, + 608 + ], + "type": "inline_equation", + "content": "y^{*}" + }, + { + "bbox": [ + 130, + 510, + 482, + 608 + ], + "type": "text", + "content": " (Eq. (5)) and keep it frozen when optimizing for " + }, + { + "bbox": [ + 130, + 510, + 482, + 608 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 130, + 510, + 482, + 608 + ], + "type": "text", + "content": " (Eq. (2)). 
The diffusion model parameter $\phi$ is frozen throughout the process, requiring significantly less memory compared to the alternative of integrating input image information via finetuning $\phi$ as in DreamBooth3D [33].

3.2 Semantic Consistency from Deep Features

The generative model prior from Sec. 3.1 effectively constrains the search space for the solutions. However, the objectives from Eqs. (2) and (5) use the input image information only indirectly, via a text embedding $y^{*}$. To explain the relative geometric relation among input images, we explicitly recover the pose of each input image w.r.t. $\theta$, as illustrated in Figure 2 (middle) and as explained below.

To align input images, we use an image distance metric defined by semantic feature dissimilarity. In particular, pre-trained deep models such as DINO [2,30] have been shown to be effective semantic feature extractors.
Denote such a model as $f$ parameterized by $\zeta$. The similarity of two pixel locations $u_{1}$ and $u_{2}$ from two images $x_{1}$ and $x_{2}$, respectively, can be measured with

$$d_{\zeta}^{u_{1}, u_{2}}(x_{1}, x_{2}) := 1 - \frac{\langle [f_{\zeta}(x_{1})]_{u_{1}}, [f_{\zeta}(x_{2})]_{u_{2}} \rangle}{\|[f_{\zeta}(x_{1})]_{u_{1}}\|_{2}\, \|[f_{\zeta}(x_{2})]_{u_{2}}\|_{2}}, \tag{6}$$

where $[\cdot]$ is an indexing operator.
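In code, Eq. (6) is a cosine dissimilarity between per-pixel feature vectors. The sketch below evaluates it densely at aligned coordinates; the feature extraction call itself is omitted, and the [C, H, W] layout is an assumption.

```python
import torch
import torch.nn.functional as F

def feature_dissimilarity(feat1, feat2):
    """Dense evaluation of Eq. (6) at aligned coordinates (u1 = u2).

    `feat1`, `feat2`: [C, H, W] maps from a frozen extractor such as DINO.
    """
    f1 = F.normalize(feat1, dim=0)         # unit-normalize each pixel's feature
    f2 = F.normalize(feat2, dim=0)
    return 1.0 - (f1 * f2).sum(dim=0)      # [H, W] map of cosine dissimilarities

def image_distance(feat1, feat2):
    """Mean of the aligned-coordinate map; this is Eq. (7), defined next."""
    return feature_dissimilarity(feat1, feat2).mean()
```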
This per-pixel dissimilarity thereafter defines an image distance function

$$\|x_{1} - x_{2}\|_{d_{\zeta}} := \frac{1}{HW} \sum_{u} d_{\zeta}^{u,u}(x_{1}, x_{2}), \tag{7}$$

where $x_{1}$ and $x_{2}$ have resolution $H \times W$, and the sum is over all image coordinates.

The choice of a semantic-aware image distance, instead of photometric differences as in the classical problem setting of multiview 3D reconstruction [38,43,45], leads to solutions that maximally align input images to the 3D representation with more tolerance towards variance in object shape, texture, and environmental illumination among input images, which is crucial in our problem setting.

3.3 Optimization

The Canonical Shape and Image Poses. Combining Secs. 3.1 and 3.2, we convert the original problem in Eq. (1) into
(1) into" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 191, + 455, + 481, + 483 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 191, + 455, + 481, + 483 + ], + "spans": [ + { + "bbox": [ + 191, + 455, + 481, + 483 + ], + "type": "interline_equation", + "content": "\\min _ {\\pi , \\theta} \\underbrace {\\mathbb {E} _ {x \\in \\mathcal {D} (\\theta)} \\mathcal {L} _ {\\text {d i f f}} ^ {\\phi} \\left(x , y ^ {*}\\right)} _ {\\text {g e n e r a t i v e m o d e l g u i d a n c e}} + \\lambda \\underbrace {\\mathbb {E} _ {x \\in \\mathcal {D}} \\| \\mathcal {R} (\\pi (x) , \\theta) - x \\| _ {d}} _ {\\text {d a t a r e c o n s t r u c t i o n}}, \\tag {8}", + "image_path": "3fb89543ce87be455ef0975aee2aa8ee4dc51a36fc901f4c93ca290de9019c9c.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "spans": [ + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "inline_equation", + "content": "y^{*}" + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "text", + "content": " come from Eq. (5) and " + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "text", + "content": " is a loss weight. Compared to Eq. (5), here the first term instantiates the generative modeling prior and the second term is a soft constraint of reconstructing input observations. Specifically, " + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "inline_equation", + "content": "d = \\lambda_{\\zeta}d_{\\zeta} + \\lambda_{\\mathrm{IoU}}d_{\\mathrm{IoU}}" + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "inline_equation", + "content": "d_{\\zeta}" + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "text", + "content": " is the semantic-space distance metric from Sec. 3.2, and " + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{IoU}}" + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "text", + "content": " is the Intersection-over-Union (IoU) loss for masks, " + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "inline_equation", + "content": "\\| m_1 - m_2 \\|_{d_{\\mathrm{IoU}}} \\coloneqq 1 - (\\| m_1 \\odot m_2 \\|_1) / (\\| m_1 \\|_1 + \\| m_2 \\|_1 - \\| m_1 \\odot m_2 \\|_1)" + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "inline_equation", + "content": "m_1" + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "inline_equation", + "content": "m_2" + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "text", + "content": " are image masks, which in Eq. (8) are set to be the mask rendering and the instance mask for " + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 130, + 492, + 481, + 587 + ], + "type": "text", + "content": ". 
The use of both $d_{\zeta}$ and $d_{\mathrm{IoU}}$ tolerates shape variance among input instances.

For the shape representation, we follow NeRF [26] and use neural networks $\sigma_{\theta}: \mathbb{R}^{3} \to \mathbb{R}$ and $c_{\theta}: \mathbb{R}^{3} \to \mathbb{R}^{3}$ to map a 3D spatial coordinate to a density and an RGB value, respectively. The rendering operation $\mathcal{R}$ is the volumetric rendering operation specified as follows:

$$\mathcal{R}(r, \xi, \theta; c_{\theta}) = \int T(t)\, \sigma_{\theta}(\xi r(t))\, c_{\theta}(\xi r(t))\, \mathrm{d}t, \tag{9}$$

where $T(t) = \exp\left(-\int \sigma_{\theta}(r(t'))\, \mathrm{d}t'\right)$, $r: \mathbb{R} \to \mathbb{R}^{3}$ is a ray shooting from the camera center to the image plane, parameterized by the camera location and the ray's direction, and $\xi$ is the relative pose that transforms the ray from the camera frame to the canonical frame.
Specifically," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 145, + 264, + 482, + 283 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 264, + 482, + 283 + ], + "spans": [ + { + "bbox": [ + 145, + 264, + 482, + 283 + ], + "type": "interline_equation", + "content": "\\varPhi_ {\\tilde {x} \\leftarrow x} ^ {\\mathrm {2 D} \\leftarrow 2 \\mathrm {D}} (u) := \\arg \\min _ {\\tilde {u}} d _ {\\zeta} ^ {\\tilde {u}, u} (\\tilde {x}, x) + \\lambda_ {\\ell_ {2}} \\| \\tilde {u} - u \\| _ {2} ^ {2} + \\lambda_ {\\text {s m o o t h}} \\mathcal {L} _ {\\text {s m o o t h}} (\\tilde {u}, u), \\quad (1 0)", + "image_path": "507a94a5e69c31483c5fd43f2b42ed300e17136e2ec6e4bf8b4fc723152763ed.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 289, + 482, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 289, + 482, + 386 + ], + "spans": [ + { + "bbox": [ + 130, + 289, + 482, + 386 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 289, + 482, + 386 + ], + "type": "inline_equation", + "content": "d_{\\zeta}" + }, + { + "bbox": [ + 130, + 289, + 482, + 386 + ], + "type": "text", + "content": " follows Eq. (6), the 2D coordinates " + }, + { + "bbox": [ + 130, + 289, + 482, + 386 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 130, + 289, + 482, + 386 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 289, + 482, + 386 + ], + "type": "inline_equation", + "content": "\\tilde{u}" + }, + { + "bbox": [ + 130, + 289, + 482, + 386 + ], + "type": "text", + "content": " are normalized into range [0,1] before computing the " + }, + { + "bbox": [ + 130, + 289, + 482, + 386 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 130, + 289, + 482, + 386 + ], + "type": "text", + "content": " norm, the smoothness term " + }, + { + "bbox": [ + 130, + 289, + 482, + 386 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{smooth}}" + }, + { + "bbox": [ + 130, + 289, + 482, + 386 + ], + "type": "text", + "content": " is specified in Appendix B, and " + }, + { + "bbox": [ + 130, + 289, + 482, + 386 + ], + "type": "inline_equation", + "content": "\\lambda_{\\ell_2}" + }, + { + "bbox": [ + 130, + 289, + 482, + 386 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 289, + 482, + 386 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{smooth}}" + }, + { + "bbox": [ + 130, + 289, + 482, + 386 + ], + "type": "text", + "content": " are scalar weights. 
This objective searches for a new image coordinate $\tilde{u}$ (from the rendering $\tilde{x}$) that shares a semantic feature similar to $u$ (from the real image $x$), and ensures that $\tilde{u}$ stays in the local neighborhood of $u$ via a soft constraint on the coordinate distance. Afterward, a 2D-to-3D operation takes in the warped coordinate from above and outputs its 3D location in the normalized object coordinate space (NOCS) [41] of $\theta$:

$$\varPhi_{x}^{3\mathrm{D} \leftarrow 2\mathrm{D}}(\tilde{u}) := \left[\mathcal{R}_{\mathrm{NOCS}}(\pi(x), \theta)\right]_{\tilde{u}}, \tag{11}$$

where $\mathcal{R}_{\mathrm{NOCS}}$ is identical to $\mathcal{R}$ from Eq. (9), but replaces the color field $c_{\theta}$ with a canonical object coordinate field, $c_{\mathrm{NOCS}}: \mathbb{R}^{3} \to \mathbb{R}^{3}$, $p \mapsto (p - p_{\mathrm{min}}) / (p_{\mathrm{max}} - p_{\mathrm{min}})$, where $p_{\mathrm{min}}$ and $p_{\mathrm{max}}$ are the two opposite corners of the canonical shape's bounding box. This bounding box is determined by the mesh extracted from the density field $\sigma_{\theta}$ using the Marching Cubes [22] algorithm.

Combining the above, given an input image coordinate $u$, $\varPhi_x^{\mathrm{fwd}}(u) := \varPhi_x^{3\mathrm{D} \leftarrow 2\mathrm{D}} \circ \varPhi_{\tilde{x} \leftarrow x}^{2\mathrm{D} \leftarrow 2\mathrm{D}}(u)$ identifies a 3D location in the canonical frame corresponding to $u$.
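The composition above can be sketched as follows. This is a grid-search variant of Eq. (10) (the paper optimizes it by gradient descent and includes a smoothness term, omitted here) composed with the Eq. (11) lookup; the feature-map layout and the precomputed `nocs_render` image are assumptions.

```python
import torch
import torch.nn.functional as F

def warp_2d(u, feat_real, feat_render, lam_l2=1.0):
    """Grid-search variant of Eq. (10) without the smoothness term.

    Finds the rendering pixel whose feature best matches the real-image
    feature at `u`, penalized by l2 distance in normalized coordinates.
    `feat_*`: [C, H, W] frozen feature maps; `u`: (row, col) integers.
    """
    C, H, W = feat_render.shape
    q = F.normalize(feat_real[:, u[0], u[1]], dim=0)           # query feature at u
    fmap = F.normalize(feat_render, dim=0)
    dissim = 1.0 - torch.einsum("c,chw->hw", q, fmap)          # Eq. (6) vs. all pixels
    ys, xs = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
    coords = torch.stack([ys, xs], -1).float()
    l2 = ((coords / coords.new_tensor([H, W])
           - coords.new_tensor([u[0] / H, u[1] / W])) ** 2).sum(-1)
    idx = (dissim + lam_l2 * l2).flatten().argmin()
    return torch.stack([idx // W, idx % W])                    # warped coordinate u~

def forward_map(u, feat_real, feat_render, nocs_render):
    """Phi_fwd = Phi_3D<-2D ∘ Phi_2D<-2D: 2D image coord -> 3D canonical coord.

    `nocs_render`: [H, W, 3] rendering with c_NOCS in place of the color field.
    """
    u_t = warp_2d(u, feat_real, feat_render)
    return nocs_render[u_t[0], u_t[1]]                         # Eq. (11) lookup
```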
Reverse Canonical Coordinate Mappings. Each image can be "uncongealed" from the canonical shape using $\varPhi_x^{\mathrm{rev}}: \mathbb{R}^{3} \to \mathbb{R}^{2}$, the reverse operation of $\varPhi_x^{\mathrm{fwd}}$, computed approximately via nearest-neighbor inversion as explained below.

Given a 3D location within the unit cube, $p \in [0,1]^{3}$, $\varPhi_x^{\mathrm{rev}}(p) := \varPhi_{x \leftarrow \tilde{x}}^{2\mathrm{D} \leftarrow 2\mathrm{D}} \circ \varPhi_x^{2\mathrm{D} \leftarrow 3\mathrm{D}}(p)$. In particular,

$$\varPhi_{x}^{2\mathrm{D} \leftarrow 3\mathrm{D}}(p) := \arg\min_{\tilde{u}} \|p - \varPhi_{x}^{3\mathrm{D} \leftarrow 2\mathrm{D}}(\tilde{u})\|_{2} \tag{12}$$

is an operation that takes in a 3D coordinate $p$ in the canonical frame and searches for the 2D image coordinate whose NOCS value is closest to $p$, and $\varPhi_{x \leftarrow \tilde{x}}^{2\mathrm{D} \leftarrow 2\mathrm{D}}$ is computed by inverting $\varPhi_{\tilde{x} \leftarrow x}^{2\mathrm{D} \leftarrow 2\mathrm{D}}$ from Eq. (10),

$$\varPhi_{x \leftarrow \tilde{x}}^{2\mathrm{D} \leftarrow 2\mathrm{D}}(\tilde{u}) := \arg\min_{u} \|\tilde{u} - \varPhi_{\tilde{x} \leftarrow x}^{2\mathrm{D} \leftarrow 2\mathrm{D}}(u)\|_{2}. \tag{13}$$
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 388, + 482, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 388, + 482, + 437 + ], + "spans": [ + { + "bbox": [ + 130, + 388, + 482, + 437 + ], + "type": "text", + "content": "In summary, the above procedure establishes the 2D-3D correspondence between an input image " + }, + { + "bbox": [ + 130, + 388, + 482, + 437 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 130, + 388, + 482, + 437 + ], + "type": "text", + "content": " and the canonical shape via " + }, + { + "bbox": [ + 130, + 388, + 482, + 437 + ], + "type": "inline_equation", + "content": "\\varPhi_x^{\\mathrm{fwd}}" + }, + { + "bbox": [ + 130, + 388, + 482, + 437 + ], + "type": "text", + "content": ", and defines the dense 2D-2D correspondences between two images " + }, + { + "bbox": [ + 130, + 388, + 482, + 437 + ], + "type": "inline_equation", + "content": "x_1, x_2" + }, + { + "bbox": [ + 130, + 388, + 482, + 437 + ], + "type": "text", + "content": " via " + }, + { + "bbox": [ + 130, + 388, + 482, + 437 + ], + "type": "inline_equation", + "content": "\\varPhi_{x_2}^{\\mathrm{rev}} \\circ \\varPhi_{x_1}^{\\mathrm{fwd}}" + }, + { + "bbox": [ + 130, + 388, + 482, + 437 + ], + "type": "text", + "content": " which enables image editing (Figure 8). The full framework is described in Algorithm 1." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 454, + 279, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 454, + 279, + 467 + ], + "spans": [ + { + "bbox": [ + 131, + 454, + 279, + 467 + ], + "type": "text", + "content": "3.4 Implementation Details" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 474, + 299, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 474, + 299, + 544 + ], + "spans": [ + { + "bbox": [ + 130, + 474, + 299, + 544 + ], + "type": "text", + "content": "Input images are cropped with the tightest bounding box around the foreground masks. The masks come from dataset annotations, if available, or from Grounded-SAM [16, 35], an off-the-shelf segmentation model." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 546, + 299, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 546, + 299, + 605 + ], + "spans": [ + { + "bbox": [ + 130, + 546, + 299, + 605 + ], + "type": "text", + "content": "Across all experiments, we optimize for " + }, + { + "bbox": [ + 130, + 546, + 299, + 605 + ], + "type": "inline_equation", + "content": "y^{*}" + }, + { + "bbox": [ + 130, + 546, + 299, + 605 + ], + "type": "text", + "content": " (Algorithm 1, line 2) for 1,000 iterations using an AdamW [23] optimizer with learning rate 0.02 and weight decay 0.01. We optimize for " + }, + { + "bbox": [ + 130, + 546, + 299, + 605 + ], + "type": "inline_equation", + "content": "\\theta" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 605, + 482, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 482, + 641 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 482, + 641 + ], + "type": "text", + "content": "(line 3) with " + }, + { + "bbox": [ + 130, + 605, + 482, + 641 + ], + "type": "inline_equation", + "content": "\\lambda = 0" + }, + { + "bbox": [ + 130, + 605, + 482, + 641 + ], + "type": "text", + "content": " for 10,000 iterations, with AdamW and learning rate 0.001. 
The NeRF model " + }, + { + "bbox": [ + 130, + 605, + 482, + 641 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 130, + 605, + 482, + 641 + ], + "type": "text", + "content": " has 12.6M parameters. It is frozen afterwards and defines the coordinate frame for poses." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 642, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 642, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 642, + 482, + 666 + ], + "type": "text", + "content": "Since directly optimizing poses and camera parameters with gradient descents easily falls into local minima [20], we initialize " + }, + { + "bbox": [ + 130, + 642, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 130, + 642, + 482, + 666 + ], + "type": "text", + "content": " using an analysis-by-synthesis" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 306, + 474, + 480, + 572 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 310, + 474, + 440, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 474, + 440, + 485 + ], + "spans": [ + { + "bbox": [ + 310, + 474, + 440, + 485 + ], + "type": "text", + "content": "1: procedure RUN " + }, + { + "bbox": [ + 310, + 474, + 440, + 485 + ], + "type": "inline_equation", + "content": "(\\mathcal{D} = \\{x_{n}\\}_{n = 1}^{N})" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 310, + 484, + 424, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 484, + 424, + 492 + ], + "spans": [ + { + "bbox": [ + 310, + 484, + 424, + 492 + ], + "type": "text", + "content": "2: " + }, + { + "bbox": [ + 310, + 484, + 424, + 492 + ], + "type": "inline_equation", + "content": "y^{*}\\gets" + }, + { + "bbox": [ + 310, + 484, + 424, + 492 + ], + "type": "text", + "content": " Solution to Eq. (5)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 310, + 492, + 420, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 492, + 420, + 500 + ], + "spans": [ + { + "bbox": [ + 310, + 492, + 420, + 500 + ], + "type": "text", + "content": "3: Optimize " + }, + { + "bbox": [ + 310, + 492, + 420, + 500 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 310, + 492, + 420, + 500 + ], + "type": "text", + "content": " with Eq. 
(8)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 310, + 500, + 438, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 500, + 438, + 508 + ], + "spans": [ + { + "bbox": [ + 310, + 500, + 438, + 508 + ], + "type": "text", + "content": "4: Sample pose candidates " + }, + { + "bbox": [ + 310, + 500, + 438, + 508 + ], + "type": "inline_equation", + "content": "\\{\\xi_i\\}_i" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 310, + 508, + 480, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 508, + 480, + 516 + ], + "spans": [ + { + "bbox": [ + 310, + 508, + 480, + 516 + ], + "type": "text", + "content": "5: for " + }, + { + "bbox": [ + 310, + 508, + 480, + 516 + ], + "type": "inline_equation", + "content": "n\\gets 1" + }, + { + "bbox": [ + 310, + 508, + 480, + 516 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 310, + 508, + 480, + 516 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 310, + 508, + 480, + 516 + ], + "type": "text", + "content": " do " + }, + { + "bbox": [ + 310, + 508, + 480, + 516 + ], + "type": "inline_equation", + "content": "\\triangleright" + }, + { + "bbox": [ + 310, + 508, + 480, + 516 + ], + "type": "text", + "content": " Pose initialization" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 310, + 516, + 474, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 516, + 474, + 526 + ], + "spans": [ + { + "bbox": [ + 310, + 516, + 474, + 526 + ], + "type": "text", + "content": "6: " + }, + { + "bbox": [ + 310, + 516, + 474, + 526 + ], + "type": "inline_equation", + "content": "\\pi (x_{n})\\gets \\arg \\min_{\\xi_{i}}\\| \\mathcal{R}(\\xi ,\\theta) - x_{n}\\|_{d_{\\zeta}}" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 310, + 526, + 364, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 526, + 364, + 533 + ], + "spans": [ + { + "bbox": [ + 310, + 526, + 364, + 533 + ], + "type": "text", + "content": "7: end for" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 310, + 533, + 468, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 533, + 468, + 544 + ], + "spans": [ + { + "bbox": [ + 310, + 533, + 468, + 544 + ], + "type": "text", + "content": "8: Optimize " + }, + { + "bbox": [ + 310, + 533, + 468, + 544 + ], + "type": "inline_equation", + "content": "\\pi (x_{n})" + }, + { + "bbox": [ + 310, + 533, + 468, + 544 + ], + "type": "text", + "content": " with Eq. 
(8) for all " + }, + { + "bbox": [ + 310, + 533, + 468, + 544 + ], + "type": "inline_equation", + "content": "n" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 310, + 544, + 456, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 544, + 456, + 552 + ], + "spans": [ + { + "bbox": [ + 310, + 544, + 456, + 552 + ], + "type": "text", + "content": "9: Determine " + }, + { + "bbox": [ + 310, + 544, + 456, + 552 + ], + "type": "inline_equation", + "content": "\\Phi_{x_n}^{\\mathrm{fwd}}" + }, + { + "bbox": [ + 310, + 544, + 456, + 552 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 310, + 544, + 456, + 552 + ], + "type": "inline_equation", + "content": "\\Phi_{x_n}^{\\mathrm{rev}}" + }, + { + "bbox": [ + 310, + 544, + 456, + 552 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 310, + 544, + 456, + 552 + ], + "type": "inline_equation", + "content": "n" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 306, + 552, + 459, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 552, + 459, + 563 + ], + "spans": [ + { + "bbox": [ + 306, + 552, + 459, + 563 + ], + "type": "text", + "content": "10: return " + }, + { + "bbox": [ + 306, + 552, + 459, + 563 + ], + "type": "inline_equation", + "content": "\\theta, \\pi, \\{\\Phi_{x_n}^{\\mathrm{fwd}}\\}_{n=1}^N, \\{\\Phi_{x_n}^{\\mathrm{rev}}\\}_{n=1}^N" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 306, + 563, + 381, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 563, + 381, + 572 + ], + "spans": [ + { + "bbox": [ + 306, + 563, + 381, + 572 + ], + "type": "text", + "content": "11: end procedure" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 337, + 578, + 448, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 578, + 448, + 588 + ], + "spans": [ + { + "bbox": [ + 337, + 578, + 448, + 588 + ], + "type": "text", + "content": "Algorithm 1: Overview." + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 219, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 219, + 91, + 448, + 102 + ], + "type": "text", + "content": "3D Congealing: 3D-Aware Image Alignment in the Wild" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 156, + 114, + 455, + 220 + ], + "blocks": [ + { + "bbox": [ + 156, + 114, + 455, + 220 + ], + "lines": [ + { + "bbox": [ + 156, + 114, + 455, + 220 + ], + "spans": [ + { + "bbox": [ + 156, + 114, + 455, + 220 + ], + "type": "table", + "html": "
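For orientation, the following Python-style sketch mirrors Algorithm 1. Every argument after `images` is a placeholder callable or list for the corresponding step, supplied by the caller; none of these names come from the paper.

```python
def run(images, solve_ti, fit_shape, candidates, render, feat_dist,
        refine_poses, build_mappings):
    """Sketch of Algorithm 1: 3D congealing of an in-the-wild image set."""
    y_star = solve_ti(images)                        # line 2: Eq. (5)
    theta = fit_shape(y_star)                        # line 3: Eq. (8), lambda = 0
    poses = {}
    for n, x in enumerate(images):                   # lines 5-7: pose initialization
        poses[n] = min(candidates,                   # line 6: best candidate pose
                       key=lambda xi: feat_dist(render(xi, theta), x))
    poses = refine_poses(poses, theta, images)       # line 8: Eq. (8), theta frozen
    fwd, rev = build_mappings(images, poses, theta)  # line 9: Eqs. (10)-(13)
    return theta, poses, fwd, rev                    # line 10
```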
| Labels | Methods | Rotation° ↓ ($S_C$) | Rotation° ↓ ($\sim S_C$) | Translation ↓ ($S_C$) | Translation ↓ ($\sim S_C$) |
|---|---|---|---|---|---|
| Pose | NeROIC [17] | 42.11 | - | 0.09 | - |
| Pose | NeRS [47] | 122.41 | 123.63 | 0.49 | 0.52 |
| Pose | SAMURAI [1] | 26.16 | 36.59 | 0.24 | 0.35 |
| None | GNeRF [25] | 93.15 | 80.22 | 1.02 | 1.04 |
| None | PoseDiffusion [42] | 46.79 | 46.34 | 0.81 | 0.90 |
| None | Ours (3 seeds) | 26.97±2.24 | 32.56±2.90 | 0.40±0.01 | 0.41±0.04 |
| None | Ours (No Pose Init) | 53.45 | 57.87 | 0.97 | 0.96 |
| None | Ours (No IoU Loss) | 31.29 | 31.15 | 0.87 | 0.85 |

Table 1: Pose Estimation from Multi-Illumination Image Captures. Our method performs better than both GNeRF and PoseDiffusion with the same input information, and on par with SAMURAI, which additionally assumes camera pose directions as inputs. Different random seeds lead to different canonical shapes, but our method is robust to such variations. ± denotes means followed by standard deviations. $S_C$ and $\sim S_C$ are the splits where COLMAP converges and fails to converge, respectively (see Sec. 4.1).
| Category | PoseDiffusion [42] R° ↓ | PoseDiffusion [42] T ↓ | Ours R° ↓ | Ours T ↓ |
|---|---|---|---|---|
| Bed | 45.74 | 0.99 | 37.00 | 0.40 |
| Bookcase | 22.83 | 0.33 | 36.47 | 0.45 |
| Chair | 46.80 | 1.04 | 34.58 | 0.76 |
| Desk | 23.89 | 0.49 | 26.53 | 0.36 |
| Sofa | 33.99 | 0.69 | 26.49 | 0.27 |
| Table | 43.53 | 1.22 | 49.44 | 0.67 |
| Wardrobe | 31.54 | 1.80 | 27.41 | 0.39 |
| Overall | 35.47±10.0 | 0.94±0.49 | 33.99±8.26 | 0.47±0.18 |

Table 2: Pose Estimation from Cross-Instance Image Collections. Our method achieves overall better performance than PoseDiffusion on Pix3D. "R" stands for rotation and "T" for translation. ± denotes cross-category means followed by standard deviations.

Specifically, for the pose initialization (Algorithm 1, lines 5-7), we parameterize the camera intrinsics using a pinhole camera model with a scalar Field-of-View (FoV) value, and sample the camera parameters $(\xi, \kappa)$ from a set of candidates determined by an exhaustive combination of 3 FoV, 16 azimuth, and 16 elevation values uniformly sampled from $[15^{\circ}, 60^{\circ}]$, $[-180^{\circ}, 180^{\circ}]$, and $[-90^{\circ}, 90^{\circ}]$, respectively. In this stage, all renderings use a fixed camera radius and are cropped with the tightest bounding boxes of the rendered foreground masks before being compared with the real image inputs. Line 6 is effectively Eq. (8) with $\lambda_{\zeta} = 1$ and $\lambda_{\mathrm{IoU}} = 0$.
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 477, + 482, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 477, + 482, + 574 + ], + "spans": [ + { + "bbox": [ + 130, + 477, + 482, + 574 + ], + "type": "text", + "content": "After pose initialization, we use the " + }, + { + "bbox": [ + 130, + 477, + 482, + 574 + ], + "type": "inline_equation", + "content": "\\mathfrak{se}(3)" + }, + { + "bbox": [ + 130, + 477, + 482, + 574 + ], + "type": "text", + "content": " Lie algebra for camera extrinsics parameterization following BARF [20], and optimize for the extrinsics and intrinsics of each input image (Algorithm 1, line 8), with " + }, + { + "bbox": [ + 130, + 477, + 482, + 574 + ], + "type": "inline_equation", + "content": "\\lambda_{\\zeta} = 0" + }, + { + "bbox": [ + 130, + 477, + 482, + 574 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 477, + 482, + 574 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{IoU}} = 1" + }, + { + "bbox": [ + 130, + 477, + 482, + 574 + ], + "type": "text", + "content": ", for 1,000 iterations with the Adam [15] optimizer and learning rate 0.001. Since " + }, + { + "bbox": [ + 130, + 477, + 482, + 574 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 130, + 477, + 482, + 574 + ], + "type": "text", + "content": " is frozen, the optimization effectively only considers the second term from Eq. (8). Finally, to optimize for the canonical coordinate mappings (Algorithm 1, line 9), for each input image, we run 4,000 iterations for Eq. (10) with AdamW and learning rate 0.01. All experiments are run on a single 24GB A5000 GPU." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 597, + 230, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 597, + 230, + 612 + ], + "spans": [ + { + "bbox": [ + 132, + 597, + 230, + 612 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 629, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 481, + 666 + ], + "type": "text", + "content": "In this section, we first benchmark the pose estimation performance of our method on in-the-wild image captures (Sec. 4.1), and then show qualitative results on diverse input data and demonstrate applications such as image editing (Sec. 4.2)." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "type": "text", + "content": "Y. Zhang et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 137, + 113, + 478, + 266 + ], + "blocks": [ + { + "bbox": [ + 137, + 113, + 478, + 266 + ], + "lines": [ + { + "bbox": [ + 137, + 113, + 478, + 266 + ], + "spans": [ + { + "bbox": [ + 137, + 113, + 478, + 266 + ], + "type": "image", + "image_path": "36334097b008a9f2ca11dbb40b3addfe05a182038a294b0561a9a00a3edd8e18.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 281, + 482, + 338 + ], + "lines": [ + { + "bbox": [ + 130, + 281, + 482, + 338 + ], + "spans": [ + { + "bbox": [ + 130, + 281, + 482, + 338 + ], + "type": "text", + "content": "Fig. 4: Pose Estimation for Tourist Landmarks. This is a challenging problem setting due to the varying viewpoints and lighting conditions, and the proposed method can successfully align online tourist photos taken at different times and possibly at different geographical locations, into one canonical representation. The top rows show input images and the bottom rows show shape templates under aligned poses." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 350, + 242, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 350, + 242, + 361 + ], + "spans": [ + { + "bbox": [ + 132, + 350, + 242, + 361 + ], + "type": "text", + "content": "4.1 Pose Estimation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 368, + 482, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 368, + 482, + 487 + ], + "spans": [ + { + "bbox": [ + 130, + 368, + 482, + 487 + ], + "type": "text", + "content": "Dataset. We benchmark pose estimation performance under two settings. First, for a single-instance, varying illumination setting, we use the in-the-wild split of the NAVI [14] dataset, which contains 35 object-centric image collections in its official release. Each image collection contains an average of around 60 casual image captures of an object instance placed under different illumination conditions, backgrounds, and cameras. Second, for a single-category, cross-instance setting, we use Pix3D [40], a dataset of natural in-the-wild images grouped into 9 categories, each containing multiple shape models of IKEA objects. We use 20 randomly selected images from each category except for \"tool\" and \"misc\" as they involve shapes visually and semantically far apart." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 488, + 482, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 488, + 482, + 548 + ], + "spans": [ + { + "bbox": [ + 130, + 488, + 482, + 548 + ], + "type": "text", + "content": "We use identical hyperparameters for all scenes. We use a generic text prompt, \"a photo of sks object\", for initialization for all scenes. The text embeddings corresponding to the tokens for \"sks object\" are being optimized using Eq. (5) with the rest frozen. For each scene, it takes around 1 hr to optimize for NeRF, 15 min for pose initialization, and 45 min for pose optimization." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 557, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 557, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 557, + 482, + 665 + ], + "type": "text", + "content": "Baselines. 
We compare with several multiview reconstruction baselines. In particular, NeROIC [17] uses the poses from COLMAP, and NeRS [47] and SAMURAI [1] require initial camera directions. GNeRF [25] is a pose-free multiview 3D reconstruction method that is originally designed for single-illumination scenes, and is adapted as a baseline using the same input assumption as ours. PoseDiffusion [42] is a learning-based framework that predicts relative object poses, using ground truth pose annotations as training supervision. The original paper takes a model pre-trained on CO3D [34] and evaluates the pose prediction performance in the wild, and we use the same checkpoint for evaluation." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 219, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 219, + 91, + 448, + 102 + ], + "type": "text", + "content": "3D Congealing: 3D-Aware Image Alignment in the Wild" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 138, + 113, + 478, + 342 + ], + "blocks": [ + { + "bbox": [ + 138, + 113, + 478, + 342 + ], + "lines": [ + { + "bbox": [ + 138, + 113, + 478, + 342 + ], + "spans": [ + { + "bbox": [ + 138, + 113, + 478, + 342 + ], + "type": "image", + "image_path": "2640969c6ec9657cbdd4d97f071d9713a98aa5c547da988caa613e7c46aabeec.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 357, + 482, + 390 + ], + "lines": [ + { + "bbox": [ + 130, + 357, + 482, + 390 + ], + "spans": [ + { + "bbox": [ + 130, + 357, + 482, + 390 + ], + "type": "text", + "content": "Fig. 5: Object Alignment from Internet Images. Results of an online image search may contain various appearances, identities, and articulated poses of the object. Our method can successfully associate these in-the-wild images with one shared 3D space." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 406, + 482, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 406, + 482, + 491 + ], + "spans": [ + { + "bbox": [ + 130, + 406, + 482, + 491 + ], + "type": "text", + "content": "Metrics. The varying illuminations pose challenges to classical pose estimation methods such as COLMAP [38]. We use the official split of the data which partitions the 35 scenes into 19 scenes where COLMAP converges (" + }, + { + "bbox": [ + 130, + 406, + 482, + 491 + ], + "type": "inline_equation", + "content": "S_{C}" + }, + { + "bbox": [ + 130, + 406, + 482, + 491 + ], + "type": "text", + "content": " in Table 1), and 16 scenes where COLMAP fails to converge (" + }, + { + "bbox": [ + 130, + 406, + 482, + 491 + ], + "type": "inline_equation", + "content": "\\sim S_{C}" + }, + { + "bbox": [ + 130, + 406, + 482, + 491 + ], + "type": "text", + "content": "). 
Following [14], we report the absolute rotation and translation errors using Procrustes analysis [10], where for each scene, the predicted camera poses are aligned with the ground truth pose annotations using a global transformation before computing the pose metrics." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 506, + 482, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 506, + 482, + 625 + ], + "spans": [ + { + "bbox": [ + 130, + 506, + 482, + 625 + ], + "type": "text", + "content": "Results. Handling different illumination conditions is challenging for all baselines using photometric-reconstruction-based optimization [1,17,47] even with additional information for pose initialization. As shown in Table 1, our approach significantly outperforms both GNeRF and PoseDiffusion and works on par with SAMURAI which requires additional pose initialization. We run our full pipeline with 3 random seeds and observe a consistent performance across seeds. Qualitative results of aligned templates and learned canonical coordinate maps are shown in Figure 3. Failure modes are discussed in Appendix F. In a cross-instance setting from Table 2, our method achieves a better overall performance compared to the best-performing baseline from Table 1." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 641, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 481, + 666 + ], + "type": "text", + "content": "Ablations. Table 1 also shows ablation for the pose fitting objectives. The initialization is critical (\"No Pose Init\"), which is expected as pose optimization" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "type": "text", + "content": "Y. Zhang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 138, + 114, + 477, + 196 + ], + "blocks": [ + { + "bbox": [ + 138, + 114, + 477, + 196 + ], + "lines": [ + { + "bbox": [ + 138, + 114, + 477, + 196 + ], + "spans": [ + { + "bbox": [ + 138, + 114, + 477, + 196 + ], + "type": "image", + "image_path": "46ac92b264d02545207cac6b17cfc59f2675186a671276c526b78bc586a9f0a8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 206, + 482, + 229 + ], + "lines": [ + { + "bbox": [ + 130, + 206, + 482, + 229 + ], + "spans": [ + { + "bbox": [ + 130, + 206, + 482, + 229 + ], + "type": "text", + "content": "Fig. 6: Cross-Category Results. The method can associate images from different categories, such as cats and dogs, by leveraging a learned average shape." 
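The global alignment step in the Metrics paragraph above is typically computed with a closed-form Procrustes/Umeyama fit; below is a sketch under the assumption that a similarity transform (scale, rotation, translation) is solved over camera centers before errors are measured. Function names and the synthetic usage example are illustrative only, and the paper's exact alignment variant may differ.

```python
import numpy as np

def align_similarity(src, dst):
    """Umeyama-style Procrustes alignment: find scale s, rotation R and
    translation t minimizing ||(s R src + t) - dst||^2 over all points,
    so predicted camera centers can be compared against ground truth."""
    mu_s, mu_d = src.mean(0), dst.mean(0)
    xs, xd = src - mu_s, dst - mu_d
    cov = xd.T @ xs / len(src)
    U, D, Vt = np.linalg.svd(cov)
    S = np.eye(3)
    if np.linalg.det(U) * np.linalg.det(Vt) < 0:
        S[2, 2] = -1.0                        # guard against reflections
    R = U @ S @ Vt
    s = np.trace(np.diag(D) @ S) / (xs ** 2).sum(1).mean()
    t = mu_d - s * R @ mu_s
    return s, R, t

pred = np.random.randn(35, 3)                 # predicted camera centers
gt = 2.0 * pred + 0.5                         # synthetic "ground truth"
s, R, t = align_similarity(pred, gt)
err = np.linalg.norm((s * pred @ R.T + t) - gt, axis=1)  # translation errors
```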
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 234, + 482, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 234, + 482, + 258 + ], + "spans": [ + { + "bbox": [ + 130, + 234, + 482, + 258 + ], + "type": "text", + "content": "is susceptible to local optima [20]. \"No IoU Loss\", which is equivalent to using the initialized poses as final predictions, also negatively affects the performance." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 276, + 224, + 288 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 276, + 224, + 288 + ], + "spans": [ + { + "bbox": [ + 132, + 276, + 224, + 288 + ], + "type": "text", + "content": "4.2 Applications" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 296, + 482, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 296, + 482, + 368 + ], + "spans": [ + { + "bbox": [ + 130, + 296, + 482, + 368 + ], + "type": "text", + "content": "We show qualitative results on various in-the-wild image data. Inputs for Figures 4 and 5 are crawled with standard online image search engines and are CC-licensed, each consisting of 50 to 100 images. Inputs for Figures 6 and 7 come from the SPair-71k dataset [28]. We use identical hyperparameters for all datasets, except for text prompt initialization where we use a generic description of the object, e.g., \"a photo of sks sculpture\", or \"a photo of cats plus dogs\" for Figure 6." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 379, + 482, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 379, + 482, + 428 + ], + "spans": [ + { + "bbox": [ + 130, + 379, + 482, + 428 + ], + "type": "text", + "content": "Single-Instance. Figure 4 shows the result on Internet photos of tourist landmarks, which may contain a large diversity in illuminations and styles. The proposed method can handle the variations and align these photos and art pieces to the same canonical 3D space and recover the relative camera poses." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 438, + 482, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 438, + 482, + 486 + ], + "spans": [ + { + "bbox": [ + 130, + 438, + 482, + 486 + ], + "type": "text", + "content": "Cross-Instance, Single-Category. Internet images from generic objects may contain more shape and texture variations compared to landmarks. Figure 5 shows results for various objects, where the framework infers a canonical shape from the inputs to capture the shared semantic components being observed." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 498, + 482, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 498, + 482, + 546 + ], + "spans": [ + { + "bbox": [ + 130, + 498, + 482, + 546 + ], + "type": "text", + "content": "Cross-Category. The method leverages semantic features to establish alignment and does not strictly assume that inputs are of the same category. In Figure 6, the method infers an average shape as an anchor to further reason about the relative relation among images from different categories." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 558, + 482, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 558, + 482, + 606 + ], + "spans": [ + { + "bbox": [ + 130, + 558, + 482, + 606 + ], + "type": "text", + "content": "Inputs with Deformable Shapes. 
To test the robustness of the method, we run the pipeline on images of humans with highly diverse poses. Figures 1 and 7 show that the method assigns plausible poses to the inputs despite the large diversity of shapes and articulated poses contained in the inputs." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "type": "text", + "content": "Image Editing. The proposed method finds image correspondence and can be applied to image editing, as shown in Figure 8. Figure 8 (c) shows that our method obtains more visually plausible results compared to the Nearest-Neighbor (NN) baseline using the same DINO features. The baseline matches features in 2D" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 219, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 219, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Congealing: 3D-Aware Image Alignment in the Wild" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 139, + 114, + 477, + 195 + ], + "blocks": [ + { + "bbox": [ + 139, + 114, + 477, + 195 + ], + "lines": [ + { + "bbox": [ + 139, + 114, + 477, + 195 + ], + "spans": [ + { + "bbox": [ + 139, + 114, + 477, + 195 + ], + "type": "image", + "image_path": "7af03c3af46925888c033ffb75d3a45e2061e556eec3b1343e71aa76783e4a20.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 208, + 482, + 232 + ], + "lines": [ + { + "bbox": [ + 131, + 208, + 482, + 232 + ], + "spans": [ + { + "bbox": [ + 131, + 208, + 482, + 232 + ], + "type": "text", + "content": "Fig. 7: Results on Deformable Objects. The method can be applied to images with highly diverse articulated poses and shapes as shown in the examples above." 
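For reference, the nearest-neighbor (NN) baseline discussed in the Image Editing paragraph above can be sketched as per-pixel cosine-similarity matching over 2D feature maps; the feature dimension and all names below are assumptions for illustration, not the paper's code.

```python
import torch
import torch.nn.functional as F

def nn_match(feat_src, feat_tgt):
    """Per-pixel nearest-neighbor matching between two (C, H, W) feature maps
    (e.g., DINO features): each target pixel independently picks the most
    cosine-similar source pixel."""
    C, H, W = feat_tgt.shape
    fs = F.normalize(feat_src.reshape(C, -1), dim=0)   # (C, Hs*Ws)
    ft = F.normalize(feat_tgt.reshape(C, -1), dim=0)   # (C, H*W)
    sim = ft.T @ fs                                    # cosine similarities
    return sim.argmax(dim=1).reshape(H, W)             # flat source indices

matches = nn_match(torch.randn(384, 32, 32), torch.randn(384, 32, 32))
```

The independent per-pixel argmax is what makes this baseline produce the noisy results the text refers to, since no 3D reasoning ties neighboring matches together.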
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 133, + 242, + 482, + 296 + ], + "blocks": [ + { + "bbox": [ + 133, + 242, + 482, + 296 + ], + "lines": [ + { + "bbox": [ + 133, + 242, + 482, + 296 + ], + "spans": [ + { + "bbox": [ + 133, + 242, + 482, + 296 + ], + "type": "image", + "image_path": "fbf7042de2cf9b45bfd3d816dcd50e2a2ad49d33715e037590b2e71f4bf7d996.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 266, + 297, + 347, + 306 + ], + "lines": [ + { + "bbox": [ + 266, + 297, + 347, + 306 + ], + "spans": [ + { + "bbox": [ + 266, + 297, + 347, + 306 + ], + "type": "text", + "content": "(a) Texture Propagation" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 133, + 310, + 482, + 411 + ], + "blocks": [ + { + "bbox": [ + 133, + 310, + 482, + 411 + ], + "lines": [ + { + "bbox": [ + 133, + 310, + 482, + 411 + ], + "spans": [ + { + "bbox": [ + 133, + 310, + 482, + 411 + ], + "type": "image", + "image_path": "57852ca76fad81fafab7972c2d5c18d657810d57aff50291248ef72539e24171.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 142, + 413, + 223, + 423 + ], + "lines": [ + { + "bbox": [ + 142, + 413, + 223, + 423 + ], + "spans": [ + { + "bbox": [ + 142, + 413, + 223, + 423 + ], + "type": "text", + "content": "(b) Editing Propagation" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 314, + 414, + 402, + 423 + ], + "lines": [ + { + "bbox": [ + 314, + 414, + 402, + 423 + ], + "spans": [ + { + "bbox": [ + 314, + 414, + 402, + 423 + ], + "type": "text", + "content": "(c) Baseline Comparisons" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 131, + 449, + 482, + 483 + ], + "lines": [ + { + "bbox": [ + 131, + 449, + 482, + 483 + ], + "spans": [ + { + "bbox": [ + 131, + 449, + 482, + 483 + ], + "type": "text", + "content": "Fig. 8: Image Editing. Our method propagates texture in (a) and (c) and regional editing in (b) to real images. As shown in (c), it achieves smoother results compared to the nearest-neighbor (NN) baseline thanks to the 3D geometric reasoning." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 131, + 497, + 482, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 497, + 482, + 534 + ], + "spans": [ + { + "bbox": [ + 131, + 497, + 482, + 534 + ], + "type": "text", + "content": "for each pixel individually and produces noisy results, as discussed in Appendix C. Quantitative evaluation of correspondence matching and additional qualitative results for editing are included in Appendix D and E." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 554, + 220, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 554, + 220, + 567 + ], + "spans": [ + { + "bbox": [ + 132, + 554, + 220, + 567 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 581, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 581, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 581, + 482, + 666 + ], + "type": "text", + "content": "We have introduced 3D Congealing, 3D-aware alignment for 2D images capturing semantically similar objects. 
Our proposed framework leverages a canonical 3D representation that encapsulates geometric and semantic information and, through optimization, fuses prior knowledge from a pre-trained image generative model and semantic information from input images. We show that our model achieves strong results on real-world image datasets under challenging identity, illumination, and background conditions." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "type": "text", + "content": "Y. Zhang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 482, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 482, + 152 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 482, + 152 + ], + "type": "text", + "content": "Acknowledgments. We thank Chen Geng and Sharon Lee for their help in reviewing the manuscript. This work is in part supported by NSF RI #2211258, #2338203, and ONR MURI N00014-22-1-2740." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 172, + 197, + 185 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 172, + 197, + 185 + ], + "spans": [ + { + "bbox": [ + 133, + 172, + 197, + 185 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 198, + 482, + 665 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 138, + 198, + 482, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 198, + 482, + 243 + ], + "spans": [ + { + "bbox": [ + 138, + 198, + 482, + 243 + ], + "type": "text", + "content": "1. Boss, M., Engelhardt, A., Kar, A., Li, Y., Sun, D., Barron, J., Lensch, H., Jampani, V.: Samurai: Shape and material from unconstrained real-world arbitrary image collections. Advances in Neural Information Processing Systems 35, 26389-26403 (2022)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 244, + 482, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 244, + 482, + 277 + ], + "spans": [ + { + "bbox": [ + 138, + 244, + 482, + 277 + ], + "type": "text", + "content": "2. Caron, M., Touvron, H., Misra, I., Jégou, H., Mairal, J., Bojanowski, P., Joulin, A.: Emerging properties in self-supervised vision transformers. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 9650-9660 (2021)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 277, + 482, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 277, + 482, + 320 + ], + "spans": [ + { + "bbox": [ + 138, + 277, + 482, + 320 + ], + "type": "text", + "content": "3. Chen, X., Dong, Z., Song, J., Geiger, A., Hilliges, O.: Category level object pose estimation via neural analysis-by-synthesis. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXVI 16. pp. 139-156. 
Springer (2020)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 321, + 482, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 321, + 482, + 365 + ], + "spans": [ + { + "bbox": [ + 138, + 321, + 482, + 365 + ], + "type": "text", + "content": "4. Chen, Y., Chen, X., Wang, X., Zhang, Q., Guo, Y., Shan, Y., Wang, F.: Local-to-global registration for bundle-adjusting neural radiance fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8264-8273 (2023)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 366, + 482, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 366, + 482, + 388 + ], + "spans": [ + { + "bbox": [ + 138, + 366, + 482, + 388 + ], + "type": "text", + "content": "5. Cheng, W., Cao, Y.P., Shan, Y.: Id-pose: Sparse-view camera pose estimation by inverting diffusion models. arXiv preprint arXiv:2306.17140 (2023)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 388, + 482, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 388, + 482, + 410 + ], + "spans": [ + { + "bbox": [ + 138, + 388, + 482, + 410 + ], + "type": "text", + "content": "6. Deng, Y., Yang, J., Tong, X.: Deformed implicit field: Modeling 3D shapes with learned dense correspondence. In: CVPR (2021)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 411, + 482, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 411, + 482, + 443 + ], + "spans": [ + { + "bbox": [ + 138, + 411, + 482, + 443 + ], + "type": "text", + "content": "7. Gal, R., Alaluf, Y., Atzmon, Y., Patashnik, O., Bermano, A.H., Chechik, G., Cohen-Or, D.: An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618 (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 444, + 482, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 444, + 482, + 476 + ], + "spans": [ + { + "bbox": [ + 138, + 444, + 482, + 476 + ], + "type": "text", + "content": "8. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial networks. Communications of the ACM 63(11), 139–144 (2020)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 477, + 482, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 477, + 482, + 510 + ], + "spans": [ + { + "bbox": [ + 138, + 477, + 482, + 510 + ], + "type": "text", + "content": "9. Goodwin, W., Vaze, S., Havoutis, I., Posner, I.: Zero-shot category-level object pose estimation. In: European Conference on Computer Vision. pp. 516-532. Springer (2022)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 510, + 482, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 510, + 482, + 521 + ], + "spans": [ + { + "bbox": [ + 138, + 510, + 482, + 521 + ], + "type": "text", + "content": "0. Gower, J.C., Dijksterhuis, G.B.: Procrustes problems, vol. 30. OUP Oxford (2004)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 521, + 482, + 554 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 521, + 482, + 554 + ], + "spans": [ + { + "bbox": [ + 138, + 521, + 482, + 554 + ], + "type": "text", + "content": "1. 
Gupta, K., Jampani, V., Esteves, C., Shrivastava, A., Makadia, A., Snavely, N., Kar, A.: ASIC: Aligning sparse in-the-wild image collections. arXiv preprint arXiv:2303.16201 (2023)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 555, + 482, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 555, + 482, + 576 + ], + "spans": [ + { + "bbox": [ + 138, + 555, + 482, + 576 + ], + "type": "text", + "content": "2. Huang, G., Mattar, M., Lee, H., Learned-Miller, E.: Learning to align from scratch. Advances in neural information processing systems 25 (2012)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 577, + 482, + 598 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 577, + 482, + 598 + ], + "spans": [ + { + "bbox": [ + 138, + 577, + 482, + 598 + ], + "type": "text", + "content": "3. Huang, G.B., Jain, V., Learned-Miller, E.: Unsupervised joint alignment of complex images. In: ICCV. pp. 1-8. IEEE (2007)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 599, + 482, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 599, + 482, + 643 + ], + "spans": [ + { + "bbox": [ + 138, + 599, + 482, + 643 + ], + "type": "text", + "content": "4. Jampani, V., Maninis, K.K., Engelhardt, A., Karpur, A., Truong, K., Sargent, K., Popov, S., Araujo, A., Martin-Brualla, R., Patel, K., et al.: Navi: Category-agnostic image collections with high-quality 3d shape and pose annotations. arXiv preprint arXiv:2306.09109 (2023)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 138, + 643, + 482, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 643, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 643, + 482, + 665 + ], + "type": "text", + "content": "5. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. In: International Conference on Learning Representations (2015)" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 219, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 219, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Congealing: 3D-Aware Image Alignment in the Wild" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 133, + 116, + 481, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 116, + 481, + 149 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 481, + 149 + ], + "type": "text", + "content": "16. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., et al.: Segment anything. 
arXiv preprint arXiv:2304.02643 (2023)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 149, + 481, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 149, + 481, + 182 + ], + "spans": [ + { + "bbox": [ + 133, + 149, + 481, + 182 + ], + "type": "text", + "content": "17. Kuang, Z., Olszewski, K., Chai, M., Huang, Z., Achlioptas, P., Tulyakov, S.: Neroic: Neural rendering of objects from online image collections. ACM Transactions on Graphics (TOG) 41(4), 1-12 (2022)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 182, + 481, + 202 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 182, + 481, + 202 + ], + "spans": [ + { + "bbox": [ + 132, + 182, + 481, + 202 + ], + "type": "text", + "content": "18. Learned-Miller, E.G.: Data driven image models through continuous joint alignment. IEEE TPAMI 28(2), 236-250 (2005)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 203, + 481, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 203, + 481, + 224 + ], + "spans": [ + { + "bbox": [ + 132, + 203, + 481, + 224 + ], + "type": "text", + "content": "19. Lin, A., Zhang, J.Y., Ramanan, D., Tulsiani, S.: Relpose++: Recovering 6d poses from sparse-view observations. arXiv preprint arXiv:2305.04926 (2023)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 224, + 481, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 224, + 481, + 257 + ], + "spans": [ + { + "bbox": [ + 132, + 224, + 481, + 257 + ], + "type": "text", + "content": "20. Lin, C.H., Ma, W.C., Torralba, A., Lucey, S.: Barf: Bundle-adjusting neural radiance fields. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5741-5751 (2021)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 257, + 481, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 257, + 481, + 289 + ], + "spans": [ + { + "bbox": [ + 132, + 257, + 481, + 289 + ], + "type": "text", + "content": "21. Liu, R., Wu, R., Van Hoorick, B., Tokmakov, P., Zakharov, S., Vondrick, C.: Zero-1-to-3: Zero-shot one image to 3d object. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9298-9309 (2023)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 289, + 481, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 289, + 481, + 311 + ], + "spans": [ + { + "bbox": [ + 132, + 289, + 481, + 311 + ], + "type": "text", + "content": "22. Lorensen, W.E., Cline, H.E.: Marching cubes: A high resolution 3d surface construction algorithm. ACM SIGGRAPH Computer Graphics 21(4), 163-169 (1987)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 311, + 481, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 311, + 481, + 331 + ], + "spans": [ + { + "bbox": [ + 132, + 311, + 481, + 331 + ], + "type": "text", + "content": "23. Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: International Conference on Learning Representations (2018)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 332, + 481, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 332, + 481, + 364 + ], + "spans": [ + { + "bbox": [ + 132, + 332, + 481, + 364 + ], + "type": "text", + "content": "24. 
Martin-Brualla, R., Radwan, N., Sajjadi, M.S.M., Barron, J.T., Dosovitskiy, A., Duckworth, D.: NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections. In: CVPR (2021)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 364, + 481, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 364, + 481, + 396 + ], + "spans": [ + { + "bbox": [ + 132, + 364, + 481, + 396 + ], + "type": "text", + "content": "25. Meng, Q., Chen, A., Luo, H., Wu, M., Su, H., Xu, L., He, X., Yu, J.: Gnerf: Gan-based neural radiance field without posed camera. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 6351-6361 (2021)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 396, + 481, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 396, + 481, + 429 + ], + "spans": [ + { + "bbox": [ + 132, + 396, + 481, + 429 + ], + "type": "text", + "content": "26. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM 65(1), 99-106 (2021)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 429, + 481, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 429, + 481, + 450 + ], + "spans": [ + { + "bbox": [ + 132, + 429, + 481, + 450 + ], + "type": "text", + "content": "27. Miller, E.G., Matsakis, N.E., Viola, P.A.: Learning from one example through shared densities on transforms. In: CVPR. vol. 1, pp. 464-471. IEEE (2000)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 450, + 481, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 450, + 481, + 472 + ], + "spans": [ + { + "bbox": [ + 132, + 450, + 481, + 472 + ], + "type": "text", + "content": "28. Min, J., Lee, J., Ponce, J., Cho, M.: Spair-71k: A large-scale benchmark for semantic correspondence. arXiv preprint arXiv:1908.10543 (2019)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 472, + 481, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 472, + 481, + 504 + ], + "spans": [ + { + "bbox": [ + 132, + 472, + 481, + 504 + ], + "type": "text", + "content": "29. Ofri-Amar, D., Geyer, M., Kasten, Y., Dekel, T.: Neural congealing: Aligning images to a joint semantic atlas. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 19403-19412 (2023)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 504, + 481, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 504, + 481, + 536 + ], + "spans": [ + { + "bbox": [ + 132, + 504, + 481, + 536 + ], + "type": "text", + "content": "30. Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., et al.: Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193 (2023)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 536, + 481, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 536, + 481, + 568 + ], + "spans": [ + { + "bbox": [ + 132, + 536, + 481, + 568 + ], + "type": "text", + "content": "31. Peebles, W., Zhu, J.Y., Zhang, R., Torralba, A., Efros, A.A., Shechtman, E.: Gansupervised dense visual alignment. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
13470-13481 (2022)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 568, + 481, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 568, + 481, + 589 + ], + "spans": [ + { + "bbox": [ + 132, + 568, + 481, + 589 + ], + "type": "text", + "content": "32. Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988 (2022)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 132, + 589, + 481, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 589, + 481, + 622 + ], + "spans": [ + { + "bbox": [ + 132, + 589, + 481, + 622 + ], + "type": "text", + "content": "33. Raj, A., Kaza, S., Poole, B., Niemeyer, M., Ruiz, N., Mildenhall, B., Zada, S., Aberman, K., Rubinstein, M., Barron, J., et al.: Dreambooth3d: Subject-driven text-to-3d generation. arXiv preprint arXiv:2303.13508 (2023)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 132, + 622, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 622, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 622, + 481, + 665 + ], + "type": "text", + "content": "34. Reizenstein, J., Shapovalov, R., Henzler, P., Sbordone, L., Labatut, P., Novotny, D.: Common objects in 3d: Large-scale learning and evaluation of real-life 3d category reconstruction. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10901-10911 (2021)" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "type": "text", + "content": "Y. Zhang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 567 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "type": "text", + "content": "35. Ren, T., Liu, S., Zeng, A., Lin, J., Li, K., Cao, H., Chen, J., Huang, X., Chen, Y., Yan, F., et al.: Grounded sam: Assembling open-world models for diverse visual tasks. arXiv preprint arXiv:2401.14159 (2024)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 150, + 482, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 150, + 482, + 183 + ], + "spans": [ + { + "bbox": [ + 130, + 150, + 482, + 183 + ], + "type": "text", + "content": "36. Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 
10684-10695 (2022)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 183, + 482, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 183, + 482, + 226 + ], + "spans": [ + { + "bbox": [ + 130, + 183, + 482, + 226 + ], + "type": "text", + "content": "37. Ruiz, N., Li, Y., Jampani, V., Pritch, Y., Rubinstein, M., Aberman, K.: Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 22500-22510 (2023)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 227, + 482, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 227, + 482, + 258 + ], + "spans": [ + { + "bbox": [ + 130, + 227, + 482, + 258 + ], + "type": "text", + "content": "38. Schonberger, J.L., Frahm, J.M.: Structure-from-motion revisited. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 4104-4113 (2016)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 259, + 482, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 259, + 482, + 281 + ], + "spans": [ + { + "bbox": [ + 130, + 259, + 482, + 281 + ], + "type": "text", + "content": "39. Shi, Y., Wang, P., Ye, J., Long, M., Li, K., Yang, X.: Mvdream: Multi-view diffusion for 3d generation. arXiv preprint arXiv:2308.16512 (2023)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 281, + 482, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 281, + 482, + 314 + ], + "spans": [ + { + "bbox": [ + 130, + 281, + 482, + 314 + ], + "type": "text", + "content": "40. Sun, X., Wu, J., Zhang, X., Zhang, Z., Zhang, C., Xue, T., Tenenbaum, J.B., Freeman, W.T.: Pix3d: Dataset and methods for single-image 3d shape modeling. In: CVPR (2018)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 315, + 482, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 315, + 482, + 358 + ], + "spans": [ + { + "bbox": [ + 130, + 315, + 482, + 358 + ], + "type": "text", + "content": "41. Wang, H., Sridhar, S., Huang, J., Valentin, J., Song, S., Guibas, L.J.: Normalized object coordinate space for category-level 6d object pose and size estimation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2642-2651 (2019)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 358, + 482, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 358, + 482, + 390 + ], + "spans": [ + { + "bbox": [ + 130, + 358, + 482, + 390 + ], + "type": "text", + "content": "42. Wang, J., Rupprecht, C., Novotny, D.: Posediffusion: Solving pose estimation via diffusion-aided bundle adjustment. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9773-9783 (2023)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 391, + 482, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 391, + 482, + 423 + ], + "spans": [ + { + "bbox": [ + 130, + 391, + 482, + 423 + ], + "type": "text", + "content": "43. Wang, P., Liu, L., Liu, Y., Theobalt, C., Komura, T., Wang, W.: Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. 
arXiv preprint arXiv:2106.10689 (2021)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 424, + 482, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 424, + 482, + 445 + ], + "spans": [ + { + "bbox": [ + 130, + 424, + 482, + 445 + ], + "type": "text", + "content": "44. Wang, Z., Wu, S., Xie, W., Chen, M., Prisacariu, V.A.: Nerf-: Neural radiance fields without known camera parameters. arXiv preprint arXiv:2102.07064 (2021)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 446, + 482, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 446, + 482, + 468 + ], + "spans": [ + { + "bbox": [ + 130, + 446, + 482, + 468 + ], + "type": "text", + "content": "45. Yariv, L., Gu, J., Kasten, Y., Lipman, Y.: Volume rendering of neural implicit surfaces. Advances in Neural Information Processing Systems 34, 4805-4815 (2021)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 468, + 482, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 468, + 482, + 501 + ], + "spans": [ + { + "bbox": [ + 130, + 468, + 482, + 501 + ], + "type": "text", + "content": "46. Yen-Chen, L., Florence, P., Barron, J.T., Rodriguez, A., Isola, P., Lin, T.Y.: inerf: Inverting neural radiance fields for pose estimation. In: 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 1323-1330. IEEE (2021)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 130, + 501, + 482, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 501, + 482, + 533 + ], + "spans": [ + { + "bbox": [ + 130, + 501, + 482, + 533 + ], + "type": "text", + "content": "47. Zhang, J., Yang, G., Tulsiani, S., Ramanan, D.: Ners: Neural reflectance surfaces for sparse-view 3d reconstruction in the wild. In: Advances in Neural Information Processing Systems. vol. 34, pp. 29835-29847 (2021)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 533, + 482, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 567 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 567 + ], + "type": "text", + "content": "48. Zhang, J.Y., Ramanan, D., Tulsiani, S.: Relpose: Predicting probabilistic relative rotation for single objects in the wild. In: European Conference on Computer Vision. pp. 592-611. 
Springer (2022)" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 219, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 219, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Congealing: 3D-Aware Image Alignment in the Wild" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Gaussian Parametric Head Model/0c0538a8-3292-41a6-ad76-710b8fc8de37_content_list.json b/2024/3D Gaussian Parametric Head Model/0c0538a8-3292-41a6-ad76-710b8fc8de37_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..d1255c32ee606c5bbcc9743b51a5b93975499bbf --- /dev/null +++ b/2024/3D Gaussian Parametric Head Model/0c0538a8-3292-41a6-ad76-710b8fc8de37_content_list.json @@ -0,0 +1,2068 @@ +[ + { + "type": "text", + "text": "3D Gaussian Parametric Head Model", + "text_level": 1, + "bbox": [ + 285, + 140, + 715, + 161 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yuelang $\\mathrm{Xu}^{1}$ , Lizhen Wang $^{1}$ , Zerong Zheng $^{2}$ , Zhaoqi $\\mathrm{Su}^{1}$ , and Yebin $\\mathrm{Liu}^{1}$", + "bbox": [ + 256, + 188, + 750, + 218 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Tsinghua University, Beijing, China", + "bbox": [ + 374, + 231, + 627, + 246 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2 NNKosmos, Hangzhou, China", + "bbox": [ + 393, + 246, + 609, + 258 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. Creating high-fidelity 3D human head avatars is crucial for applications in VR/AR, telepresence, digital human interfaces, and film production. Recent advances have leveraged morphable face models to generate animated head avatars from easily accessible data, representing varying identities and expressions within a low-dimensional parametric space. However, existing methods often struggle with modeling complex appearance details, e.g., hairstyles and accessories, and suffer from low rendering quality and efficiency. This paper introduces a novel approach, 3D Gaussian Parametric Head Model, which employs 3D Gaussians to accurately represent the complexities of the human head, allowing precise control over both identity and expression. Additionally, it enables seamless face portrait interpolation and the reconstruction of detailed head avatars from a single image. Unlike previous methods, the Gaussian model can handle intricate details, enabling realistic representations of varying appearances and complex expressions. Furthermore, this paper presents a well-designed training framework to ensure smooth convergence, providing a guarantee for learning the rich content. 
Our method achieves high-quality, photo-realistic rendering with real-time efficiency, making it a valuable contribution to the field of parametric head models.", + "bbox": [ + 261, + 295, + 743, + 559 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: 3D Gaussian $\\cdot$ Head Avatar $\\cdot$ Parametric Model", + "bbox": [ + 261, + 573, + 668, + 585 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 612, + 375, + 628 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Creating high-fidelity 3D human head avatars holds significant importance across various fields, including VR/AR, telepresence, digital human interfaces, and film production. The automatic generation of such avatars has been a focal point in computer vision research for many years. Recent methods [12,13,17,38,55,56,61-63,65] can create an animated head avatar through conveniently collected data such as monocular video data or even a picture [22, 26]. Serving as the most fundamental tool in these methods, the 3D morphable models (3DMM) [14, 25], which represent varying identities and expressions within a low-dimensional space, have been proven to be a highly successful avenue in addressing this challenging problem.", + "bbox": [ + 212, + 643, + 787, + 794 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Since the traditional parametric 3DMMs are typically limited by the topology of the underlying template mesh and only focus on the face part, some works [15,16,28,59] propose to use implicit Signed Distance Field (SDF) as the", + "bbox": [ + 212, + 795, + 787, + 840 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/18d9724a6f94017aeaf954badf8e38047fefbcd5fff7e8310c479cef2a0005b3.jpg", + "image_caption": [ + "Fig. 1: We utilize hybrid datasets comprising captured multi-view video data and rendered image data from 3D scans for training our model. The trained model can be manipulated using decoupled identity and expression codes to produce a diverse array of high-fidelity head models. When presented with an image, our model can be adjusted to reconstruct the portrait in the image and edit the expression according to any other desired expressions." + ], + "image_footnote": [], + "bbox": [ + 217, + 146, + 787, + 266 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "geometric representation to model the entire head. Despite their flexibility, these methods fall short in recovering high-frequency geometric and texture details like hairstyles, glasses or accessories. On the other end of the spectrum, Neural Radiance Field (NeRF) [34] based methods [19,64] learn parametric head models by directly synthesizing images, thus eliminating the need of geometry modeling. However, NeRF is built upon volumetric rendering, which involves sampling and integrating points distributed throughout space. Therefore, NeRF-based methods typically suffer from low rendering efficiency and have to trade it off with rendering resolution, thereby greatly reducing rendering quality. Moreover, skipping geometric reconstruction would probably lead to poor 3D consistency.", + "bbox": [ + 212, + 382, + 787, + 532 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "More recently, 3D Gaussian Splatting (3DGS) [21], which uses explicit Gaussian ellipsoids to represent 3D scenes, has attracted significant attention from the research community. 
Experiments have verified the superior quality of the rendered results and excellent rendering efficiency compared to previous NeRF-based or surface-based methods even on dynamic scenes [32,48,57,58]. Motivated by this progress, we propose a novel 3D Gaussian Parametric Head Model, which, for the first time, marries the power of 3DGS with the challenging task of parametric head modeling. Our 3D Gaussian parametric head model decouples the control signals of the head into the latent spaces of identity and expression, as is also done in the SDF-based face model NPHM [15]. These latent spaces are then mapped to the offsets of the Gaussian positions, which effectively represent the variance of shape and appearance of different identities and expressions. Benefiting from the differentiability of Gaussian splatting, our model can be learned from a multi-view video data corpus in an end-to-end manner, without relying on geometry supervision.", + "bbox": [ + 212, + 536, + 787, + 762 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Unfortunately, training our 3D Gaussian parametric head model is not quite straightforward, because Gaussian ellipsoids are unstructured and each Gaussian ellipsoid has its own independent learnable attributes. Such a characteristic makes 3DGS powerful in overfitting a specific object or scene, but poses great challenges for generative head modeling. Without proper initialization and regularization,", + "bbox": [ + 212, + 763, + 787, + 840 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Xu et al.", + "bbox": [ + 271, + 114, + 331, + 126 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "the learned parametric head model may suffer from unstable training or a large number of Gaussian points becoming redundant and noisy, as shown in Fig. 4.", + "bbox": [ + 212, + 146, + 782, + 176 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To overcome these challenges, we propose a well-designed two-stage training strategy to ensure smooth convergence of our model training. Specifically, we first roughly train all the networks on a mesh-based guiding model. Subsequently, the network parameters are migrated to the Gaussian model, and all Gaussian points are initialized with the trained mesh geometry to ensure that they are located near the actual surface. Compared to naive initialization with FLAME [25], our initialization strategy leads to a better guess of the positions of Gaussian points, making the subsequent training of the model converge stably and areas like hair better recovered. Moreover, we propose to use a 3D landmark loss to supervise the deformation of the model learning expressions, which can speed up the convergence and avoid artifacts under exaggerated expressions. Lastly, our method supports training from both 3D head scans and multi-view 2D face datasets, which enhances the versatility and comprehensiveness of facial data collection and model training.", + "bbox": [ + 212, + 176, + 785, + 387 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "After training on a large corpus of multi-view head videos, our parametric Gaussian head model can generate photorealistic images that accurately depict the diverse range of facial appearances, naturally handling complex and exaggerated expressions, while also enabling real-time rendering. 
Additionally, our method supports single-image fitting and surpasses previous techniques in both reconstruction accuracy and identity consistency. Furthermore, the model resulting from our fitting process allows for the control of various expressions while maintaining naturalness and consistent identity even under exaggerated expressions. The contributions of our method can be summarized as:", + "bbox": [ + 212, + 388, + 785, + 523 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose 3D Gaussian Parametric Head Model, a novel parametric head model which utilizes 3D Gaussians as the representation and enables photorealistic rendering quality and real-time rendering speed.", + "- We propose a well-designed training strategy to ensure that the Gaussian model converges stably while learning rich appearance details and complex expressions efficiently.", + "- Our 3D Gaussian Parametric Head Model enables the generation of a detailed, high-quality face avatar from a single given image, as well as performing expression and identity editing upon it." + ], + "bbox": [ + 225, + 531, + 784, + 666 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 215, + 688, + 387, + 704 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Parametric Head Models. Parametric head models are used to represent facial features, expressions, and identities effectively and efficiently. They allow for the creation of realistic human faces with adjustable parameters, making them essential in computer graphics, animation, and virtual reality. Therefore, research in this field has always been a hot topic. Traditional 3D Morphable Models (3DMM) [2,6,14,25,47] are constructed by non-rigidly registering a template mesh with fixed topology to a series of 3D scans. Through this registration process, a 3DMM can be computed using dimensionality reduction techniques such", + "bbox": [ + 212, + 719, + 785, + 840 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "3D Gaussian Parametric Head Model", + "bbox": [ + 480, + 114, + 730, + 127 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "as principal component analysis (PCA). The resulting parametric space captures the variations in facial geometry and appearance across a population. However, while 3DMMs offer a powerful way to represent faces, they do have limitations. These models rely heavily on the correspondence between the 3D scans and the template for accurate fitting and may struggle to represent local surface details like wrinkles or hairstyles that deviate significantly from the template mesh. Recent advances in implicit representation have led to great progress in neural parametric head models. Some methods [15, 16, 49, 59] propose implicit Signed Distance Field (SDF) based head models, which are not constrained by topology and thus can recover more complex content like hair compared to previous mesh-based methods. Other methods [3, 19, 44, 64] propose to use NeRF [34] as the representation of the parametric head models, which can directly synthesize photorealistic images without geometric reconstruction. Cao et al. [5] use a hybrid representation [30] of mesh and NeRF to train their model on unpublished large-scale light stage data. 
However, rendering efficiency is typically low in NeRF-based methods, often forcing a trade-off with rendering resolution.

3D GAN based Head Models. 3D Generative Adversarial Networks (GANs) have revolutionized the field of computer vision, particularly in the domain of human head and face modeling, enabling the generation of face avatars from input images. Traditional methods often require labor-intensive manual work or rely on multi-view images to create 3D models. 3D GANs offer a more automated and data-driven approach: trained only on single-view 2D images, they generate detailed and realistic 3D models of the human head [7-9,11,18,35,52]. PanoHead [1] additionally introduces images of the back of the head and trains a full-head generative model. Building on these methods, IDE-3D [42] uses a semantic map to edit the 3D head model. Next3D [43] and AniFaceGAN [50] condition the generated head model on the FLAME model [25], so that its expression and pose can be controlled. AniPortraitGAN [51] further replaces FLAME with the SMPL-X model [36] to generate upper-body avatars, so the shoulders and neck can also be controlled. These 3D GAN-based models primarily leverage the coarse FLAME model for expression control, often leading to a loss of expression details in the generated faces. In contrast, our method directly learns the expression distribution from the dataset, capturing more facial appearance details.

3D Gaussians. Recently, 3D Gaussian splatting [21] has shown superior performance compared to NeRF, excelling in both novel view synthesis quality and rendering speed. Several methods have extended the Gaussian representation to dynamic scene reconstruction [32,48,57,58]. For human body avatar modeling, recent approaches [20,27] train a 3D Gaussian avatar animated by SMPL [31] or a skeleton from multi-view videos, surpassing previous methods in rendering quality and efficiency. In the realm of human head avatar modeling, recent techniques [10,23,33,37,39,40,45,53,54] also utilize 3D Gaussians to create high-fidelity and efficient head avatars. These approaches center on creating a high-fidelity person-specific avatar from data of a single person.

Fig. 2: The pipeline of our method. Our training strategy is divided into a Guiding Geometry Model for initialization and a final 3D Gaussian Parametric Head Model. Deformations of each model are further decoupled into identity-related and expression-related deformations. Rendering uses DMTet to transform the initial model into a mesh, and 3D Gaussian Splatting for the Gaussian model. Features from both models are finally upsampled to high-resolution portrait images through a convolutional network $\Psi$. During inference, our output comes exclusively from the Gaussian model.
In contrast, our method focuses on a versatile prior model that can accommodate varying appearances. Once trained, our model is also capable of person-specific avatar reconstruction by fitting to the provided input image data.

3 Method

In this section, we present the 3D Gaussian Parametric Head Model. In contrast to previous mesh-based or NeRF-based models, initializing and training Gaussian-based models pose distinct challenges. This section introduces the datasets and preprocessing, the carefully designed guiding geometry model, and the 3D Gaussian Parametric Head Model, and outlines their respective training processes. Additionally, we provide the training details and demonstrate how to utilize our method given a single input image.

3.1 Data Preprocessing

We use three datasets for model training: a multi-view video dataset, NeRSemble [24], and two 3D scan datasets, NPHM [15] and FaceVerse [47]. We do not use the 3D geometry of the scans directly, but render them into multi-view images and use only the images from the three datasets as supervision. To better utilize these three different datasets, we apply the following preprocessing. First, we resize the images to 512 resolution and adjust the camera parameters accordingly. Then, we use BackgroundMattingV2 [29] to extract the foreground subjects in the NeRSemble dataset and record the masks; this step is not required for the two synthetic datasets. Next, we use face alignment [4] to detect 2D landmarks in all the images. Using these 2D landmarks, we fit a Basel Face Model (BFM) [14] for each expression of each identity, and record the head pose and the 3D landmarks of the BFM. The processed camera parameters, images, masks, BFM head poses, and BFM 3D landmarks are used to train our model.
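A minimal sketch of this per-frame preprocessing is given below; `matte`, `detect_landmarks`, `fit_bfm`, and `camera.rescaled` are hypothetical stand-ins for BackgroundMattingV2 [29], the face-alignment detector [4], the BFM fitting step, and the intrinsics adjustment, not the authors' code.

```python
import cv2

def preprocess_frame(path, camera, matte, detect_landmarks, fit_bfm):
    """Sketch of the per-frame preprocessing in Sec. 3.1."""
    img = cv2.imread(path)
    img = cv2.resize(img, (512, 512))          # resize to 512 resolution
    camera = camera.rescaled(512, 512)         # hypothetical: adjust intrinsics to the new size
    mask = matte(img)                          # foreground matting (NeRSemble frames only)
    lmk2d = detect_landmarks(img)              # 2D landmarks, e.g. via face alignment [4]
    head_pose, lmk3d = fit_bfm(lmk2d, camera)  # per-expression BFM fit: head pose + 3D landmarks
    return {"image": img, "mask": mask, "camera": camera,
            "head_pose": head_pose, "landmarks3d": lmk3d}
```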
3.2 Model Representation

The 3D Gaussian representation poses challenges due to its unordered and unstructured nature, which makes it difficult for gradients to spread continuously to neighboring points in space during backpropagation. This often results in convergence failure when the Gaussians are randomly initialized. On the other hand, surface-based representations such as meshes are suitable only for learning rough geometry. A direct idea is to utilize an existing 3DMM, such as FLAME [25], to initialize the point positions in 3D Gaussian splatting [21]. However, this coarse initialization still fails to converge the positions of the 3D points to the correct locations, as shown in Fig. 4: the network tends to alter the shapes of the ellipsoids to achieve a suitable fitting result, leading to inaccurate point cloud geometry and blurriness in the rendered image.

To address this problem, a more detailed initialization process is necessary for capturing the diverse head variations with 3D Gaussian splatting. Specifically, we draw inspiration from Gaussian Head Avatar [54] and leverage an implicit signed distance field (SDF) representation to train a guiding geometry model, which serves as the initial value for the Gaussian model and provides a more effective starting point for the optimization. We refer to the initial model as the Guiding Geometry Model and to the refined model as the 3D Gaussian Parametric Head Model.

Guiding Geometry Model. The guiding geometry model receives an identity code $z^{id}$ and an expression code $z^{exp}$ as input, producing a mesh with vertices $V$, faces $F$, and per-vertex colors $C$ that align with the specified identity and expression. To achieve this, we use an MLP denoted as $f_{mean}(\cdot)$ to implicitly model the SDF, which represents the mean geometry:

$$
s, \gamma = f_{mean}(x), \tag{1}
$$

where $s$ denotes the SDF value, $\gamma$ denotes the feature from the last layer, and $x$ denotes the input position. Then, we convert the implicit SDF through Deep Marching Tetrahedra (DMTet) [41] into an explicit mesh with vertex positions $V_0$, per-vertex features $\Gamma$, and faces $F$.
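To make Eqn. 1 concrete, the following is a minimal PyTorch sketch of $f_{mean}(\cdot)$; the layer sizes, activation, and absence of positional encoding are our own assumptions, and the DMTet mesh extraction [41] is omitted.

```python
import torch.nn as nn

class MeanGeometrySDF(nn.Module):
    """f_mean of Eqn. 1: position x -> (SDF value s, last-layer feature gamma)."""
    def __init__(self, feat_dim=64, hidden=256, depth=6):
        super().__init__()
        layers, d = [], 3
        for _ in range(depth):
            layers += [nn.Linear(d, hidden), nn.Softplus(beta=100)]
            d = hidden
        self.trunk = nn.Sequential(*layers)
        self.head = nn.Linear(hidden, 1 + feat_dim)   # concatenated [s, gamma]

    def forward(self, x):                             # x: (N, 3) query positions
        out = self.head(self.trunk(x))
        return out[..., :1], out[..., 1:]             # s: (N, 1), gamma: (N, feat_dim)
```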
Next, we transform the mean shape into a neutral-expression shape conditioned on the input identity code $z^{id}$. To inject identity information into the vertices of the mesh, we first use an injection MLP $f_{inj}(\cdot)$, which takes the identity code $z^{id}$ and the per-vertex features $\Gamma$ as input and produces identity-conditioned per-vertex feature vectors $H = f_{inj}(z^{id}, \Gamma)$. Subsequently, utilizing a tiny MLP $f_{id}(\cdot)$, we predict the displacement $\delta V_{id}$ for each vertex; this displacement transforms the mean shape into the neutral-expression shape conditioned on the identity code $z^{id}$.

After completing the identity-related deformation, the next step is to capture the deformation induced by facial expressions. We introduce another tiny MLP, $f_{exp}(\cdot)$, which takes the feature vectors $H$ obtained in the previous step and the expression code $z^{exp}$ as input, and outputs the displacement $\delta V_{exp}$ for each vertex. Using these displacements, we update the vertex positions to $V_{can}$. Additionally, we feed the same feature vectors $H$ and expression code $z^{exp}$ to a color MLP, $f_{col}(\cdot)$, to predict the 32-channel color $C$ for each vertex. The vertex positions $V_{can}$ and the 32-channel colors $C$ can be described as:

$$
V_{can} = V_0 + f_{id}(H) + f_{exp}(H, z^{exp}), \quad C = f_{col}(H, z^{exp}). \tag{2}
$$

Finally, we utilize the head pose parameters $R$ and $T$ estimated during data preprocessing to transform the mesh from the canonical space to the world space, $V = R \cdot V_{can} + T$. After generating the final vertex positions, colors, and faces $\{V, C, F\}$ of the mesh, we render the mesh into a 256-resolution, 32-channel feature map $I_F$ and a mask $M$ through differentiable rasterization with a given camera pose. Subsequently, the feature map is decoded into a 512-resolution RGB image $I_{hr}$ by a lightweight convolutional upsampling network $\Psi(\cdot)$, as shown in Fig. 2.

3D Gaussian Parametric Head Model. The Gaussian model also takes an identity code $z^{id}$ and an expression code $z^{exp}$ as input, producing the positions $X$, colors $C$, scales $S$, rotations $Q$, and opacities $A$ of the 3D Gaussians. Similar to the guiding geometry model, we maintain an overall mean point cloud with mean positions $X_0$. However, we no longer generate the per-vertex features $\Gamma$ through $f_{mean}(x)$; instead, we generate them once and bind them to the Gaussian points as optimizable variables $\Gamma_0$. This is possible because the number of Gaussian points is fixed at this stage. We then transform the mean point cloud into a neutral-expression point cloud conditioned on the identity code $z^{id}$. To achieve this, we utilize the same injection MLP $f_{inj}(\cdot)$ and identity deformation MLP $f_{id}(\cdot)$ defined in the guiding geometry model, which generate feature vectors $H = f_{inj}(z^{id}, \Gamma_0)$ that encode identity information for each point and predict the identity-related displacement of each point. We then predict the displacement conditioned on the expression code $z^{exp}$ as well. The resulting positions $X_{can}$ and the 32-channel color $C$ of each point, analogous to the guiding geometry model, can be described as:

$$
X_{can} = X_0 + f_{id}(H) + f_{exp}(H, z^{exp}), \quad C = f_{col}(H, z^{exp}). \tag{3}
$$
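The decoupled deformation of Eqns. 2 and 3 has the same functional form for both models (mesh vertices in the guiding model, Gaussian points in the final model). A minimal sketch, with the MLPs passed in as callables and all shapes assumed per-point:

```python
def deform(points0, feat0, z_id, z_exp, f_inj, f_id, f_exp, f_col):
    """Shared deformation of Eqns. 2 and 3: points0 is V_0 (vertices) or X_0 (Gaussians)."""
    H = f_inj(z_id, feat0)              # identity-conditioned per-point features
    delta_id = f_id(H)                  # identity displacement, (N, 3)
    delta_exp = f_exp(H, z_exp)         # expression displacement, (N, 3)
    points_can = points0 + delta_id + delta_exp
    color = f_col(H, z_exp)             # 32-channel per-point color, (N, 32)
    return points_can, color, H
```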
Unlike the SDF and DMTet representations, Gaussians have additional attributes that need to be predicted. Here, we introduce a new MLP to predict the Gaussian attributes in the canonical space, including the scale $S$, rotation $Q_{can}$, and opacity $A$. To ensure the stability of the generated results, we refrain from directly predicting these values. Instead, we predict their offsets $\{\delta S, \delta Q, \delta A\}$ relative to the overall mean values $\{S_0, Q_0, A_0\}$:

$$
\{S, Q_{can}, A\} = \{S_0, Q_0, A_0\} + f_{att}(H, z^{exp}). \tag{4}
$$

Following this, we utilize the head pose parameters $R$ and $T$ estimated during data preprocessing to transform the canonical-space variables $X_{can}$ and $Q_{can}$ into the world space: $X = R \cdot X_{can} + T$, $Q = R \cdot Q_{can}$. For model rendering, we leverage differentiable rendering [21] and neural rendering techniques to generate images: the 3D Gaussian parameters $\{X, C, S, Q, A\}$, conditioned on the identity code $z^{id}$ and expression code $z^{exp}$, are splatted into a feature map, which is finally fed into the same upsampling network $\Psi(\cdot)$ as in the guiding geometry model to generate a 512-resolution RGB image.
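A sketch of Eqn. 4 together with the canonical-to-world transform follows; representing rotations as 3x3 matrices is our simplification, since 3DGS itself stores quaternions and a conversion would be needed in practice.

```python
import torch

def to_world(X_can, H, z_exp, S0, Q0, A0, f_att, R, T):
    """Eqn. 4 plus the canonical-to-world transform of Sec. 3.2 (sketch)."""
    dS, dQ, dA = f_att(H, z_exp)                  # offsets relative to the shared means
    S, A = S0 + dS, A0 + dA
    Q_can = Q0 + dQ
    X = X_can @ R.T + T                           # X = R * X_can + T
    Q = torch.einsum("ij,njk->nik", R, Q_can)     # Q = R * Q_can (rotations as 3x3 here)
    return X, S, Q, A
```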
In the 3D Gaussian Parametric Head Model, we leverage the previously trained guiding geometry model to initialize our variables and networks, rather than initializing them randomly and training from scratch. Specifically, we initialize the Gaussian positions $X_0$ with the vertex positions of the mean mesh $V_0$. Meanwhile, we generate the per-vertex features $\Gamma$ from $f_{mean}(x)$ once at the beginning and bind them to the points as the optimizable variables $\Gamma_0$, as described above. Additionally, all identity codes $z^{id}$, expression codes $z^{exp}$, and the networks $\{f_{inj}(\cdot), f_{id}(\cdot), f_{exp}(\cdot), f_{col}(\cdot), \Psi(\cdot)\}$ are directly inherited from the guiding geometry model. Note that the attribute MLP $f_{att}(\cdot)$ is a newly introduced network and is therefore initialized randomly. Finally, the overall mean values of the Gaussian attributes $\{S_0, Q_0, A_0\}$ are initialized following the original 3D Gaussian Splatting [21].

3.3 Loss Functions

To ensure the accurate convergence of the model, we employ several loss functions as constraints, including a basic photometric loss and a silhouette loss that enforce consistency of the rendered high-resolution images $I_{hr}$ and the rendered masks $M$ with the ground truth:

$$
\mathcal{L}_{hr} = \left\| I_{hr} - I_{gt} \right\|_1, \quad \mathcal{L}_{sil} = \mathrm{IOU}(M, M_{gt}), \tag{5}
$$

with $I_{gt}$ denoting the ground-truth RGB images and $M_{gt}$ the ground-truth masks. We further encourage the first three channels of the low-resolution feature map $I_{lr}$ to closely match the ground-truth RGB image $I_{gt}$ through an $L_1$ loss:

$$
\mathcal{L}_{lr} = \left\| I_{lr} - I_{gt} \right\|_1. \tag{6}
$$
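A minimal sketch of these image-space terms; since a raw IoU would be maximized rather than minimized, we assume the silhouette term of Eqn. 5 is implemented as 1 - IoU.

```python
def image_losses(I_hr, I_lr, I_gt, M, M_gt, eps=1e-6):
    """Eqns. 5 and 6 (sketch); tensors are batched NCHW, masks in [0, 1]."""
    L_hr = (I_hr - I_gt).abs().mean()
    L_lr = (I_lr[:, :3] - I_gt).abs().mean()      # first three feature channels vs. RGB
    inter = (M * M_gt).sum()
    union = (M + M_gt - M * M_gt).sum()
    L_sil = 1.0 - inter / (union + eps)           # assumed: IoU term minimized as 1 - IoU
    return L_hr, L_lr, L_sil
```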
The geometric deformation caused by expressions is typically complex and cannot be learned through image supervision alone. Therefore, we provide additional coarse supervision for expression deformation learning using 3D landmarks. Specifically, we define 3D landmarks $P_0$ in the canonical space, then predict their displacements and transform them to the world space as $P$, just like the transformation of the original vertices $V_0$ above. We then construct the landmark loss:

$$
\mathcal{L}_{lmk} = \left\| P - P_{gt} \right\|_2, \tag{7}
$$

with $P_{gt}$ denoting the ground-truth 3D landmarks, which are estimated by fitting a BFM model to the training data during preprocessing.

Moreover, to guarantee the decoupling of the identity and expression deformations learned by the model and to minimize redundancy, we introduce the following regularization loss, which minimizes the magnitudes of both deformations:

$$
\mathcal{L}_{reg} = \left\| \delta V_{id} \right\|_2 + \left\| \delta V_{exp} \right\|_2. \tag{8}
$$

During the training of the Guiding Geometry Model, we also apply a Laplacian smoothness term $\mathcal{L}_{lap}$ to penalize surface noise and breaks. Overall, the total loss is formulated as:

$$
\mathcal{L} = \mathcal{L}_{hr} + \lambda_{sil} \mathcal{L}_{sil} + \lambda_{lr} \mathcal{L}_{lr} + \lambda_{lmk} \mathcal{L}_{lmk} + \lambda_{reg} \mathcal{L}_{reg} + \lambda_{lap} \mathcal{L}_{lap}, \tag{9}
$$

with each $\lambda$ denoting the weight of the corresponding term. In practice, we set $\lambda_{sil} = 0.1$, $\lambda_{lr} = 0.1$, $\lambda_{lmk} = 0.1$, $\lambda_{reg} = 0.001$, and $\lambda_{lap} = 100$. During training, we jointly optimize the variables $\{z^{id}, z^{exp}, f_{inj}(\cdot), f_{mean}(\cdot), f_{id}(\cdot), f_{exp}(\cdot), f_{col}(\cdot), \Psi(\cdot), P_0\}$. Notably, the canonical 3D landmarks $P_0$ are initialized by averaging the estimated 3D landmarks over the training dataset.
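Assembling Eqn. 9 is then a weighted sum with the stated defaults; a sketch, assuming the individual terms are collected in a dictionary:

```python
def total_loss_guiding(L, lam_sil=0.1, lam_lr=0.1, lam_lmk=0.1,
                       lam_reg=0.001, lam_lap=100.0):
    """Weighted sum of Eqn. 9 (guiding-model stage) with the stated default weights."""
    return (L["hr"] + lam_sil * L["sil"] + lam_lr * L["lr"]
            + lam_lmk * L["lmk"] + lam_reg * L["reg"] + lam_lap * L["lap"])
```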
During the training stage of the 3D Gaussian Parametric Head Model, we additionally compute a perceptual loss [60], $\mathcal{L}_{vgg} = VGG(I_{hr}, I_{gt})$, to encourage the model to learn more high-frequency details. As when training the guiding geometry model, we enforce the first three channels of the feature map to be RGB channels as in Eqn. 6, and introduce the landmark guidance term of Eqn. 7 and the regularization term for the point displacements of Eqn. 8. Consequently, the overall loss for this stage is formulated as:

$$
\mathcal{L} = \mathcal{L}_{hr} + \lambda_{vgg} \mathcal{L}_{vgg} + \lambda_{lr} \mathcal{L}_{lr} + \lambda_{lmk} \mathcal{L}_{lmk} + \lambda_{reg} \mathcal{L}_{reg}, \tag{10}
$$

with the weights $\lambda_{vgg} = 0.1$, $\lambda_{lr} = 0.1$, $\lambda_{lmk} = 0.1$, and $\lambda_{reg} = 0.001$. In this training stage, we again jointly optimize all the variables and networks mentioned above, including the overall mean positions and attributes of the Gaussians and the 3D landmarks: $\{z^{id}, z^{exp}, f_{inj}(\cdot), f_{id}(\cdot), f_{exp}(\cdot), f_{col}(\cdot), f_{att}(\cdot), \Psi(\cdot), X_0, \Gamma_0, S_0, Q_0, A_0, P_0\}$.

3.4 Inference Details

Image-based Fitting. Given a single RGB portrait image, we first align the image according to the processing rules of the training set. Subsequently, we employ gradient descent to fit the image rendered by the 3D Gaussian Parametric Head Model to this input image, using the photometric losses $\mathcal{L}_{lr}$ and $\mathcal{L}_{hr}$ defined in Eqn. 10. This process regresses the identity code $z^{id}$ and expression code $z^{exp}$; we optimize for only 200 iterations with a learning rate of $1 \times 10^{-3}$ for both latent codes. Following this, we fix the latent codes $z^{id}$ and $z^{exp}$, so that the variables $H$ and $X_{can}$ are also fixed. We then further optimize the color MLP $f_{col}(\cdot)$ and the canonical positions $X_{can}$, which represent the geometry of the current subject, using the same loss function; in this step we optimize for only 100 iterations with a learning rate of $1 \times 10^{-4}$ for both $f_{col}(\cdot)$ and $X_{can}$. This optimization adds details that cannot be recovered by the trained model itself, ultimately yielding the reconstructed head model. The entire process totals 300 iterations and takes only 30 seconds.
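A sketch of this two-phase fitting loop; the model interface (`render`, `render_from_canonical`, `canonical_positions`, `f_col`) is hypothetical and only mirrors the quantities named above.

```python
import torch

def fit_single_image(model, I_gt, cam, loss_fn):
    """Two-phase single-image fitting (Sec. 3.4): 200 iters on the latent codes,
    then 100 iters refining f_col and the canonical positions X_can."""
    z_id = torch.zeros(model.id_dim, requires_grad=True)
    z_exp = torch.zeros(model.exp_dim, requires_grad=True)
    opt = torch.optim.Adam([z_id, z_exp], lr=1e-3)
    for _ in range(200):                                  # phase 1: latent codes only
        opt.zero_grad()
        I_hr, I_lr = model.render(z_id, z_exp, cam)
        loss_fn(I_hr, I_lr, I_gt).backward()
        opt.step()

    z_id, z_exp = z_id.detach(), z_exp.detach()           # freeze codes; H and X_can fixed
    X_can = model.canonical_positions(z_id, z_exp).clone().requires_grad_(True)
    opt = torch.optim.Adam([X_can] + list(model.f_col.parameters()), lr=1e-4)
    for _ in range(100):                                  # phase 2: refine color MLP + geometry
        opt.zero_grad()
        I_hr, I_lr = model.render_from_canonical(X_can, z_id, z_exp, cam)
        loss_fn(I_hr, I_lr, I_gt).backward()
        opt.step()
    return z_id, z_exp, X_can
```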
Expression Editing. Given a source portrait image providing the subject whose expression is to be edited, and a target portrait image providing the target expression, we first obtain the head model of the source subject through the image-based fitting strategy described above. For the target portrait image, we obtain the head model and the corresponding expression code in the same way. Finally, we feed the target expression code to the head model of the source subject, so that the source subject's expression is edited to the target one.
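Expression editing then amounts to two fitting runs and a code swap; a sketch reusing `fit_single_image` from the fitting sketch above (the render interface is again hypothetical):

```python
def edit_expression(model, source_img, target_img, cam, loss_fn):
    """Expression editing (Sec. 3.4): source identity, target expression."""
    z_id_src, _, _ = fit_single_image(model, source_img, cam, loss_fn)
    _, z_exp_tgt, _ = fit_single_image(model, target_img, cam, loss_fn)
    return model.render(z_id_src, z_exp_tgt, cam)   # re-drive the source avatar
```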
4 Experiments

4.1 Datasets

NeRSemble contains over 260 different identities and provides 72 fps multi-view videos from 16 synchronized cameras for each identity. The total video length for a single identity is approximately 6000-11000 frames. In our experiments, we selected 140 of the identities for training and the rest for evaluation. For each identity's video, we selected about 150 frames from all 16 views as training data.

NPHM contains 5200 3D human head scans from 255 different identities, each with about 20 different expressions. We selected approximately 1600 scans of 80 identities for training. Since our method uses 2D images as training supervision, we render each scan from 80 different views to generate synthetic image data and record the camera parameters and masks.

FaceVerse is an East Asian human head scan dataset containing 2310 scans from 110 different identities, each with 21 expressions. We selected 1620 scans of 80 identities for training. Similarly, for each scan we render synthetic multi-view image data from 80 different views and record the camera parameters and masks.

4.2 Evaluation

Disentanglement. We tested the performance of the 3D Gaussian Parametric Head Model under the control of different identity codes and different expression codes.

Fig. 3: We generate head models conditioned on randomly sampled identity codes and expression codes (rows: ID1-ID2; columns: Exp1-Exp5). Each row corresponds to the same identity code, and each column corresponds to the same expression code.

Fig. 4: We compare our initialization strategy with initialization from the vertices of the FLAME model (panels: GT, Our Initialization, FLAME Initialization). The left side shows the rendered image, and the right side shows the positions of the Gaussian points.
We randomly sampled 2 identity codes and 5 expression codes to generate 10 head models. Each row corresponds to the same identity code and each column to the same expression code, as shown in Fig. 3. Our model performs well in both identity consistency and expression consistency, and the two components are fully disentangled.

Ablation on Initialization. To evaluate the effectiveness of our initialization strategy with the guiding geometry model outlined in Section 3, we compare it against a FLAME-based initialization strategy. To use the FLAME model for initialization, we first fit a FLAME model to the overall mean 3D landmarks estimated during data preprocessing. Then, we sample 100,000 points near the surface of the FLAME mesh as the initialization of the mean Gaussian positions $X_0$. The per-point features $\Gamma$ bound to each point are set to zero, and all the networks $\{f_{inj}(\cdot), f_{id}(\cdot), f_{exp}(\cdot), f_{col}(\cdot), \Psi(\cdot)\}$ and $f_{att}(\cdot)$ are randomly initialized, as no prior is available. The initialization of the Gaussian attributes $\{S_0, Q_0, A_0\}$ remains the same as in our strategy.
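Sampling the 100,000 initialization points near the FLAME surface can be done with standard uniform-on-triangle sampling plus a jitter along the face normal; this is our own sketch of the baseline, not the authors' code (the jitter scale `sigma` is an assumption).

```python
import numpy as np

def sample_near_surface(vertices, faces, n=100_000, sigma=0.005):
    """Sample points near a triangle mesh surface (FLAME-style init baseline)."""
    idx = np.random.randint(len(faces), size=n)
    tri = vertices[faces[idx]]                       # (n, 3, 3) triangle corners
    uv = np.random.rand(n, 2)
    flip = uv.sum(1) > 1
    uv[flip] = 1 - uv[flip]                          # uniform barycentric coordinates
    pts = tri[:, 0] + uv[:, :1] * (tri[:, 1] - tri[:, 0]) \
                    + uv[:, 1:] * (tri[:, 2] - tri[:, 0])
    nrm = np.cross(tri[:, 1] - tri[:, 0], tri[:, 2] - tri[:, 0])
    nrm /= np.linalg.norm(nrm, axis=1, keepdims=True) + 1e-9
    return pts + sigma * np.random.randn(n, 1) * nrm  # jitter off the surface
```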
We show the visualization results in Fig. 4, with the rendered image of the Gaussian model on the left and the Gaussian positions displayed as point clouds on the right. Our initialization strategy using the guiding geometry model ensures that all the Gaussian points fall evenly on the actual surface of the model, thereby ensuring reconstruction quality. When the FLAME model is used for initialization, a large number of points wander inside or outside the actual surface, causing noise and redundancy, which makes the model lose some high-frequency information and prevents full convergence. We also perform a quantitative evaluation of the different initialization strategies on the rendered images, as shown in Table 1, which confirms that our method leads to better rendering results.
Method                 PSNR ↑   SSIM ↑   LPIPS ↓
FLAME Initialization   25.7     0.82     0.109
Our Initialization     28.0     0.84     0.085
", + "bbox": [ + 276, + 429, + 725, + 477 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Table 1: Quantitative evaluation results of our initialization strategy and naive FLAME initialization strategy.", + "bbox": [ + 212, + 477, + 785, + 503 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Ablation on Representation and Super Resolution. We conduct the ablation study for the guiding mesh model, the Gaussian model and the super-resolution network (abbreviated as SR) as shown in the Fig. 5. The corresponding PSNR metrics are: Mesh (15.7), Mesh+SR (17.3), Gaussian (27.0), Gaussian+SR (29.3). Compared to mesh, utilizing 3D Gaussian as the representation brings significant improvements (+12), while the super resolution module adds some details, generating more realistic results.", + "bbox": [ + 212, + 518, + 787, + 625 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.3 Applications", + "text_level": 1, + "bbox": [ + 215, + 648, + 366, + 662 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Image-based Fitting. In this section, we demonstrate the capability of our 3D Gaussian Parametric Model for single-image fitting using the fitting strategy detailed in Section 3.4. We compare our model with similar works: HeadNeRF [19], MoFaNeRF [64], and PanoHead [1]. In addition to evaluating the above methods on our evaluation dataset, we also conduct comparisons using cases from MEAD [46] dataset (the first two rows). The qualitative results are presented in Figure 6. Our model exhibits reconstruction accuracy while maintaining excellent 3D consistency and identity preservation. HeadNeRF's fitting results often suffer from missing hair, and they remove the body and neck. MoFaNeRF, trained solely on the FaceScape dataset where all subjects wear hats, struggles to fit hair. As a GAN-based model, PanoHead can achieve highly accurate reproductions", + "bbox": [ + 212, + 672, + 787, + 840 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Xuet al.", + "bbox": [ + 271, + 114, + 331, + 126 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/3ad3a43bbbbe0a2b9ae04df2fc3acd2ed3b3a28c503a8e686d10e8f434ab7519.jpg", + "image_caption": [ + "Fig. 6: We compare our method with other SOTA methods on the task of single image fitting. The far left is the input image, and to the right are Our method, HeadNeRF [19], MoFaNeRF [64] and PanoHead [1]. Our model significantly outperforms other methods in reconstruction quality and 3D consistency." + ], + "image_footnote": [], + "bbox": [ + 215, + 143, + 787, + 508 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "from the input view. However, due to overfitting, the results from side views reveal poor 3D consistency and identity preservation.", + "bbox": [ + 212, + 588, + 785, + 618 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In addition to qualitative evaluations, we also conducted quantitative evaluations on 60 images using three metrics: Peak Signal-to-Noise Ratio (PSNR), Structural Similarity Index (SSIM), and Face Distance (FD). Here, we provide a brief explanation of the Face Distance (FD). To compute the FD metric, we utilized a face recognition tool $^3$ to encode two images containing faces into 128-dimensional vectors. Subsequently, we calculated the distance between these two vectors to reflect the similarity of the two faces. 
In our experiments, FD serves as an indicator of identity consistency. The results are shown in Table 2. Our model demonstrates the best performance in both fitting accuracy and identity consistency.

$^3$ https://github.com/ageitgey/face_recognition
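The FD metric can be reproduced with the referenced library; the paper does not specify the distance function, so a plain L2 distance over the library's 128-dimensional encodings is assumed here.

```python
import numpy as np
import face_recognition

def fd(path_a, path_b):
    """Face Distance: L2 between 128-d face embeddings; lower = more similar identity."""
    enc_a = face_recognition.face_encodings(face_recognition.load_image_file(path_a))[0]
    enc_b = face_recognition.face_encodings(face_recognition.load_image_file(path_b))[0]
    return np.linalg.norm(enc_a - enc_b)   # assumes one detectable face per image
```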
Method     PSNR ↑   SSIM ↑   FD ↓
HeadNeRF   28.9     0.84     0.37
MoFaNeRF   28.6     0.82     0.37
PanoHead   29.1     0.86     0.41
Ours       30.3     0.86     0.35
", + "bbox": [ + 321, + 143, + 679, + 218 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 2: Quantitative evaluation results on the task of single image fitting. We compare our method with other 3 SOTA methods: HeadNeRF [19], MoFaNeRF [64], PanoHead [1].", + "bbox": [ + 215, + 218, + 782, + 258 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/61a81ad08d615aa023cbdc29d34f4032335be5f2316e1e88e986f03b8adee9c3.jpg", + "image_caption": [ + "Input" + ], + "image_footnote": [], + "bbox": [ + 217, + 272, + 287, + 330 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/319f95f3e9f4efa9604caf82561e803863b3b2f150f5b868c739b6e665ec41d4.jpg", + "image_caption": [ + "Exp1" + ], + "image_footnote": [], + "bbox": [ + 292, + 272, + 364, + 332 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/80cfa2c0ccd204a8c9aeac0b5543adb5a39ce55d64fabc8df862de8d2b11e261.jpg", + "image_caption": [ + "Exp2", + "Fig. 7: We perform expression editing on the head model reconstructed from the input image. Our model is able to handle very exaggerated expressions with superior identity consistency." + ], + "image_footnote": [], + "bbox": [ + 364, + 273, + 433, + 332 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/2361aa9c4dda95ff149f245f3e90611c8140792f624ce848565405030de84823.jpg", + "image_caption": [ + "Exp3" + ], + "image_footnote": [], + "bbox": [ + 433, + 273, + 501, + 332 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/df9e24b0d1388bb3471c409b606c36714298523c3d184f15b56a58880f8f26a4.jpg", + "image_caption": [ + "Exp4" + ], + "image_footnote": [], + "bbox": [ + 501, + 273, + 571, + 332 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/4605d43b5668d0ff7ef2acdaaafbf5745041e40b20343f30e26e57a4bd81c1b3.jpg", + "image_caption": [ + "Exp5" + ], + "image_footnote": [], + "bbox": [ + 571, + 273, + 643, + 332 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/3bce3d7d8741445ca67c7dc09715008be6f2aa0ef1477f10b670807df8483318.jpg", + "image_caption": [ + "Exp6" + ], + "image_footnote": [], + "bbox": [ + 643, + 273, + 712, + 332 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/552f455544647a2bcb5f662d6d0eb7e83b6a151c2bf93e6cd8083a5db756c821.jpg", + "image_caption": [ + "Exp7" + ], + "image_footnote": [], + "bbox": [ + 712, + 273, + 785, + 332 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The detailed pipeline is outlined in Section 3.4. An example is illustrated in Figure 7. Our model can generate images depicting the corresponding expressions of the input subject based on a reference expression (as seen in the lower left corner of each image in the figure). It performs admirably even with exaggerated expressions, producing natural and realistic results.", + "bbox": [ + 212, + 410, + 784, + 486 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Discussion", + "text_level": 1, + "bbox": [ + 215, + 521, + 352, + 536 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Ethical Considerations. Our technique can generate artificial portrait videos, posing a significant risk of spreading misinformation, shaping public opinions, and undermining trust in media outlets. These consequences could have profound negative effects on society. 
Therefore, it is crucial to explore methods that effectively differentiate between genuine and manipulated content.

Limitation. Our 3D Gaussian Parametric Head Model takes a step forward in parametric head modeling. However, due to the limited amount of training data, the generalization ability of the model is still insufficient; in cases where the illumination differs significantly from the training set, the reconstruction results are poor.

Conclusion. In this paper, we propose the 3D Gaussian Parametric Head Model, a novel framework for parametric head modeling. The model leverages the power of 3D Gaussians, enabling realistic rendering quality at real-time speed. Our well-designed training strategy ensures stable convergence while enabling the model to learn appearance details and expressions. Furthermore, our model allows for creating detailed, high-quality face avatars from a single input image, and also enables expression and identity editing. We believe our model represents a significant advancement in the field of parametric head modeling.

Acknowledgements

This work is supported by the National Science Foundation of China (NSFC) under Grant Number 62125107 and the Postdoctoral Fellowship Program of China Postdoctoral Science Foundation under Grant Number GZC20231304.

References

1. An, S., Xu, H., Shi, Y., Song, G., Ogras, U.Y., Luo, L.: PanoHead: Geometry-aware 3D full-head synthesis in 360°. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 20950-20959 (June 2023)
2. Blanz, V., Vetter, T.: A morphable model for the synthesis of 3D faces. In: 26th Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH 1999). pp. 187-194. ACM Press (1999)
3. Bühler, M.C., Sarkar, K., Shah, T., Li, G., Wang, D., Helminger, L., Orts-Escolano, S., Lagun, D., Hilliges, O., Beeler, T., et al.: Preface: A data-driven volumetric prior for few-shot ultra high-resolution face synthesis. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). pp. 3402-3413 (2023)
4. Bulat, A., Tzimiropoulos, G.: How far are we from solving the 2D & 3D face alignment problem? (and a dataset of 230,000 3D facial landmarks). In: International Conference on Computer Vision (ICCV) (2017)
5. Cao, C., Simon, T., Kim, J.K., Schwartz, G., Zollhoefer, M., Saito, S., Lombardi, S., Wei, S.E., Belko, D., Yu, S.I., Sheikh, Y., Saragih, J.: Authentic volumetric avatars from a phone scan. ACM Trans. Graph. 41(4) (July 2022)
6. Cao, C., Weng, Y., Zhou, S., Tong, Y., Zhou, K.: FaceWarehouse: A 3D facial expression database for visual computing. IEEE Transactions on Visualization and Computer Graphics 20(3), 413-425 (2014)
7. Chan, E., Monteiro, M., Kellnhofer, P., Wu, J., Wetzstein, G.: pi-GAN: Periodic implicit generative adversarial networks for 3D-aware image synthesis. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 5795-5805 (2020)
8. Chan, E.R., Lin, C.Z., Chan, M.A., Nagano, K., Pan, B., Mello, S.D., Gallo, O., Guibas, L., Tremblay, J., Khamis, S., Karras, T., Wetzstein, G.: Efficient geometry-aware 3D generative adversarial networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 16102-16112 (2022)
9. Chen, X., Deng, Y., Wang, B.: Mimic3D: Thriving 3D-aware GANs via 3D-to-2D imitation. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) (2023)
10. Chen, Y., Wang, L., Li, Q., Xiao, H., Zhang, S., Yao, H., Liu, Y.: MonoGaussianAvatar: Monocular Gaussian point-based head avatar. In: ACM SIGGRAPH 2024 Conference Proceedings (2024)
11. Deng, Y., Yang, J., Xiang, J., Tong, X.: GRAM: Generative radiance manifolds for 3D-aware image generation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 10663-10673 (2021)
12. Gafni, G., Thies, J., Zollhofer, M., Niessner, M.: Dynamic neural radiance fields for monocular 4D facial avatar reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 8645-8654 (June 2021)
13. Gao, X., Zhong, C., Xiang, J., Hong, Y., Guo, Y., Zhang, J.: Reconstructing personalized semantic facial NeRF models from monocular video. ACM Transactions on Graphics (Proceedings of SIGGRAPH Asia) 41(6) (2022)
14. Gerig, T., Forster, A., Blumer, C., Egger, B., Lüthi, M., Schönborn, S., Vetter, T.: Morphable face models - an open framework. In: 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018). pp. 75-82 (2018)
15. Giebenhain, S., Kirschstein, T., Georgopoulos, M., Rünz, M., Agapito, L., Nießner, M.: Learning neural parametric head models. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)
16. Giebenhain, S., Kirschstein, T., Georgopoulos, M., Rünz, M., Agapito, L., Nießner, M.: MonoNPHM: Dynamic head reconstruction from monocular videos. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)
17. Grassal, P.W., Prinzler, M., Leistner, T., Rother, C., Nießner, M., Thies, J.: Neural head avatars from monocular RGB videos. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 18632-18643 (June 2022)
18. Gu, J., Liu, L., Wang, P., Theobalt, C.: StyleNeRF: A style-based 3D-aware generator for high-resolution image synthesis. In: International Conference on Learning Representations (ICLR) (2022)
19. Hong, Y., Peng, B., Xiao, H., Liu, L., Zhang, J.: HeadNeRF: A real-time NeRF-based parametric head model. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 20374-20384 (June 2022)
20. Hu, L., Zhang, H., Zhang, Y., Zhou, B., Liu, B., Zhang, S., Nie, L.: GaussianAvatar: Towards realistic human avatar modeling from a single video via animatable 3D Gaussians. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)
21. Kerbl, B., Kopanas, G., Leimkühler, T., Drettakis, G.: 3D Gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics 42(4) (July 2023)
22. Khakhulin, T., Sklyarova, V., Lempitsky, V., Zakharov, E.: Realistic one-shot mesh-based head avatars. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)
23. Kirschstein, T., Giebenhain, S., Nießner, M.: DiffusionAvatars: Deferred diffusion for high-fidelity 3D head avatars. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)
24. Kirschstein, T., Qian, S., Giebenhain, S., Walter, T., Nießner, M.: NeRSemble: Multi-view radiance field reconstruction of human heads. ACM Trans. Graph. 42(4) (July 2023)
25. Li, T., Bolkart, T., Black, M.J., Li, H., Romero, J.: Learning a model of facial shape and expression from 4D scans. ACM Trans. Graph. 36(6) (November 2017)
26. Li, X., De Mello, S., Liu, S., Nagano, K., Iqbal, U., Kautz, J.: Generalizable one-shot neural head avatar. In: NeurIPS (2023)
27. Li, Z., Zheng, Z., Wang, L., Liu, Y.: Animatable Gaussians: Learning pose-dependent Gaussian maps for high-fidelity human avatar modeling. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)
28. Lin, C.Z., Nagano, K., Kautz, J., Chan, E.R., Iqbal, U., Guibas, L., Wetzstein, G., Khamis, S.: Single-shot implicit morphable faces with consistent texture parameterization. In: ACM SIGGRAPH 2023 Conference Proceedings (2023)
29. Lin, S., Ryabtsev, A., Sengupta, S., Curless, B., Seitz, S., Kemelmacher-Shlizerman, I.: Real-time high-resolution background matting. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (June 2021)
30. Lombardi, S., Simon, T., Schwartz, G., Zollhoefer, M., Sheikh, Y., Saragih, J.: Mixture of volumetric primitives for efficient neural rendering. ACM Trans. Graph. 40(4) (July 2021)
31. Loper, M., Mahmood, N., Romero, J., Pons-Moll, G., Black, M.J.: SMPL: A skinned multi-person linear model. ACM Trans. Graphics (Proc. SIGGRAPH Asia) 34(6), 248:1-248:16 (October 2015)
32. Luiten, J., Kopanas, G., Leibe, B., Ramanan, D.: Dynamic 3D Gaussians: Tracking by persistent dynamic view synthesis. In: 3DV (2024)
33. Ma, S., Weng, Y., Shao, T., Zhou, K.: 3D Gaussian blendshapes for head avatar animation. In: ACM SIGGRAPH 2024 Conference Proceedings (2024)
34. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: Representing scenes as neural radiance fields for view synthesis.
In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)
35. Or-El, R., Luo, X., Shan, M., Shechtman, E., Park, J.J., Kemelmacher-Shlizerman, I.: StyleSDF: High-resolution 3D-consistent image and geometry generation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 13493-13503 (2021)
36. Pavlakos, G., Choutas, V., Ghorbani, N., Bolkart, T., Osman, A.A.A., Tzionas, D., Black, M.J.: Expressive body capture: 3D hands, face, and body from a single image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 10975-10985 (2019)
37. Qian, S., Kirschstein, T., Schoneveld, L., Davoli, D., Giebenhain, S., Nießner, M.: GaussianAvatars: Photorealistic head avatars with rigged 3D Gaussians. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)
38. Qin, M., Liu, Y., Xu, Y., Zhao, X., Liu, Y., Wang, H.: High-fidelity 3D head avatars reconstruction through spatially-varying expression conditioned neural radiance field. In: AAAI Conference on Artificial Intelligence (2023)
39. Saito, S., Schwartz, G., Simon, T., Li, J., Nam, G.: Relightable Gaussian codec avatars. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)
40. Shao, Z., Wang, Z., Li, Z., Wang, D., Lin, X., Zhang, Y., Fan, M., Wang, Z.: SplattingAvatar: Realistic real-time human avatars with mesh-embedded Gaussian splatting. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)
41. Shen, T., Gao, J., Yin, K., Liu, M.Y., Fidler, S.: Deep marching tetrahedra: a hybrid representation for high-resolution 3D shape synthesis. In: Advances in Neural Information Processing Systems (NeurIPS) (2021)
42. Sun, J., Wang, X., Shi, Y., Wang, L., Wang, J., Liu, Y.: IDE-3D: Interactive disentangled editing for high-resolution 3D-aware portrait synthesis. ACM Transactions on Graphics (TOG) 41(6), 1-10 (2022)
43. Sun, J., Wang, X., Wang, L., Li, X., Zhang, Y., Zhang, H., Liu, Y.: Next3D: Generative neural texture rasterization for 3D-aware head avatars. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)
44. Wang, D., Chandran, P., Zoss, G., Bradley, D., Gotardo, P.: MoRF: Morphable radiance fields for multiview neural head modeling. In: ACM SIGGRAPH 2022 Conference Proceedings. SIGGRAPH '22, Association for Computing Machinery, New York, NY, USA (2022)
45. Wang, J., Xie, J.C., Li, X., Xu, F., Pun, C.M., Gao, H.: GaussianHead: High-fidelity head avatars with learnable Gaussian derivation (2024)
46. Wang, K., Wu, Q., Song, L., Yang, Z., Wu, W., Qian, C., He, R., Qiao, Y., Loy, C.C.: MEAD: A large-scale audio-visual dataset for emotional talking-face generation. In: Proceedings of the European Conference on Computer Vision (ECCV) (August 2020)
47. Wang, L., Chen, Z., Yu, T., Ma, C., Li, L., Liu, Y.: FaceVerse: a fine-grained and detail-controllable 3D face morphable model from a hybrid dataset.
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (Jun 2022)", + "48. Wu, G., Yi, T., Fang, J., Xie, L., Zhang, X., Wei, W., Liu, W., Tian, Q., Wang, X.: 4d gaussian splatting for real-time dynamic scene rendering (2024)", + "49. Wu, S., Yan, Y., Li, Y., Cheng, Y., Zhu, W., Gao, K., Li, X., Zhai, G.: Ganhead: Towards generative animatable neural head avatars. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 437-447 (2023)", + "50. Wu, Y., Deng, Y., Yang, J., Wei, F., Chen, Q., Tong, X.: Anifacegan: Animatable 3d-aware face image generation for video avatars. In: Advances in Neural Information Processing Systems (2022)", + "51. Wu, Y., Xu, S., Xiang, J., Wei, F., Chen, Q., Yang, J., Tong, X.: Aniportraitgan: Animatable 3d portrait generation from 2d image collections. In: SIGGRAPH Asia 2023 Conference Proceedings (2023)", + "52. Xiang, J., Yang, J., Deng, Y., Tong, X.: Gram-hd: 3d-consistent image generation at high resolution with generative radiance manifolds. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). pp. 2195-2205 (2023)", + "53. Xiang, J., Gao, X., Guo, Y., Zhang, J.: Flashavatar: High-fidelity head avatar with efficient gaussian embedding. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)", + "54. Xu, Y., Chen, B., Li, Z., Zhang, H., Wang, L., Zheng, Z., Liu, Y.: Gaussian head avatar: Ultra high-fidelity head avatar via dynamic gaussians. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)", + "55. Xu, Y., Wang, L., Zhao, X., Zhang, H., Liu, Y.: Avatarmav: Fast 3d head avatar reconstruction using motion-aware neural voxels. In: ACM SIGGRAPH 2023 Conference Proceedings (2023)", + "56. Xu, Y., Zhang, H., Wang, L., Zhao, X., Huang, H., Qi, G., Liu, Y.: Latentavatar: Learning latent expression code for expressive neural head avatar. In: ACM SIGGRAPH 2023 Conference Proceedings (2023)", + "57. Yang, Z., Yang, H., Pan, Z., Zhu, X., Zhang, L.: Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting (2023)", + "58. Yang, Z., Gao, X., Zhou, W., Jiao, S., Zhang, Y., Jin, X.: Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction (June 2023)", + "59. Yenamandra, T., Tewari, A., Bernard, F., Seidel, H.P., Elgharib, M., Cremers, D., Theobalt, C.: i3dmm: Deep implicit 3d morphable model of human heads. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (June 2021)" + ], + "bbox": [ + 215, + 146, + 785, + 839 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Xu et al.", + "bbox": [ + 271, + 114, + 331, + 126 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "60. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 586-595 (June 2018)", + "61. Zhao, X., Wang, L., Sun, J., Zhang, H., Suo, J., Liu, Y.: Havatar: High-fidelity head avatar via facial model conditioned neural radiance field. ACM Trans. Graph. (Oct 2023)", + "62.
Zheng, Y., Abrevaya, V.F., Bühler, M.C., Chen, X., Black, M.J., Hilliges, O.: I m avatar: Implicit morphable head avatars from videos. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 13535-13545 (June 2022)", + "63. Zheng, Y., Yifan, W., Wetzstein, G., Black, M.J., Hilliges, O.: Pointavatar: Deformable point-based head avatars from videos. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)", + "64. Zhuang, Y., Zhu, H., Sun, X., Cao, X.: Mofanerf: Morphable facial neural radiance field. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)", + "65. Zielonka, W., Bolkart, T., Thies, J.: Instant volumetric head avatars (June 2023)" + ], + "bbox": [ + 212, + 146, + 787, + 397 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "3D Gaussian Parametric Head Model", + "bbox": [ + 480, + 114, + 730, + 127 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 767, + 116, + 785, + 126 + ], + "page_idx": 18 + } +] \ No newline at end of file diff --git a/2024/3D Gaussian Parametric Head Model/0c0538a8-3292-41a6-ad76-710b8fc8de37_model.json b/2024/3D Gaussian Parametric Head Model/0c0538a8-3292-41a6-ad76-710b8fc8de37_model.json new file mode 100644 index 0000000000000000000000000000000000000000..d94c2be7862b0ec6f78d6d73af74093a7ac2de2b --- /dev/null +++ b/2024/3D Gaussian Parametric Head Model/0c0538a8-3292-41a6-ad76-710b8fc8de37_model.json @@ -0,0 +1,2955 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.287, + 0.141, + 0.717, + 0.162 + ], + "angle": 0, + "content": "3D Gaussian Parametric Head Model" + }, + { + "type": "text", + "bbox": [ + 0.257, + 0.189, + 0.75, + 0.219 + ], + "angle": 0, + "content": "Yuelang \\(\\mathrm{Xu}^{1}\\), Lizhen Wang\\(^{1}\\), Zerong Zheng\\(^{2}\\), Zhaoqi \\(\\mathrm{Su}^{1}\\), and Yebin \\(\\mathrm{Liu}^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.232, + 0.628, + 0.247 + ], + "angle": 0, + "content": "\\(^{1}\\) Tsinghua University, Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.394, + 0.247, + 0.61, + 0.26 + ], + "angle": 0, + "content": "2 NNKosmos, Hangzhou, China" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.296, + 0.744, + 0.56 + ], + "angle": 0, + "content": "Abstract. Creating high-fidelity 3D human head avatars is crucial for applications in VR/AR, telepresence, digital human interfaces, and film production. Recent advances have leveraged morphable face models to generate animated head avatars from easily accessible data, representing varying identities and expressions within a low-dimensional parametric space. However, existing methods often struggle with modeling complex appearance details, e.g., hairstyles and accessories, and suffer from low rendering quality and efficiency. This paper introduces a novel approach, 3D Gaussian Parametric Head Model, which employs 3D Gaussians to accurately represent the complexities of the human head, allowing precise control over both identity and expression. Additionally, it enables seamless face portrait interpolation and the reconstruction of detailed head avatars from a single image. Unlike previous methods, the Gaussian model can handle intricate details, enabling realistic representations of varying appearances and complex expressions. Furthermore, this paper presents a well-designed training framework to ensure smooth convergence, providing a guarantee for learning the rich content. 
Our method achieves high-quality, photo-realistic rendering with real-time efficiency, making it a valuable contribution to the field of parametric head models." + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.574, + 0.669, + 0.587 + ], + "angle": 0, + "content": "Keywords: 3D Gaussian \\(\\cdot\\) Head Avatar \\(\\cdot\\) Parametric Model" + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.613, + 0.377, + 0.629 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.644, + 0.788, + 0.795 + ], + "angle": 0, + "content": "Creating high-fidelity 3D human head avatars holds significant importance across various fields, including VR/AR, telepresence, digital human interfaces, and film production. The automatic generation of such avatars has been a focal point in computer vision research for many years. Recent methods [12,13,17,38,55,56,61-63,65] can create an animated head avatar through conveniently collected data such as monocular video data or even a picture [22, 26]. Serving as the most fundamental tool in these methods, the 3D morphable models (3DMM) [14, 25], which represent varying identities and expressions within a low-dimensional space, have been proven to be a highly successful avenue in addressing this challenging problem." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.796, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Since the traditional parametric 3DMMs are typically limited by the topology of the underlying template mesh and only focus on the face part, some works [15,16,28,59] propose to use implicit Signed Distance Field (SDF) as the" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.116, + 0.333, + 0.127 + ], + "angle": 0, + "content": "Xuet al." + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.147, + 0.788, + 0.267 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.277, + 0.789, + 0.361 + ], + "angle": 0, + "content": "Fig. 1: We utilize hybrid datasets comprising captured multi-view video data and rendered image data from 3D scans for training our model. The trained model can be manipulated using decoupled identity and expression codes to produce a diverse array of high-fidelity head models. When presented with an image, our model can be adjusted to reconstruct the portrait in the image and edit the expression according to any other desired expressions." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.383, + 0.789, + 0.534 + ], + "angle": 0, + "content": "geometric representation to model the entire head. Despite their flexibility, these methods fall short in recovering high-frequency geometric and texture details like hairstyles, glasses or accessories. On the other end of the spectrum, Neural Radiance Field (NeRF) [34] based methods [19,64] learn parametric head models by directly synthesizing images, thus eliminating the need of geometry modeling. However, NeRF is built upon volumetric rendering, which involves sampling and integrating points distributed throughout space. Therefore, NeRF-based methods typically suffer from low rendering efficiency and have to trade it off with rendering resolution, thereby greatly reducing rendering quality. Moreover, skipping geometric reconstruction would probably lead to poor 3D consistency." 
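To make the volumetric-rendering cost concrete: NeRF composites each pixel by sampling and integrating many points along its ray. The sketch below shows the standard quadrature used in [34]; it is illustrative only (sample count and shapes are arbitrary assumptions, not code from this paper):

```python
import torch

def composite_ray(sigma: torch.Tensor, rgb: torch.Tensor, deltas: torch.Tensor) -> torch.Tensor:
    """One ray of the standard NeRF quadrature:
    C = sum_i T_i * (1 - exp(-sigma_i * delta_i)) * c_i."""
    alpha = 1.0 - torch.exp(-sigma * deltas)          # per-sample opacity
    # Transmittance T_i = prod_{j<i} (1 - alpha_j), shifted by one sample.
    trans = torch.cumprod(torch.cat([torch.ones(1), 1.0 - alpha + 1e-10]), dim=0)[:-1]
    weights = alpha * trans
    return (weights[:, None] * rgb).sum(dim=0)        # composited color, shape (3,)

# Each pixel requires N MLP queries (here N = 128) before this sum can run,
# which is the per-pixel cost that splatting-based rendering avoids.
sigma, rgb = torch.rand(128), torch.rand(128, 3)
deltas = torch.full((128,), 0.01)
color = composite_ray(sigma, rgb, deltas)
```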
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.537, + 0.789, + 0.763 + ], + "angle": 0, + "content": "More recently, 3D Gaussian Splatting (3DGS) [21], which uses explicit Gaussian ellipsoids to represent 3D scenes, has attracted significant attention from the research community. Experiments have verified the superior quality of the rendered results and the excellent rendering efficiency compared to previous NeRF-based or surface-based methods, even on dynamic scenes [32,48,57,58]. Motivated by this progress, we propose a novel 3D Gaussian Parametric Head Model, which, for the first time, marries the power of 3DGS with the challenging task of parametric head modeling. Our 3D Gaussian Parametric Head Model decouples the control signals of the head into the latent spaces of identity and expression, as is also done in the SDF-based face model NPHM [15]. These latent spaces are then mapped to offsets of the Gaussian positions, which effectively represent the variations in shape and appearance across different identities and expressions. Benefiting from the differentiability of Gaussian splatting, our model can be learned from a multi-view video corpus in an end-to-end manner, without relying on geometry supervision." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.789, + 0.842 + ], + "angle": 0, + "content": "Unfortunately, training our 3D Gaussian parametric head model is not quite straightforward, because Gaussian ellipsoids are unstructured and each Gaussian ellipsoid has its own independent learnable attributes. Such a characteristic makes 3DGS powerful at overfitting a specific object or scene, but poses great challenges for generative head modeling. Without proper initialization and regularization," + } + ], + [ + { + "type": "header", + "bbox": [ + 0.481, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "3D Gaussian Parametric Head Model" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.177 + ], + "angle": 0, + "content": "the learned parametric head model may suffer from unstable training or from a large number of Gaussian points becoming redundant and noisy, as shown in Fig. 4." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.178, + 0.787, + 0.388 + ], + "angle": 0, + "content": "To overcome these challenges, we propose a well-designed two-stage training strategy to ensure smooth convergence of our model training. Specifically, we first roughly train all the networks on a mesh-based guiding model. Subsequently, the network parameters are migrated to the Gaussian model, and all Gaussian points are initialized with the trained mesh geometry to ensure that they are located near the actual surface. Compared to naive initialization with FLAME [25], our initialization strategy leads to a better guess of the positions of the Gaussian points, allowing the subsequent training of the model to converge stably and areas like hair to be recovered better. Moreover, we propose to use a 3D landmark loss to supervise the expression-related deformations of the model, which speeds up convergence and avoids artifacts under exaggerated expressions. Lastly, our method supports training from both 3D head scans and multi-view 2D face datasets, which enhances the versatility and comprehensiveness of facial data collection and model training."
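The initialization problem is easy to see in code: every Gaussian carries its own independent learnable attributes. Below is a hypothetical PyTorch sketch of the per-Gaussian state {X_0, Γ_0, S_0, Q_0, A_0}; the log-scale and logit-opacity parameterizations are assumptions borrowed from the public 3DGS implementation, and all names are illustrative, not the paper's code:

```python
import torch
import torch.nn as nn

class GaussianParams(nn.Module):
    """Per-Gaussian learnable state. Every row is an independent parameter,
    which is why a good initialization of the positions matters so much."""
    def __init__(self, init_xyz: torch.Tensor):                   # (N, 3)
        super().__init__()
        n = init_xyz.shape[0]
        self.xyz = nn.Parameter(init_xyz.clone())                 # mean positions X_0
        self.feat = nn.Parameter(torch.zeros(n, 32))              # per-point feature Gamma_0
        self.log_scale = nn.Parameter(torch.full((n, 3), -4.0))   # S_0 (log parameterization, assumed)
        self.quat = nn.Parameter(
            torch.tensor([1.0, 0.0, 0.0, 0.0]).repeat(n, 1))      # Q_0, identity rotations
        self.opacity_logit = nn.Parameter(torch.zeros(n, 1))      # A_0 before sigmoid (assumed)

# Initializing xyz from the trained guiding-mesh vertices keeps every point near
# the true surface; replacing init_xyz with random noise is what fails to converge.
params = GaussianParams(init_xyz=torch.rand(100_000, 3))
```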
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.389, + 0.787, + 0.525 + ], + "angle": 0, + "content": "After training on large corpus of multi-view head videos, our parametric Gaussian head model can generate photorealistic images that accurately depict the diverse range of facial appearances, naturally handling complex and exaggerated expressions, while also enabling real-time rendering. Additionally, our method supports single-image fitting and surpasses previous techniques in both reconstruction accuracy and identity consistency. Furthermore, the model resulting from our fitting process allows for the control of various expressions while maintaining naturalness and consistent identity even under exaggerated expressions. The contributions of our method can be summarized as:" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.532, + 0.784, + 0.577 + ], + "angle": 0, + "content": "- We propose 3D Gaussian Parametric Head Model, a novel parametric head model which utilizes 3D Gaussians as the representation and enables photorealistic rendering quality and real-time rendering speed." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.578, + 0.785, + 0.621 + ], + "angle": 0, + "content": "- We propose a well-designed training strategy to ensure that the Gaussian model converges stably while learning rich appearance details and complex expressions efficiently." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.622, + 0.785, + 0.667 + ], + "angle": 0, + "content": "- Our 3D Gaussian Parametric Head Model enables the generation of a detailed, high-quality face avatar from a single given image, as well as performing expression and identity editing upon it." + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.532, + 0.785, + 0.667 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.689, + 0.388, + 0.705 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.72, + 0.787, + 0.841 + ], + "angle": 0, + "content": "Parametric Head Models. Parametric head models are used to represent facial features, expressions, and identities effectively and efficiently. They allow for the creation of realistic human faces with adjustable parameters, making them essential in computer graphics, animation, and virtual reality. Therefore, research in this field has always been a hot topic. Traditional 3D Morphable Models (3DMM) [2,6,14,25,47] are constructed by non-rigidly registering a template mesh with fixed topology to a series of 3D scans. Through this registration process, a 3DMM can be computed using dimensionality reduction techniques such" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.116, + 0.333, + 0.127 + ], + "angle": 0, + "content": "Xuet al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.389 + ], + "angle": 0, + "content": "as principal component analysis (PCA). The resulting parametric space captures the variations in facial geometry and appearance across a population. However, while 3DMMs offer a powerful way to represent faces, they do have limitations. These models rely heavily on the correspondence between the 3D scans and the template for accurate fitting and may struggle to represent local surface details like wrinkles or hairstyles that deviate significantly from the template mesh. 
Recent advances in implicit representations have driven great progress in neural parametric head models. Some methods [15, 16, 49, 59] propose implicit Signed Distance Field (SDF) based head models, which are not constrained by topology and can thus recover more complex content like hair compared to previous mesh-based methods. Other methods [3, 19, 44, 64] propose to use NeRF [34] as the representation of the parametric head models, which can directly synthesize photorealistic images without geometric reconstruction. Cao et al. [5] use a hybrid representation [30] of mesh and NeRF to train their model on unpublished large-scale light stage data. However, rendering efficiency is typically low in NeRF-based methods, often resulting in a trade-off with rendering resolution." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.395, + 0.788, + 0.683 + ], + "angle": 0, + "content": "3D GAN based Head Models. 3D Generative Adversarial Networks (GANs) have revolutionized the field of computer vision, particularly in the domain of human head and face modeling, enabling the generation of face avatars from input images. Traditional methods often require labor-intensive manual work or rely on multi-view images to create 3D models. 3D GANs offer a more automated, data-driven approach: they are trained only on single-view 2D images, yet generate detailed and realistic 3D models of human heads [7-9, 11, 18, 35, 52]. PanoHead [1] additionally introduces images showing the hairstyles at the back of the head and trains a full-head generative model. Building on these methods, IDE-3D [42] proposes to use semantic maps to edit the 3D head model. Next3D [43] and AniFaceGAN [50] further condition the generated head model on the FLAME model [25], so that its expression and pose can be controlled. AniPortraitGAN [51] replaces the FLAME model with the SMPL-X model [36] to generate upper-body avatars, so that the shoulders and neck can also be controlled. These 3D GAN-based models primarily leverage the coarse FLAME model for expression control, often leading to a loss of expression details in the generated faces. In contrast, our method directly learns the expression distribution from the dataset, capturing more facial appearance details." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.689, + 0.788, + 0.842 + ], + "angle": 0, + "content": "3D Gaussians. Recently, 3D Gaussian splatting [21] has shown superior performance compared to NeRF, excelling in both novel view synthesis quality and rendering speed. Several methods have extended the Gaussian representation to dynamic scene reconstruction [32, 48, 57, 58]. For human body avatar modeling, recent approaches [20, 27] propose training a 3D Gaussian avatar animated by SMPL [31] or a skeleton from multi-view videos, surpassing previous methods in rendering quality and efficiency. In the realm of human head avatar modeling, recent techniques [10, 23, 33, 37, 39, 40, 45, 53, 54] also utilize 3D Gaussians to create high-fidelity and efficient head avatars. These approaches center on the creation of a high-fidelity person-specific avatar using data of a single person.
In" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.481, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "3D Gaussian Parametric Head Model" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.145, + 0.788, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.441, + 0.788, + 0.54 + ], + "angle": 0, + "content": "Fig. 2: The pipeline of our method. Our training strategy can be divided into a Guiding Geometry Model for initialization, and a final 3D Gaussian Parametric Head Model. Deformations of each model are further decoupled into identity-related and expression-related deformations. Rendering involves using DMTet to transform the initial model into a mesh and 3D Gaussian Splatting for the Gaussian model. Features from both models are finally upsampled to high-resolution portrait images through a convolutional network \\(\\Psi\\). During inference, our output exclusively comes from the Gaussian model." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.546, + 0.788, + 0.593 + ], + "angle": 0, + "content": "contrast, our method focuses on a versatile prior model that can accommodate varying appearances. Once trained, our model is also capable of person-specific avatar reconstruction by fitting to the input image data provided." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.614, + 0.33, + 0.629 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.644, + 0.788, + 0.751 + ], + "angle": 0, + "content": "In this section, we present the 3D Gaussian Parametric Head Model. In contrast to previous mesh-based or NeRF-based models, initializing and training Gaussian-based models pose distinct challenges. This section introduces the dataset and preprocessing, the carefully designed guiding geometry model, the Gaussian Parametric Head Model, and outlines their respective training processes. Additionally, we will also provide the training details and demonstrate how to utilize our method when given a single input image." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.771, + 0.424, + 0.787 + ], + "angle": 0, + "content": "3.1 Data Preprocessing" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.787, + 0.842 + ], + "angle": 0, + "content": "We used three datasets for our model training, including a multi-view video dataset NeRSemble [24], and two 3D scans datasets NPHM [15] and FaceV-erse [47]. We do not use the 3D geometry of the scans directly, but render them" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.333, + 0.128 + ], + "angle": 0, + "content": "Xuetal." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.313 + ], + "angle": 0, + "content": "into multi-view images and use only the images form the 3 datasets as supervision. In order to better utilize these three different datasets, we need to do preprocessing. First, we resize the images to 512 resolution and adjust the camera parameters. Then, we use BackgroundMattingV2 [29] to extract the foreground characters in the NeRSemble dataset and record the masks. This step is not required for the two synthetic datasets. Next, we use face alignment [4] to detect 2D landmarks in all the images. 
Through these 2D landmarks, we fit a Basel Face Model (BFM) [14] for each expression of each identity, and record the head pose and 3D landmarks of the BFM. We will use the above processed camera parameters, images, masks, head pose of BFM and 3D landmarks of BFM to train our model." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.336, + 0.446, + 0.352 + ], + "angle": 0, + "content": "3.2 Model Representation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.362, + 0.789, + 0.527 + ], + "angle": 0, + "content": "The representation of Gaussian distribution poses challenges due to its unordered and unstructured nature, leading to difficulties in the continuous spread of gradients to neighboring points in space during backpropagation. This often results in convergence failure when Gaussians are randomly initialized. On the other hand, surface-based representations such as mesh are just suitable for rough geometry learning. A direct idea is to utilize an existing 3DMM, such as FLAME [25], as the initial position for the points in 3D Gaussian splatting [21]. However, this coarse initialization still fails to converge the positions of 3D points to the correct locations, as shown in Fig. 4. The network tends to alter the shape of the ellipsoid to achieve a suitable fitting result, leading to inaccurate geometry of the point cloud and blurriness in the rendered image." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.528, + 0.789, + 0.648 + ], + "angle": 0, + "content": "To address this problem, a more detailed initialization process is necessary for capturing the diverse head variations using 3D Gaussian splatting. Specifically, we draw inspiration from Gaussian Head Avatar [54] and leverage the implicit signed distance field (SDF) representation to train a guiding geometry model. This guiding geometry model serves as the initial value for the Gaussian model, providing a more effective starting point for the optimization process. We define the initial model as Guiding Geometry Model and the refined model as 3D Gaussian Parametric Head Model." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.649, + 0.789, + 0.725 + ], + "angle": 0, + "content": "Guiding Geometry Model. The guiding geometry model receives an identity code \\( z^{id} \\) and an expression code \\( z^{exp} \\) as input, producing a mesh with vertices \\( V \\), faces \\( F \\), and per-vertex color \\( C \\) that aligns with the specified identity and expression. To achieve this, we use an MLP denoted as \\( f_{mean}(\\cdot) \\) to implicitly model the SDF, which represents the mean geometry:" + }, + { + "type": "equation", + "bbox": [ + 0.441, + 0.738, + 0.785, + 0.753 + ], + "angle": 0, + "content": "\\[\ns, \\gamma = \\boldsymbol {f} _ {\\text {m e a n}} (x), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.789, + 0.841 + ], + "angle": 0, + "content": "where \\( s \\) denotes the SDF value, \\( \\gamma \\) denotes the feature from the last layer and \\( x \\) denotes the input position. Then, we convert the implicit SDF through Deep Marching Tetrahedra (DMTet) [41] into an explicit mesh with vertex positions \\( V_{0} \\), per-vertex feature \\( \\Gamma \\) and faces \\( F \\). 
Next, we need to transform the mean shape into a neutral-expression shape conditioned on the input identity code" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.481, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "3D Gaussian Parametric Head Model" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.787, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.146, + 0.785, + 0.237 + ], + "angle": 0, + "content": "\\(z^{id}\\). To inject identity information into the vertices of the mesh, we first use an injection MLP \\(f_{inj}(\\cdot)\\), which takes the identity code \\(z^{id}\\) and the per-vertex feature \\(\\Gamma\\) as input and produces the identity-conditioned per-vertex feature vectors \\(H = f_{inj}(z^{id},\\Gamma)\\). Subsequently, utilizing a tiny MLP \\(f_{id}(\\cdot)\\), we predict the displacement \\(\\delta V_{id}\\) for each vertex. This displacement is used to transform the mean shape into the neutral-expression shape conditioned on the id code \\(z^{id}\\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.239, + 0.787, + 0.358 + ], + "angle": 0, + "content": "After completing deformations related to identity, the next step is to capture the deformation induced by facial expressions. We introduce another tiny MLP \\( f_{exp}(\\cdot) \\). This MLP takes the feature vectors \\( H \\) obtained in the previous step and the expression code \\( z^{exp} \\) as input, and the output is the displacement \\( \\delta V_{exp} \\) for each vertex. Using this displacement, we update the vertex positions to \\( V_{can} \\). Additionally, we feed the same feature vectors \\( H \\) and expression code \\( z^{exp} \\) to a color MLP, \\( f_{col}(\\cdot) \\), to predict the 32-channel color \\( C \\) for each vertex. The vertex positions \\( V_{can} \\) and the 32-channel color \\( C \\) can be described as:" + }, + { + "type": "equation", + "bbox": [ + 0.297, + 0.369, + 0.786, + 0.386 + ], + "angle": 0, + "content": "\\[\nV_{can} = V_{0} + \\boldsymbol{f}_{id}(H) + \\boldsymbol{f}_{exp}(H, \\boldsymbol{z}^{exp}), \\quad C = \\boldsymbol{f}_{col}(H, \\boldsymbol{z}^{exp}). \\tag{2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.395, + 0.787, + 0.516 + ], + "angle": 0, + "content": "Finally, we utilize the estimated head pose parameters \\( R \\) and \\( T \\) obtained during data preprocessing to transform the mesh from the canonical space to the world space \\( V = R \\cdot V_{can} + T \\). After generating the final vertex positions, colors and faces \\( \\{V, C, F\\} \\) of the mesh, we render the mesh into a 256-resolution 32-channel feature map \\( I_F \\) and a mask \\( M \\) through differentiable rasterization with a given camera pose. Subsequently, the feature map is interpreted as a 512-resolution RGB image \\( I_{hr} \\) through a lightweight convolutional upsampling network \\( \\Psi(\\cdot) \\), as shown in Fig. 2." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.517, + 0.787, + 0.759 + ], + "angle": 0, + "content": "3D Gaussian Parametric Head Model. The Gaussian model also takes an identity code \\( z^{id} \\) and an expression code \\( z^{exp} \\) as input, producing the positions \\( X \\), color \\( C \\), scale \\( S \\), rotation \\( Q \\) and opacity \\( A \\) of the 3D Gaussians. Similar to the guiding geometry model, we initially maintain an overall mean point cloud, with the mean positions \\( X_0 \\).
However, we no longer generate the per-vertex feature \\( \\Gamma \\) through \\( f_{mean}(x) \\). Instead, we directly generate it once and bind it to the Gaussian points as optimizable variables \\( \\Gamma_0 \\). This is possible since the number of Gaussian points is fixed at this stage. Then we need to transform the mean point cloud into a neutral-expression point cloud, conditioned on the id code \\( z^{id} \\). To achieve this, we utilize the same injection MLP \\( f_{inj}(\\cdot) \\) and identity deformation MLP \\( f_{id}(\\cdot) \\) defined in the guiding geometry model, which generate feature vectors \\( H = f_{inj}(z^{id},\\Gamma_0) \\) that encode identity information for each point and predict the identity-related displacement of each point. Then, we also predict the displacement conditioned on the expression code \\( z^{exp} \\). The resulting positions \\( X_{can} \\) and the 32-channel color \\( C \\) of each point, similar to the approach presented in the guiding geometry model, can be described as:" + }, + { + "type": "equation", + "bbox": [ + 0.29, + 0.769, + 0.786, + 0.786 + ], + "angle": 0, + "content": "\\[\nX_{can} = \\boldsymbol{X}_{\\mathbf{0}} + \\boldsymbol{f}_{id}(H) + \\boldsymbol{f}_{exp}(H, \\boldsymbol{z}^{exp}), \\quad C = \\boldsymbol{f}_{col}(H, \\boldsymbol{z}^{exp}). \\tag{3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.787, + 0.843 + ], + "angle": 0, + "content": "Unlike the SDF and DMTet representations, Gaussians have additional attributes that need to be predicted. Here, we introduce a new MLP to predict the Gaussian attributes in the canonical space, including the scale \\(S\\), rotation \\(Q_{can}\\)," + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.333, + 0.128 + ], + "angle": 0, + "content": "Xu et al." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.147, + 0.788, + 0.195 + ], + "angle": 0, + "content": "and opacity \\(A\\). In order to ensure the stability of the generated results, we refrain from directly predicting these values. Instead, we predict their offsets \\(\\{\\delta S,\\delta Q,\\delta A\\}\\) relative to the overall mean values \\(\\{S_0,Q_0,A_0\\}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.34, + 0.205, + 0.786, + 0.222 + ], + "angle": 0, + "content": "\\[\n\\{S, Q_{can}, A\\} = \\left\\{S_{0}, Q_{0}, A_{0}\\right\\} + \\boldsymbol{f}_{att}(H, z^{exp}). \\tag{4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.232, + 0.789, + 0.353 + ], + "angle": 0, + "content": "Following this, we utilize the estimated head pose parameters \\( R \\) and \\( T \\), obtained during data preprocessing, to transform the canonical space variables \\( X_{can} \\) and \\( Q_{can} \\) into the world space: \\( X = R \\cdot X_{can} + T \\), \\( Q = R \\cdot Q_{can} \\). For model rendering, we leverage differentiable Gaussian splatting [21] and neural rendering techniques to generate images: the generated 3D Gaussian parameters \\( \\{X, C, S, Q, A\\} \\), conditioned by the identity code \\( z^{id} \\) and expression code \\( z^{exp} \\), are splatted into a feature map. Finally, we input this feature map into the same upsampling network \\( \\Psi(\\cdot) \\) of the guiding geometry model to generate a 512-resolution RGB image."
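A minimal sketch of how Eqns. (3) and (4) could be wired up in PyTorch is given below. The MLP widths, latent dimensions, and the plain additive quaternion offset are illustrative assumptions; the paper does not specify the architecture at this level of detail, and the splatting and upsampling steps are omitted:

```python
import torch
import torch.nn as nn

def mlp(d_in: int, d_out: int, d_hidden: int = 256) -> nn.Sequential:
    return nn.Sequential(nn.Linear(d_in, d_hidden), nn.ReLU(),
                         nn.Linear(d_hidden, d_hidden), nn.ReLU(),
                         nn.Linear(d_hidden, d_out))

class GaussianHeadDecoder(nn.Module):
    """Wiring of Eqns. (3)-(4); all widths are illustrative guesses."""
    def __init__(self, d_id=256, d_exp=256, d_feat=32, d_h=128):
        super().__init__()
        self.f_inj = mlp(d_id + d_feat, d_h)        # identity-conditioned features H
        self.f_id = mlp(d_h, 3)                     # identity displacement
        self.f_exp = mlp(d_h + d_exp, 3)            # expression displacement
        self.f_col = mlp(d_h + d_exp, 32)           # 32-channel color C
        self.f_att = mlp(d_h + d_exp, 3 + 4 + 1)    # offsets {dS, dQ, dA}

    def forward(self, z_id, z_exp, X0, G0, S0, Q0, A0, R, T):
        n = X0.shape[0]
        H = self.f_inj(torch.cat([z_id.expand(n, -1), G0], dim=-1))
        he = torch.cat([H, z_exp.expand(n, -1)], dim=-1)
        X_can = X0 + self.f_id(H) + self.f_exp(he)            # Eqn. (3)
        C = self.f_col(he)                                    # Eqn. (3)
        dS, dQ, dA = self.f_att(he).split([3, 4, 1], dim=-1)
        S, Q_can, A = S0 + dS, Q0 + dQ, A0 + dA               # Eqn. (4)
        X = X_can @ R.T + T                                   # world space: X = R*X_can + T
        return X, C, S, Q_can, A                              # Q_can is likewise rotated by R
```

In practice the returned parameters would be passed to a differentiable Gaussian rasterizer, and the resulting feature map to the upsampler Ψ.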
+ }, + { + "type": "text", + "bbox": [ + 0.216, + 0.354, + 0.79, + 0.535 + ], + "angle": 0, + "content": "In the 3D Gaussian Parametric Head Model, we leverage the previously trained guiding geometry model to initialize our variables and networks, rather than initializing them randomly and training from scratch. Specifically, we initialize the Gaussian positions \\( \\mathbf{X_0} \\) using the vertex positions of the mean mesh \\( V_{0} \\). Meanwhile, we generate the per-vertex feature \\( \\varGamma \\) from \\( f_{mean}(x) \\) once at the beginning and bind it to the points as an optimizable variable \\( \\varGamma_{0} \\), as described above. Additionally, all identity codes \\( z^{id} \\), expression codes \\( z^{exp} \\), and the networks \\( \\{\\pmb{f}_{\\text{inj}}(\\cdot), \\pmb{f}_{\\text{id}}(\\cdot), \\pmb{f}_{\\text{exp}}(\\cdot), \\pmb{f}_{\\text{col}}(\\cdot), \\pmb{\\Psi}(\\cdot)\\} \\) are directly inherited from the guiding geometry model. Note that the attribute MLP \\( f_{att}(\\cdot) \\) is a newly introduced network, hence it is initialized randomly. Finally, the overall mean values of the Gaussian attributes \\( \\{S_0, Q_0, A_0\\} \\) are initialized following the original 3D Gaussian Splatting [21]." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.557, + 0.387, + 0.571 + ], + "angle": 0, + "content": "3.3 Loss Functions" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.582, + 0.788, + 0.642 + ], + "angle": 0, + "content": "To ensure the accurate convergence of the model, we employ various loss functions as constraints, including the basic photometric loss and silhouette loss, which enforce consistency with the ground truth for both the rendered high-resolution images \\( I_{hr} \\) and the rendered masks \\( M \\):" + }, + { + "type": "equation", + "bbox": [ + 0.352, + 0.655, + 0.786, + 0.672 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{hr} = \\left|\\left| I_{hr} - I_{gt} \\right|\\right|_{1}, \\quad \\mathcal{L}_{sil} = IOU(M, M_{gt}), \\tag{5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.682, + 0.788, + 0.742 + ], + "angle": 0, + "content": "with \\( I_{gt} \\) representing the ground truth RGB images and \\( M_{gt} \\) representing the ground truth masks. We further encourage the first three channels of the low-resolution feature map \\( I_{lr} \\) to closely match the ground-truth RGB image \\( I_{gt} \\) by introducing an \\( L_{1} \\) loss:" + }, + { + "type": "equation", + "bbox": [ + 0.435, + 0.743, + 0.786, + 0.759 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{lr} = \\left|\\left| I_{lr} - I_{gt} \\right|\\right|_{1}. \\tag{6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.765, + 0.789, + 0.841 + ], + "angle": 0, + "content": "The geometric deformation caused by expressions is typically complex and cannot be learned through image supervision alone. Therefore, we provide additional coarse supervision for expression deformation learning using 3D landmarks.
Specifically, we define the 3D landmarks \\( P_0 \\) in the canonical space, and then predict their displacements and transform them to the world space as \\( P \\)" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.481, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "3D Gaussian Parametric Head Model" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.787, + 0.128 + ], + "angle": 0, + "content": "9" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.176 + ], + "angle": 0, + "content": "just like the transformation of the original vertices \\( V_{0} \\) above. Then, we construct the landmark loss function:" + }, + { + "type": "equation", + "bbox": [ + 0.428, + 0.19, + 0.786, + 0.207 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {l m k} = | | \\boldsymbol {P} - \\boldsymbol {P} _ {g t} | | _ {2}, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.216, + 0.784, + 0.246 + ], + "angle": 0, + "content": "with \\( P_{gt} \\) denoting the ground truth 3D landmarks, which are estimated by fitting a BFM model to the training data during preprocessing." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.248, + 0.785, + 0.305 + ], + "angle": 0, + "content": "Moreover, to guarantee the decoupling of identity and expression deformations learned by the model and minimize redundancy, we introduce the following regularization loss function that aims to minimize the magnitude of both deformations:" + }, + { + "type": "equation", + "bbox": [ + 0.405, + 0.309, + 0.786, + 0.326 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {r e g} = \\left| \\left| \\delta V _ {i d} \\right| \\right| _ {2} + \\left| \\left| \\delta V _ {e x p} \\right| \\right| _ {2}. \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.33, + 0.784, + 0.374 + ], + "angle": 0, + "content": "During the training of the Guiding Geometry Model, we also construct a Laplacian smooth term \\(\\mathcal{L}_{lap}\\) to penalize surface noise or breaks. Overall, the total loss function is formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.282, + 0.39, + 0.786, + 0.408 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {h r} + \\lambda_ {s i l} \\mathcal {L} _ {s i l} + \\lambda_ {l r} \\mathcal {L} _ {l r} + \\lambda_ {l m k} \\mathcal {L} _ {l m k} + \\lambda_ {r e g} \\mathcal {L} _ {r e g} + \\lambda_ {l a p} \\mathcal {L} _ {l a p} \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.418, + 0.784, + 0.508 + ], + "angle": 0, + "content": "with all the \\(\\lambda\\) denoting the weights of each term. In practice, we set \\(\\lambda_{sil} = 0.1\\), \\(\\lambda_{lr} = 0.1\\), \\(\\lambda_{lmk} = 0.1\\), \\(\\lambda_{reg} = 0.001\\) and \\(\\lambda_{lap} = 100\\). During training, we jointly optimize the bolded variables above: \\(\\{z^{id}, z^{exp}, f_{inj}(\\cdot), f_{mean}(\\cdot), f_{id}(\\cdot), f_{exp}(\\cdot), f_{col}(\\cdot), \\Psi(\\cdot), P_0\\}\\). Notably, the defined canonical 3D landmarks \\(P_0\\) are initialized by computing the average of the estimated 3D landmarks from the training dataset." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.509, + 0.785, + 0.613 + ], + "angle": 0, + "content": "During the training stage of the 3D Gaussian Parametric Head Model, we also calculate the perceptual loss [60] to encourage the model to learn more high-frequency details \\(\\mathcal{L}_{vgg} = VGG(I_{hr},I_{gt})\\). 
Similar to training the guiding geometry model, we enforce the first three channels of the feature map to be RGB channels as in Eqn. 6, and introduce the landmark guidance term as in Eqn. 7 and the regularization term on point displacements as in Eqn. 8. Consequently, the overall loss function can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.317, + 0.63, + 0.786, + 0.647 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L} = \\mathcal{L}_{hr} + \\lambda_{vgg} \\mathcal{L}_{vgg} + \\lambda_{lr} \\mathcal{L}_{lr} + \\lambda_{lmk} \\mathcal{L}_{lmk} + \\lambda_{reg} \\mathcal{L}_{reg} \\tag{10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.657, + 0.784, + 0.735 + ], + "angle": 0, + "content": "with the weights \\(\\lambda_{vgg} = 0.1\\), \\(\\lambda_{lr} = 0.1\\), \\(\\lambda_{lmk} = 0.1\\) and \\(\\lambda_{reg} = 0.001\\). In this training stage, we also jointly optimize all the bolded variables and networks mentioned above, including the overall mean positions and attributes of the Gaussians and the 3D landmarks: \\(\\{z^{id}, z^{exp}, f_{inj}(\\cdot), f_{id}(\\cdot), f_{exp}(\\cdot), f_{col}(\\cdot), f_{att}(\\cdot), \\Psi(\\cdot), X_0, \\Gamma_0, S_0, Q_0, A_0, P_0\\}\\)." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.755, + 0.403, + 0.769 + ], + "angle": 0, + "content": "3.4 Inference Details" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.78, + 0.784, + 0.841 + ], + "angle": 0, + "content": "Image-based Fitting. When a single RGB portrait image is input, we first align the image according to the processing rules of the training set. Subsequently, we employ gradient descent to fit the image rendered by the 3D Gaussian Parametric Head Model to this input image using the photometric loss \\(\\mathcal{L}_{lr}\\)" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.333, + 0.128 + ], + "angle": 0, + "content": "Xu et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.146, + 0.788, + 0.312 + ], + "angle": 0, + "content": "and \\(\\mathcal{L}_{hr}\\) defined in Eqn. 10. This process helps regress the identity code \\(z^{id}\\) and expression code \\(z^{exp}\\). We optimize for only 200 iterations with a learning rate of \\(1 \\times 10^{-3}\\) for both latent codes. Following this, we fix the latent codes \\(z^{id}\\) and \\(z^{exp}\\), such that the variables \\(H\\), \\(X_{can}\\) are also determined. We further optimize the color MLP \\(f_{col}(\\cdot)\\) and the canonical positions \\(X_{can}\\), which represent the geometry of the current specific subject, using the same loss function. In this step, we only optimize for 100 iterations with a learning rate of \\(1 \\times 10^{-4}\\) for both \\(f_{col}(\\cdot)\\) and \\(X_{can}\\). This optimization process aims to add some details that cannot be recovered by the trained model itself, ultimately resulting in the reconstructed head model. The entire process has a total of 300 iterations and takes only 30 seconds." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.313, + 0.788, + 0.435 + ], + "angle": 0, + "content": "Expression Editing. Given a source portrait image providing the subject whose expression is to be edited and a target portrait image providing the target expression, we first obtain the head model of the source subject through optimization, following the above-mentioned Image-based Fitting strategy.
Then, for the target portrait image, we obtain its head model and the corresponding expression code in the same way. Finally, we input the target expression code to the head model of the source subject, so that the expression of the source subject can be edited to the target one." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.459, + 0.377, + 0.476 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.492, + 0.335, + 0.506 + ], + "angle": 0, + "content": "4.1 Datasets" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.518, + 0.788, + 0.608 + ], + "angle": 0, + "content": "The NeRSemble dataset contains over 260 different identities, with 72fps multi-view videos collected from 16 synchronized cameras for each identity. The total length of the videos for a single identity is approximately 6000-11000 frames. In the experiments, we selected 140 of the identities for training and the rest for evaluation. For each identity's video, we selected about 150 frames from all 16 views as training data." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.609, + 0.788, + 0.685 + ], + "angle": 0, + "content": "The NPHM dataset contains 5200 3D human head scans. These scans come from 255 different identities, each with about 20 different expressions. We selected approximately 1600 scans of 80 identities for training. Since our method utilizes 2D images as training supervision, we render each scan from 80 different views to generate synthetic image data and record the camera parameters and the masks." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.685, + 0.788, + 0.761 + ], + "angle": 0, + "content": "The FaceVerse dataset is an East Asian human head scan dataset. It contains 2310 scans from 110 different identities, and each identity contains 21 expressions. We selected 1620 scans of 80 identities for training. Similarly, for each scan, we render multi-view synthetic image data from 80 different views and record the camera parameters and the masks." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.784, + 0.352, + 0.798 + ], + "angle": 0, + "content": "4.2 Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Disentanglement. We tested the performance of the 3D Gaussian Parametric Head Model under the control of different identity codes and different expression codes."
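Before turning to the evaluation, the two-phase image fitting of Sec. 3.4 can be made concrete. The sketch below follows the stated schedule (200 iterations at learning rate 1e-3 on the latent codes, then 100 iterations at 1e-4 on f_col and X_can); `render` is a hypothetical stand-in that closes over the model and returns the low- and high-resolution outputs:

```python
import torch
import torch.nn.functional as F

def fit_single_image(render, f_col_params, X_can, z_id, z_exp, I_gt):
    """Two-phase fitting of Sec. 3.4. `render(z_id, z_exp) -> (I_lr, I_hr)` is a
    hypothetical stand-in for splatting plus the upsampler Psi."""
    I_gt_lr = F.interpolate(I_gt[None], size=256, mode="bilinear")[0]  # 512 -> 256 target

    def photometric():
        I_lr, I_hr = render(z_id, z_exp)
        # L_hr on the RGB image plus L_lr on the first 3 feature-map channels.
        return (I_hr - I_gt).abs().mean() + 0.1 * (I_lr[:3] - I_gt_lr).abs().mean()

    # Phase 1: 200 iterations, lr 1e-3, latent codes only.
    z_id.requires_grad_(True); z_exp.requires_grad_(True)
    opt = torch.optim.Adam([z_id, z_exp], lr=1e-3)
    for _ in range(200):
        opt.zero_grad(); photometric().backward(); opt.step()

    # Phase 2: 100 iterations, lr 1e-4, refine f_col and canonical positions only.
    z_id.requires_grad_(False); z_exp.requires_grad_(False)
    X_can.requires_grad_(True)
    opt = torch.optim.Adam(list(f_col_params) + [X_can], lr=1e-4)
    for _ in range(100):
        opt.zero_grad(); photometric().backward(); opt.step()
    return z_id, z_exp
```

Expression editing then reduces to running this fit on both portraits and feeding the target image's expression code into the source subject's fitted model.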
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.481, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "3D Gaussian Parametric Head Model" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "image_caption", + "bbox": [ + 0.225, + 0.18, + 0.251, + 0.192 + ], + "angle": 0, + "content": "ID1" + }, + { + "type": "image", + "bbox": [ + 0.252, + 0.146, + 0.355, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.358, + 0.145, + 0.461, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.465, + 0.145, + 0.566, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.572, + 0.145, + 0.676, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.682, + 0.146, + 0.781, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.225, + 0.266, + 0.251, + 0.278 + ], + "angle": 0, + "content": "ID2" + }, + { + "type": "image", + "bbox": [ + 0.252, + 0.233, + 0.355, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.289, + 0.318, + 0.321, + 0.33 + ], + "angle": 0, + "content": "Exp1" + }, + { + "type": "image", + "bbox": [ + 0.358, + 0.233, + 0.462, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.395, + 0.318, + 0.428, + 0.33 + ], + "angle": 0, + "content": "Exp2" + }, + { + "type": "image", + "bbox": [ + 0.465, + 0.233, + 0.57, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.502, + 0.318, + 0.535, + 0.33 + ], + "angle": 0, + "content": "Exp3" + }, + { + "type": "image", + "bbox": [ + 0.574, + 0.232, + 0.678, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.61, + 0.318, + 0.643, + 0.33 + ], + "angle": 0, + "content": "Exp4" + }, + { + "type": "image", + "bbox": [ + 0.682, + 0.232, + 0.787, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.72, + 0.318, + 0.752, + 0.33 + ], + "angle": 0, + "content": "Exp5" + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.343, + 0.788, + 0.386 + ], + "angle": 0, + "content": "Fig. 3: We generate the head models with randomly sampled identity codes and expression codes as condition. Each row corresponds to the same identity code, and each column corresponds to the same expression code." 
+ }, + { + "type": "image", + "bbox": [ + 0.218, + 0.404, + 0.332, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.266, + 0.506, + 0.283, + 0.514 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.404, + 0.455, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.457, + 0.404, + 0.559, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.411, + 0.506, + 0.498, + 0.514 + ], + "angle": 0, + "content": "Our Initialization" + }, + { + "type": "image", + "bbox": [ + 0.571, + 0.404, + 0.684, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.628, + 0.506, + 0.735, + 0.514 + ], + "angle": 0, + "content": "FLAME Initialization" + }, + { + "type": "image", + "bbox": [ + 0.696, + 0.404, + 0.787, + 0.505 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.527, + 0.788, + 0.57 + ], + "angle": 0, + "content": "Fig. 4: We compare our initialization strategy with initialization from the vertices of the FLAME model. The left side shows the rendered images, and the right side shows the positions of the Gaussian points." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.582, + 0.788, + 0.658 + ], + "angle": 0, + "content": "We randomly sampled 2 identity codes and 5 expression codes to generate 10 head models. Each row corresponds to the same identity code, and each column corresponds to the same expression code, as shown in Fig. 3. It can be observed that our model performs well in identity consistency and expression consistency, and the two components are fully disentangled." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.659, + 0.788, + 0.81 + ], + "angle": 0, + "content": "Ablation on Initialization. To evaluate the effectiveness of our initialization strategy with the guiding geometry model outlined in Section 3, we compare it against a FLAME-based initialization strategy. To use the FLAME model for initialization, we first fit a FLAME model to the overall mean 3D landmarks, which are estimated during data preprocessing. Then, we sample 100,000 points near the surface of the FLAME mesh as an initialization of the mean Gaussian positions \\( \\mathbf{X_0} \\). The per-vertex features \\( \\pmb{\\Gamma} \\) bound to each point are simply set to zero. All the networks \\( \\{f_{inj}(\\cdot), f_{id}(\\cdot), f_{exp}(\\cdot), f_{col}(\\cdot), \\Psi(\\cdot)\\} \\) and \\( f_{att}(\\cdot) \\) are randomly initialized, as no prior is available. The initialization process for the Gaussian attributes \\( \\{S_0, Q_0, A_0\\} \\) remains the same as in our strategy." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.811, + 0.787, + 0.84 + ], + "angle": 0, + "content": "We show the visualization results in Figure 4, with the Gaussian model rendering image on the left and the Gaussian positions displayed as point clouds" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.333, + 0.127 + ], + "angle": 0, + "content": "Xu et al."
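The FLAME-based baseline initialization described above amounts to area-weighted sampling of points near the fitted mesh surface. A sketch under the assumptions that `verts`/`faces` come from the fitted FLAME mesh and that the jitter scale is arbitrary:

```python
import numpy as np

def sample_near_surface(verts, faces, n=100_000, sigma=1e-3, seed=0):
    """Draw n points near a triangle mesh: faces chosen by area, barycentric
    coordinates uniform per face, plus a small jitter along the face normal."""
    rng = np.random.default_rng(seed)
    tri = verts[faces]                                            # (F, 3, 3)
    cross = np.cross(tri[:, 1] - tri[:, 0], tri[:, 2] - tri[:, 0])
    area = 0.5 * np.linalg.norm(cross, axis=1)
    idx = rng.choice(len(faces), size=n, p=area / area.sum())     # area-weighted faces
    u, v = rng.random(n), rng.random(n)
    flip = u + v > 1.0
    u[flip], v[flip] = 1.0 - u[flip], 1.0 - v[flip]               # uniform barycentrics
    p = ((1 - u - v)[:, None] * tri[idx, 0]
         + u[:, None] * tri[idx, 1] + v[:, None] * tri[idx, 2])
    normal = cross[idx] / (np.linalg.norm(cross[idx], axis=1, keepdims=True) + 1e-12)
    return p + sigma * rng.standard_normal((n, 1)) * normal       # jitter off the surface
```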
+ }, + { + "type": "image", + "bbox": [ + 0.218, + 0.142, + 0.327, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.258, + 0.239, + 0.288, + 0.248 + ], + "angle": 0, + "content": "Mesh" + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.142, + 0.443, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.362, + 0.239, + 0.414, + 0.248 + ], + "angle": 0, + "content": "Mesh+SR" + }, + { + "type": "image", + "bbox": [ + 0.448, + 0.142, + 0.558, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.48, + 0.239, + 0.528, + 0.248 + ], + "angle": 0, + "content": "Gaussian" + }, + { + "type": "image", + "bbox": [ + 0.565, + 0.142, + 0.673, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.584, + 0.239, + 0.654, + 0.248 + ], + "angle": 0, + "content": "Gaussian+SR" + }, + { + "type": "image", + "bbox": [ + 0.677, + 0.142, + 0.788, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.723, + 0.239, + 0.741, + 0.248 + ], + "angle": 0, + "content": "GT" + }, + { + "type": "image_caption", + "bbox": [ + 0.242, + 0.26, + 0.759, + 0.275 + ], + "angle": 0, + "content": "Fig. 5: The comparison of the different representations with super resolution." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.287, + 0.788, + 0.422 + ], + "angle": 0, + "content": "on the right. Our initialization strategy using the guiding geometry model can ensure that all the Gaussian points fall evenly on the actual surface of the model, thereby ensuring reconstruction quality. When using the FLAME model for the initialization, a large number of points wander inside or outside the actual surface of the model, causing noise or redundancy and leading the model to lose some high-frequency information and making it difficult to fully converge. We also perform a quantitative evaluation of different initialization strategies on the rendered images, as shown in Table 1, which shows that our method leads to better rendering results." + }, + { + "type": "table", + "bbox": [ + 0.277, + 0.43, + 0.727, + 0.478 + ], + "angle": 0, + "content": "
<table><tr><td>Method</td><td>PSNR ↑</td><td>SSIM ↑</td><td>LPIPS ↓</td></tr>
<tr><td>FLAME Initialization</td><td>25.7</td><td>0.82</td><td>0.109</td></tr>
<tr><td>Our Initialization</td><td>28.0</td><td>0.84</td><td>0.085</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.478, + 0.786, + 0.505 + ], + "angle": 0, + "content": "Table 1: Quantitative evaluation results of our initialization strategy and naive FLAME initialization strategy." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.52, + 0.788, + 0.625 + ], + "angle": 0, + "content": "Ablation on Representation and Super Resolution. We conduct the ablation study for the guiding mesh model, the Gaussian model and the super-resolution network (abbreviated as SR) as shown in the Fig. 5. The corresponding PSNR metrics are: Mesh (15.7), Mesh+SR (17.3), Gaussian (27.0), Gaussian+SR (29.3). Compared to mesh, utilizing 3D Gaussian as the representation brings significant improvements (+12), while the super resolution module adds some details, generating more realistic results." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.649, + 0.367, + 0.664 + ], + "angle": 0, + "content": "4.3 Applications" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.674, + 0.789, + 0.841 + ], + "angle": 0, + "content": "Image-based Fitting. In this section, we demonstrate the capability of our 3D Gaussian Parametric Model for single-image fitting using the fitting strategy detailed in Section 3.4. We compare our model with similar works: HeadNeRF [19], MoFaNeRF [64], and PanoHead [1]. In addition to evaluating the above methods on our evaluation dataset, we also conduct comparisons using cases from MEAD [46] dataset (the first two rows). The qualitative results are presented in Figure 6. Our model exhibits reconstruction accuracy while maintaining excellent 3D consistency and identity preservation. HeadNeRF's fitting results often suffer from missing hair, and they remove the body and neck. MoFaNeRF, trained solely on the FaceScape dataset where all subjects wear hats, struggles to fit hair. As a GAN-based model, PanoHead can achieve highly accurate reproductions" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.481, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "3D Gaussian Parametric Head Model" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "image", + "bbox": [ + 0.216, + 0.145, + 0.788, + 0.509 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.52, + 0.788, + 0.577 + ], + "angle": 0, + "content": "Fig. 6: We compare our method with other SOTA methods on the task of single image fitting. The far left is the input image, and to the right are Our method, HeadNeRF [19], MoFaNeRF [64] and PanoHead [1]. Our model significantly outperforms other methods in reconstruction quality and 3D consistency." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.589, + 0.787, + 0.619 + ], + "angle": 0, + "content": "from the input view. However, due to overfitting, the results from side views reveal poor 3D consistency and identity preservation." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.62, + 0.788, + 0.77 + ], + "angle": 0, + "content": "In addition to qualitative evaluations, we also conducted quantitative evaluations on 60 images using three metrics: Peak Signal-to-Noise Ratio (PSNR), Structural Similarity Index (SSIM), and Face Distance (FD). Here, we provide a brief explanation of the Face Distance (FD). To compute the FD metric, we utilized a face recognition tool \\(^3\\) to encode two images containing faces into 128-dimensional vectors. 
Subsequently, we calculated the distance between these two vectors to reflect the similarity of the two faces. In our experiments, FD serves as an indicator of identity consistency. The results are shown in Table 2. Our model demonstrates optimal performance in both fitting accuracy and identity consistency." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.771, + 0.788, + 0.817 + ], + "angle": 0, + "content": "Expression Editing. Our 3D Gaussian Parametric Head Model possesses the capability for expression editing. Upon completing the fitting process on a portrait image, we can animate the model by applying different expression codes." + }, + { + "type": "page_footnote", + "bbox": [ + 0.218, + 0.824, + 0.544, + 0.841 + ], + "angle": 0, + "content": "3 https://github.com/ageitgey/face_recognition" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.333, + 0.127 + ], + "angle": 0, + "content": "Xuet al." + }, + { + "type": "table", + "bbox": [ + 0.323, + 0.144, + 0.681, + 0.219 + ], + "angle": 0, + "content": "
| Method | PSNR ↑ | SSIM ↑ | FD ↓ |
| --- | --- | --- | --- |
| HeadNeRF | 28.9 | 0.84 | 0.37 |
| MoFaNeRF | 28.6 | 0.82 | 0.37 |
| PanoHead | 29.1 | 0.86 | 0.41 |
| Ours | 30.3 | 0.86 | 0.35 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.219, + 0.784, + 0.26 + ], + "angle": 0, + "content": "Table 2: Quantitative evaluation results on the task of single image fitting. We compare our method with other 3 SOTA methods: HeadNeRF [19], MoFaNeRF [64], PanoHead [1]." + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.273, + 0.288, + 0.332 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.242, + 0.333, + 0.267, + 0.34 + ], + "angle": 0, + "content": "Input" + }, + { + "type": "image", + "bbox": [ + 0.293, + 0.273, + 0.365, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.32, + 0.333, + 0.341, + 0.34 + ], + "angle": 0, + "content": "Exp1" + }, + { + "type": "image", + "bbox": [ + 0.365, + 0.274, + 0.434, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.389, + 0.333, + 0.411, + 0.34 + ], + "angle": 0, + "content": "Exp2" + }, + { + "type": "image", + "bbox": [ + 0.434, + 0.274, + 0.503, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.459, + 0.333, + 0.481, + 0.34 + ], + "angle": 0, + "content": "Exp3" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.274, + 0.572, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.528, + 0.333, + 0.551, + 0.34 + ], + "angle": 0, + "content": "Exp4" + }, + { + "type": "image", + "bbox": [ + 0.573, + 0.274, + 0.644, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.599, + 0.333, + 0.619, + 0.34 + ], + "angle": 0, + "content": "Exp5" + }, + { + "type": "image", + "bbox": [ + 0.644, + 0.274, + 0.714, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.67, + 0.333, + 0.692, + 0.34 + ], + "angle": 0, + "content": "Exp6" + }, + { + "type": "image", + "bbox": [ + 0.714, + 0.274, + 0.787, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.74, + 0.333, + 0.762, + 0.34 + ], + "angle": 0, + "content": "Exp7" + }, + { + "type": "image_caption", + "bbox": [ + 0.216, + 0.353, + 0.784, + 0.394 + ], + "angle": 0, + "content": "Fig. 7: We perform expression editing on the head model reconstructed from the input image. Our model is able to handle very exaggerated expressions with superior identity consistency." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.411, + 0.785, + 0.487 + ], + "angle": 0, + "content": "The detailed pipeline is outlined in Section 3.4. An example is illustrated in Figure 7. Our model can generate images depicting the corresponding expressions of the input subject based on a reference expression (as seen in the lower left corner of each image in the figure). It performs admirably even with exaggerated expressions, producing natural and realistic results." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.522, + 0.354, + 0.537 + ], + "angle": 0, + "content": "5 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.564, + 0.785, + 0.64 + ], + "angle": 0, + "content": "Ethical Considerations. Our technique can generate artificial portrait videos, posing a significant risk of spreading misinformation, shaping public opinions, and undermining trust in media outlets. These consequences could have profound negative effects on society. Therefore, it is crucial to explore methods that effectively differentiate between genuine and manipulated content." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.642, + 0.785, + 0.718 + ], + "angle": 0, + "content": "**Limitation.** Our 3D Gaussian Parametric Head Model takes a step forward in the characterization of parametric head models. However, due to the limited amount of training data, the generalization ability of the model is still insufficient. In some cases where the illumination is significantly different from the training set, the reconstruction results are not good." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.72, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Conclusion. In this paper, we propose the 3D Gaussian Parametric Head Model, a novel framework for parametric head model. This model leverages the power of 3D Gaussians, enabling realistic rendering quality and real-time speed. Our well-designed training strategy ensured stable convergence while enabling the model to learn appearance details and expressions. Besides, our model allows for creating detailed, high-quality face avatars from a single input image, and also enables editing for expressions and identity. We believe our model represents a significant advancement in the field of parametric head model." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.481, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "3D Gaussian Parametric Head Model" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.145, + 0.403, + 0.163 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.176, + 0.788, + 0.221 + ], + "angle": 0, + "content": "The work is supported by the National Science Foundation of China (NSFC) under Grant Number 62125107 and the Postdoctoral Fellowship Program of China Postdoctoral Science Foundation under Grant Number GZC20231304." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.244, + 0.323, + 0.26 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.274, + 0.787, + 0.317 + ], + "angle": 0, + "content": "1. An, S., Xu, H., Shi, Y., Song, G., Ogras, U.Y., Luo, L.: Panohead: Geometry-aware 3d full-head synthesis in 360deg. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 20950-20959 (June 2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.318, + 0.787, + 0.357 + ], + "angle": 0, + "content": "2. Blanz, V., Vetter, T.: A morphable model for the synthesis of 3d faces. In: 26th Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH 1999). pp. 187-194. ACM Press (1999)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.359, + 0.787, + 0.413 + ], + "angle": 0, + "content": "3. Bühler, M.C., Sarkar, K., Shah, T., Li, G., Wang, D., Helminger, L., Orts-Escalano, S., Lagun, D., Hilliges, O., Beeler, T., et al.: Preface: A data-driven volumetric prior for few-shot ultra high-resolution face synthesis. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 3402-3413 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.414, + 0.787, + 0.455 + ], + "angle": 0, + "content": "4. Bulat, A., Tzimiropoulos, G.: How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230,000 3d facial landmarks). 
In: International Conference on Computer Vision (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.456, + 0.787, + 0.496 + ], + "angle": 0, + "content": "5. Cao, C., Simon, T., Kim, J.K., Schwartz, G., Zollhoefer, M., Saito, S.S., Lombardi, S., Wei, S.E., Belko, D., Yu, S.I., Sheikh, Y., Saragih, J.: Authentic volumetric avatars from a phone scan. ACM Trans. Graph. 41(4) (jul 2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.497, + 0.787, + 0.537 + ], + "angle": 0, + "content": "6. Cao, C., Weng, Y., Zhou, S., Tong, Y., Zhou, K.: Facewarehouse: A 3d facial expression database for visual computing. In: IEEE Transactions on Visualization and Computer Graphics. vol. 20, pp. 413-425 (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.538, + 0.787, + 0.592 + ], + "angle": 0, + "content": "7. Chan, E., Monteiro, M., Kellnhofer, P., Wu, J., Wetzstein, G.: pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 5795-5805 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.593, + 0.787, + 0.66 + ], + "angle": 0, + "content": "8. Chan, E.R., Lin, C.Z., Chan, M.A., Nagano, K., Pan, B., Mello, S.D., Gallo, O., Guibas, L., Tremblay, J., Khamis, S., Karras, T., Wetzstein, G.: Efficient geometry-aware 3D generative adversarial networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 16102-16112 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.661, + 0.787, + 0.702 + ], + "angle": 0, + "content": "9. Chen, X., Deng, Y., Wang, B.: Mimic3d: Thriving 3d-aware gans via 3d-to-2d imitation. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.22, + 0.703, + 0.787, + 0.744 + ], + "angle": 0, + "content": "10. Chen, Y., Wang, L., Li, Q., Xiao, H., Zhang, S., Yao, H., Liu, Y.: Monogaussiana-vatar: Monocular gaussian point-based head avatar. In: ACM SIGGRAPH 2023 Conference Proceedings (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.22, + 0.745, + 0.787, + 0.785 + ], + "angle": 0, + "content": "11. Deng, Y., Yang, J., Xiang, J., Tong, X.: Gram: Generative radiance manifolds for 3d-aware image generation. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) pp. 10663-10673 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.22, + 0.786, + 0.787, + 0.84 + ], + "angle": 0, + "content": "12. Gafni, G., Thies, J., Zollhofer, M., Niessner, M.: Dynamic neural radiance fields for monocular 4d facial avatar reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 8645-8654 (June 2021)" + }, + { + "type": "list", + "bbox": [ + 0.22, + 0.274, + 0.787, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.333, + 0.127 + ], + "angle": 0, + "content": "Xuet al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.147, + 0.785, + 0.189 + ], + "angle": 0, + "content": "13. Gao, X., Zhong, C., Xiang, J., Hong, Y., Guo, Y., Zhang, J.: Reconstructing personalized semantic facial nerf models from monocular video. 
ACM Transactions on Graphics (Proceedings of SIGGRAPH Asia) 41(6) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.19, + 0.787, + 0.232 + ], + "angle": 0, + "content": "14. Gerig, T., Forster, A., Blumer, C., Egger, B., Lüthi, M., Schönborn, S., Vetter, T.: Morphable face models - an open framework. In: 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018). pp. 75-82 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.232, + 0.785, + 0.272 + ], + "angle": 0, + "content": "15. Giebenhain, S., Kirschstein, T., Georgopoulos, M., Rünz, M., Agapito, L., Nießner, M.: Learning neural parametric head models. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.273, + 0.785, + 0.327 + ], + "angle": 0, + "content": "16. Giebenhain, S., Kirschstein, T., Georgopoulos, M., Rünz, M., Agapito, L., Nießner, M.: Mononphm: Dynamic head reconstruction from monocular videos. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.328, + 0.785, + 0.383 + ], + "angle": 0, + "content": "17. Grassal, P.W., Prinzler, M., Leistner, T., Rother, C., Nießner, M., Thies, J.: Neural head avatars from monocular rgb videos. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 18632-18643 (June 2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.383, + 0.785, + 0.424 + ], + "angle": 0, + "content": "18. Gu, J., Liu, L., Wang, P., Theobalt, C.: Stylenerf: A style-based 3d aware generator for high-resolution image synthesis. In: International Conference on Learning Representations (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.425, + 0.785, + 0.467 + ], + "angle": 0, + "content": "19. Hong, Y., Peng, B., Xiao, H., Liu, L., Zhang, J.: Headnerf: A real-time nerf-based parametric head model. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 20374-20384 (June 2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.467, + 0.785, + 0.522 + ], + "angle": 0, + "content": "20. Hu, L., Zhang, H., Zhang, Y., Zhou, B., Liu, B., Zhang, S., Nie, L.: Gaussian avatar: Towards realistic human avatar modeling from a single video via animatable 3d gaussians. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.522, + 0.785, + 0.563 + ], + "angle": 0, + "content": "21. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics 42(4) (July 2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.564, + 0.785, + 0.605 + ], + "angle": 0, + "content": "22. Khakhulin, T., Sklyarova, V., Lempitsky, V., Zakharov, E.: Realistic one-shot mesh-based head avatars. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.606, + 0.785, + 0.647 + ], + "angle": 0, + "content": "23. Kirschstein, T., Giebenhain, S., Nießner, M.: Diffusion avatars: Deferred diffusion for high-fidelity 3d head avatars. 
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.647, + 0.785, + 0.688 + ], + "angle": 0, + "content": "24. Kirschstein, T., Qian, S., Giebenhain, S., Walter, T., Niefner, M.: Nersemble: Multi-view radiance field reconstruction of human heads. ACM Trans. Graph. 42(4) (jul 2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.689, + 0.785, + 0.716 + ], + "angle": 0, + "content": "25. Li, T., Bolkart, T., Black, M.J., Li, H., Romero, J.: Learning a model of facial shape and expression from 4d scans. ACM Trans. Graph. 36(6) (nov 2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.717, + 0.785, + 0.743 + ], + "angle": 0, + "content": "26. Li, X., De Mello, S., Liu, S., Nagano, K., Iqbal, U., Kautz, J.: Generalizable one-shot neural head avatar. NeurIPS (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.744, + 0.785, + 0.799 + ], + "angle": 0, + "content": "27. Li, Z., Zheng, Z., Wang, L., Liu, Y.: Animatable gaussians: Learning pose-dependent gaussian maps for high-fidelity human avatar modeling. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.799, + 0.785, + 0.841 + ], + "angle": 0, + "content": "28. Lin, C.Z., Nagano, K., Kautz, J., Chan, E.R., Iqbal, U., Guibas, L., Wetzstein, G., Khamis, S.: Single-shot implicit morphable faces with consistent texture parameterization. In: ACM SIGGRAPH 2023 Conference Proceedings (2023)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.841 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.481, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "3D Gaussian Parametric Head Model" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.205 + ], + "angle": 0, + "content": "29. Lin, S., Ryabtsev, A., Sengupta, S., Curless, B., Seitz, S., Kemelmacher-Shlizerman, I.: Real-time high-resolution background matting. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (Jun 2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.205, + 0.787, + 0.247 + ], + "angle": 0, + "content": "30. Lombardi, S., Simon, T., Schwartz, G., Zollhoefer, M., Sheikh, Y., Saragih, J.: Mixture of volumetric primitives for efficient neural rendering. ACM Trans. Graph. 40(4) (jul 2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.247, + 0.786, + 0.288 + ], + "angle": 0, + "content": "31. Loper, M., Mahmood, N., Romero, J., Pons-Moll, G., Black, M.J.: SMPL: A skinned multi-person linear model. ACM Trans. Graphics (Proc. SIGGRAPH Asia) 34(6), 248:1-248:16 (Oct 2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.289, + 0.787, + 0.319 + ], + "angle": 0, + "content": "32. Luiten, J., Kopanas, G., Leibe, B., Ramanan, D.: Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. In: 3DV (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.318, + 0.787, + 0.348 + ], + "angle": 0, + "content": "33. Ma, S., Weng, Y., Shao, T., Zhou, K.: 3d gaussian blendshapes for head avatar animation. In: ACM SIGGRAPH 2023 Conference Proceedings (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.348, + 0.787, + 0.389 + ], + "angle": 0, + "content": "34. 
Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.39, + 0.787, + 0.445 + ], + "angle": 0, + "content": "35. Or-El, R., Luo, X., Shan, M., Shechtman, E., Park, J.J., Kemelmacher-Shlizerman, I.: Stylesdf: High-resolution 3d-consistent image and geometry generation. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) pp. 13493-13503 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.446, + 0.787, + 0.502 + ], + "angle": 0, + "content": "36. Pavlakos, G., Choutas, V., Ghorbani, N., Bolkart, T., Osman, A.A.A., Tzionas, D., Black, M.J.: Expressive body capture: 3D hands, face, and body from a single image. In: Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR). pp. 10975-10985 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.502, + 0.787, + 0.558 + ], + "angle": 0, + "content": "37. Qian, S., Kirschstein, T., Schoneveld, L., Davoli, D., Giebenhain, S., Nießner, M.: Gaussian avatars: Photorealistic head avatars with rigged 3d gaussians. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.559, + 0.787, + 0.601 + ], + "angle": 0, + "content": "38. Qin, M., Liu, Y., Xu, Y., Zhao, X., Liu, Y., Wang, H.: High-fidelity 3d head avatars reconstruction through spatially-varying expression conditioned neural radiance field. In: AAAI Conference on Artificial Intelligence (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.601, + 0.787, + 0.643 + ], + "angle": 0, + "content": "39. Saito, S., Schwartz, G., Simon, T., Li, J., Nam, G.: Relightable gaussian codec avatars. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.643, + 0.787, + 0.699 + ], + "angle": 0, + "content": "40. Shao, Z., Wang, Z., Li, Z., Wang, D., Lin, X., Zhang, Y., Fan, M., Wang, Z.: SplattingAvatar: Realistic Real-Time Human Avatars with Mesh-Embedded Gaussian Splatting. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.7, + 0.787, + 0.742 + ], + "angle": 0, + "content": "41. Shen, T., Gao, J., Yin, K., Liu, M.Y., Fidler, S.: Deep marching tetrahedra: a hybrid representation for high-resolution 3d shape synthesis. In: Advances in Neural Information Processing Systems (NeurIPS) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.742, + 0.787, + 0.785 + ], + "angle": 0, + "content": "42. Sun, J., Wang, X., Shi, Y., Wang, L., Wang, J., Liu, Y.: Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. ACM Transactions on Graphics (TOG) 41(6), 1-10 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.785, + 0.787, + 0.841 + ], + "angle": 0, + "content": "43. Sun, J., Wang, X., Wang, L., Li, X., Zhang, Y., Zhang, H., Liu, Y.: Next3d: Generative neural texture rasterization for 3d-aware head avatars. 
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.841 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "18" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.333, + 0.127 + ], + "angle": 0, + "content": "Xuet al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.147, + 0.787, + 0.203 + ], + "angle": 0, + "content": "44. Wang, D., Chandran, P., Zoss, G., Bradley, D., Gotardo, P.: Morf: Morphable radiance fields for multiview neural head modeling. In: ACM SIGGRAPH 2022 Conference Proceedings. SIGGRAPH '22, Association for Computing Machinery, New York, NY, USA (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.203, + 0.787, + 0.23 + ], + "angle": 0, + "content": "45. Wang, J., Xie, J.C., Li, X., Xu, F., Pun, C.M., Gao, H.: Gaussianhead: High-fidelity head avatars with learnable gaussian derivation (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.23, + 0.787, + 0.284 + ], + "angle": 0, + "content": "46. Wang, K., Wu, Q., Song, L., Yang, Z., Wu, W., Qian, C., He, R., Qiao, Y., Loy, C.C.: Mead: A large-scale audio-visual dataset for emotional talking-face generation. In: Proceedings of the European Conference on Computer Vision (ECCV) (August 2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.284, + 0.787, + 0.338 + ], + "angle": 0, + "content": "47. Wang, L., Chen, Z., Yu, T., Ma, C., Li, L., Liu, Y.: Faceverse: a fine-grained and detail-controllable 3d face morphable model from a hybrid dataset. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (Jun 2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.339, + 0.787, + 0.366 + ], + "angle": 0, + "content": "48. Wu, G., Yi, T., Fang, J., Xie, L., Zhang, X., Wei, W., Liu, W., Tian, Q., Wang, X.: 4d gaussian splatting for real-time dynamic scene rendering (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.366, + 0.787, + 0.42 + ], + "angle": 0, + "content": "49. Wu, S., Yan, Y., Li, Y., Cheng, Y., Zhu, W., Gao, K., Li, X., Zhai, G.: Ganhead: Towards generative animatable neural head avatars. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 437-447 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.42, + 0.787, + 0.461 + ], + "angle": 0, + "content": "50. Wu, Y., Deng, Y., Yang, J., Wei, F., Qifeng, C., Tong, X.: Anifacegan: Animatable 3d-aware face image generation for video avatars. In: Advances in Neural Information Processing Systems (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.461, + 0.787, + 0.501 + ], + "angle": 0, + "content": "51. Wu, Y., Xu, S., Xiang, J., Wei, F., Chen, Q., Yang, J., Tong, X.: Aniportraitgan: Animatable 3d portrait generation from 2d image collections. In: SIGGRAPH Asia 2023 Conference Proceedings (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.501, + 0.787, + 0.555 + ], + "angle": 0, + "content": "52. Xiang, J., Yang, J., Deng, Y., Tong, X.: Gram-hd: 3d-consistent image generation at high resolution with generative radiance manifolds. Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) pp. 2195-2205 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.555, + 0.787, + 0.597 + ], + "angle": 0, + "content": "53. 
Xiang, J., Gao, X., Guo, Y., Zhang, J.: Flashavatar: High-fidelity head avatar with efficient gaussian embedding. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.597, + 0.787, + 0.651 + ], + "angle": 0, + "content": "54. Xu, Y., Chen, B., Li, Z., Zhang, H., Wang, L., Zheng, Z., Liu, Y.: Gaussian head avatar: Ultra high-fidelity head avatar via dynamic gaussians. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.651, + 0.787, + 0.692 + ], + "angle": 0, + "content": "55. Xu, Y., Wang, L., Zhao, X., Zhang, H., Liu, Y.: Avatarmav: Fast 3d head avatar reconstruction using motion-aware neural voxels. In: ACM SIGGRAPH 2023 Conference Proceedings (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.692, + 0.787, + 0.732 + ], + "angle": 0, + "content": "56. Xu, Y., Zhang, H., Wang, L., Zhao, X., Han, H., Guojun, Q., Liu, Y.: Latentavatar: Learning latent expression code for expressive neural head avatar. In: ACM SIGGRAPH 2023 Conference Proceedings (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.732, + 0.787, + 0.759 + ], + "angle": 0, + "content": "57. Yang, Z., Yang, H., Pan, Z., Zhu, X., Zhang, L.: Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.759, + 0.787, + 0.786 + ], + "angle": 0, + "content": "58. Yang, Z., Gao, X., Zhou, W., Jiao, S., Zhang, Y., Jin, X.: Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction (June 2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.786, + 0.787, + 0.84 + ], + "angle": 0, + "content": "59. Yenamandra, T., Tewari, A., Bernard, F., Seidel, H., Elgharib, M., Cremers, D., Theobalt, C.: i3dmm: Deep implicit 3d morphable model of human heads. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (June 2021)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.481, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "3D Gaussian Parametric Head Model" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "19" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.203 + ], + "angle": 0, + "content": "60. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 586-595 (June 2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.204, + 0.788, + 0.245 + ], + "angle": 0, + "content": "61. Zhao, X., Wang, L., Sun, J., Zhang, H., Suo, J., Liu, Y.: Havatar: High-fidelity head avatar via facial model conditioned neural radiance field. ACM Trans. Graph. (oct 2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.245, + 0.788, + 0.3 + ], + "angle": 0, + "content": "62. Zheng, Y., Abrevaya, V.F., Bühler, M.C., Chen, X., Black, M.J., Hilliges, O.: I m avatar: Implicit morphable head avatars from videos. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 
13535-13545 (June 2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.3, + 0.788, + 0.342 + ], + "angle": 0, + "content": "63. Zheng, Y., Yifan, W., Wetzstein, G., Black, M.J., Hilliges, O.: Pointavatar: Deformable point-based head avatars from videos. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.342, + 0.788, + 0.383 + ], + "angle": 0, + "content": "64. Zhuang, Y., Zhu, H., Sun, X., Cao, X.: Mofanerf: Morphable facial neural radiance field. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.383, + 0.788, + 0.398 + ], + "angle": 0, + "content": "65. Zielonka, W., Bolkart, T., Thies, J.: Instant volumetric head avatars (June 2023)" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.398 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/3D Gaussian Parametric Head Model/0c0538a8-3292-41a6-ad76-710b8fc8de37_origin.pdf b/2024/3D Gaussian Parametric Head Model/0c0538a8-3292-41a6-ad76-710b8fc8de37_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ed6feb75aa2839904349126d79647d058606ff1a --- /dev/null +++ b/2024/3D Gaussian Parametric Head Model/0c0538a8-3292-41a6-ad76-710b8fc8de37_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ba032a5773e87aae2f4120cb2d0ff93d6f040dd7e2253ad60e9819da3b23e75 +size 3354113 diff --git a/2024/3D Gaussian Parametric Head Model/full.md b/2024/3D Gaussian Parametric Head Model/full.md new file mode 100644 index 0000000000000000000000000000000000000000..4d0106df59c233c076c7b96d9090d5be3c8bfc37 --- /dev/null +++ b/2024/3D Gaussian Parametric Head Model/full.md @@ -0,0 +1,371 @@ +# 3D Gaussian Parametric Head Model + +Yuelang $\mathrm{Xu}^{1}$ , Lizhen Wang $^{1}$ , Zerong Zheng $^{2}$ , Zhaoqi $\mathrm{Su}^{1}$ , and Yebin $\mathrm{Liu}^{1}$ + +$^{1}$ Tsinghua University, Beijing, China + +2 NNKosmos, Hangzhou, China + +Abstract. Creating high-fidelity 3D human head avatars is crucial for applications in VR/AR, telepresence, digital human interfaces, and film production. Recent advances have leveraged morphable face models to generate animated head avatars from easily accessible data, representing varying identities and expressions within a low-dimensional parametric space. However, existing methods often struggle with modeling complex appearance details, e.g., hairstyles and accessories, and suffer from low rendering quality and efficiency. This paper introduces a novel approach, 3D Gaussian Parametric Head Model, which employs 3D Gaussians to accurately represent the complexities of the human head, allowing precise control over both identity and expression. Additionally, it enables seamless face portrait interpolation and the reconstruction of detailed head avatars from a single image. Unlike previous methods, the Gaussian model can handle intricate details, enabling realistic representations of varying appearances and complex expressions. Furthermore, this paper presents a well-designed training framework to ensure smooth convergence, providing a guarantee for learning the rich content. Our method achieves high-quality, photo-realistic rendering with real-time efficiency, making it a valuable contribution to the field of parametric head models. 
+ +Keywords: 3D Gaussian $\cdot$ Head Avatar $\cdot$ Parametric Model + +# 1 Introduction + +Creating high-fidelity 3D human head avatars holds significant importance across various fields, including VR/AR, telepresence, digital human interfaces, and film production. The automatic generation of such avatars has been a focal point in computer vision research for many years. Recent methods [12,13,17,38,55,56,61-63,65] can create an animated head avatar through conveniently collected data such as monocular video data or even a picture [22, 26]. Serving as the most fundamental tool in these methods, the 3D morphable models (3DMM) [14, 25], which represent varying identities and expressions within a low-dimensional space, have been proven to be a highly successful avenue in addressing this challenging problem. + +Since the traditional parametric 3DMMs are typically limited by the topology of the underlying template mesh and only focus on the face part, some works [15,16,28,59] propose to use implicit Signed Distance Field (SDF) as the + +![](images/18d9724a6f94017aeaf954badf8e38047fefbcd5fff7e8310c479cef2a0005b3.jpg) +Fig. 1: We utilize hybrid datasets comprising captured multi-view video data and rendered image data from 3D scans for training our model. The trained model can be manipulated using decoupled identity and expression codes to produce a diverse array of high-fidelity head models. When presented with an image, our model can be adjusted to reconstruct the portrait in the image and edit the expression according to any other desired expressions. + +geometric representation to model the entire head. Despite their flexibility, these methods fall short in recovering high-frequency geometric and texture details like hairstyles, glasses or accessories. On the other end of the spectrum, Neural Radiance Field (NeRF) [34] based methods [19,64] learn parametric head models by directly synthesizing images, thus eliminating the need of geometry modeling. However, NeRF is built upon volumetric rendering, which involves sampling and integrating points distributed throughout space. Therefore, NeRF-based methods typically suffer from low rendering efficiency and have to trade it off with rendering resolution, thereby greatly reducing rendering quality. Moreover, skipping geometric reconstruction would probably lead to poor 3D consistency. + +More recently, 3D Gaussian Splatting (3DGS) [21], which uses explicit Gaussian ellipsoids to represent 3D scenes, has attracted significant attention from the research community. Experiments have verified the superior quality of the rendered results and excellent rendering efficiency compared to previous NeRF-based or surface-based methods even on dynamic scenes [32,48,57,58]. Motivated by this progress, we propose a novel 3D Gaussian Parametric Head Model, which, for the first time, marries the power of 3DGS with the challenging task of parametric head modeling. Our 3D gaussian parametric head model decouples the control signals of the head into the latent spaces of identity and expression, as is also done in SDF-based face model NPHM [15]. These latent spaces are then mapped to the offsets of the Gaussian positions, which effectively represent the variance of shape and appearance of different identities and expressions. Benefiting from the differentiability of Gaussian splatting, our model can be learned from multi-view video data corpus in an end-to-end manner, without relying on geometry supervision. 
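+
+To make this decoupled control concrete, the following is a minimal PyTorch sketch of how identity and expression codes can be mapped to per-Gaussian position offsets, in the spirit of the MLPs $f_{inj}(\cdot)$, $f_{id}(\cdot)$ and $f_{exp}(\cdot)$ introduced in Section 3.2. The point count, code widths and layer sizes are illustrative assumptions, not the configuration actually used.
+
+```python
+import torch
+import torch.nn as nn
+
+def mlp(dims):
+    # Small fully connected network with ReLU activations between layers.
+    layers = []
+    for i in range(len(dims) - 2):
+        layers += [nn.Linear(dims[i], dims[i + 1]), nn.ReLU()]
+    layers.append(nn.Linear(dims[-2], dims[-1]))
+    return nn.Sequential(*layers)
+
+class DecoupledGaussianDeformation(nn.Module):
+    """Sketch: identity/expression codes -> per-point position offsets.
+
+    All sizes below are illustrative; the excerpt does not publish
+    exact layer widths.
+    """
+
+    def __init__(self, num_points=100_000, feat_dim=32, id_dim=256, exp_dim=64):
+        super().__init__()
+        self.X0 = nn.Parameter(0.01 * torch.randn(num_points, 3))      # shared mean positions
+        self.gamma0 = nn.Parameter(torch.zeros(num_points, feat_dim))  # per-point features
+        self.f_inj = mlp([feat_dim + id_dim, 128, feat_dim])  # inject identity into features
+        self.f_id = mlp([feat_dim, 64, 3])                    # identity-related displacement
+        self.f_exp = mlp([feat_dim + exp_dim, 64, 3])         # expression-related displacement
+
+    def forward(self, z_id, z_exp):
+        # z_id: (1, id_dim), z_exp: (1, exp_dim); broadcast to every point.
+        n = self.X0.shape[0]
+        h = self.f_inj(torch.cat([self.gamma0, z_id.expand(n, -1)], dim=-1))
+        x_can = self.X0 + self.f_id(h) + self.f_exp(torch.cat([h, z_exp.expand(n, -1)], dim=-1))
+        return x_can, h  # canonical positions and identity-conditioned features
+```
+
+Predicting offsets from a shared mean point set, rather than absolute positions, is what lets one network express many identities and expressions while keeping the optimization stable.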
+ +Unfortunately, training our 3D Gaussian parametric head model is not quite straightforward, because Gaussian ellipsoids are unstructured and each Gaussian ellipsoid has its own independent learnable attribute. Such a characteristic makes 3DGS powerful in overfitting a specific object or scene, but poses great challenges for generative head modeling. Without proper initialization and regularization, + +the learned parametric head model may suffer from unstable training or a large number of Gaussian points becoming redundant and noisy, as shown in Fig. 4. + +To overcome these challenges, we propose a well-designed two-stage training strategy to ensure smooth convergence of our model training. Specifically, we first roughly train all the networks on a mesh-based guiding model. Subsequently, the network parameters are migrated to the Gaussian model, and all Gaussian points are initialized with the trained mesh geometry to ensure that they are located near the actual surface. Compared to naive initialization with FLAME [25], our initialization strategy leads to a better guess of the positions of Gaussian points, making the subsequent training of the model converge stably and the areas like hairs better recovered. Moreover, we propose to use 3D landmark loss to supervise the deformation of the model learning expressions, which can speed up the convergence and avoid artifacts under exaggerated expressions. Lastly, our method supports training from both 3D head scans and multi-view 2D face datasets, which enhances the versatility and comprehensiveness of facial data collection and model training. + +After training on large corpus of multi-view head videos, our parametric Gaussian head model can generate photorealistic images that accurately depict the diverse range of facial appearances, naturally handling complex and exaggerated expressions, while also enabling real-time rendering. Additionally, our method supports single-image fitting and surpasses previous techniques in both reconstruction accuracy and identity consistency. Furthermore, the model resulting from our fitting process allows for the control of various expressions while maintaining naturalness and consistent identity even under exaggerated expressions. The contributions of our method can be summarized as: + +- We propose 3D Gaussian Parametric Head Model, a novel parametric head model which utilizes 3D Gaussians as the representation and enables photorealistic rendering quality and real-time rendering speed. +- We propose a well-designed training strategy to ensure that the Gaussian model converges stably while learning rich appearance details and complex expressions efficiently. +- Our 3D Gaussian Parametric Head Model enables the generation of a detailed, high-quality face avatar from a single given image, as well as performing expression and identity editing upon it. + +# 2 Related Work + +Parametric Head Models. Parametric head models are used to represent facial features, expressions, and identities effectively and efficiently. They allow for the creation of realistic human faces with adjustable parameters, making them essential in computer graphics, animation, and virtual reality. Therefore, research in this field has always been a hot topic. Traditional 3D Morphable Models (3DMM) [2,6,14,25,47] are constructed by non-rigidly registering a template mesh with fixed topology to a series of 3D scans. 
Through this registration process, a 3DMM can be computed using dimensionality reduction techniques such + +as principal component analysis (PCA). The resulting parametric space captures the variations in facial geometry and appearance across a population. However, while 3DMMs offer a powerful way to represent faces, they do have limitations. These models rely heavily on the correspondence between the 3D scans and the template for accurate fitting and may struggle to represent local surface details like wrinkles or hairstyles that deviate significantly from the template mesh. Recent advances in implicit representation have led to the great development of neural parametric head models. Some methods [15, 16, 49, 59] propose implicit Signed Distance Field (SDF) based head models, which are not constrained by topology thus can recover more complex content like hair compared to previous mesh-based Methods. Other methods [3, 19, 44, 64] propose to use NeRF [34] as the representation of the parametric head models, which can directly synthesize photorealistic images without geometric reconstruction. Cao, et al. [5] use a hybrid representation [30] of mesh and NeRF to train their model on unpublished large-scale light stage data. However, rendering efficiency is typically low in NeRF-based methods, often resulting in a trade-off with rendering resolution. + +3D GAN based Head Models. 3D Generative Adversarial Networks (GANs) have revolutionized the field of computer vision, particularly in the domain of human head and face modeling, enabling the generation of face avatars from input images. Traditional methods often require labor-intensive manual work or rely on multi-view images to create 3D models. 3D GANs as a more automated and data-driven approach, which are just trained on single-view 2D images but generate detailed and realistic 3D models of human head [7-9, 11, 18, 35, 52]. Panohead [1] additionally introduces images of hairstyles on the back of characters and trains a full-head generative model. Based on the previous methods, IDE-3D [42] proposes to use semantic map to edit the 3D head model. Next3D [43] and AntiFaceGAN [50] extend to use the FLAME model [25] to condition the generated head model, so that the expression and pose of the generated head model can be controlled. AntiPortraitGAN [51] further replaces FLAME model with SMPLX model [36] to generate upper body avatars, thus the shoulders and the neck can also be controlled. These 3D GAN-based models primarily leverage the coarse FLAME model for expression control, often leading to a loss of expression details in the generated faces. In contrast, our method directly learns the expression distribution from the dataset, capturing more facial appearance details. + +3D Gaussians. Recently, 3D Gaussian splatting [21] has shown superior performance compared to NeRF, excelling in both novel view synthesis quality and rendering speed. Several methods have expanded Gaussian representation to dynamic scene reconstruction [32, 48, 57, 58]. For human body avatar modeling, recent approaches [20, 27] propose training a 3D Gaussian avatar animated by SMPL [31] or a skeleton from multi-view videos, surpassing previous methods in rendering quality and efficiency. In the realm of human head avatar modeling, recent techniques [10, 23, 33, 37, 37, 39, 40, 45, 53, 54] also utilize 3D Gaussians to create high-fidelity and efficient head avatars. 
These approaches center on the creation of a high-fidelity person-specific avatar using data of a single person. In + +![](images/f502bb7b723709cb2e4afb2d625e1e81f0d5db3f03a563a244280467a4815d2a.jpg) +Fig. 2: The pipeline of our method. Our training strategy can be divided into a Guiding Geometry Model for initialization, and a final 3D Gaussian Parametric Head Model. Deformations of each model are further decoupled into identity-related and expression-related deformations. Rendering involves using DMTet to transform the initial model into a mesh and 3D Gaussian Splatting for the Gaussian model. Features from both models are finally upsampled to high-resolution portrait images through a convolutional network $\Psi$ . During inference, our output exclusively comes from the Gaussian model. + +contrast, our method focuses on a versatile prior model that can accommodate varying appearances. Once trained, our model is also capable of person-specific avatar reconstruction by fitting to the input image data provided. + +# 3 Method + +In this section, we present the 3D Gaussian Parametric Head Model. In contrast to previous mesh-based or NeRF-based models, initializing and training Gaussian-based models pose distinct challenges. This section introduces the dataset and preprocessing, the carefully designed guiding geometry model, the Gaussian Parametric Head Model, and outlines their respective training processes. Additionally, we will also provide the training details and demonstrate how to utilize our method when given a single input image. + +# 3.1 Data Preprocessing + +We used three datasets for our model training, including a multi-view video dataset NeRSemble [24], and two 3D scans datasets NPHM [15] and FaceV-erse [47]. We do not use the 3D geometry of the scans directly, but render them + +into multi-view images and use only the images form the 3 datasets as supervision. In order to better utilize these three different datasets, we need to do preprocessing. First, we resize the images to 512 resolution and adjust the camera parameters. Then, we use BackgroundMattingV2 [29] to extract the foreground characters in the NeRSemble dataset and record the masks. This step is not required for the two synthetic datasets. Next, we use face alignment [4] to detect 2D landmarks in all the images. Through these 2D landmarks, we fit a Basel Face Model (BFM) [14] for each expression of each identity, and record the head pose and 3D landmarks of the BFM. We will use the above processed camera parameters, images, masks, head pose of BFM and 3D landmarks of BFM to train our model. + +# 3.2 Model Representation + +The representation of Gaussian distribution poses challenges due to its unordered and unstructured nature, leading to difficulties in the continuous spread of gradients to neighboring points in space during backpropagation. This often results in convergence failure when Gaussians are randomly initialized. On the other hand, surface-based representations such as mesh are just suitable for rough geometry learning. A direct idea is to utilize an existing 3DMM, such as FLAME [25], as the initial position for the points in 3D Gaussian splatting [21]. However, this coarse initialization still fails to converge the positions of 3D points to the correct locations, as shown in Fig. 4. The network tends to alter the shape of the ellipsoid to achieve a suitable fitting result, leading to inaccurate geometry of the point cloud and blurriness in the rendered image. 
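+
+For contrast, the naive initialization discussed above — also used as the baseline in the ablation of Section 4.2, which scatters 100,000 points near a fitted FLAME surface — can be sketched as follows. The `trimesh` calls are standard, but the template path and the noise scale are assumptions for illustration.
+
+```python
+import numpy as np
+import trimesh
+
+def sample_near_surface(mesh_path, count=100_000, sigma=0.005, seed=0):
+    """Scatter points near a template mesh surface (FLAME-style baseline).
+
+    This is the coarse initialization the text argues is insufficient:
+    points land near, but not evenly on, the true head surface.
+    """
+    rng = np.random.default_rng(seed)
+    mesh = trimesh.load(mesh_path, process=False)
+    points, face_idx = trimesh.sample.sample_surface(mesh, count)
+    # Jitter each sample along its face normal so the cloud has thickness.
+    normals = mesh.face_normals[face_idx]
+    offsets = rng.normal(scale=sigma, size=(count, 1))
+    return points + offsets * normals
+
+# X0 = sample_near_surface("flame_template.obj")  # hypothetical asset path
+```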
+ +To address this problem, a more detailed initialization process is necessary for capturing the diverse head variations using 3D Gaussian splatting. Specifically, we draw inspiration from Gaussian Head Avatar [54] and leverage the implicit signed distance field (SDF) representation to train a guiding geometry model. This guiding geometry model serves as the initial value for the Gaussian model, providing a more effective starting point for the optimization process. We define the initial model as Guiding Geometry Model and the refined model as 3D Gaussian Parametric Head Model. + +Guiding Geometry Model. The guiding geometry model receives an identity code $z^{id}$ and an expression code $z^{exp}$ as input, producing a mesh with vertices $V$ , faces $F$ , and per-vertex color $C$ that aligns with the specified identity and expression. To achieve this, we use an MLP denoted as $f_{mean}(\cdot)$ to implicitly model the SDF, which represents the mean geometry: + +$$ +s, \gamma = \boldsymbol {f} _ {\text {m e a n}} (x), \tag {1} +$$ + +where $s$ denotes the SDF value, $\gamma$ denotes the feature from the last layer and $x$ denotes the input position. Then, we convert the implicit SDF through Deep Marching Tetrahedra (DMTet) [41] into an explicit mesh with vertex positions $V_{0}$ , per-vertex feature $\Gamma$ and faces $F$ . Next, we need to transform the mean shape into a neutral-expression shape on condition of the input identity code + +$z^{id}$ . To inject identity information into the vertices of the mesh, we first use an injection MLP $f_{inj}(\cdot)$ , which takes the identity code $z^{id}$ and the per-vertex feature $\Gamma$ as input and produces the identity-conditioned per-vertex feature vectors $H = f_{inj}(z^{id},\Gamma)$ . Subsequently, utilizing a tiny MLP $f_{id}(\cdot)$ , we predict the displacement $\delta V_{id}$ for each vertex. This displacement is used to transform the mean shape into the neutral-expression shape conditioned on the id code $z^{id}$ . + +After completing deformations related to identity, the next step is to capture the deformation induced by facial expressions. We introduce another tiny MLP $f_{exp}(\cdot)$ . This MLP takes the feature vectors $H$ obtained in the previous step and the expression code $z^{exp}$ as input, and the output is the displacement $\delta V_{exp}$ for each vertex. Using this displacement, we update the vertex positions to $V_{can}$ . Additionally, we feed the same feature vectors $H$ and expression code $z^{exp}$ to a color MLP, $f_{col}(\cdot)$ , to predict the 32-channel color $C$ for each vertex. The vertex positions to $V_{can}$ and 32-channel color $C$ can be described as: + +$$ +V _ {c a n} = V _ {0} + \boldsymbol {f} _ {i d} (H) + \boldsymbol {f} _ {e x p} (H, \boldsymbol {z} ^ {e x p}), C = \boldsymbol {f} _ {c o l} (H, \boldsymbol {z} ^ {e x p}). \tag {2} +$$ + +Finally, we utilize the estimated head pose parameters $R$ and $T$ obtained during data preprocessing to transform the mesh from the canonical space to the world space $V = R \cdot V_{can} + T$ . After generating the final vertex positions, colors and faces $\{V, C, F\}$ of the mesh, we render the mesh into a 256-resolution 32-channel feature map $I_F$ and a mask $M$ through differentiable rasterization with a given camera pose. Subsequently, the feature map is interpreted as a 512-resolution RGB $I_{hr}$ image through a lightweight convolutional upsampling network $\Psi(\cdot)$ , as shown in Fig. 2. + +3D Gaussian Parametric Head Model. 
The Gaussian model also takes an identity code $z^{id}$ and an expression code $z^{exp}$ as input, producing the positions $X$ , color $C$ , scale $S$ , rotation $Q$ and opacity $A$ of the 3D Gaussians. Similar to the guiding geometry model, we initially maintain an overall mean point cloud, with the mean positions $X_0$ . However, we no longer generate the per-vertex feature $\Gamma$ through $f_{mean}(x)$ . Instead, we directly generate it at once and bind it to the Gaussian points as estimizable variables $\Gamma_0$ . This is possible since the number of Gaussian points is fixed at this stage. Then we need to transform the mean point cloud into a neutral-expression point cloud, conditioned by the id code $z^{id}$ . To achieve this, we utilize the same injection MLP $f_{inj}(\cdot)$ and identity deformation MLP $f_{id}(\cdot)$ defined in the guiding geometry model, which can generate feature vectors $H = f_{inj}(z^{id},\Gamma_0)$ that encode identity information for each point and predict the identity-related displacement of each point. Then, we also need to predict the expression code $z^{exp}$ -conditioned displacement. The resulting positions $X_{can}$ and the 32-channel color $C$ of each point, similar to the approach presented in the guiding geometry model, can be described as: + +$$ +X _ {c a n} = \boldsymbol {X} _ {\mathbf {0}} + \boldsymbol {f} _ {i d} (H). + \boldsymbol {f} _ {e x p} (H, \boldsymbol {z} ^ {e x p}), C = \boldsymbol {f} _ {c o l} (H, \boldsymbol {z} ^ {e x p}). \tag {3} +$$ + +Unlike the representations of SDF and DMTet, Gaussians have additional attributes that need to be predicted. Here, we introduce a new MLP to predict Gaussian attributes in the canonical space, including the scale $S$ , rotation $Q_{can}$ , + +and opacity $A$ . In order to ensure the stability of the generated results, we refrain from directly predicting these values. Instead, we predict their offsets $\{\delta S,\delta Q,\delta A\}$ relative to the overall mean values $\{S_0,Q_0,A_0\}$ : + +$$ +\{S, Q _ {c a n}, A \} = \left\{S _ {0}, Q _ {0}, A _ {0} \right\} + \boldsymbol {f} _ {a t t} (H, z ^ {e x p}). \tag {4} +$$ + +Following this, we utilize the estimated head pose parameters $R$ and $T$ , obtained during data preprocessing, to transform the canonical space variables $X_{can}$ and $Q_{can}$ into the world space: $X = R \cdot X_{can} + T$ , $Q = R \cdot Q_{can}$ . For model rendering, we leverage differentiable rendering [21] and neural rendering techniques to generate images. The generated 3D Gaussian parameters, which include $\{X, C, S, Q, A\}$ , are conditioned by the identity code $z^{id}$ and expression code $z^{exp}$ . Finally, we input this feature map into the same upsampling network $\Psi(\cdot)$ of the guiding geometry model to generate a 512-resolution RGB image. + +In the 3D Gaussian Parametric Head Model, we leverage the previously trained guiding geometry model to initialize our variables and networks, rather than initiating them randomly and training from scratch. Specifically, we initialize the Gaussian positions $\mathbf{X_0}$ using the vertex positions of the mean mesh $V_{0}$ . Meanwhile, we generate the per-vertex feature $\varGamma$ from $f_{mean}(x)$ at the beginning and bind it to the points as an estimizable variable $\varGamma_{0}$ as described above. 
Additionally, all identity codes $z^{id}$ , expression codes $z^{exp}$ , and the networks $\{\pmb{f}_{\text{inj}}(\cdot), \pmb{f}_{\text{id}}(\cdot), \pmb{f}_{\text{exp}}(\cdot), \pmb{f}_{\text{col}}(\cdot), \pmb{\Psi}(\cdot)\}$ are directly inherited from the guiding geometry model. Note that, the attribute MLP $f_{att}(\cdot)$ is a newly introduced network, hence it is initialized randomly. Finally, the overall mean values of the Gaussian attributes $\{S_0, Q_0, A_0\}$ are initialized following the original 3D Gaussian Splatting [21]. + +# 3.3 Loss Functions + +To ensure the accurate convergence of the model, we employ various loss functions as constraints, including the basic photometric loss and silhouette loss, to enforce consistency with ground truth of both the rendered high-resolution images $I_{hr}$ and the rendered masks $M$ : + +$$ +\mathcal {L} _ {h r} = \left| \left| I _ {h r} - I _ {g t} \right| \right| _ {1}, \mathcal {L} _ {s i l} = I O U (M, M _ {g t}), \tag {5} +$$ + +with $I_{gt}$ representing the ground truth RGB images, $M_{gt}$ representing the ground truth masks. We further encourage the first three channels of the low-resolution feature map $I_{lr}$ to closely match the ground-truth RGB image $I_{gt}$ by introducing an $L_{1}$ loss: + +$$ +\mathcal {L} _ {l r} = \left| \left| I _ {l r} - I _ {g t} \right| \right| _ {1}. \tag {6} +$$ + +The geometric deformation caused by expressions is typically complex and cannot be learned through image supervision alone. Therefore, we provide additional coarse supervision for expression deformation learning using 3D landmarks. Specifically, we define the 3D landmarks $P_0$ in the canonical space, and then predict their displacements and transform them to the world space as $P$ + +just like the transformation of the original vertices $V_{0}$ above. Then, we construct the landmark loss function: + +$$ +\mathcal {L} _ {l m k} = | | \boldsymbol {P} - \boldsymbol {P} _ {g t} | | _ {2}, \tag {7} +$$ + +with $P_{gt}$ denoting the ground truth 3D landmarks, which are estimated by fitting a BFM model to the training data during preprocessing. + +Moreover, to guarantee the decoupling of identity and expression deformations learned by the model and minimize redundancy, we introduce the following regularization loss function that aims to minimize the magnitude of both deformations: + +$$ +\mathcal {L} _ {r e g} = \left| \left| \delta V _ {i d} \right| \right| _ {2} + \left| \left| \delta V _ {e x p} \right| \right| _ {2}. \tag {8} +$$ + +During the training of the Guiding Geometry Model, we also construct a Laplacian smooth term $\mathcal{L}_{lap}$ to penalize surface noise or breaks. Overall, the total loss function is formulated as: + +$$ +\mathcal {L} = \mathcal {L} _ {h r} + \lambda_ {s i l} \mathcal {L} _ {s i l} + \lambda_ {l r} \mathcal {L} _ {l r} + \lambda_ {l m k} \mathcal {L} _ {l m k} + \lambda_ {r e g} \mathcal {L} _ {r e g} + \lambda_ {l a p} \mathcal {L} _ {l a p} \tag {9} +$$ + +with all the $\lambda$ denoting the weights of each term. In practice, we set $\lambda_{sil} = 0.1$ , $\lambda_{lr} = 0.1$ , $\lambda_{lmk} = 0.1$ , $\lambda_{reg} = 0.001$ and $\lambda_{lap} = 100$ . During training, we jointly optimize the bolded variables above: $\{z^{id}, z^{exp}, f_{inj}(\cdot), f_{mean}(\cdot), f_{id}(\cdot), f_{exp}(\cdot), f_{col}(\cdot), \Psi(\cdot), P_0\}$ . Notably, the defined canonical 3D landmarks $P_0$ are initialized by computing the average of the estimated 3D landmarks from the training dataset. 
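+
+Assembling the guiding-model objective of Eqn. 9 is mechanical once the individual terms are rendered; below is a hedged sketch using the weights stated above. The soft-IoU form of the silhouette term and the NCHW tensor layout are our assumptions, since Eqn. 5 only names $\mathcal{L}_{sil} = IOU(M, M_{gt})$.
+
+```python
+import torch
+import torch.nn.functional as F
+
+# Weights as stated for the guiding geometry model (Eqn. 9).
+W = dict(sil=0.1, lr=0.1, lmk=0.1, reg=0.001, lap=100.0)
+
+def soft_iou_loss(mask, mask_gt, eps=1e-6):
+    # One common reading of an IoU-based silhouette loss: 1 - soft IoU.
+    inter = (mask * mask_gt).sum()
+    union = (mask + mask_gt - mask * mask_gt).sum()
+    return 1.0 - inter / (union + eps)
+
+def guiding_loss(I_hr, I_gt, I_lr, M, M_gt, P, P_gt, dV_id, dV_exp, lap):
+    loss = F.l1_loss(I_hr, I_gt)                       # L_hr
+    loss += W["sil"] * soft_iou_loss(M, M_gt)          # L_sil
+    loss += W["lr"] * F.l1_loss(I_lr[:, :3], I_gt)     # first 3 feature channels vs. RGB
+    loss += W["lmk"] * (P - P_gt).norm(dim=-1).mean()  # 3D landmark guidance
+    loss += W["reg"] * (dV_id.norm(dim=-1).mean()
+                        + dV_exp.norm(dim=-1).mean())  # deformation regularizer
+    loss += W["lap"] * lap                             # Laplacian smoothness term
+    return loss
+```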
+ +During the training stage of the 3D Gaussian Parametric Head Model, we also calculate the perceptual loss [60] to encourage the model to learn more high-frequency details $\mathcal{L}_{vgg} = VGG(I_{hr},I_{gt})$ . Similar to training the guiding geometry model, we enforce the first three channels of the feature map to be RGB channels as Eqn. 6, introduce landmarks guidance terms as Eqn. 7 and the regular term for the displacement of points as Eqn. 8. Consequently, the overall loss function can be formulated as: + +$$ +\mathcal {L} = \mathcal {L} _ {h r} + \lambda_ {v g g} \mathcal {L} _ {v g g} + \lambda_ {l r} \mathcal {L} _ {l r} + \lambda_ {l m k} \mathcal {L} _ {l m k} + \lambda_ {r e g} \mathcal {L} _ {r e g} \tag {10} +$$ + +with the weights $\lambda_{vgg} = 0.1$ , $\lambda_{lr} = 0.1$ , $\lambda_{lmk} = 0.1$ and $\lambda_{reg} = 0.001$ . In this training stage, we also jointly optimize all the bolded variables and networks mentioned above, including the overall mean positions and attributes of the Gaussians and the 3D landmarks: $\{z^{id}, z^{exp}, f_{inj}(\cdot), f_{id}(\cdot), f_{exp}(\cdot), f_{col}(\cdot), f_{att}(\cdot), \Psi(\cdot), X_0, \Gamma_0, S_0, Q_0, A_0, P_0\}$ . + +# 3.4 Inference Details + +Image-based Fitting. When a single RGB portrait image is input, we first align the image according to the processing rules of the training set. Subsequently, we employ gradient descent to fit the image rendered by the 3D Gaussian Parametric Head Model to this input image using the photometric loss $\mathcal{L}_{lr}$ + +and $\mathcal{L}_{hr}$ defined in Eqn. 10. This process helps regress the identity code $z^{id}$ and expression code $z^{exp}$ . We just optimize for 200 iterations with learning rate $1 \times 10^{-3}$ for both latent codes. Following this, we fix the latent codes $z^{id}$ and $z^{exp}$ , such that the variables $H$ , $X_{can}$ are also fixed. We further optimize the color MLP $f_{col}(\cdot)$ and the canonical positions $X_{can}$ which represent the geometry of the current specific subject, using the same loss function. In this step, we only optimize for 100 iterations with learning rate $1 \times 10^{-4}$ for both $f_{col}(\cdot)$ and $X_{can}$ . This optimization process aims to add some details that cannot be recovered by the trained model itself, ultimately resulting in the reconstructed head model. The entire process has a total of 300 iterations and takes only 30 seconds. + +Expression Editing. Given a source portrait image providing the subject whose expression is to be edited and a target portrait image providing the target expression. We first obtain the head model of the source subject through optimization as the above-mentioned Image-based Fitting strategy. Then for the target portrait image, we also obtain the head model and corresponding expression code in the same way. Finally, we input the target expression code to the head model of the source subject, so that the expression of the source subject can be edited to the target one. + +# 4 Experiments + +# 4.1 Datasets + +NeRSemble dataset contains over 260 different identities, and collects 72fps multi-view videos from 16 synchronized cameras for each identity. The total length of the videos of a single identity is approximately 6000-11000 frames. In the experiment, we selected 140 of the identities for training and the rest for evaluation. For each identity video, we selected about 150 frames from all 16 views as training data. + +NPHM dataset contains 5200 3D human head scans. 
# 4 Experiments

# 4.1 Datasets

NeRSemble dataset contains over 260 different identities, with 72 fps multi-view videos captured by 16 synchronized cameras for each identity. The total video length for a single identity is approximately 6000-11000 frames. In our experiments, we selected 140 of the identities for training and the rest for evaluation. For each identity, we selected about 150 frames from all 16 views as training data.

NPHM dataset contains 5200 3D human head scans from 255 different identities, each with about 20 different expressions. We selected approximately 1600 scans of 80 identities for training. Since our method uses 2D images as training supervision, we render each scan from 80 different views to generate synthetic image data and record the camera parameters and the masks.

FaceVerse dataset is an East Asian human head scan dataset. It contains 2310 scans from 110 different identities, each with 21 expressions. We selected 1620 scans of 80 identities for training. Similarly, for each scan we render multi-view synthetic image data from 80 different views and record the camera parameters and the masks.

# 4.2 Evaluation

Disentanglement. We test the performance of the 3D Gaussian Parametric Head Model under the control of different identity codes and different expression codes.

![](images/3ea98e96476c7b75b15eb65f4f21b8922820ee3ee9f22e271e141cadf072dca2.jpg)
ID1

![](images/4c4a1013f5f8e93b230ea212406950ae7407a371d6fde4f4ef319034f35f7982.jpg)

![](images/b99e8c9dfe3165984df61fe68b2c7a139681c26c29a55091140a24441b565dda.jpg)

![](images/48fc9dd8943886fc16914337f0685499d5db5f8e95510cb93b2685cc0a8db9ad.jpg)

![](images/54a52a6b02cab03c9eb67441feedf50fd29ee47622ae8c9cc37c8b08b25c27cc.jpg)

![](images/6e68d76cc3685d1e62b67b35b3464c3d55b3e40cdc3a8e74f293f841000961c7.jpg)
ID2
Exp1

![](images/415828e052bfe99f234023ffc77898b9b0bb5aea0e7e946d2aca4c03f6a801a2.jpg)
Exp2

![](images/703cab9be226057eaee8762b839d8a2d629f0f773fa8be6663f7331bc7bce21b.jpg)
Exp3

![](images/c83252e06e0c7c7e22addff1515ce8fa5cf82896957e551321624ae8a912caca.jpg)
Exp4

![](images/0777d4a76322ffef741331a15dd69a4fd5d0357f6797ec6774856a354b62f165.jpg)
Exp5

![](images/539fceef18c436780073843898a144f65a9cf4a50ec1cf62a63163192a63d4ac.jpg)
GT
Fig. 3: We generate head models with randomly sampled identity codes and expression codes as conditions. Each row corresponds to the same identity code, and each column corresponds to the same expression code.

We randomly sampled 2 identity codes and 5 expression codes to generate 10 head models. Each row corresponds to the same identity code and each column to the same expression code, as shown in Fig. 3. Our model performs well in both identity consistency and expression consistency, and the two components are fully disentangled.

![](images/f26549e975431a37b79f7fb0fc8345e6de37f9f5587d22e8db207707ccdea997.jpg)
Our Initialization

![](images/76d8a1e8dfcb81d1b7d9308c51380e94ed4be872da1fb1c4b0039c9ae8aa3640.jpg)

![](images/4087e5f58aed4b718bb63f715c53955288413a1b648bfa1d01409d37cdb61da0.jpg)
FLAME Initialization

![](images/d98b9faaa14affdc9f2f295f5522bc5a3d106800307e8c15bc87019937d3d8cb.jpg)
Fig. 4: We compare our initialization strategy with using the vertices of the FLAME model. The left side shows the rendered image, and the right side shows the positions of the Gaussian points.

Ablation on Initialization. To evaluate the effectiveness of our initialization strategy based on the guiding geometry model outlined in Section 3, we compare it against a FLAME-based initialization strategy. To use the FLAME model for initialization, we first fit a FLAME model to the overall mean 3D landmarks estimated during data preprocessing. Then, we sample 100,000 points near the surface of the FLAME mesh as the initialization of the mean Gaussian positions $X_0$. The per-vertex features $\Gamma$ bound to each point are simply set to zero, and all the networks $\{f_{inj}(\cdot), f_{id}(\cdot), f_{exp}(\cdot), f_{col}(\cdot), \Psi(\cdot)\}$ as well as $f_{att}(\cdot)$ are randomly initialized, as no prior is available. The initialization of the Gaussian attributes $\{S_0, Q_0, A_0\}$ remains the same as in our strategy. A sketch of this near-surface sampling is shown below.
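The near-surface sampling of the FLAME baseline can be reproduced in a few lines. The snippet below is an illustrative sketch only: the `trimesh` calls are real, but the noise scale `sigma` is our assumption, not a value from the paper.

```python
# Sample n points near a template mesh surface as initial Gaussian means X_0.
import numpy as np
import trimesh

def sample_near_surface(mesh: trimesh.Trimesh, n_points: int = 100_000,
                        sigma: float = 0.005) -> np.ndarray:
    # Uniformly sample points on the mesh faces, then perturb them with
    # Gaussian noise so they lie near, rather than exactly on, the surface.
    points, _ = trimesh.sample.sample_surface(mesh, n_points)
    return points + np.random.normal(scale=sigma, size=points.shape)

# Usage: flame = trimesh.load("flame_fitted.obj"); X0 = sample_near_surface(flame)
```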
We show the visualization results in Fig. 4, with the rendered image of the Gaussian model on the left and the Gaussian positions displayed as point clouds on the right. Our initialization strategy based on the guiding geometry model ensures that all Gaussian points fall evenly on the actual surface of the model, thereby preserving reconstruction quality. With the FLAME-based initialization, a large number of points drift inside or outside the actual surface, causing noise and redundancy; as a result, the model loses some high-frequency information and struggles to fully converge. We also perform a quantitative evaluation of the different initialization strategies on the rendered images, as shown in Table 1, which confirms that our method leads to better rendering results.

![](images/ec7cea1c7380f527b9d3b72265a034e62d0cfe0283abb6322b59282cbca66c4a.jpg)
Mesh

![](images/8d1289e3aac52396e9ff8a5c886ae7acae423f3990cdbd8011e5fc7b79de252b.jpg)
Mesh+SR

![](images/75ece36e26df68fbcfc01df5b051e366513075b812588f8deb59b4a3b09b6661.jpg)
Gaussian

![](images/f0009af8684436ba891ccf4afd4916dfc229b4c8b3b3a77fc5100541c8bcbbbf.jpg)
Gaussian+SR

![](images/b4ec2aff5a530f3ab998f44c2b3be602578f01ed08660801646d6b4961a55d13.jpg)
GT
Fig. 5: The comparison of the different representations with super resolution.
| Method | PSNR ↑ | SSIM ↑ | LPIPS ↓ |
| :-- | :-: | :-: | :-: |
| FLAME Initialization | 25.7 | 0.82 | 0.109 |
| Our Initialization | 28.0 | 0.84 | 0.085 |
Table 1: Quantitative evaluation of our initialization strategy against the naive FLAME initialization strategy.

Ablation on Representation and Super Resolution. We conduct an ablation study over the guiding mesh model, the Gaussian model, and the super-resolution network (abbreviated as SR), as shown in Fig. 5. The corresponding PSNR values are: Mesh (15.7), Mesh+SR (17.3), Gaussian (27.0), Gaussian+SR (29.3). Compared to the mesh, using 3D Gaussians as the representation brings a significant improvement (about +12 dB PSNR), while the super-resolution module adds details and produces more realistic results.

# 4.3 Applications

Image-based Fitting. In this section, we demonstrate the capability of our 3D Gaussian Parametric Model for single-image fitting, using the fitting strategy detailed in Section 3.4. We compare our model with similar works: HeadNeRF [19], MoFaNeRF [64], and PanoHead [1]. In addition to evaluating these methods on our evaluation dataset, we also conduct comparisons on cases from the MEAD [46] dataset (the first two rows). The qualitative results are presented in Fig. 6. Our model achieves high reconstruction accuracy while maintaining excellent 3D consistency and identity preservation. HeadNeRF's fitting results often suffer from missing hair, and it removes the body and neck. MoFaNeRF, trained solely on the FaceScape dataset where all subjects wear hats, struggles to fit hair. As a GAN-based model, PanoHead can achieve highly accurate reproductions from the input view; however, due to overfitting, its results from side views reveal poor 3D consistency and identity preservation.

![](images/3ad3a43bbbbe0a2b9ae04df2fc3acd2ed3b3a28c503a8e686d10e8f434ab7519.jpg)
Fig. 6: We compare our method with other SOTA methods on the task of single-image fitting. The far left is the input image; to the right are our method, HeadNeRF [19], MoFaNeRF [64] and PanoHead [1]. Our model significantly outperforms the other methods in reconstruction quality and 3D consistency.

In addition to the qualitative evaluations, we also conducted quantitative evaluations on 60 images using three metrics: Peak Signal-to-Noise Ratio (PSNR), Structural Similarity Index (SSIM), and Face Distance (FD). Here we briefly explain the Face Distance (FD). To compute the FD metric, we use a face recognition tool $^3$ to encode two images containing faces into 128-dimensional vectors, and then calculate the distance between these two vectors to reflect the similarity of the two faces; a possible implementation is sketched below. In our experiments, FD serves as an indicator of identity consistency. The results are shown in Table 2. Our model demonstrates the best performance in both fitting accuracy and identity consistency.
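The FD computation described above could look like the following. Using the open-source `face_recognition` package is our assumption; the paper does not name the tool it used.

```python
# Face Distance (FD): Euclidean distance between 128-D face embeddings.
import numpy as np
import face_recognition

def face_distance(path_a: str, path_b: str) -> float:
    # Assumes each image contains at least one detectable face.
    enc_a = face_recognition.face_encodings(face_recognition.load_image_file(path_a))[0]
    enc_b = face_recognition.face_encodings(face_recognition.load_image_file(path_b))[0]
    return float(np.linalg.norm(enc_a - enc_b))  # lower = better identity match
```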
| Method | PSNR ↑ | SSIM ↑ | FD ↓ |
| :-- | :-: | :-: | :-: |
| HeadNeRF | 28.9 | 0.84 | 0.37 |
| MoFaNeRF | 28.6 | 0.82 | 0.37 |
| PanoHead | 29.1 | 0.86 | 0.41 |
| Ours | 30.3 | 0.86 | 0.35 |
Table 2: Quantitative evaluation results on the task of single-image fitting. We compare our method with three other SOTA methods: HeadNeRF [19], MoFaNeRF [64] and PanoHead [1].

Expression Editing. Our 3D Gaussian Parametric Head Model possesses the capability for expression editing. Upon completing the fitting process on a portrait image, we can animate the model by applying different expression codes.

![](images/61a81ad08d615aa023cbdc29d34f4032335be5f2316e1e88e986f03b8adee9c3.jpg)
Input

![](images/319f95f3e9f4efa9604caf82561e803863b3b2f150f5b868c739b6e665ec41d4.jpg)
Exp1

![](images/80cfa2c0ccd204a8c9aeac0b5543adb5a39ce55d64fabc8df862de8d2b11e261.jpg)
Exp2

![](images/2361aa9c4dda95ff149f245f3e90611c8140792f624ce848565405030de84823.jpg)
Exp3

![](images/df9e24b0d1388bb3471c409b606c36714298523c3d184f15b56a58880f8f26a4.jpg)
Exp4

![](images/4605d43b5668d0ff7ef2acdaaafbf5745041e40b20343f30e26e57a4bd81c1b3.jpg)
Exp5

![](images/3bce3d7d8741445ca67c7dc09715008be6f2aa0ef1477f10b670807df8483318.jpg)
Exp6

![](images/552f455544647a2bcb5f662d6d0eb7e83b6a151c2bf93e6cd8083a5db756c821.jpg)
Exp7
Fig. 7: We perform expression editing on the head model reconstructed from the input image. Our model is able to handle very exaggerated expressions with superior identity consistency.

The detailed pipeline is outlined in Section 3.4, and an example is illustrated in Fig. 7. Our model can generate images depicting the corresponding expressions of the input subject based on a reference expression (shown in the lower-left corner of each image in the figure). It performs admirably even with exaggerated expressions, producing natural and realistic results.

# 5 Discussion

Ethical Considerations. Our technique can generate artificial portrait videos, posing a significant risk of spreading misinformation, shaping public opinion, and undermining trust in media outlets. These consequences could have profound negative effects on society. It is therefore crucial to explore methods that can effectively differentiate between genuine and manipulated content.

Limitation. Our 3D Gaussian Parametric Head Model takes a step forward in parametric head modeling. However, due to the limited amount of training data, the generalization ability of the model is still insufficient: in cases where the illumination differs significantly from the training set, the reconstruction quality degrades.

Conclusion. In this paper, we propose the 3D Gaussian Parametric Head Model, a novel framework for parametric head modeling. The model leverages the power of 3D Gaussians, enabling realistic rendering quality at real-time speed. Our well-designed training strategy ensures stable convergence while enabling the model to learn appearance details and expressions. Moreover, our model allows for creating detailed, high-quality face avatars from a single input image, and also enables editing of expressions and identity. We believe our model represents a significant advancement in the field of parametric head models.

# Acknowledgements

The work is supported by the National Science Foundation of China (NSFC) under Grant Number 62125107 and the Postdoctoral Fellowship Program of China Postdoctoral Science Foundation under Grant Number GZC20231304.

# References

1. An, S., Xu, H., Shi, Y., Song, G., Ogras, U.Y., Luo, L.: Panohead: Geometry-aware 3d full-head synthesis in 360°. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 20950-20959 (June 2023)
2. Blanz, V., Vetter, T.: A morphable model for the synthesis of 3d faces. In: 26th Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH 1999). pp. 187-194. ACM Press (1999)
3. Bühler, M.C., Sarkar, K., Shah, T., Li, G., Wang, D., Helminger, L., Orts-Escolano, S., Lagun, D., Hilliges, O., Beeler, T., et al.: Preface: A data-driven volumetric prior for few-shot ultra high-resolution face synthesis. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). pp. 3402-3413 (2023)
4. Bulat, A., Tzimiropoulos, G.: How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230,000 3d facial landmarks). In: International Conference on Computer Vision (ICCV) (2017)
5. Cao, C., Simon, T., Kim, J.K., Schwartz, G., Zollhoefer, M., Saito, S., Lombardi, S., Wei, S.E., Belko, D., Yu, S.I., Sheikh, Y., Saragih, J.: Authentic volumetric avatars from a phone scan. ACM Trans. Graph. 41(4) (Jul 2022)
6. Cao, C., Weng, Y., Zhou, S., Tong, Y., Zhou, K.: Facewarehouse: A 3d facial expression database for visual computing. IEEE Transactions on Visualization and Computer Graphics 20, 413-425 (2014)
7. Chan, E., Monteiro, M., Kellnhofer, P., Wu, J., Wetzstein, G.: pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 5795-5805 (2021)
8. Chan, E.R., Lin, C.Z., Chan, M.A., Nagano, K., Pan, B., De Mello, S., Gallo, O., Guibas, L., Tremblay, J., Khamis, S., Karras, T., Wetzstein, G.: Efficient geometry-aware 3D generative adversarial networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 16102-16112 (2022)
9. Chen, X., Deng, Y., Wang, B.: Mimic3d: Thriving 3d-aware gans via 3d-to-2d imitation. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) (2023)
10. Chen, Y., Wang, L., Li, Q., Xiao, H., Zhang, S., Yao, H., Liu, Y.: Monogaussianavatar: Monocular gaussian point-based head avatar. In: ACM SIGGRAPH 2024 Conference Proceedings (2024)
11. Deng, Y., Yang, J., Xiang, J., Tong, X.: Gram: Generative radiance manifolds for 3d-aware image generation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 10663-10673 (2022)
12. Gafni, G., Thies, J., Zollhöfer, M., Nießner, M.: Dynamic neural radiance fields for monocular 4d facial avatar reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 8645-8654 (June 2021)
13. Gao, X., Zhong, C., Xiang, J., Hong, Y., Guo, Y., Zhang, J.: Reconstructing personalized semantic facial nerf models from monocular video. ACM Transactions on Graphics (Proceedings of SIGGRAPH Asia) 41(6) (2022)
14. Gerig, T., Forster, A., Blumer, C., Egger, B., Lüthi, M., Schönborn, S., Vetter, T.: Morphable face models - an open framework. In: 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018). pp. 75-82 (2018)
15. Giebenhain, S., Kirschstein, T., Georgopoulos, M., Rünz, M., Agapito, L., Nießner, M.: Learning neural parametric head models. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)
16. Giebenhain, S., Kirschstein, T., Georgopoulos, M., Rünz, M., Agapito, L., Nießner, M.: Mononphm: Dynamic head reconstruction from monocular videos. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)
17. Grassal, P.W., Prinzler, M., Leistner, T., Rother, C., Nießner, M., Thies, J.: Neural head avatars from monocular rgb videos. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 18632-18643 (June 2022)
18. Gu, J., Liu, L., Wang, P., Theobalt, C.: Stylenerf: A style-based 3d aware generator for high-resolution image synthesis. In: International Conference on Learning Representations (ICLR) (2022)
19. Hong, Y., Peng, B., Xiao, H., Liu, L., Zhang, J.: Headnerf: A real-time nerf-based parametric head model. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 20374-20384 (June 2022)
20. Hu, L., Zhang, H., Zhang, Y., Zhou, B., Liu, B., Zhang, S., Nie, L.: Gaussianavatar: Towards realistic human avatar modeling from a single video via animatable 3d gaussians. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)
21. Kerbl, B., Kopanas, G., Leimkühler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics 42(4) (July 2023)
22. Khakhulin, T., Sklyarova, V., Lempitsky, V., Zakharov, E.: Realistic one-shot mesh-based head avatars. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)
23. Kirschstein, T., Giebenhain, S., Nießner, M.: Diffusionavatars: Deferred diffusion for high-fidelity 3d head avatars. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)
24. Kirschstein, T., Qian, S., Giebenhain, S., Walter, T., Nießner, M.: Nersemble: Multi-view radiance field reconstruction of human heads. ACM Trans. Graph. 42(4) (Jul 2023)
25. Li, T., Bolkart, T., Black, M.J., Li, H., Romero, J.: Learning a model of facial shape and expression from 4d scans. ACM Trans. Graph. 36(6) (Nov 2017)
26. Li, X., De Mello, S., Liu, S., Nagano, K., Iqbal, U., Kautz, J.: Generalizable one-shot neural head avatar. In: Advances in Neural Information Processing Systems (NeurIPS) (2023)
27. Li, Z., Zheng, Z., Wang, L., Liu, Y.: Animatable gaussians: Learning pose-dependent gaussian maps for high-fidelity human avatar modeling. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)
28. Lin, C.Z., Nagano, K., Kautz, J., Chan, E.R., Iqbal, U., Guibas, L., Wetzstein, G., Khamis, S.: Single-shot implicit morphable faces with consistent texture parameterization. In: ACM SIGGRAPH 2023 Conference Proceedings (2023)
29. Lin, S., Ryabtsev, A., Sengupta, S., Curless, B., Seitz, S., Kemelmacher-Shlizerman, I.: Real-time high-resolution background matting. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (Jun 2021)
30. Lombardi, S., Simon, T., Schwartz, G., Zollhoefer, M., Sheikh, Y., Saragih, J.: Mixture of volumetric primitives for efficient neural rendering. ACM Trans. Graph. 40(4) (Jul 2021)
31. Loper, M., Mahmood, N., Romero, J., Pons-Moll, G., Black, M.J.: SMPL: A skinned multi-person linear model. ACM Trans. Graph. (Proc. SIGGRAPH Asia) 34(6), 248:1-248:16 (Oct 2015)
32. Luiten, J., Kopanas, G., Leibe, B., Ramanan, D.: Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. In: 3DV (2024)
33. Ma, S., Weng, Y., Shao, T., Zhou, K.: 3d gaussian blendshapes for head avatar animation. In: ACM SIGGRAPH 2024 Conference Proceedings (2024)
34. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)
35. Or-El, R., Luo, X., Shan, M., Shechtman, E., Park, J.J., Kemelmacher-Shlizerman, I.: Stylesdf: High-resolution 3d-consistent image and geometry generation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 13493-13503 (2022)
36. Pavlakos, G., Choutas, V., Ghorbani, N., Bolkart, T., Osman, A.A.A., Tzionas, D., Black, M.J.: Expressive body capture: 3D hands, face, and body from a single image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 10975-10985 (2019)
37. Qian, S., Kirschstein, T., Schoneveld, L., Davoli, D., Giebenhain, S., Nießner, M.: Gaussianavatars: Photorealistic head avatars with rigged 3d gaussians. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)
38. Qin, M., Liu, Y., Xu, Y., Zhao, X., Liu, Y., Wang, H.: High-fidelity 3d head avatars reconstruction through spatially-varying expression conditioned neural radiance field. In: AAAI Conference on Artificial Intelligence (2023)
39. Saito, S., Schwartz, G., Simon, T., Li, J., Nam, G.: Relightable gaussian codec avatars. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)
40. Shao, Z., Wang, Z., Li, Z., Wang, D., Lin, X., Zhang, Y., Fan, M., Wang, Z.: Splattingavatar: Realistic real-time human avatars with mesh-embedded gaussian splatting. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)
41. Shen, T., Gao, J., Yin, K., Liu, M.Y., Fidler, S.: Deep marching tetrahedra: a hybrid representation for high-resolution 3d shape synthesis. In: Advances in Neural Information Processing Systems (NeurIPS) (2021)
42. Sun, J., Wang, X., Shi, Y., Wang, L., Wang, J., Liu, Y.: Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. ACM Transactions on Graphics (TOG) 41(6), 1-10 (2022)
43. Sun, J., Wang, X., Wang, L., Li, X., Zhang, Y., Zhang, H., Liu, Y.: Next3d: Generative neural texture rasterization for 3d-aware head avatars. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)
44. Wang, D., Chandran, P., Zoss, G., Bradley, D., Gotardo, P.: Morf: Morphable radiance fields for multiview neural head modeling. In: ACM SIGGRAPH 2022 Conference Proceedings. SIGGRAPH '22, Association for Computing Machinery, New York, NY, USA (2022)
45. Wang, J., Xie, J.C., Li, X., Xu, F., Pun, C.M., Gao, H.: Gaussianhead: High-fidelity head avatars with learnable gaussian derivation (2024)
46. Wang, K., Wu, Q., Song, L., Yang, Z., Wu, W., Qian, C., He, R., Qiao, Y., Loy, C.C.: Mead: A large-scale audio-visual dataset for emotional talking-face generation. In: Proceedings of the European Conference on Computer Vision (ECCV) (August 2020)
47. Wang, L., Chen, Z., Yu, T., Ma, C., Li, L., Liu, Y.: Faceverse: a fine-grained and detail-controllable 3d face morphable model from a hybrid dataset. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (Jun 2022)
48. Wu, G., Yi, T., Fang, J., Xie, L., Zhang, X., Wei, W., Liu, W., Tian, Q., Wang, X.: 4d gaussian splatting for real-time dynamic scene rendering (2024)
49. Wu, S., Yan, Y., Li, Y., Cheng, Y., Zhu, W., Gao, K., Li, X., Zhai, G.: Ganhead: Towards generative animatable neural head avatars. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 437-447 (2023)
50. Wu, Y., Deng, Y., Yang, J., Wei, F., Chen, Q., Tong, X.: Anifacegan: Animatable 3d-aware face image generation for video avatars. In: Advances in Neural Information Processing Systems (NeurIPS) (2022)
51. Wu, Y., Xu, S., Xiang, J., Wei, F., Chen, Q., Yang, J., Tong, X.: Aniportraitgan: Animatable 3d portrait generation from 2d image collections. In: SIGGRAPH Asia 2023 Conference Proceedings (2023)
52. Xiang, J., Yang, J., Deng, Y., Tong, X.: Gram-hd: 3d-consistent image generation at high resolution with generative radiance manifolds. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). pp. 2195-2205 (2023)
53. Xiang, J., Gao, X., Guo, Y., Zhang, J.: Flashavatar: High-fidelity head avatar with efficient gaussian embedding. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)
54. Xu, Y., Chen, B., Li, Z., Zhang, H., Wang, L., Zheng, Z., Liu, Y.: Gaussian head avatar: Ultra high-fidelity head avatar via dynamic gaussians. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)
55. Xu, Y., Wang, L., Zhao, X., Zhang, H., Liu, Y.: Avatarmav: Fast 3d head avatar reconstruction using motion-aware neural voxels. In: ACM SIGGRAPH 2023 Conference Proceedings (2023)
56. Xu, Y., Zhang, H., Wang, L., Zhao, X., Han, H., Qi, G., Liu, Y.: Latentavatar: Learning latent expression code for expressive neural head avatar. In: ACM SIGGRAPH 2023 Conference Proceedings (2023)
57. Yang, Z., Yang, H., Pan, Z., Zhu, X., Zhang, L.: Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting (2023)
58. Yang, Z., Gao, X., Zhou, W., Jiao, S., Zhang, Y., Jin, X.: Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction (June 2023)
59. Yenamandra, T., Tewari, A., Bernard, F., Seidel, H., Elgharib, M., Cremers, D., Theobalt, C.: i3dmm: Deep implicit 3d morphable model of human heads. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (June 2021)
60. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 586-595 (June 2018)
61. Zhao, X., Wang, L., Sun, J., Zhang, H., Suo, J., Liu, Y.: Havatar: High-fidelity head avatar via facial model conditioned neural radiance field. ACM Trans. Graph. (Oct 2023)
62. Zheng, Y., Abrevaya, V.F., Bühler, M.C., Chen, X., Black, M.J., Hilliges, O.: I m avatar: Implicit morphable head avatars from videos. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 13535-13545 (June 2022)
63. Zheng, Y., Yifan, W., Wetzstein, G., Black, M.J., Hilliges, O.: Pointavatar: Deformable point-based head avatars from videos. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)
64. Zhuang, Y., Zhu, H., Sun, X., Cao, X.: Mofanerf: Morphable facial neural radiance field. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)
65.
Zielonka, W., Bolkart, T., Thies, J.: Instant volumetric head avatars (June 2023) \ No newline at end of file diff --git a/2024/3D Gaussian Parametric Head Model/images.zip b/2024/3D Gaussian Parametric Head Model/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..4b37a5d831fa797b8806c1910a75abca43d74c4f --- /dev/null +++ b/2024/3D Gaussian Parametric Head Model/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06f880f94cac032b8755c5014a9c984150a7b0591ef8cba62ee62a773b15f0d8 +size 481217 diff --git a/2024/3D Gaussian Parametric Head Model/layout.json b/2024/3D Gaussian Parametric Head Model/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..8c4a74c8c2c55ed09bcc0af719b291122ea03f50 --- /dev/null +++ b/2024/3D Gaussian Parametric Head Model/layout.json @@ -0,0 +1,11943 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 175, + 111, + 438, + 128 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 111, + 438, + 128 + ], + "spans": [ + { + "bbox": [ + 175, + 111, + 438, + 128 + ], + "type": "text", + "content": "3D Gaussian Parametric Head Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 157, + 149, + 459, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 149, + 459, + 173 + ], + "spans": [ + { + "bbox": [ + 157, + 149, + 459, + 173 + ], + "type": "text", + "content": "Yuelang " + }, + { + "bbox": [ + 157, + 149, + 459, + 173 + ], + "type": "inline_equation", + "content": "\\mathrm{Xu}^{1}" + }, + { + "bbox": [ + 157, + 149, + 459, + 173 + ], + "type": "text", + "content": ", Lizhen Wang" + }, + { + "bbox": [ + 157, + 149, + 459, + 173 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 157, + 149, + 459, + 173 + ], + "type": "text", + "content": ", Zerong Zheng" + }, + { + "bbox": [ + 157, + 149, + 459, + 173 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 157, + 149, + 459, + 173 + ], + "type": "text", + "content": ", Zhaoqi " + }, + { + "bbox": [ + 157, + 149, + 459, + 173 + ], + "type": "inline_equation", + "content": "\\mathrm{Su}^{1}" + }, + { + "bbox": [ + 157, + 149, + 459, + 173 + ], + "type": "text", + "content": ", and Yebin " + }, + { + "bbox": [ + 157, + 149, + 459, + 173 + ], + "type": "inline_equation", + "content": "\\mathrm{Liu}^{1}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 229, + 183, + 384, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 229, + 183, + 384, + 195 + ], + "spans": [ + { + "bbox": [ + 229, + 183, + 384, + 195 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 229, + 183, + 384, + 195 + ], + "type": "text", + "content": " Tsinghua University, Beijing, China" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 241, + 195, + 373, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 195, + 373, + 205 + ], + "spans": [ + { + "bbox": [ + 241, + 195, + 373, + 205 + ], + "type": "text", + "content": "2 NNKosmos, Hangzhou, China" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 160, + 234, + 455, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 234, + 455, + 443 + ], + "spans": [ + { + "bbox": [ + 160, + 234, + 455, + 443 + ], + "type": "text", + "content": "Abstract. Creating high-fidelity 3D human head avatars is crucial for applications in VR/AR, telepresence, digital human interfaces, and film production. 
Recent advances have leveraged morphable face models to generate animated head avatars from easily accessible data, representing varying identities and expressions within a low-dimensional parametric space. However, existing methods often struggle with modeling complex appearance details, e.g., hairstyles and accessories, and suffer from low rendering quality and efficiency. This paper introduces a novel approach, 3D Gaussian Parametric Head Model, which employs 3D Gaussians to accurately represent the complexities of the human head, allowing precise control over both identity and expression. Additionally, it enables seamless face portrait interpolation and the reconstruction of detailed head avatars from a single image. Unlike previous methods, the Gaussian model can handle intricate details, enabling realistic representations of varying appearances and complex expressions. Furthermore, this paper presents a well-designed training framework to ensure smooth convergence, providing a guarantee for learning the rich content. Our method achieves high-quality, photo-realistic rendering with real-time efficiency, making it a valuable contribution to the field of parametric head models." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 160, + 454, + 409, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 454, + 409, + 464 + ], + "spans": [ + { + "bbox": [ + 160, + 454, + 409, + 464 + ], + "type": "text", + "content": "Keywords: 3D Gaussian " + }, + { + "bbox": [ + 160, + 454, + 409, + 464 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 454, + 409, + 464 + ], + "type": "text", + "content": " Head Avatar " + }, + { + "bbox": [ + 160, + 454, + 409, + 464 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 454, + 409, + 464 + ], + "type": "text", + "content": " Parametric Model" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 485, + 230, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 485, + 230, + 498 + ], + "spans": [ + { + "bbox": [ + 132, + 485, + 230, + 498 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 510, + 482, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 482, + 629 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 482, + 629 + ], + "type": "text", + "content": "Creating high-fidelity 3D human head avatars holds significant importance across various fields, including VR/AR, telepresence, digital human interfaces, and film production. The automatic generation of such avatars has been a focal point in computer vision research for many years. Recent methods [12,13,17,38,55,56,61-63,65] can create an animated head avatar through conveniently collected data such as monocular video data or even a picture [22, 26]. Serving as the most fundamental tool in these methods, the 3D morphable models (3DMM) [14, 25], which represent varying identities and expressions within a low-dimensional space, have been proven to be a highly successful avenue in addressing this challenging problem." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 630, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 630, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 630, + 482, + 666 + ], + "type": "text", + "content": "Since the traditional parametric 3DMMs are typically limited by the topology of the underlying template mesh and only focus on the face part, some works [15,16,28,59] propose to use implicit Signed Distance Field (SDF) as the" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 116, + 482, + 211 + ], + "blocks": [ + { + "bbox": [ + 133, + 116, + 482, + 211 + ], + "lines": [ + { + "bbox": [ + 133, + 116, + 482, + 211 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 482, + 211 + ], + "type": "image", + "image_path": "18d9724a6f94017aeaf954badf8e38047fefbcd5fff7e8310c479cef2a0005b3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 219, + 482, + 285 + ], + "lines": [ + { + "bbox": [ + 130, + 219, + 482, + 285 + ], + "spans": [ + { + "bbox": [ + 130, + 219, + 482, + 285 + ], + "type": "text", + "content": "Fig. 1: We utilize hybrid datasets comprising captured multi-view video data and rendered image data from 3D scans for training our model. The trained model can be manipulated using decoupled identity and expression codes to produce a diverse array of high-fidelity head models. When presented with an image, our model can be adjusted to reconstruct the portrait in the image and edit the expression according to any other desired expressions." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 303, + 482, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 303, + 482, + 422 + ], + "spans": [ + { + "bbox": [ + 130, + 303, + 482, + 422 + ], + "type": "text", + "content": "geometric representation to model the entire head. Despite their flexibility, these methods fall short in recovering high-frequency geometric and texture details like hairstyles, glasses or accessories. On the other end of the spectrum, Neural Radiance Field (NeRF) [34] based methods [19,64] learn parametric head models by directly synthesizing images, thus eliminating the need of geometry modeling. However, NeRF is built upon volumetric rendering, which involves sampling and integrating points distributed throughout space. Therefore, NeRF-based methods typically suffer from low rendering efficiency and have to trade it off with rendering resolution, thereby greatly reducing rendering quality. Moreover, skipping geometric reconstruction would probably lead to poor 3D consistency." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 425, + 482, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 425, + 482, + 604 + ], + "spans": [ + { + "bbox": [ + 130, + 425, + 482, + 604 + ], + "type": "text", + "content": "More recently, 3D Gaussian Splatting (3DGS) [21], which uses explicit Gaussian ellipsoids to represent 3D scenes, has attracted significant attention from the research community. Experiments have verified the superior quality of the rendered results and excellent rendering efficiency compared to previous NeRF-based or surface-based methods even on dynamic scenes [32,48,57,58]. 
Motivated by this progress, we propose a novel 3D Gaussian Parametric Head Model, which, for the first time, marries the power of 3DGS with the challenging task of parametric head modeling. Our 3D gaussian parametric head model decouples the control signals of the head into the latent spaces of identity and expression, as is also done in SDF-based face model NPHM [15]. These latent spaces are then mapped to the offsets of the Gaussian positions, which effectively represent the variance of shape and appearance of different identities and expressions. Benefiting from the differentiability of Gaussian splatting, our model can be learned from multi-view video data corpus in an end-to-end manner, without relying on geometry supervision." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "content": "Unfortunately, training our 3D Gaussian parametric head model is not quite straightforward, because Gaussian ellipsoids are unstructured and each Gaussian ellipsoid has its own independent learnable attribute. Such a characteristic makes 3DGS powerful in overfitting a specific object or scene, but poses great challenges for generative head modeling. Without proper initialization and regularization," + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "type": "text", + "content": "Xuet al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": "the learned parametric head model may suffer from unstable training or a large number of Gaussian points becoming redundant and noisy, as shown in Fig. 4." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 140, + 481, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 140, + 481, + 307 + ], + "spans": [ + { + "bbox": [ + 130, + 140, + 481, + 307 + ], + "type": "text", + "content": "To overcome these challenges, we propose a well-designed two-stage training strategy to ensure smooth convergence of our model training. Specifically, we first roughly train all the networks on a mesh-based guiding model. Subsequently, the network parameters are migrated to the Gaussian model, and all Gaussian points are initialized with the trained mesh geometry to ensure that they are located near the actual surface. Compared to naive initialization with FLAME [25], our initialization strategy leads to a better guess of the positions of Gaussian points, making the subsequent training of the model converge stably and the areas like hairs better recovered. 
Moreover, we propose to use 3D landmark loss to supervise the deformation of the model learning expressions, which can speed up the convergence and avoid artifacts under exaggerated expressions. Lastly, our method supports training from both 3D head scans and multi-view 2D face datasets, which enhances the versatility and comprehensiveness of facial data collection and model training." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 308, + 481, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 308, + 481, + 415 + ], + "spans": [ + { + "bbox": [ + 130, + 308, + 481, + 415 + ], + "type": "text", + "content": "After training on large corpus of multi-view head videos, our parametric Gaussian head model can generate photorealistic images that accurately depict the diverse range of facial appearances, naturally handling complex and exaggerated expressions, while also enabling real-time rendering. Additionally, our method supports single-image fitting and surpasses previous techniques in both reconstruction accuracy and identity consistency. Furthermore, the model resulting from our fitting process allows for the control of various expressions while maintaining naturalness and consistent identity even under exaggerated expressions. The contributions of our method can be summarized as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 421, + 480, + 528 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 138, + 421, + 479, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 421, + 479, + 456 + ], + "spans": [ + { + "bbox": [ + 138, + 421, + 479, + 456 + ], + "type": "text", + "content": "- We propose 3D Gaussian Parametric Head Model, a novel parametric head model which utilizes 3D Gaussians as the representation and enables photorealistic rendering quality and real-time rendering speed." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 457, + 480, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 457, + 480, + 491 + ], + "spans": [ + { + "bbox": [ + 138, + 457, + 480, + 491 + ], + "type": "text", + "content": "- We propose a well-designed training strategy to ensure that the Gaussian model converges stably while learning rich appearance details and complex expressions efficiently." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 492, + 480, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 492, + 480, + 528 + ], + "spans": [ + { + "bbox": [ + 138, + 492, + 480, + 528 + ], + "type": "text", + "content": "- Our 3D Gaussian Parametric Head Model enables the generation of a detailed, high-quality face avatar from a single given image, as well as performing expression and identity editing upon it." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 545, + 237, + 558 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 545, + 237, + 558 + ], + "spans": [ + { + "bbox": [ + 132, + 545, + 237, + 558 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 570, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 570, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 570, + 481, + 666 + ], + "type": "text", + "content": "Parametric Head Models. 
Parametric head models are used to represent facial features, expressions, and identities effectively and efficiently. They allow for the creation of realistic human faces with adjustable parameters, making them essential in computer graphics, animation, and virtual reality. Therefore, research in this field has always been a hot topic. Traditional 3D Morphable Models (3DMM) [2,6,14,25,47] are constructed by non-rigidly registering a template mesh with fixed topology to a series of 3D scans. Through this registration process, a 3DMM can be computed using dimensionality reduction techniques such" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "type": "text", + "content": "3D Gaussian Parametric Head Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 308 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 308 + ], + "type": "text", + "content": "as principal component analysis (PCA). The resulting parametric space captures the variations in facial geometry and appearance across a population. However, while 3DMMs offer a powerful way to represent faces, they do have limitations. These models rely heavily on the correspondence between the 3D scans and the template for accurate fitting and may struggle to represent local surface details like wrinkles or hairstyles that deviate significantly from the template mesh. Recent advances in implicit representation have led to the great development of neural parametric head models. Some methods [15, 16, 49, 59] propose implicit Signed Distance Field (SDF) based head models, which are not constrained by topology thus can recover more complex content like hair compared to previous mesh-based Methods. Other methods [3, 19, 44, 64] propose to use NeRF [34] as the representation of the parametric head models, which can directly synthesize photorealistic images without geometric reconstruction. Cao, et al. [5] use a hybrid representation [30] of mesh and NeRF to train their model on unpublished large-scale light stage data. However, rendering efficiency is typically low in NeRF-based methods, often resulting in a trade-off with rendering resolution." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 312, + 482, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 312, + 482, + 540 + ], + "spans": [ + { + "bbox": [ + 130, + 312, + 482, + 540 + ], + "type": "text", + "content": "3D GAN based Head Models. 3D Generative Adversarial Networks (GANs) have revolutionized the field of computer vision, particularly in the domain of human head and face modeling, enabling the generation of face avatars from input images. Traditional methods often require labor-intensive manual work or rely on multi-view images to create 3D models. 
3D GANs as a more automated and data-driven approach, which are just trained on single-view 2D images but generate detailed and realistic 3D models of human head [7-9, 11, 18, 35, 52]. Panohead [1] additionally introduces images of hairstyles on the back of characters and trains a full-head generative model. Based on the previous methods, IDE-3D [42] proposes to use semantic map to edit the 3D head model. Next3D [43] and AntiFaceGAN [50] extend to use the FLAME model [25] to condition the generated head model, so that the expression and pose of the generated head model can be controlled. AntiPortraitGAN [51] further replaces FLAME model with SMPLX model [36] to generate upper body avatars, thus the shoulders and the neck can also be controlled. These 3D GAN-based models primarily leverage the coarse FLAME model for expression control, often leading to a loss of expression details in the generated faces. In contrast, our method directly learns the expression distribution from the dataset, capturing more facial appearance details." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "type": "text", + "content": "3D Gaussians. Recently, 3D Gaussian splatting [21] has shown superior performance compared to NeRF, excelling in both novel view synthesis quality and rendering speed. Several methods have expanded Gaussian representation to dynamic scene reconstruction [32, 48, 57, 58]. For human body avatar modeling, recent approaches [20, 27] propose training a 3D Gaussian avatar animated by SMPL [31] or a skeleton from multi-view videos, surpassing previous methods in rendering quality and efficiency. In the realm of human head avatar modeling, recent techniques [10, 23, 33, 37, 37, 39, 40, 45, 53, 54] also utilize 3D Gaussians to create high-fidelity and efficient head avatars. These approaches center on the creation of a high-fidelity person-specific avatar using data of a single person. In" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "type": "text", + "content": "Xuet al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 136, + 114, + 482, + 339 + ], + "blocks": [ + { + "bbox": [ + 136, + 114, + 482, + 339 + ], + "lines": [ + { + "bbox": [ + 136, + 114, + 482, + 339 + ], + "spans": [ + { + "bbox": [ + 136, + 114, + 482, + 339 + ], + "type": "image", + "image_path": "f502bb7b723709cb2e4afb2d625e1e81f0d5db3f03a563a244280467a4815d2a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 349, + 482, + 427 + ], + "lines": [ + { + "bbox": [ + 130, + 349, + 482, + 427 + ], + "spans": [ + { + "bbox": [ + 130, + 349, + 482, + 427 + ], + "type": "text", + "content": "Fig. 2: The pipeline of our method. 
Our training strategy can be divided into a Guiding Geometry Model for initialization, and a final 3D Gaussian Parametric Head Model. Deformations of each model are further decoupled into identity-related and expression-related deformations. Rendering involves using DMTet to transform the initial model into a mesh and 3D Gaussian Splatting for the Gaussian model. Features from both models are finally upsampled to high-resolution portrait images through a convolutional network " + }, + { + "bbox": [ + 130, + 349, + 482, + 427 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 130, + 349, + 482, + 427 + ], + "type": "text", + "content": ". During inference, our output exclusively comes from the Gaussian model." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 432, + 482, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 432, + 482, + 469 + ], + "spans": [ + { + "bbox": [ + 130, + 432, + 482, + 469 + ], + "type": "text", + "content": "contrast, our method focuses on a versatile prior model that can accommodate varying appearances. Once trained, our model is also capable of person-specific avatar reconstruction by fitting to the input image data provided." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 486, + 201, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 486, + 201, + 498 + ], + "spans": [ + { + "bbox": [ + 132, + 486, + 201, + 498 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 510, + 482, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 482, + 594 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 482, + 594 + ], + "type": "text", + "content": "In this section, we present the 3D Gaussian Parametric Head Model. In contrast to previous mesh-based or NeRF-based models, initializing and training Gaussian-based models pose distinct challenges. This section introduces the dataset and preprocessing, the carefully designed guiding geometry model, the Gaussian Parametric Head Model, and outlines their respective training processes. Additionally, we will also provide the training details and demonstrate how to utilize our method when given a single input image." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 131, + 610, + 259, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 610, + 259, + 623 + ], + "spans": [ + { + "bbox": [ + 131, + 610, + 259, + 623 + ], + "type": "text", + "content": "3.1 Data Preprocessing" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 629, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 481, + 666 + ], + "type": "text", + "content": "We used three datasets for our model training, including a multi-view video dataset NeRSemble [24], and two 3D scans datasets NPHM [15] and FaceV-erse [47]. 
We do not use the 3D geometry of the scans directly, but render them" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "type": "text", + "content": "3D Gaussian Parametric Head Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 247 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 247 + ], + "type": "text", + "content": "into multi-view images and use only the images form the 3 datasets as supervision. In order to better utilize these three different datasets, we need to do preprocessing. First, we resize the images to 512 resolution and adjust the camera parameters. Then, we use BackgroundMattingV2 [29] to extract the foreground characters in the NeRSemble dataset and record the masks. This step is not required for the two synthetic datasets. Next, we use face alignment [4] to detect 2D landmarks in all the images. Through these 2D landmarks, we fit a Basel Face Model (BFM) [14] for each expression of each identity, and record the head pose and 3D landmarks of the BFM. We will use the above processed camera parameters, images, masks, head pose of BFM and 3D landmarks of BFM to train our model." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 266, + 272, + 278 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 266, + 272, + 278 + ], + "spans": [ + { + "bbox": [ + 132, + 266, + 272, + 278 + ], + "type": "text", + "content": "3.2 Model Representation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 286, + 482, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 286, + 482, + 417 + ], + "spans": [ + { + "bbox": [ + 130, + 286, + 482, + 417 + ], + "type": "text", + "content": "The representation of Gaussian distribution poses challenges due to its unordered and unstructured nature, leading to difficulties in the continuous spread of gradients to neighboring points in space during backpropagation. This often results in convergence failure when Gaussians are randomly initialized. On the other hand, surface-based representations such as mesh are just suitable for rough geometry learning. A direct idea is to utilize an existing 3DMM, such as FLAME [25], as the initial position for the points in 3D Gaussian splatting [21]. However, this coarse initialization still fails to converge the positions of 3D points to the correct locations, as shown in Fig. 4. The network tends to alter the shape of the ellipsoid to achieve a suitable fitting result, leading to inaccurate geometry of the point cloud and blurriness in the rendered image." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 418, + 482, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 418, + 482, + 513 + ], + "spans": [ + { + "bbox": [ + 130, + 418, + 482, + 513 + ], + "type": "text", + "content": "To address this problem, a more detailed initialization process is necessary for capturing the diverse head variations using 3D Gaussian splatting. Specifically, we draw inspiration from Gaussian Head Avatar [54] and leverage the implicit signed distance field (SDF) representation to train a guiding geometry model. This guiding geometry model serves as the initial value for the Gaussian model, providing a more effective starting point for the optimization process. We define the initial model as Guiding Geometry Model and the refined model as 3D Gaussian Parametric Head Model." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 514, + 482, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 514, + 482, + 574 + ], + "spans": [ + { + "bbox": [ + 130, + 514, + 482, + 574 + ], + "type": "text", + "content": "Guiding Geometry Model. The guiding geometry model receives an identity code " + }, + { + "bbox": [ + 130, + 514, + 482, + 574 + ], + "type": "inline_equation", + "content": "z^{id}" + }, + { + "bbox": [ + 130, + 514, + 482, + 574 + ], + "type": "text", + "content": " and an expression code " + }, + { + "bbox": [ + 130, + 514, + 482, + 574 + ], + "type": "inline_equation", + "content": "z^{exp}" + }, + { + "bbox": [ + 130, + 514, + 482, + 574 + ], + "type": "text", + "content": " as input, producing a mesh with vertices " + }, + { + "bbox": [ + 130, + 514, + 482, + 574 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 130, + 514, + 482, + 574 + ], + "type": "text", + "content": ", faces " + }, + { + "bbox": [ + 130, + 514, + 482, + 574 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 130, + 514, + 482, + 574 + ], + "type": "text", + "content": ", and per-vertex color " + }, + { + "bbox": [ + 130, + 514, + 482, + 574 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 130, + 514, + 482, + 574 + ], + "type": "text", + "content": " that aligns with the specified identity and expression. 
To achieve this, we use an MLP denoted as $f_{mean}(\cdot)$ to implicitly model the SDF, which represents the mean geometry:

$$ s,\ \gamma = f_{mean}(x), \tag{1} $$

where $s$ denotes the SDF value, $\gamma$ denotes the feature from the last layer, and $x$ denotes the input position. We then convert the implicit SDF into an explicit mesh with vertex positions $V_0$, per-vertex features $\Gamma$ and faces $F$ via Deep Marching Tetrahedra (DMTet) [41].
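As a concrete illustration, the following is a minimal sketch of what an SDF network of this kind could look like; the width, depth, feature dimension and the name `FMean` are assumptions for illustration, not the paper's configuration.

```python
import torch
import torch.nn as nn

class FMean(nn.Module):
    """Hypothetical sketch of f_mean: maps a 3D position x to an SDF value s
    and a per-point feature gamma (Eqn. 1). All sizes are illustrative."""
    def __init__(self, feat_dim: int = 32, hidden: int = 256, depth: int = 6):
        super().__init__()
        layers, d_in = [], 3
        for _ in range(depth):
            layers += [nn.Linear(d_in, hidden), nn.Softplus(beta=100)]
            d_in = hidden
        self.trunk = nn.Sequential(*layers)
        self.head = nn.Linear(hidden, 1 + feat_dim)  # SDF value + feature

    def forward(self, x: torch.Tensor):
        out = self.head(self.trunk(x))
        return out[..., :1], out[..., 1:]  # s, gamma

# Query the field at arbitrary positions; DMTet would then extract a mesh
# whose vertices V0 carry the interpolated features Gamma.
s, gamma = FMean()(torch.randn(1024, 3))
```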
Next, we need to transform the mean shape into a neutral-expression shape conditioned on the input identity code $z^{id}$. To inject identity information into the vertices of the mesh, we first use an injection MLP $f_{inj}(\cdot)$, which takes the identity code $z^{id}$ and the per-vertex features $\Gamma$ as input and produces identity-conditioned per-vertex feature vectors $H = f_{inj}(z^{id}, \Gamma)$. Subsequently, a tiny MLP $f_{id}(\cdot)$ predicts a displacement $\delta V_{id}$ for each vertex, which transforms the mean shape into the neutral-expression shape conditioned on the identity code $z^{id}$.

After completing the identity-related deformation, the next step is to capture the deformation induced by facial expressions. We introduce another tiny MLP $f_{exp}(\cdot)$.
This MLP takes the feature vectors $H$ obtained in the previous step and the expression code $z^{exp}$ as input, and outputs a displacement $\delta V_{exp}$ for each vertex, with which we update the vertex positions to $V_{can}$. Additionally, we feed the same feature vectors $H$ and the expression code $z^{exp}$ to a color MLP $f_{col}(\cdot)$ to predict a 32-channel color $C$ for each vertex. The vertex positions $V_{can}$ and the 32-channel colors $C$ can be written as:

$$ V_{can} = V_0 + f_{id}(H) + f_{exp}(H, z^{exp}), \quad C = f_{col}(H, z^{exp}). \tag{2} $$
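To make the data flow of Eqn. 2 explicit, here is a hedged PyTorch sketch; all module names, layer sizes and code dimensions are illustrative assumptions, not the paper's values.

```python
import torch
import torch.nn as nn

def mlp(d_in, d_out, hidden=128):
    # Small helper for the "tiny MLP" blocks; sizes are assumptions.
    return nn.Sequential(nn.Linear(d_in, hidden), nn.ReLU(),
                         nn.Linear(hidden, d_out))

feat, d_id, d_exp, n_verts = 32, 64, 32, 50000
f_inj = mlp(d_id + feat, feat)        # identity injection
f_id  = mlp(feat, 3)                  # identity displacement
f_exp = mlp(feat + d_exp, 3)          # expression displacement
f_col = mlp(feat + d_exp, 32)         # 32-channel color

V0    = torch.randn(n_verts, 3)       # mean-mesh vertices from DMTet
Gamma = torch.randn(n_verts, feat)    # per-vertex features
z_id, z_exp = torch.randn(d_id), torch.randn(d_exp)

H     = f_inj(torch.cat([z_id.expand(n_verts, -1), Gamma], dim=-1))
He    = torch.cat([H, z_exp.expand(n_verts, -1)], dim=-1)
V_can = V0 + f_id(H) + f_exp(He)      # Eqn. 2, positions
C     = f_col(He)                     # Eqn. 2, 32-channel colors
```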
Finally, we utilize the estimated head pose parameters $R$ and $T$ obtained during data preprocessing to transform the mesh from the canonical space to the world space: $V = R \cdot V_{can} + T$. After generating the final vertex positions, colors and faces $\{V, C, F\}$ of the mesh, we render it into a 256-resolution, 32-channel feature map $I_F$ and a mask $M$ through differentiable rasterization under a given camera pose. The feature map is then decoded into a 512-resolution RGB image $I_{hr}$ by a lightweight convolutional upsampling network $\Psi(\cdot)$, as shown in Fig. 2.
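The paper describes $\Psi(\cdot)$ only as a lightweight convolutional upsampling network from a 256-resolution, 32-channel feature map to a 512-resolution RGB image; the sketch below is one plausible instantiation of such a decoder, not the actual architecture.

```python
import torch
import torch.nn as nn

class Psi(nn.Module):
    """Hypothetical lightweight upsampler: 32x256x256 features -> 3x512x512 RGB."""
    def __init__(self, in_ch: int = 32):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(in_ch, 64, 3, padding=1), nn.LeakyReLU(0.2),
            nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
            nn.Conv2d(64, 32, 3, padding=1), nn.LeakyReLU(0.2),
            nn.Conv2d(32, 3, 3, padding=1), nn.Sigmoid(),  # RGB in [0, 1]
        )

    def forward(self, I_F: torch.Tensor) -> torch.Tensor:
        return self.net(I_F)

I_hr = Psi()(torch.randn(1, 32, 256, 256))  # -> (1, 3, 512, 512)
```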
3D Gaussian Parametric Head Model. The Gaussian model likewise takes an identity code $z^{id}$ and an expression code $z^{exp}$ as input, producing the positions $X$, colors $C$, scales $S$, rotations $Q$ and opacities $A$ of the 3D Gaussians. As in the guiding geometry model, we maintain an overall mean point cloud with mean positions $X_0$. However, we no longer generate the per-vertex features $\Gamma$ through $f_{mean}(x)$; instead, we generate them once and bind them to the Gaussian points as an optimizable variable $\Gamma_0$. This is possible because the number of Gaussian points is fixed at this stage. We then need to transform the mean point cloud into a neutral-expression point cloud conditioned on the identity code $z^{id}$.
To achieve this, we utilize the same injection MLP $f_{inj}(\cdot)$ and identity-deformation MLP $f_{id}(\cdot)$ defined in the guiding geometry model, which generate feature vectors $H = f_{inj}(z^{id}, \Gamma_0)$ encoding identity information for each point and predict the identity-related displacement of each point. We also need to predict the displacement conditioned on the expression code $z^{exp}$. The resulting positions $X_{can}$ and the 32-channel colors $C$ of each point, analogous to the guiding geometry model, can be written as:

$$ X_{can} = X_0 + f_{id}(H) + f_{exp}(H, z^{exp}), \quad C = f_{col}(H, z^{exp}). \tag{3} $$
Unlike the SDF and DMTet representations, Gaussians have additional attributes that need to be predicted. We therefore introduce a new MLP $f_{att}(\cdot)$ that predicts the Gaussian attributes in the canonical space, including the scale $S$, rotation $Q_{can}$ and opacity $A$. To ensure the stability of the generated results, we refrain from predicting these values directly; instead, we predict their offsets $\{\delta S, \delta Q, \delta A\}$ relative to the overall mean values $\{S_0, Q_0, A_0\}$:

$$ \{S, Q_{can}, A\} = \{S_0, Q_0, A_0\} + f_{att}(H, z^{exp}). \tag{4} $$
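A sketch of the attribute head of Eqn. 4, assuming rotations are parameterized as quaternions and the offsets are applied in the pre-activation (log-scale / logit-opacity) domain; `f_att` and all sizes here are illustrative assumptions.

```python
import torch
import torch.nn as nn

feat, d_exp, n_pts = 32, 32, 100000
# Offsets for scale (3), rotation quaternion (4) and opacity (1) per point.
f_att = nn.Sequential(nn.Linear(feat + d_exp, 128), nn.ReLU(),
                      nn.Linear(128, 3 + 4 + 1))

# Learnable overall means, initialized as in 3D Gaussian Splatting (assumed).
S0 = nn.Parameter(torch.full((n_pts, 3), -4.0))                # log-scale
Q0 = nn.Parameter(torch.tensor([1., 0., 0., 0.]).repeat(n_pts, 1))
A0 = nn.Parameter(torch.zeros(n_pts, 1))                       # logit-opacity

H, z_exp = torch.randn(n_pts, feat), torch.randn(d_exp)
out = f_att(torch.cat([H, z_exp.expand(n_pts, -1)], dim=-1))
dS, dQ, dA = out.split([3, 4, 1], dim=-1)
S, Q_can, A = S0 + dS, Q0 + dQ, A0 + dA                        # Eqn. 4
```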
Following this, we utilize the estimated head pose parameters $R$ and $T$, obtained during data preprocessing, to transform the canonical-space variables $X_{can}$ and $Q_{can}$ into the world space: $X = R \cdot X_{can} + T$, $Q = R \cdot Q_{can}$. For model rendering, we leverage differentiable rendering [21] and neural rendering techniques to generate images. The generated 3D Gaussian parameters $\{X, C, S, Q, A\}$ are conditioned on the identity code $z^{id}$ and the expression code $z^{exp}$. Finally, we feed the rendered feature map into the same upsampling network $\Psi(\cdot)$ of the guiding geometry model to generate a 512-resolution RGB image.
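Assuming quaternion-parameterized rotations, as in standard 3D Gaussian Splatting, the world-space transform can be sketched as follows; `quat_mul` and the pose values are illustrative, and the conversion of $R$ to a quaternion is left implicit.

```python
import torch

def quat_mul(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # Hamilton product of quaternions in (w, x, y, z) order.
    aw, ax, ay, az = a.unbind(-1)
    bw, bx, by, bz = b.unbind(-1)
    return torch.stack([
        aw*bw - ax*bx - ay*by - az*bz,
        aw*bx + ax*bw + ay*bz - az*by,
        aw*by - ax*bz + ay*bw + az*bx,
        aw*bz + ax*by - ay*bx + az*bw,
    ], dim=-1)

# R (3x3) and T (3,) come from the BFM fit during preprocessing.
X_can, Q_can = torch.randn(100000, 3), torch.randn(100000, 4)
R, T = torch.eye(3), torch.zeros(3)
X = X_can @ R.T + T                        # X = R . X_can + T
q_R = torch.tensor([1., 0., 0., 0.])       # quaternion of R (identity here)
Q = quat_mul(q_R.expand_as(Q_can), Q_can)  # Q = R . Q_can as quaternions
```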
In the 3D Gaussian Parametric Head Model, we leverage the previously trained guiding geometry model to initialize our variables and networks, rather than initializing them randomly and training from scratch. Specifically, we initialize the Gaussian positions $X_0$ with the vertex positions of the mean mesh $V_0$. Meanwhile, we generate the per-vertex features $\Gamma$ from $f_{mean}(x)$ at the beginning and bind them to the points as an optimizable variable $\Gamma_0$, as described above. Additionally, all identity codes $z^{id}$, expression codes $z^{exp}$, and the networks $\{f_{inj}(\cdot), f_{id}(\cdot), f_{exp}(\cdot), f_{col}(\cdot), \Psi(\cdot)\}$ are directly inherited from the guiding geometry model. Note that the attribute MLP $f_{att}(\cdot)$ is a newly introduced network and is therefore initialized randomly. Finally, the overall mean values of the Gaussian attributes $\{S_0, Q_0, A_0\}$ are initialized following the original 3D Gaussian Splatting [21].
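In code, this initialization amounts to copying weights and baking the mean-mesh quantities into learnable tensors. Below is a sketch under the assumption that both models are `nn.Module`s with same-named submodules; the interface is hypothetical.

```python
import torch
import torch.nn as nn

def init_gaussian_from_guide(gauss, guide, V0, Gamma0):
    """Hypothetical initialization: reuse guide-model networks and mean mesh.
    `gauss` / `guide` are assumed nn.Modules with matching submodule names."""
    # Gaussian positions start at the mean-mesh vertices.
    gauss.X0 = nn.Parameter(V0.clone())
    # Per-point features, generated once by f_mean and then freed to optimize.
    gauss.Gamma0 = nn.Parameter(Gamma0.clone())
    # Shared networks are inherited; f_att stays randomly initialized.
    for name in ["f_inj", "f_id", "f_exp", "f_col", "psi"]:
        getattr(gauss, name).load_state_dict(getattr(guide, name).state_dict())
    return gauss
```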
3.3 Loss Functions

To ensure the accurate convergence of the model, we employ several loss functions as constraints, including a basic photometric loss and a silhouette loss that enforce consistency of both the rendered high-resolution images $I_{hr}$ and the rendered masks $M$ with the ground truth:

$$ \mathcal{L}_{hr} = \left\| I_{hr} - I_{gt} \right\|_1, \quad \mathcal{L}_{sil} = IOU(M, M_{gt}), \tag{5} $$

with $I_{gt}$ denoting the ground-truth RGB images and $M_{gt}$ the ground-truth masks.
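A minimal sketch of Eqn. 5. Note that the silhouette term is implemented here as 1 - IoU with a soft, differentiable intersection and union, which is our assumption about how the IoU constraint is turned into a minimizable loss.

```python
import torch

def photometric_loss(I_hr: torch.Tensor, I_gt: torch.Tensor) -> torch.Tensor:
    return (I_hr - I_gt).abs().mean()          # L1 term of Eqn. 5

def silhouette_loss(M: torch.Tensor, M_gt: torch.Tensor) -> torch.Tensor:
    # Soft IoU over mask probabilities in [0, 1]; 1 - IoU so lower is better.
    inter = (M * M_gt).sum()
    union = (M + M_gt - M * M_gt).sum().clamp_min(1e-8)
    return 1.0 - inter / union
```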
We further encourage the first three channels of the low-resolution feature map $I_{lr}$ to closely match the ground-truth RGB image $I_{gt}$ by introducing an $L_1$ loss:

$$ \mathcal{L}_{lr} = \left\| I_{lr} - I_{gt} \right\|_1. \tag{6} $$

The geometric deformation caused by expressions is typically complex and cannot be learned through image supervision alone. We therefore provide additional coarse supervision for expression-deformation learning using 3D landmarks. Specifically, we define 3D landmarks $P_0$ in the canonical space, predict their displacements, and transform them to the world space as $P$, just like the transformation of the original vertices $V_0$ above.
Then, we construct the landmark loss function:

$$ \mathcal{L}_{lmk} = \left\| P - P_{gt} \right\|_2, \tag{7} $$

with $P_{gt}$ denoting the ground-truth 3D landmarks, which are estimated by fitting a BFM model to the training data during preprocessing.

Moreover, to guarantee the decoupling of the identity and expression deformations learned by the model and to minimize redundancy, we introduce a regularization loss that minimizes the magnitude of both deformations:

$$ \mathcal{L}_{reg} = \left\| \delta V_{id} \right\|_2 + \left\| \delta V_{exp} \right\|_2. \tag{8} $$

During the training of the Guiding Geometry Model, we also add a Laplacian smoothness term $\mathcal{L}_{lap}$ to penalize surface noise and breaks.
Overall, the total loss function is formulated as:

$$ \mathcal{L} = \mathcal{L}_{hr} + \lambda_{sil}\mathcal{L}_{sil} + \lambda_{lr}\mathcal{L}_{lr} + \lambda_{lmk}\mathcal{L}_{lmk} + \lambda_{reg}\mathcal{L}_{reg} + \lambda_{lap}\mathcal{L}_{lap}, \tag{9} $$

with each $\lambda$ denoting the weight of the corresponding term. In practice, we set $\lambda_{sil} = 0.1$, $\lambda_{lr} = 0.1$, $\lambda_{lmk} = 0.1$, $\lambda_{reg} = 0.001$ and $\lambda_{lap} = 100$. During training, we jointly optimize $\{z^{id}, z^{exp}, f_{inj}(\cdot), f_{mean}(\cdot), f_{id}(\cdot), f_{exp}(\cdot), f_{col}(\cdot), \Psi(\cdot), P_0\}$. Notably, the canonical 3D landmarks $P_0$ are initialized as the average of the estimated 3D landmarks over the training dataset.
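Assembling Eqn. 9 with the stated weights is mechanical; in the sketch below, `terms` is assumed to map loss names to scalar tensors computed as above.

```python
# Loss weights from Sec. 3.3 (guiding geometry model stage).
WEIGHTS = {"sil": 0.1, "lr": 0.1, "lmk": 0.1, "reg": 0.001, "lap": 100.0}

def total_guide_loss(terms):
    """`terms` maps {"hr", "sil", "lr", "lmk", "reg", "lap"} to scalars."""
    return terms["hr"] + sum(w * terms[k] for k, w in WEIGHTS.items())
```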
During the training stage of the 3D Gaussian Parametric Head Model, we additionally compute a perceptual loss [60], $\mathcal{L}_{vgg} = VGG(I_{hr}, I_{gt})$, to encourage the model to learn more high-frequency details. As when training the guiding geometry model, we enforce the first three channels of the feature map to be RGB channels as in Eqn. 6, and introduce the landmark guidance term of Eqn. 7 and the displacement regularization term of Eqn. 8. Consequently, the overall loss function is formulated as:

$$ \mathcal{L} = \mathcal{L}_{hr} + \lambda_{vgg}\mathcal{L}_{vgg} + \lambda_{lr}\mathcal{L}_{lr} + \lambda_{lmk}\mathcal{L}_{lmk} + \lambda_{reg}\mathcal{L}_{reg}, \tag{10} $$

with weights $\lambda_{vgg} = 0.1$, $\lambda_{lr} = 0.1$, $\lambda_{lmk} = 0.1$ and $\lambda_{reg} = 0.001$. In this training stage, we again jointly optimize all the variables and networks mentioned above, including the overall mean positions and attributes of the Gaussians and the 3D landmarks: $\{z^{id}, z^{exp}, f_{inj}(\cdot), f_{id}(\cdot), f_{exp}(\cdot), f_{col}(\cdot), f_{att}(\cdot), \Psi(\cdot), X_0, \Gamma_0, S_0, Q_0, A_0, P_0\}$.
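A common realization of such a perceptual loss compares intermediate VGG-16 activations of the rendered and ground-truth images; the layer cutoff below is an assumption, not necessarily the exact recipe of [60], and inputs are assumed to be ImageNet-normalized.

```python
import torch
import torch.nn as nn
from torchvision.models import vgg16

class VGGLoss(nn.Module):
    """Sketch of L_vgg = VGG(I_hr, I_gt): L1 over early VGG-16 features."""
    def __init__(self, layer: int = 16):  # up to relu3_3 (assumption)
        super().__init__()
        self.features = vgg16(weights="IMAGENET1K_V1").features[:layer].eval()
        for p in self.features.parameters():
            p.requires_grad_(False)

    def forward(self, I_hr: torch.Tensor, I_gt: torch.Tensor) -> torch.Tensor:
        # Expects (B, 3, H, W) images, ImageNet-normalized.
        return (self.features(I_hr) - self.features(I_gt)).abs().mean()
```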
3.4 Inference Details

Image-based Fitting. Given a single RGB portrait image as input, we first align the image according to the processing rules of the training set. We then employ gradient descent to fit the image rendered by the 3D Gaussian Parametric Head Model to the input image, using the photometric losses $\mathcal{L}_{lr}$ and $\mathcal{L}_{hr}$ defined in Eqn. 10. This regresses the identity code $z^{id}$ and the expression code $z^{exp}$; we optimize for just 200 iterations with a learning rate of $1 \times 10^{-3}$ for both latent codes.
Following this, we fix the latent codes $z^{id}$ and $z^{exp}$, so that the feature vectors $H$ and the predicted canonical positions $X_{can}$ are determined. We then further optimize the color MLP $f_{col}(\cdot)$ and the canonical positions $X_{can}$ directly, which represent the appearance and geometry of the specific subject, using the same loss function. In this step, we optimize for only 100 iterations with a learning rate of $1 \times 10^{-4}$ for both $f_{col}(\cdot)$ and $X_{can}$. This optimization adds details that cannot be recovered by the trained model itself, ultimately yielding the reconstructed head model. The entire process totals 300 iterations and takes only 30 seconds.
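The two-stage fitting can be sketched as below; `model`, `render` and `loss_fn` are hypothetical interfaces standing in for the pipeline described above, and only the iteration counts and learning rates come from the text.

```python
import torch

def fit_to_image(model, I_gt, M_gt, render, loss_fn):
    """Hypothetical two-stage image-based fitting (Sec. 3.4)."""
    z_id = torch.zeros(model.d_id, requires_grad=True)
    z_exp = torch.zeros(model.d_exp, requires_grad=True)

    # Stage 1: 200 iterations, lr 1e-3, optimize only the latent codes.
    opt = torch.optim.Adam([z_id, z_exp], lr=1e-3)
    for _ in range(200):
        opt.zero_grad()
        loss_fn(render(model, z_id, z_exp), I_gt, M_gt).backward()
        opt.step()

    # Stage 2: 100 iterations, lr 1e-4, refine f_col and the canonical
    # positions X_can with the latent codes frozen.
    z_id.requires_grad_(False); z_exp.requires_grad_(False)
    X_can = model.canonical_positions(z_id, z_exp).detach().requires_grad_(True)
    opt = torch.optim.Adam([*model.f_col.parameters(), X_can], lr=1e-4)
    for _ in range(100):
        opt.zero_grad()
        loss_fn(render(model, z_id, z_exp, X_can=X_can), I_gt, M_gt).backward()
        opt.step()
    return z_id, z_exp, X_can
```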
Expression Editing. Given a source portrait image providing the subject whose expression is to be edited and a target portrait image providing the target expression, we first obtain the head model of the source subject through optimization with the image-based fitting strategy described above. For the target portrait image, we obtain the head model and the corresponding expression code in the same way. Finally, we feed the target expression code into the head model of the source subject, so that the source subject's expression is edited to the target one.

4 Experiments

4.1 Datasets

NeRSemble dataset contains over 260 different identities, with 72 fps multi-view videos collected from 16 synchronized cameras for each identity. The total video length for a single identity is approximately 6000-11000 frames. In our experiments, we selected 140 of the identities for training and the rest for evaluation. For each identity's video, we selected about 150 frames from all 16 views as training data.

NPHM dataset contains 5200 3D human head scans from 255 different identities, each with about 20 different expressions. We selected approximately 1600 scans of 80 identities for training. Since our method uses 2D images as training supervision, we render each scan from 80 different views to generate synthetic image data and record the camera parameters and masks.

FaceVerse dataset is an East Asian human head scan dataset. It contains 2310 scans from 110 different identities, each with 21 expressions. We selected 1620 scans of 80 identities for training. Similarly, for each scan we render multi-view synthetic image data from 80 different views and record the camera parameters and masks.

4.2 Evaluation

Disentanglement. We tested the performance of the 3D Gaussian Parametric Head Model under the control of different identity codes and different expression codes.
[Fig. 3: Head models generated with randomly sampled identity codes and expression codes as conditions. Each row corresponds to the same identity code (ID1, ID2), and each column corresponds to the same expression code (Exp1-Exp5).]

[Fig. 4: Comparison of our initialization strategy against initialization from the vertices of the FLAME model (panels: GT, Our Initialization, FLAME Initialization). The left side shows the rendered image, and the right side shows the positions of the Gaussian points.]

We randomly sampled 2 identity codes and 5 expression codes to generate 10 head models. Each row corresponds to the same identity code, and each column corresponds to the same expression code, as shown in Fig. 3. It can be observed that our model performs well in terms of identity consistency and expression consistency, and that the two components are fully disentangled.

Ablation on Initialization. To evaluate the effectiveness of our initialization strategy with the guiding geometry model outlined in Section 3, we compare it against a FLAME-based initialization strategy. To use the FLAME model for initialization, we first fit a FLAME model to the overall mean 3D landmarks estimated during data preprocessing. Then, we sample 100,000 points near the surface of the FLAME mesh as the initialization of the mean Gaussian positions $X_0$. The per-vertex features $\Gamma$ bound to each point are simply set to zero, and all the networks $\{f_{inj}(\cdot), f_{id}(\cdot), f_{exp}(\cdot), f_{col}(\cdot), \Psi(\cdot)\}$ and $f_{att}(\cdot)$ are randomly initialized, as no prior is available. The initialization of the Gaussian attributes $\{S_0, Q_0, A_0\}$ remains the same as in our strategy.
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 130, + 642, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 642, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 642, + 481, + 665 + ], + "type": "text", + "content": "We show the visualization results in Figure 4, with the Gaussian model rendering image on the left and the Gaussian positions displayed as point clouds" + } + ] + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "type": "text", + "content": "3D Gaussian Parametric Head Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 112, + 200, + 188 + ], + "blocks": [ + { + "bbox": [ + 133, + 112, + 200, + 188 + ], + "lines": [ + { + "bbox": [ + 133, + 112, + 200, + 188 + ], + "spans": [ + { + "bbox": [ + 133, + 112, + 200, + 188 + ], + "type": "image", + "image_path": "ec7cea1c7380f527b9d3b72265a034e62d0cfe0283abb6322b59282cbca66c4a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 157, + 189, + 176, + 196 + ], + "lines": [ + { + "bbox": [ + 157, + 189, + 176, + 196 + ], + "spans": [ + { + "bbox": [ + 157, + 189, + 176, + 196 + ], + "type": "text", + "content": "Mesh" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 203, + 112, + 271, + 189 + ], + "blocks": [ + { + "bbox": [ + 203, + 112, + 271, + 189 + ], + "lines": [ + { + "bbox": [ + 203, + 112, + 271, + 189 + ], + "spans": [ + { + "bbox": [ + 203, + 112, + 271, + 189 + ], + "type": "image", + "image_path": "8d1289e3aac52396e9ff8a5c886ae7acae423f3990cdbd8011e5fc7b79de252b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 221, + 189, + 253, + 196 + ], + "lines": [ + { + "bbox": [ + 221, + 189, + 253, + 196 + ], + "spans": [ + { + "bbox": [ + 221, + 189, + 253, + 196 + ], + "type": "text", + "content": "Mesh+SR" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 274, + 112, + 341, + 189 + ], + "blocks": [ + { + "bbox": [ + 274, + 112, + 341, + 189 + ], + "lines": [ + { + "bbox": [ + 274, + 112, + 341, + 189 + ], + "spans": [ + { + "bbox": [ + 274, + 112, + 341, + 189 + ], + "type": "image", + "image_path": "75ece36e26df68fbcfc01df5b051e366513075b812588f8deb59b4a3b09b6661.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 293, + 189, + 323, + 196 + ], + "lines": [ + { + "bbox": [ + 293, + 189, + 323, + 196 + ], + "spans": [ + { + "bbox": [ + 293, + 189, + 323, + 196 + ], + "type": "text", + "content": "Gaussian" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 148, + 205, + 464, + 217 + ], + "lines": [ + { + "bbox": [ + 148, + 205, + 464, + 217 + ], + "spans": [ + { + "bbox": [ + 148, + 205, + 464, + 217 + ], + "type": "text", + "content": "Fig. 
5: The comparison of the different representations with super resolution." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 345, + 112, + 411, + 189 + ], + "blocks": [ + { + "bbox": [ + 345, + 112, + 411, + 189 + ], + "lines": [ + { + "bbox": [ + 345, + 112, + 411, + 189 + ], + "spans": [ + { + "bbox": [ + 345, + 112, + 411, + 189 + ], + "type": "image", + "image_path": "f0009af8684436ba891ccf4afd4916dfc229b4c8b3b3a77fc5100541c8bcbbbf.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 357, + 189, + 400, + 196 + ], + "lines": [ + { + "bbox": [ + 357, + 189, + 400, + 196 + ], + "spans": [ + { + "bbox": [ + 357, + 189, + 400, + 196 + ], + "type": "text", + "content": "Gaussian+SR" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 414, + 112, + 482, + 189 + ], + "blocks": [ + { + "bbox": [ + 414, + 112, + 482, + 189 + ], + "lines": [ + { + "bbox": [ + 414, + 112, + 482, + 189 + ], + "spans": [ + { + "bbox": [ + 414, + 112, + 482, + 189 + ], + "type": "image", + "image_path": "b4ec2aff5a530f3ab998f44c2b3be602578f01ed08660801646d6b4961a55d13.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 442, + 189, + 453, + 196 + ], + "lines": [ + { + "bbox": [ + 442, + 189, + 453, + 196 + ], + "spans": [ + { + "bbox": [ + 442, + 189, + 453, + 196 + ], + "type": "text", + "content": "GT" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 227, + 482, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 227, + 482, + 334 + ], + "spans": [ + { + "bbox": [ + 130, + 227, + 482, + 334 + ], + "type": "text", + "content": "on the right. Our initialization strategy using the guiding geometry model can ensure that all the Gaussian points fall evenly on the actual surface of the model, thereby ensuring reconstruction quality. When using the FLAME model for the initialization, a large number of points wander inside or outside the actual surface of the model, causing noise or redundancy and leading the model to lose some high-frequency information and making it difficult to fully converge. We also perform a quantitative evaluation of different initialization strategies on the rendered images, as shown in Table 1, which shows that our method leads to better rendering results." + } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 169, + 340, + 444, + 378 + ], + "blocks": [ + { + "bbox": [ + 169, + 340, + 444, + 378 + ], + "lines": [ + { + "bbox": [ + 169, + 340, + 444, + 378 + ], + "spans": [ + { + "bbox": [ + 169, + 340, + 444, + 378 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>PSNR ↑</td><td>SSIM ↑</td><td>LPIPS ↓</td></tr><tr><td>FLAME Initialization</td><td>25.7</td><td>0.82</td><td>0.109</td></tr><tr><td>Our Initialization</td><td>28.0</td><td>0.84</td><td>0.085</td></tr></table>
", + "image_path": "8efdefe6e644650afad296c540614587c5c9a8c21ca4c6d09c730272d30dbdb0.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 378, + 481, + 399 + ], + "lines": [ + { + "bbox": [ + 130, + 378, + 481, + 399 + ], + "spans": [ + { + "bbox": [ + 130, + 378, + 481, + 399 + ], + "type": "text", + "content": "Table 1: Quantitative evaluation results of our initialization strategy and naive FLAME initialization strategy." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 130, + 411, + 482, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 411, + 482, + 495 + ], + "spans": [ + { + "bbox": [ + 130, + 411, + 482, + 495 + ], + "type": "text", + "content": "Ablation on Representation and Super Resolution. We conduct the ablation study for the guiding mesh model, the Gaussian model and the super-resolution network (abbreviated as SR) as shown in the Fig. 5. The corresponding PSNR metrics are: Mesh (15.7), Mesh+SR (17.3), Gaussian (27.0), Gaussian+SR (29.3). Compared to mesh, utilizing 3D Gaussian as the representation brings significant improvements (+12), while the super resolution module adds some details, generating more realistic results." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 514, + 224, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 514, + 224, + 525 + ], + "spans": [ + { + "bbox": [ + 132, + 514, + 224, + 525 + ], + "type": "text", + "content": "4.3 Applications" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "type": "text", + "content": "Image-based Fitting. In this section, we demonstrate the capability of our 3D Gaussian Parametric Model for single-image fitting using the fitting strategy detailed in Section 3.4. We compare our model with similar works: HeadNeRF [19], MoFaNeRF [64], and PanoHead [1]. In addition to evaluating the above methods on our evaluation dataset, we also conduct comparisons using cases from MEAD [46] dataset (the first two rows). The qualitative results are presented in Figure 6. Our model exhibits reconstruction accuracy while maintaining excellent 3D consistency and identity preservation. HeadNeRF's fitting results often suffer from missing hair, and they remove the body and neck. MoFaNeRF, trained solely on the FaceScape dataset where all subjects wear hats, struggles to fit hair. As a GAN-based model, PanoHead can achieve highly accurate reproductions" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "type": "text", + "content": "Xuet al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 132, + 114, + 482, + 403 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 482, + 403 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 482, + 403 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 482, + 403 + ], + "type": "image", + "image_path": "3ad3a43bbbbe0a2b9ae04df2fc3acd2ed3b3a28c503a8e686d10e8f434ab7519.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 411, + 482, + 456 + ], + "lines": [ + { + "bbox": [ + 130, + 411, + 482, + 456 + ], + "spans": [ + { + "bbox": [ + 130, + 411, + 482, + 456 + ], + "type": "text", + "content": "Fig. 6: We compare our method with other SOTA methods on the task of single image fitting. The far left is the input image, and to the right are Our method, HeadNeRF [19], MoFaNeRF [64] and PanoHead [1]. Our model significantly outperforms other methods in reconstruction quality and 3D consistency." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 466, + 481, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 466, + 481, + 490 + ], + "spans": [ + { + "bbox": [ + 130, + 466, + 481, + 490 + ], + "type": "text", + "content": "from the input view. However, due to overfitting, the results from side views reveal poor 3D consistency and identity preservation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 491, + 482, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 491, + 482, + 609 + ], + "spans": [ + { + "bbox": [ + 130, + 491, + 482, + 609 + ], + "type": "text", + "content": "In addition to qualitative evaluations, we also conducted quantitative evaluations on 60 images using three metrics: Peak Signal-to-Noise Ratio (PSNR), Structural Similarity Index (SSIM), and Face Distance (FD). Here, we provide a brief explanation of the Face Distance (FD). To compute the FD metric, we utilized a face recognition tool " + }, + { + "bbox": [ + 130, + 491, + 482, + 609 + ], + "type": "inline_equation", + "content": "^3" + }, + { + "bbox": [ + 130, + 491, + 482, + 609 + ], + "type": "text", + "content": " to encode two images containing faces into 128-dimensional vectors. Subsequently, we calculated the distance between these two vectors to reflect the similarity of the two faces. In our experiments, FD serves as an indicator of identity consistency. The results are shown in Table 2. Our model demonstrates optimal performance in both fitting accuracy and identity consistency." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 610, + 482, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 610, + 482, + 647 + ], + "spans": [ + { + "bbox": [ + 130, + 610, + 482, + 647 + ], + "type": "text", + "content": "Expression Editing. Our 3D Gaussian Parametric Head Model possesses the capability for expression editing. Upon completing the fitting process on a portrait image, we can animate the model by applying different expression codes." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "type": "text", + "content": "3D Gaussian Parametric Head Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 133, + 652, + 332, + 666 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 652, + 332, + 666 + ], + "spans": [ + { + "bbox": [ + 133, + 652, + 332, + 666 + ], + "type": "text", + "content": "3 https://github.com/ageitgey/face_recognition" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 197, + 114, + 416, + 173 + ], + "blocks": [ + { + "bbox": [ + 197, + 114, + 416, + 173 + ], + "lines": [ + { + "bbox": [ + 197, + 114, + 416, + 173 + ], + "spans": [ + { + "bbox": [ + 197, + 114, + 416, + 173 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>PSNR ↑</td><td>SSIM ↑</td><td>FD ↓</td></tr><tr><td>HeadNeRF</td><td>28.9</td><td>0.84</td><td>0.37</td></tr><tr><td>MoFaNeRF</td><td>28.6</td><td>0.82</td><td>0.37</td></tr><tr><td>PanoHead</td><td>29.1</td><td>0.86</td><td>0.41</td></tr><tr><td>Ours</td><td>30.3</td><td>0.86</td><td>0.35</td></tr></table>
", + "image_path": "748267b6e02f8e804bcf5835e585087f34b1de73de7e0bbbd0fbe356c99a0011.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 173, + 479, + 205 + ], + "lines": [ + { + "bbox": [ + 132, + 173, + 479, + 205 + ], + "spans": [ + { + "bbox": [ + 132, + 173, + 479, + 205 + ], + "type": "text", + "content": "Table 2: Quantitative evaluation results on the task of single image fitting. We compare our method with other 3 SOTA methods: HeadNeRF [19], MoFaNeRF [64], PanoHead [1]." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 133, + 216, + 176, + 262 + ], + "blocks": [ + { + "bbox": [ + 133, + 216, + 176, + 262 + ], + "lines": [ + { + "bbox": [ + 133, + 216, + 176, + 262 + ], + "spans": [ + { + "bbox": [ + 133, + 216, + 176, + 262 + ], + "type": "image", + "image_path": "61a81ad08d615aa023cbdc29d34f4032335be5f2316e1e88e986f03b8adee9c3.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 148, + 263, + 163, + 269 + ], + "lines": [ + { + "bbox": [ + 148, + 263, + 163, + 269 + ], + "spans": [ + { + "bbox": [ + 148, + 263, + 163, + 269 + ], + "type": "text", + "content": "Input" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 179, + 216, + 223, + 263 + ], + "blocks": [ + { + "bbox": [ + 179, + 216, + 223, + 263 + ], + "lines": [ + { + "bbox": [ + 179, + 216, + 223, + 263 + ], + "spans": [ + { + "bbox": [ + 179, + 216, + 223, + 263 + ], + "type": "image", + "image_path": "319f95f3e9f4efa9604caf82561e803863b3b2f150f5b868c739b6e665ec41d4.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 195, + 263, + 208, + 269 + ], + "lines": [ + { + "bbox": [ + 195, + 263, + 208, + 269 + ], + "spans": [ + { + "bbox": [ + 195, + 263, + 208, + 269 + ], + "type": "text", + "content": "Exp1" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 223, + 217, + 265, + 263 + ], + "blocks": [ + { + "bbox": [ + 223, + 217, + 265, + 263 + ], + "lines": [ + { + "bbox": [ + 223, + 217, + 265, + 263 + ], + "spans": [ + { + "bbox": [ + 223, + 217, + 265, + 263 + ], + "type": "image", + "image_path": "80cfa2c0ccd204a8c9aeac0b5543adb5a39ce55d64fabc8df862de8d2b11e261.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 238, + 263, + 251, + 269 + ], + "lines": [ + { + "bbox": [ + 238, + 263, + 251, + 269 + ], + "spans": [ + { + "bbox": [ + 238, + 263, + 251, + 269 + ], + "type": "text", + "content": "Exp2" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 132, + 279, + 479, + 312 + ], + "lines": [ + { + "bbox": [ + 132, + 279, + 479, + 312 + ], + "spans": [ + { + "bbox": [ + 132, + 279, + 479, + 312 + ], + "type": "text", + "content": "Fig. 7: We perform expression editing on the head model reconstructed from the input image. Our model is able to handle very exaggerated expressions with superior identity consistency." 
+ } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 265, + 217, + 307, + 263 + ], + "blocks": [ + { + "bbox": [ + 265, + 217, + 307, + 263 + ], + "lines": [ + { + "bbox": [ + 265, + 217, + 307, + 263 + ], + "spans": [ + { + "bbox": [ + 265, + 217, + 307, + 263 + ], + "type": "image", + "image_path": "2361aa9c4dda95ff149f245f3e90611c8140792f624ce848565405030de84823.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 280, + 263, + 294, + 269 + ], + "lines": [ + { + "bbox": [ + 280, + 263, + 294, + 269 + ], + "spans": [ + { + "bbox": [ + 280, + 263, + 294, + 269 + ], + "type": "text", + "content": "Exp3" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 307, + 217, + 350, + 263 + ], + "blocks": [ + { + "bbox": [ + 307, + 217, + 350, + 263 + ], + "lines": [ + { + "bbox": [ + 307, + 217, + 350, + 263 + ], + "spans": [ + { + "bbox": [ + 307, + 217, + 350, + 263 + ], + "type": "image", + "image_path": "df9e24b0d1388bb3471c409b606c36714298523c3d184f15b56a58880f8f26a4.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 323, + 263, + 337, + 269 + ], + "lines": [ + { + "bbox": [ + 323, + 263, + 337, + 269 + ], + "spans": [ + { + "bbox": [ + 323, + 263, + 337, + 269 + ], + "type": "text", + "content": "Exp4" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 350, + 217, + 394, + 263 + ], + "blocks": [ + { + "bbox": [ + 350, + 217, + 394, + 263 + ], + "lines": [ + { + "bbox": [ + 350, + 217, + 394, + 263 + ], + "spans": [ + { + "bbox": [ + 350, + 217, + 394, + 263 + ], + "type": "image", + "image_path": "4605d43b5668d0ff7ef2acdaaafbf5745041e40b20343f30e26e57a4bd81c1b3.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 366, + 263, + 378, + 269 + ], + "lines": [ + { + "bbox": [ + 366, + 263, + 378, + 269 + ], + "spans": [ + { + "bbox": [ + 366, + 263, + 378, + 269 + ], + "type": "text", + "content": "Exp5" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 394, + 217, + 436, + 263 + ], + "blocks": [ + { + "bbox": [ + 394, + 217, + 436, + 263 + ], + "lines": [ + { + "bbox": [ + 394, + 217, + 436, + 263 + ], + "spans": [ + { + "bbox": [ + 394, + 217, + 436, + 263 + ], + "type": "image", + "image_path": "3bce3d7d8741445ca67c7dc09715008be6f2aa0ef1477f10b670807df8483318.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 410, + 263, + 423, + 269 + ], + "lines": [ + { + "bbox": [ + 410, + 263, + 423, + 269 + ], + "spans": [ + { + "bbox": [ + 410, + 263, + 423, + 269 + ], + "type": "text", + "content": "Exp6" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 436, + 217, + 481, + 263 + ], + "blocks": [ + { + "bbox": [ + 436, + 217, + 481, + 263 + ], + "lines": [ + { + "bbox": [ + 436, + 217, + 481, + 263 + ], + "spans": [ + { + "bbox": [ + 436, + 217, + 481, + 263 + ], + "type": "image", + "image_path": "552f455544647a2bcb5f662d6d0eb7e83b6a151c2bf93e6cd8083a5db756c821.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 452, + 263, + 466, + 269 + ], + "lines": [ + { + 
"bbox": [ + 452, + 263, + 466, + 269 + ], + "spans": [ + { + "bbox": [ + 452, + 263, + 466, + 269 + ], + "type": "text", + "content": "Exp7" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 130, + 325, + 480, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 325, + 480, + 385 + ], + "spans": [ + { + "bbox": [ + 130, + 325, + 480, + 385 + ], + "type": "text", + "content": "The detailed pipeline is outlined in Section 3.4. An example is illustrated in Figure 7. Our model can generate images depicting the corresponding expressions of the input subject based on a reference expression (as seen in the lower left corner of each image in the figure). It performs admirably even with exaggerated expressions, producing natural and realistic results." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 132, + 413, + 216, + 425 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 413, + 216, + 425 + ], + "spans": [ + { + "bbox": [ + 132, + 413, + 216, + 425 + ], + "type": "text", + "content": "5 Discussion" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 130, + 446, + 480, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 446, + 480, + 506 + ], + "spans": [ + { + "bbox": [ + 130, + 446, + 480, + 506 + ], + "type": "text", + "content": "Ethical Considerations. Our technique can generate artificial portrait videos, posing a significant risk of spreading misinformation, shaping public opinions, and undermining trust in media outlets. These consequences could have profound negative effects on society. Therefore, it is crucial to explore methods that effectively differentiate between genuine and manipulated content." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 130, + 508, + 480, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 508, + 480, + 568 + ], + "spans": [ + { + "bbox": [ + 130, + 508, + 480, + 568 + ], + "type": "text", + "content": "**Limitation.** Our 3D Gaussian Parametric Head Model takes a step forward in the characterization of parametric head models. However, due to the limited amount of training data, the generalization ability of the model is still insufficient. In some cases where the illumination is significantly different from the training set, the reconstruction results are not good." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 130, + 570, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 570, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 570, + 481, + 665 + ], + "type": "text", + "content": "Conclusion. In this paper, we propose the 3D Gaussian Parametric Head Model, a novel framework for parametric head model. This model leverages the power of 3D Gaussians, enabling realistic rendering quality and real-time speed. Our well-designed training strategy ensured stable convergence while enabling the model to learn appearance details and expressions. Besides, our model allows for creating detailed, high-quality face avatars from a single input image, and also enables editing for expressions and identity. We believe our model represents a significant advancement in the field of parametric head model." 
+ } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "type": "text", + "content": "Xuet al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 139, + 482, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 139, + 482, + 175 + ], + "spans": [ + { + "bbox": [ + 132, + 139, + 482, + 175 + ], + "type": "text", + "content": "The work is supported by the National Science Foundation of China (NSFC) under Grant Number 62125107 and the Postdoctoral Fellowship Program of China Postdoctoral Science Foundation under Grant Number GZC20231304." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 193, + 197, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 193, + 197, + 205 + ], + "spans": [ + { + "bbox": [ + 133, + 193, + 197, + 205 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 134, + 217, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 138, + 217, + 481, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 217, + 481, + 251 + ], + "spans": [ + { + "bbox": [ + 138, + 217, + 481, + 251 + ], + "type": "text", + "content": "1. An, S., Xu, H., Shi, Y., Song, G., Ogras, U.Y., Luo, L.: Panohead: Geometry-aware 3d full-head synthesis in 360deg. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 20950-20959 (June 2023)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 251, + 481, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 251, + 481, + 282 + ], + "spans": [ + { + "bbox": [ + 138, + 251, + 481, + 282 + ], + "type": "text", + "content": "2. Blanz, V., Vetter, T.: A morphable model for the synthesis of 3d faces. In: 26th Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH 1999). pp. 187-194. ACM Press (1999)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 284, + 481, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 284, + 481, + 327 + ], + "spans": [ + { + "bbox": [ + 138, + 284, + 481, + 327 + ], + "type": "text", + "content": "3. Bühler, M.C., Sarkar, K., Shah, T., Li, G., Wang, D., Helminger, L., Orts-Escalano, S., Lagun, D., Hilliges, O., Beeler, T., et al.: Preface: A data-driven volumetric prior for few-shot ultra high-resolution face synthesis. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 
3402-3413 (2023)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 327, + 481, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 327, + 481, + 360 + ], + "spans": [ + { + "bbox": [ + 138, + 327, + 481, + 360 + ], + "type": "text", + "content": "4. Bulat, A., Tzimiropoulos, G.: How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230,000 3d facial landmarks). In: International Conference on Computer Vision (2017)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 361, + 481, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 361, + 481, + 392 + ], + "spans": [ + { + "bbox": [ + 138, + 361, + 481, + 392 + ], + "type": "text", + "content": "5. Cao, C., Simon, T., Kim, J.K., Schwartz, G., Zollhoefer, M., Saito, S.S., Lombardi, S., Wei, S.E., Belko, D., Yu, S.I., Sheikh, Y., Saragih, J.: Authentic volumetric avatars from a phone scan. ACM Trans. Graph. 41(4) (jul 2022)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 393, + 481, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 393, + 481, + 425 + ], + "spans": [ + { + "bbox": [ + 138, + 393, + 481, + 425 + ], + "type": "text", + "content": "6. Cao, C., Weng, Y., Zhou, S., Tong, Y., Zhou, K.: Facewarehouse: A 3d facial expression database for visual computing. In: IEEE Transactions on Visualization and Computer Graphics. vol. 20, pp. 413-425 (2014)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 426, + 481, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 426, + 481, + 468 + ], + "spans": [ + { + "bbox": [ + 138, + 426, + 481, + 468 + ], + "type": "text", + "content": "7. Chan, E., Monteiro, M., Kellnhofer, P., Wu, J., Wetzstein, G.: pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 5795-5805 (2020)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 469, + 481, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 469, + 481, + 522 + ], + "spans": [ + { + "bbox": [ + 138, + 469, + 481, + 522 + ], + "type": "text", + "content": "8. Chan, E.R., Lin, C.Z., Chan, M.A., Nagano, K., Pan, B., Mello, S.D., Gallo, O., Guibas, L., Tremblay, J., Khamis, S., Karras, T., Wetzstein, G.: Efficient geometry-aware 3D generative adversarial networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 16102-16112 (2022)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 523, + 481, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 523, + 481, + 555 + ], + "spans": [ + { + "bbox": [ + 138, + 523, + 481, + 555 + ], + "type": "text", + "content": "9. Chen, X., Deng, Y., Wang, B.: Mimic3d: Thriving 3d-aware gans via 3d-to-2d imitation. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) (2023)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 134, + 556, + 481, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 556, + 481, + 589 + ], + "spans": [ + { + "bbox": [ + 134, + 556, + 481, + 589 + ], + "type": "text", + "content": "10. Chen, Y., Wang, L., Li, Q., Xiao, H., Zhang, S., Yao, H., Liu, Y.: Monogaussiana-vatar: Monocular gaussian point-based head avatar. 
In: ACM SIGGRAPH 2023 Conference Proceedings (2024)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 134, + 590, + 481, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 590, + 481, + 621 + ], + "spans": [ + { + "bbox": [ + 134, + 590, + 481, + 621 + ], + "type": "text", + "content": "11. Deng, Y., Yang, J., Xiang, J., Tong, X.: Gram: Generative radiance manifolds for 3d-aware image generation. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) pp. 10663-10673 (2021)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 134, + 622, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 622, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 134, + 622, + 481, + 665 + ], + "type": "text", + "content": "12. Gafni, G., Thies, J., Zollhofer, M., Niessner, M.: Dynamic neural radiance fields for monocular 4d facial avatar reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 8645-8654 (June 2021)" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "type": "text", + "content": "3D Gaussian Parametric Head Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 666 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 133, + 116, + 480, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 116, + 480, + 149 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 480, + 149 + ], + "type": "text", + "content": "13. Gao, X., Zhong, C., Xiang, J., Hong, Y., Guo, Y., Zhang, J.: Reconstructing personalized semantic facial nerf models from monocular video. ACM Transactions on Graphics (Proceedings of SIGGRAPH Asia) 41(6) (2022)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 150, + 481, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 150, + 481, + 183 + ], + "spans": [ + { + "bbox": [ + 133, + 150, + 481, + 183 + ], + "type": "text", + "content": "14. Gerig, T., Forster, A., Blumer, C., Egger, B., Lüthi, M., Schönborn, S., Vetter, T.: Morphable face models - an open framework. In: 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018). pp. 75-82 (2017)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 183, + 480, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 183, + 480, + 215 + ], + "spans": [ + { + "bbox": [ + 133, + 183, + 480, + 215 + ], + "type": "text", + "content": "15. Giebenhain, S., Kirschstein, T., Georgopoulos, M., Rünz, M., Agapito, L., Nießner, M.: Learning neural parametric head models. 
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 216, + 480, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 216, + 480, + 258 + ], + "spans": [ + { + "bbox": [ + 132, + 216, + 480, + 258 + ], + "type": "text", + "content": "16. Giebenhain, S., Kirschstein, T., Georgopoulos, M., Rünz, M., Agapito, L., Nießner, M.: Mononphm: Dynamic head reconstruction from monocular videos. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 259, + 480, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 259, + 480, + 303 + ], + "spans": [ + { + "bbox": [ + 132, + 259, + 480, + 303 + ], + "type": "text", + "content": "17. Grassal, P.W., Prinzler, M., Leistner, T., Rother, C., Nießner, M., Thies, J.: Neural head avatars from monocular rgb videos. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 18632-18643 (June 2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 303, + 480, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 303, + 480, + 335 + ], + "spans": [ + { + "bbox": [ + 132, + 303, + 480, + 335 + ], + "type": "text", + "content": "18. Gu, J., Liu, L., Wang, P., Theobalt, C.: Stylenerf: A style-based 3d aware generator for high-resolution image synthesis. In: International Conference on Learning Representations (2022)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 336, + 480, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 336, + 480, + 369 + ], + "spans": [ + { + "bbox": [ + 132, + 336, + 480, + 369 + ], + "type": "text", + "content": "19. Hong, Y., Peng, B., Xiao, H., Liu, L., Zhang, J.: Headnerf: A real-time nerf-based parametric head model. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 20374-20384 (June 2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 369, + 480, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 369, + 480, + 413 + ], + "spans": [ + { + "bbox": [ + 132, + 369, + 480, + 413 + ], + "type": "text", + "content": "20. Hu, L., Zhang, H., Zhang, Y., Zhou, B., Liu, B., Zhang, S., Nie, L.: Gaussian avatar: Towards realistic human avatar modeling from a single video via animatable 3d gaussians. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 413, + 480, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 413, + 480, + 445 + ], + "spans": [ + { + "bbox": [ + 132, + 413, + 480, + 445 + ], + "type": "text", + "content": "21. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics 42(4) (July 2023)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 446, + 480, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 446, + 480, + 479 + ], + "spans": [ + { + "bbox": [ + 132, + 446, + 480, + 479 + ], + "type": "text", + "content": "22. Khakhulin, T., Sklyarova, V., Lempitsky, V., Zakharov, E.: Realistic one-shot mesh-based head avatars. 
In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 479, + 480, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 479, + 480, + 512 + ], + "spans": [ + { + "bbox": [ + 132, + 479, + 480, + 512 + ], + "type": "text", + "content": "23. Kirschstein, T., Giebenhain, S., Nießner, M.: Diffusion avatars: Deferred diffusion for high-fidelity 3d head avatars. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 512, + 480, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 512, + 480, + 544 + ], + "spans": [ + { + "bbox": [ + 132, + 512, + 480, + 544 + ], + "type": "text", + "content": "24. Kirschstein, T., Qian, S., Giebenhain, S., Walter, T., Niefner, M.: Nersemble: Multi-view radiance field reconstruction of human heads. ACM Trans. Graph. 42(4) (jul 2023)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 545, + 480, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 545, + 480, + 567 + ], + "spans": [ + { + "bbox": [ + 132, + 545, + 480, + 567 + ], + "type": "text", + "content": "25. Li, T., Bolkart, T., Black, M.J., Li, H., Romero, J.: Learning a model of facial shape and expression from 4d scans. ACM Trans. Graph. 36(6) (nov 2017)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 567, + 480, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 567, + 480, + 588 + ], + "spans": [ + { + "bbox": [ + 132, + 567, + 480, + 588 + ], + "type": "text", + "content": "26. Li, X., De Mello, S., Liu, S., Nagano, K., Iqbal, U., Kautz, J.: Generalizable one-shot neural head avatar. NeurIPS (2023)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 589, + 480, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 589, + 480, + 632 + ], + "spans": [ + { + "bbox": [ + 132, + 589, + 480, + 632 + ], + "type": "text", + "content": "27. Li, Z., Zheng, Z., Wang, L., Liu, Y.: Animatable gaussians: Learning pose-dependent gaussian maps for high-fidelity human avatar modeling. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 632, + 480, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 632, + 480, + 666 + ], + "spans": [ + { + "bbox": [ + 132, + 632, + 480, + 666 + ], + "type": "text", + "content": "28. Lin, C.Z., Nagano, K., Kautz, J., Chan, E.R., Iqbal, U., Guibas, L., Wetzstein, G., Khamis, S.: Single-shot implicit morphable faces with consistent texture parameterization. In: ACM SIGGRAPH 2023 Conference Proceedings (2023)" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "type": "text", + "content": "Xuet al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 666 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 162 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 162 + ], + "type": "text", + "content": "29. Lin, S., Ryabtsev, A., Sengupta, S., Curless, B., Seitz, S., Kemelmacher-Shlizerman, I.: Real-time high-resolution background matting. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (Jun 2021)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 162, + 481, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 162, + 481, + 195 + ], + "spans": [ + { + "bbox": [ + 130, + 162, + 481, + 195 + ], + "type": "text", + "content": "30. Lombardi, S., Simon, T., Schwartz, G., Zollhoefer, M., Sheikh, Y., Saragih, J.: Mixture of volumetric primitives for efficient neural rendering. ACM Trans. Graph. 40(4) (jul 2021)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 195, + 481, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 195, + 481, + 228 + ], + "spans": [ + { + "bbox": [ + 132, + 195, + 481, + 228 + ], + "type": "text", + "content": "31. Loper, M., Mahmood, N., Romero, J., Pons-Moll, G., Black, M.J.: SMPL: A skinned multi-person linear model. ACM Trans. Graphics (Proc. SIGGRAPH Asia) 34(6), 248:1-248:16 (Oct 2015)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 228, + 481, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 228, + 481, + 252 + ], + "spans": [ + { + "bbox": [ + 132, + 228, + 481, + 252 + ], + "type": "text", + "content": "32. Luiten, J., Kopanas, G., Leibe, B., Ramanan, D.: Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. In: 3DV (2024)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 251, + 481, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 251, + 481, + 275 + ], + "spans": [ + { + "bbox": [ + 132, + 251, + 481, + 275 + ], + "type": "text", + "content": "33. Ma, S., Weng, Y., Shao, T., Zhou, K.: 3d gaussian blendshapes for head avatar animation. In: ACM SIGGRAPH 2023 Conference Proceedings (2024)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 275, + 481, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 275, + 481, + 308 + ], + "spans": [ + { + "bbox": [ + 132, + 275, + 481, + 308 + ], + "type": "text", + "content": "34. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 308, + 481, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 308, + 481, + 352 + ], + "spans": [ + { + "bbox": [ + 132, + 308, + 481, + 352 + ], + "type": "text", + "content": "35. Or-El, R., Luo, X., Shan, M., Shechtman, E., Park, J.J., Kemelmacher-Shlizerman, I.: Stylesdf: High-resolution 3d-consistent image and geometry generation. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) pp. 
13493-13503 (2021)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 353, + 481, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 353, + 481, + 397 + ], + "spans": [ + { + "bbox": [ + 132, + 353, + 481, + 397 + ], + "type": "text", + "content": "36. Pavlakos, G., Choutas, V., Ghorbani, N., Bolkart, T., Osman, A.A.A., Tzionas, D., Black, M.J.: Expressive body capture: 3D hands, face, and body from a single image. In: Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR). pp. 10975-10985 (2019)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 397, + 481, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 397, + 481, + 441 + ], + "spans": [ + { + "bbox": [ + 132, + 397, + 481, + 441 + ], + "type": "text", + "content": "37. Qian, S., Kirschstein, T., Schoneveld, L., Davoli, D., Giebenhain, S., Nießner, M.: Gaussian avatars: Photorealistic head avatars with rigged 3d gaussians. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 442, + 481, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 442, + 481, + 475 + ], + "spans": [ + { + "bbox": [ + 132, + 442, + 481, + 475 + ], + "type": "text", + "content": "38. Qin, M., Liu, Y., Xu, Y., Zhao, X., Liu, Y., Wang, H.: High-fidelity 3d head avatars reconstruction through spatially-varying expression conditioned neural radiance field. In: AAAI Conference on Artificial Intelligence (2023)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 475, + 481, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 475, + 481, + 509 + ], + "spans": [ + { + "bbox": [ + 132, + 475, + 481, + 509 + ], + "type": "text", + "content": "39. Saito, S., Schwartz, G., Simon, T., Li, J., Nam, G.: Relightable gaussian codec avatars. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 509, + 481, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 509, + 481, + 553 + ], + "spans": [ + { + "bbox": [ + 132, + 509, + 481, + 553 + ], + "type": "text", + "content": "40. Shao, Z., Wang, Z., Li, Z., Wang, D., Lin, X., Zhang, Y., Fan, M., Wang, Z.: SplattingAvatar: Realistic Real-Time Human Avatars with Mesh-Embedded Gaussian Splatting. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 554, + 481, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 554, + 481, + 587 + ], + "spans": [ + { + "bbox": [ + 132, + 554, + 481, + 587 + ], + "type": "text", + "content": "41. Shen, T., Gao, J., Yin, K., Liu, M.Y., Fidler, S.: Deep marching tetrahedra: a hybrid representation for high-resolution 3d shape synthesis. In: Advances in Neural Information Processing Systems (NeurIPS) (2021)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 587, + 481, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 587, + 481, + 621 + ], + "spans": [ + { + "bbox": [ + 132, + 587, + 481, + 621 + ], + "type": "text", + "content": "42. Sun, J., Wang, X., Shi, Y., Wang, L., Wang, J., Liu, Y.: Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. 
ACM Transactions on Graphics (TOG) 41(6), 1-10 (2022)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 621, + 481, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 621, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 132, + 621, + 481, + 666 + ], + "type": "text", + "content": "43. Sun, J., Wang, X., Wang, L., Li, X., Zhang, Y., Zhang, H., Liu, Y.: Next3d: Generative neural texture rasterization for 3d-aware head avatars. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "type": "text", + "content": "3D Gaussian Parametric Head Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 133, + 116, + 481, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 116, + 481, + 160 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 481, + 160 + ], + "type": "text", + "content": "44. Wang, D., Chandran, P., Zoss, G., Bradley, D., Gotardo, P.: Morf: Morphable radiance fields for multiview neural head modeling. In: ACM SIGGRAPH 2022 Conference Proceedings. SIGGRAPH '22, Association for Computing Machinery, New York, NY, USA (2022)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 160, + 481, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 160, + 481, + 182 + ], + "spans": [ + { + "bbox": [ + 133, + 160, + 481, + 182 + ], + "type": "text", + "content": "45. Wang, J., Xie, J.C., Li, X., Xu, F., Pun, C.M., Gao, H.: Gaussianhead: High-fidelity head avatars with learnable gaussian derivation (2024)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 182, + 481, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 182, + 481, + 224 + ], + "spans": [ + { + "bbox": [ + 132, + 182, + 481, + 224 + ], + "type": "text", + "content": "46. Wang, K., Wu, Q., Song, L., Yang, Z., Wu, W., Qian, C., He, R., Qiao, Y., Loy, C.C.: Mead: A large-scale audio-visual dataset for emotional talking-face generation. In: Proceedings of the European Conference on Computer Vision (ECCV) (August 2020)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 224, + 481, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 224, + 481, + 267 + ], + "spans": [ + { + "bbox": [ + 132, + 224, + 481, + 267 + ], + "type": "text", + "content": "47. Wang, L., Chen, Z., Yu, T., Ma, C., Li, L., Liu, Y.: Faceverse: a fine-grained and detail-controllable 3d face morphable model from a hybrid dataset. 
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (Jun 2022)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 268, + 481, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 268, + 481, + 289 + ], + "spans": [ + { + "bbox": [ + 132, + 268, + 481, + 289 + ], + "type": "text", + "content": "48. Wu, G., Yi, T., Fang, J., Xie, L., Zhang, X., Wei, W., Liu, W., Tian, Q., Wang, X.: 4d gaussian splatting for real-time dynamic scene rendering (2024)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 289, + 481, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 289, + 481, + 332 + ], + "spans": [ + { + "bbox": [ + 132, + 289, + 481, + 332 + ], + "type": "text", + "content": "49. Wu, S., Yan, Y., Li, Y., Cheng, Y., Zhu, W., Gao, K., Li, X., Zhai, G.: Ganhead: Towards generative animatable neural head avatars. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 437-447 (2023)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 332, + 481, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 332, + 481, + 365 + ], + "spans": [ + { + "bbox": [ + 132, + 332, + 481, + 365 + ], + "type": "text", + "content": "50. Wu, Y., Deng, Y., Yang, J., Wei, F., Qifeng, C., Tong, X.: Anifacegan: Animatable 3d-aware face image generation for video avatars. In: Advances in Neural Information Processing Systems (2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 365, + 481, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 365, + 481, + 396 + ], + "spans": [ + { + "bbox": [ + 132, + 365, + 481, + 396 + ], + "type": "text", + "content": "51. Wu, Y., Xu, S., Xiang, J., Wei, F., Chen, Q., Yang, J., Tong, X.: Aniportraitgan: Animatable 3d portrait generation from 2d image collections. In: SIGGRAPH Asia 2023 Conference Proceedings (2023)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 396, + 481, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 396, + 481, + 439 + ], + "spans": [ + { + "bbox": [ + 132, + 396, + 481, + 439 + ], + "type": "text", + "content": "52. Xiang, J., Yang, J., Deng, Y., Tong, X.: Gram-hd: 3d-consistent image generation at high resolution with generative radiance manifolds. Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) pp. 2195-2205 (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 439, + 481, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 439, + 481, + 472 + ], + "spans": [ + { + "bbox": [ + 132, + 439, + 481, + 472 + ], + "type": "text", + "content": "53. Xiang, J., Gao, X., Guo, Y., Zhang, J.: Flashavatar: High-fidelity head avatar with efficient gaussian embedding. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 472, + 481, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 472, + 481, + 515 + ], + "spans": [ + { + "bbox": [ + 132, + 472, + 481, + 515 + ], + "type": "text", + "content": "54. Xu, Y., Chen, B., Li, Z., Zhang, H., Wang, L., Zheng, Z., Liu, Y.: Gaussian head avatar: Ultra high-fidelity head avatar via dynamic gaussians. 
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 515, + 481, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 515, + 481, + 548 + ], + "spans": [ + { + "bbox": [ + 132, + 515, + 481, + 548 + ], + "type": "text", + "content": "55. Xu, Y., Wang, L., Zhao, X., Zhang, H., Liu, Y.: Avatarmav: Fast 3d head avatar reconstruction using motion-aware neural voxels. In: ACM SIGGRAPH 2023 Conference Proceedings (2023)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 548, + 481, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 548, + 481, + 579 + ], + "spans": [ + { + "bbox": [ + 132, + 548, + 481, + 579 + ], + "type": "text", + "content": "56. Xu, Y., Zhang, H., Wang, L., Zhao, X., Han, H., Guojun, Q., Liu, Y.: Latentavatar: Learning latent expression code for expressive neural head avatar. In: ACM SIGGRAPH 2023 Conference Proceedings (2023)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 579, + 481, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 579, + 481, + 601 + ], + "spans": [ + { + "bbox": [ + 132, + 579, + 481, + 601 + ], + "type": "text", + "content": "57. Yang, Z., Yang, H., Pan, Z., Zhu, X., Zhang, L.: Real-time photorealistic dynamic scene representation and rendering with 4d gaussian splatting (2023)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 601, + 481, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 601, + 481, + 622 + ], + "spans": [ + { + "bbox": [ + 132, + 601, + 481, + 622 + ], + "type": "text", + "content": "58. Yang, Z., Gao, X., Zhou, W., Jiao, S., Zhang, Y., Jin, X.: Deformable 3d gaussians for high-fidelity monocular dynamic scene reconstruction (June 2023)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 622, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 622, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 622, + 481, + 665 + ], + "type": "text", + "content": "59. Yenamandra, T., Tewari, A., Bernard, F., Seidel, H., Elgharib, M., Cremers, D., Theobalt, C.: i3dmm: Deep implicit 3d morphable model of human heads. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (June 2021)" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 203, + 100 + ], + "type": "text", + "content": "Xuet al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 315 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 160 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 160 + ], + "type": "text", + "content": "60. 
Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 586-595 (June 2018)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 161, + 482, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 161, + 482, + 194 + ], + "spans": [ + { + "bbox": [ + 130, + 161, + 482, + 194 + ], + "type": "text", + "content": "61. Zhao, X., Wang, L., Sun, J., Zhang, H., Suo, J., Liu, Y.: Havatar: High-fidelity head avatar via facial model conditioned neural radiance field. ACM Trans. Graph. (oct 2023)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 194, + 482, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 194, + 482, + 237 + ], + "spans": [ + { + "bbox": [ + 130, + 194, + 482, + 237 + ], + "type": "text", + "content": "62. Zheng, Y., Abrevaya, V.F., Bühler, M.C., Chen, X., Black, M.J., Hilliges, O.: I m avatar: Implicit morphable head avatars from videos. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 13535-13545 (June 2022)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 237, + 482, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 237, + 482, + 270 + ], + "spans": [ + { + "bbox": [ + 130, + 237, + 482, + 270 + ], + "type": "text", + "content": "63. Zheng, Y., Yifan, W., Wetzstein, G., Black, M.J., Hilliges, O.: Pointavatar: Deformable point-based head avatars from videos. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 270, + 482, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 270, + 482, + 303 + ], + "spans": [ + { + "bbox": [ + 130, + 270, + 482, + 303 + ], + "type": "text", + "content": "64. Zhuang, Y., Zhu, H., Sun, X., Cao, X.: Mofanerf: Morphable facial neural radiance field. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 303, + 482, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 303, + 482, + 315 + ], + "spans": [ + { + "bbox": [ + 130, + 303, + 482, + 315 + ], + "type": "text", + "content": "65. 
Zielonka, W., Bolkart, T., Thies, J.: Instant volumetric head avatars (June 2023)" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 294, + 91, + 447, + 101 + ], + "type": "text", + "content": "3D Gaussian Parametric Head Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Hand Pose Estimation in Everyday Egocentric Images/332a64fb-af20-4857-af36-e23eeaad9f91_content_list.json b/2024/3D Hand Pose Estimation in Everyday Egocentric Images/332a64fb-af20-4857-af36-e23eeaad9f91_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..3f0b9571b72f8de08de8fb7558dfbd449f520c6e --- /dev/null +++ b/2024/3D Hand Pose Estimation in Everyday Egocentric Images/332a64fb-af20-4857-af36-e23eeaad9f91_content_list.json @@ -0,0 +1,2179 @@ +[ + { + "type": "text", + "text": "3D Hand Pose Estimation in Everyday Egocentric Images", + "text_level": 1, + "bbox": [ + 336, + 140, + 666, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Aditya Prakash, Ruisen Tu, Matthew Chang, and Saurabh Gupta", + "bbox": [ + 264, + 212, + 738, + 228 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Illinois Urbana-Champaign {adityap9,ruisent2,mc48,saurabhg}@illinois.edu https://bit.ly/WildHands", + "bbox": [ + 321, + 239, + 679, + 281 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. 3D hand pose estimation in everyday egocentric images is challenging for several reasons: poor visual signal (occlusion from the object of interaction, low resolution & motion blur), large perspective distortion (hands are close to the camera), and lack of 3D annotations outside of controlled settings. While existing methods often use hand crops as input to focus on fine-grained visual information to deal with poor visual signal, the challenges arising from perspective distortion and lack of 3D annotations in the wild have not been systematically studied. We focus on this gap and explore the impact of different practices, i.e. crops as input, incorporating camera information, auxiliary supervision, scaling up datasets. We provide several insights that are applicable to both convolutional and transformer models, leading to better performance. Based on our findings, we also present WildHands, a system for 3D hand pose estimation in everyday egocentric images. Zero-shot evaluation on 4 diverse datasets (H2O, AssemblyHands, Epic-Kitchens, Ego-Exo4D) demonstrate the effectiveness of our approach across 2D and 3D metrics, where we beat past methods by $7.4\\% - 66\\%$ . 
In system level comparisons, WildHands achieves the best 3D hand pose on ARCTIC egocentric split, outperforms FrankMocap across all metrics and HaMeR on 3 out of 6 metrics while being $10\\times$ smaller and trained on $5\\times$ less data.", + "bbox": [ + 261, + 311, + 743, + 590 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: 3D Hand Pose $\\cdot$ Egocentric Vision $\\cdot$ 3D from single image", + "bbox": [ + 261, + 602, + 735, + 616 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 217, + 659, + 375, + 674 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Understanding egocentric hands in 3D enables applications in AR/VR, robotics. While several works have studied exocentric hands [52, 59], no existing approach performs well in diverse egocentric settings outside of lab setups. We focus on this gap & study the impact of common practices, i.e. crops as input, camera information, auxiliary supervision, scaling up datasets, for predicting absolute 3D hand pose from a single egocentric image. We identify 2 important factors: a) modeling the 3D to 2D projection during imaging of the hand in egocentric views, b) scaling up training to diverse datasets by leveraging auxiliary supervision.", + "bbox": [ + 212, + 688, + 787, + 809 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Let's unpack each component. Existing methods often operate on image crops, assume that the image crop is located at the center of the camera's field of view", + "bbox": [ + 215, + 810, + 787, + 839 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c8a983734fa0ef16e6c36a0732437bebbeeaba8f713b1adf3b594cc895116ebc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 218, + 143, + 330, + 228 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/555546651e1c4816f4b6ff6e9cf099150f687df5a3e07e212e2248e7b139d81a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 143, + 393, + 229 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/61164706a037dad281dc7502b352a79e52a76c99510920b454c54f1fa8b52440.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 395, + 148, + 488, + 229 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/8be37e6ffabc3340f1e52de99851bb95e6214c52b556679afb04c699685b2d2c.jpg", + "image_caption": [ + "Fig. 1: WildHands predicts the 3D shape, 3D articulation and 3D placement of the hand in the camera frame from a single in-the-wild egocentric RGB image and camera intrinsics. It produces better 3D output compared to FrankMocap [59] in occlusion scenarios and is more adept at dealing with perspective distortion than HaMeR [52], in challenging egocentric hand-object interactions from Epic-Kitchens [9] dataset." 
+ ], + "image_footnote": [], + "bbox": [ + 218, + 228, + 330, + 313 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/0c315029780fb05164e7405d7ff604662bb8ee8e77baf134fd84057d9d43afda.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 331, + 228, + 393, + 313 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/5966712a01e6aa30dcacaf998c6a2b8d726d7d52a09a6f846d752d5d7d2b9302.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 395, + 228, + 488, + 313 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/9e13b709292a6fe0d5cde2e92cbeb29c9dd31bb4817ed30da070773203a666ce.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 143, + 622, + 229 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/8561fecb9e3b3ac613d0b6c554d14a7855a980738fd0d67c9bd8c73c744a255e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 624, + 143, + 686, + 229 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/96a5cdc4b68d0090e5638904945dcb220486759e955b8cb29d0f277835e3758f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 229, + 624, + 313 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/7effda99691b5585b7dd0befbd10e688d59b53ef2b5821e1b62c281d6789beb4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 625, + 229, + 687, + 313 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/3dfd1dca45072040d1f7982366cd2405e978ed375a8db8424c25670f2c6b8099.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 691, + 147, + 782, + 229 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/ea74be9ec6806b3031ad0a2e623ca017f9369bc239879d729b7561080cc8f420.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 691, + 229, + 785, + 271 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/a8c77955346ec46f4b78f8bc8da5822802ff64ab449087f889c0268071df89fd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 691, + 276, + 784, + 313 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "with a made-up focal length. These choices are reasonable for exocentric settings where the location of the hand in the image does not provide any signal for the hand articulation; and perspective distortion effects are minimal as the hand is far away & occupies a relatively small part of the camera's field of view. However, these assumptions are sub-optimal for processing egocentric images.", + "bbox": [ + 212, + 450, + 787, + 527 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Due to the biomechanics of the hand, its location in egocentric images carries information about its pose. Also, as the hand is closer to the camera in egocentric settings, it undergoes a lot more perspective distortion than in exocentric images. 3D hand pose that correctly explains the 2D hand appearance in one part of an egocentric image, may not be accurate for another part of the image. Thus, the location of the hand in the image must be taken into account while making 3D predictions. This suggests feeding the 2D location of the hand in the image to the network. However, the notion of 2D location in the image frame is camera specific. The more fundamental quantity that generalizes across cameras, is the angular location in the camera's field of view. 
We thus adopt the recent KPE embedding [54] to augment hand crop features with sinusoidal encodings of its location in the camera's field of view & find this to improve performance.", + "bbox": [ + 212, + 532, + 789, + 714 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, just processing image crops the right way is not sufficient for generalization. The model also needs to be trained on broad & diverse datasets outside of lab settings. This is not easy as 3D hand pose is difficult to directly annotate in images. We thus turn to joint training on 3D supervision from lab datasets and 2D auxiliary supervision on in-the-wild data in the form of 2D hand masks [6,10] & grasp labels [6]. To absorb supervision from segmentation labels, we differentiably render [42] the predicted 3D hand into images and back-propagate the loss through the rendering. For grasp supervision, we note", + "bbox": [ + 212, + 719, + 787, + 840 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "A. Prakash et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "that hand pose is indicative of the grasp type and use supervision from a grasp classifier that takes the predicted 3D hand pose as input.", + "bbox": [ + 212, + 146, + 782, + 176 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Lack of accurate 3D annotations outside of lab settings makes it challenging to assess the generalization capabilities. To this end, we adopt a zero-shot evaluation strategy. Even though a single lab dataset has limited diversity, a model that performs well on a lab dataset without having seen any images from it likely generalizes well. Furthermore, we collect Epic-HandKps, containing 2D hand joint annotations on 5K images from the VISOR [10] split of in-the-wild Epic-Kitchens [7] to evaluate the 2D projections of the predicted 3D hand pose on everyday images. We also consider the 3D hand poses provided evaluate on the concurrent Ego-Exo4D [18]. We believe that these evaluations together comprehensively test the generalization capabilities of different models.", + "bbox": [ + 212, + 176, + 785, + 325 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our experiments (Sec. 4) show the utility of (1) using crops (vs. full images), (2) inputting 2D crop location (vs. not), (3) encoding the crop's location in camera's field of view (vs. in the image frame), and (4) 2D mask & grasp supervision. We apply these insights to both convolutional and transformer models, leading to better performance. We also present WildHands (Fig. 1) which outperforms FrankMocap [59] on egocentric images and is competitive to concurrent HaMeR [52] while being $10 \\times$ smaller & trained with $5 \\times$ less data.", + "bbox": [ + 212, + 328, + 787, + 434 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 215, + 458, + 387, + 474 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Hand pose estimation & reconstruction: Several decades of work [15,28,56] have studied different aspects: 2D pose [4,63] vs. 3D pose [40,50,68,72] vs. mesh [1, 25,65], RGB [14,22,25] vs. RGBD [57,62-64,66,68] inputs, egocentric [14,50] vs. allocentric [14,21,22], hands in isolation [48,79] vs. interaction with objects [21, 44,73], feed-forward prediction [14,22,25,60] vs. test-time optimization [3,24]. 
Driven by the advances in parametric hand models [53,58], recent work has moved past 3D joint estimation towards 3D mesh recovery [14,22,25,52,59,77] in 3 contexts: single hands in isolation [78], hands interacting with objects [14,70] and two hands interacting with one another [22,48]. Jointly reasoning about hands & objects has proved fruitful to improve both hand & object reconstruction [25,36, 74]. While several expressive models focus on 3D hand pose estimation in lab settings [22,31-33,60], only a very few works [52] tackle the problem in everyday egocentric images as in Ego4D [17], Epic-Kitchen [7]. We focus on this setting due to challenges involving perspective distortion, dynamic interactions & heavy occlusions. We explore both convolutional [14,59] and transformer models [51,52] to study the impact of using crops, location of the crop in camera's field of view & auxiliary supervision in zero-shot generalization to diverse egocentric settings.", + "bbox": [ + 212, + 491, + 787, + 748 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Hand datasets: Since 3D hand annotations from single images is difficult to get, most datasets are collected in controlled settings to get 3D ground truth using MoCap [14,67], multi-camera setups [21,22,40,44,50], or magnetic sensors [16]. They often include single hands in isolation [79], hand-object interactions [14, 21,22,40] & hand-hand interactions [48]. Different from these datasets with 3D poses, [6,10,61] provide annotations for segmentation masks [6,10], 2D bounding", + "bbox": [ + 212, + 750, + 787, + 840 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "3D Hand Pose Estimation in Everyday Egocentric Images", + "bbox": [ + 344, + 114, + 730, + 128 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/b793bd7f39854e1a3b493108fe9afabb514f73bfc64615ad4150dfb4c2595a23.jpg", + "image_caption": [ + "Fig. 2: Model Overview. We crop the input images around the hand and process them using a convolutional backbone. The hand features along with the global image features (not shown above for clarity) and intrinsics-aware positional encoding (KPE [54]) for each crop are fed to the decoder to predict the 3D hand. The hand decoders predict MANO parameters $\\beta, \\theta_{\\mathrm{local}}, \\theta_{\\mathrm{global}}$ and camera translation which are converted to 3D keypoints & 2D keypoints and trained using 3D supervision on lab datasets, e.g. ARCTIC [14], AssemblyHands [50]. We also use auxiliary supervision from in-the-wild Epic-Kitchens [10] dataset via hand segmentation masks and grasp labels. The hand masks are available with the VISOR dataset [10] whereas grasp labels are estimated using off-the-shelf model from [6]." + ], + "image_footnote": [], + "bbox": [ + 215, + 143, + 787, + 291 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "boxes [61] and grasp labels [6] on internet videos [61] and egocentric images in the wild [9, 17]. Our work combines 3D supervision from datasets [14, 50] captured in controlled settings with 2D auxiliary supervision, i.e. segmentation masks & grasp labels, from datasets outside the lab [6, 10] to learn models that perform well in challenging everyday images. We collect Epic-HandKps dataset with 2D hand keypoints on 5K images from Epic-Kitchens for evaluation in everyday images outside of lab settings. 
We also use concurrent Ego-Exo4D [18] that annotates 2D keypoints in paired ego & exo views to get 3D hand annotations.", + "bbox": [ + 212, + 472, + 787, + 594 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Auxiliary supervision: Several works on 3D shape prediction from a single image [34,69] often use auxiliary supervision to deal with lack of 3D annotations. [34] uses keypoint supervision for 3D human mesh recovery, while [69] uses multi-view consistency cues for 3D object reconstruction. Aided by differentiable rendering [37,43], segmentation and depth prediction have been used to provide supervision for 3D reconstruction [3,24,35]. We adopt this use of segmentation as an auxiliary cue for 3D poses. In addition, we use supervision from hand grasp labels based on the insight that hand grasp is indicative of the hand pose.", + "bbox": [ + 212, + 595, + 787, + 717 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Ambiguity: 3D estimation from a single image is ill-posed due to ambiguities arising from scale-depth confusion [23] and cropping [54]. Recent work [54] points out the presence of perspective distortion-induced shape ambiguity in image crops and uses camera intrinsic-based location encodings to mitigate it. We investigate the presence of this ambiguity for hand crops in egocentric images and adopt the proposed embedding to mitigate it. Similar embeddings have been used before in literature, primarily from the point of view of training models on images from different cameras [12, 19], to encode extrinsic information [20, 47, 75].", + "bbox": [ + 212, + 719, + 787, + 842 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "A. Prakash et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 215, + 143, + 330, + 160 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We present WildHands, a new system for 3D hand pose estimation from egocentric images in the wild. We build on top of ArcticNet-SF [14] and FrankMocap [59]. Given a crop around a hand and associated camera intrinsics, WildHands predicts the 3D hand shape as MANO [58] parameters, shape $\\beta$ and pose $\\theta$ . $\\theta$ consists of angles of articulation $\\theta_{\\mathrm{local}}$ for 15 hand joints and the global pose $\\theta_{\\mathrm{global}}$ of the root joint in the camera coordinate system. WildHands is trained using both lab (ARCTIC, AssemblyHands) and in-the-wild (Epic-Kitchens, Ego4D) datasets with different sources of supervision. Fig. 2 provides an overview of our model. Next, we describe each component of WildHands in detail.", + "bbox": [ + 212, + 176, + 787, + 313 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 Architecture", + "text_level": 1, + "bbox": [ + 215, + 337, + 366, + 351 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Hand encoder: Our models use hand crops as input (resized to $224 \\times 224$ resolution), which are processed by a ResNet50 [27] backbone to get $7 \\times 7 \\times 2048$ feature maps. The left and right hand crops are processed separately but the parameters are shared. We also use global image features in our model, computed by average pooling the $7 \\times 7 \\times 2048$ feature map to get a 2048-dimensional vector. 
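As a concrete illustration of the encoder just described, below is a minimal PyTorch sketch (illustrative names, not the released implementation). It assumes torchvision's stock ResNet50; the paper's model may differ in initialization details. It returns both the spatial 7x7x2048 map and the pooled 2048-d vector, and the same module (shared weights) would be applied to the left and right crops separately, as described above.

import torch
import torch.nn as nn
import torchvision.models as models

class HandEncoder(nn.Module):
    # Sketch of the crop-encoding path: a 224x224 hand crop -> 7x7x2048
    # feature map (ResNet50 trunk) -> pooled 2048-d global descriptor.
    def __init__(self):
        super().__init__()
        trunk = models.resnet50(weights=None)
        # Drop the avgpool/fc head to keep the spatial feature map.
        self.backbone = nn.Sequential(*list(trunk.children())[:-2])

    def forward(self, crop):                  # crop: (B, 3, 224, 224)
        fmap = self.backbone(crop)            # (B, 2048, 7, 7)
        global_feat = fmap.mean(dim=(2, 3))   # (B, 2048)
        return fmap, global_feat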
Incorporating KPE: Recent work [54] has shown that estimating 3D quantities from image crops suffers from perspective distortion-induced shape ambiguity [54]. This raises concerns about whether this ambiguity is also present when using hand crops for predicting 3D pose and how to deal with it. Following the study in [54], we analyze the hands in the ARCTIC dataset (details in the supplementary) and find evidence of this ambiguity in hand crops as well. Thus, we adopt the intrinsics-aware positional encoding (KPE) proposed in [54] to mitigate this ambiguity. Specifically, we provide the network with information about the location of the hand crop in the field of view of the camera. Consider the principal point as $(p_x, p_y)$ & focal length as $(f_x, f_y)$ . For each pixel $(x, y)$ , we compute $\\theta_x = \\tan^{-1}\\left(\\frac{x - p_x}{f_x}\\right)$ , $\\theta_y = \\tan^{-1}\\left(\\frac{y - p_y}{f_y}\\right)$ & convert them into sinusoidal encoding [46].", + "bbox": [ + 212, + 362, + 787, + 628 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We add KPE to the $7 \\times 7 \\times 2048$ feature map. KPE comprises sinusoidal encoding of the angles $\\theta_{x}$ and $\\theta_{y}$ (Sec. 4.1 in the main paper), resulting in $5 * 4 * K$ dimensional sparse encoding (4 for corners and 1 for center pixel) and $H \\times W \\times 4 * K$ resolution dense encoding, where $K$ is the number of frequency components (set to 4). For the sparse KPE variant, we broadcast it to $7 \\times 7$ resolution whereas for the dense KPE variant, we interpolate it to $7 \\times 7$ resolution and concatenate to the feature map. This concatenated feature is passed to a 3 convolutional layers (with 1024, 512, 256 channels respectively, each with kernel size of $3 \\times 3$ and ReLU [49] non-linearity) to get a $3 \\times 3 \\times 256$ feature map. This is flattened to 2304-dimensional vector and passed through a 1-layer MLP to get a 2048-dimensional feature vector. We do not use batchnorm [30] here since we want to preserve the spatial information in KPE.", + "bbox": [ + 212, + 628, + 787, + 809 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Hand decoder: It consists of an iterative architecture, similar to decoder in HMR [34]. The inputs are the 2048-dimensional feature vector and initial", + "bbox": [ + 212, + 809, + 785, + 839 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "3D Hand Pose Estimation in Everyday Egocentric Images", + "bbox": [ + 344, + 114, + 730, + 128 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "MANO [58] (shape $\\beta$ , articulation $\\theta_{\\mathrm{local}}$ and global pose $\\theta_{\\mathrm{global}}$ , all initialized as 0-vectors) & weak perspective camera parameters (initialized from the 2048-dimensional feature vector). Each of these parameters are predicted using a separate decoder head. The rotation parameters $\\theta_{\\mathrm{local}}$ , $\\theta_{\\mathrm{global}}$ are predicted in matrix form and converted to axis-angle representation to feed to MANO model. Each decoder is a 3-layer MLP with the 2 intermediate layers having 1024 channels and the output layer having the same number of channels as the predicted parameter. The output of each decoder is added to the initial parameters to get the updated parameters. This process is repeated for 3 iterations. 
The output of the last iteration is used for the final prediction.", + "bbox": [ + 212, + 146, + 787, + 297 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Differentiable rendering for mask prediction: The outputs from the decoder, $\\beta$ , $\\theta_{\\mathrm{local}}$ , and $\\theta_{\\mathrm{global}}$ for the predicted hand, are passed to a differentiable MANO layer [25, 58] to get the hand mesh. This is used to differentiably render a soft segmentation mask, $M$ , using SoftRasterizer [43, 55]. Using a differentiable hand model (MANO) and differentiable rendering lets us train our model end-to-end.", + "bbox": [ + 212, + 297, + 787, + 372 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Grasp classifier: We use the insight that grasp type during interaction with objects is indicative of hand pose. We train a grasp prediction head on $\\theta_{\\mathrm{local}}$ , $\\theta_{\\mathrm{global}}$ & $\\beta$ (predicted by WildHands) via a 4-layer MLP (with 1024, 1024, 512, 128 nodes & ReLU non-linearity after each). The MLP predicts logits for the 8 grasp classes defined in [6] which are converted into probabilities, $G$ via softmax.", + "bbox": [ + 212, + 373, + 787, + 450 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 Training supervision", + "text_level": 1, + "bbox": [ + 215, + 470, + 431, + 486 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We train WildHands using: (1) 3D supervision on $\\beta$ , $\\theta_{\\mathrm{local}}$ , $\\theta_{\\mathrm{global}}$ , 3D hand keypoints & 2D projections of 3D keypoints in the image on lab datasets, and (2) hand masks and grasp labels on in-the-wild datasets.", + "bbox": [ + 212, + 497, + 785, + 542 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\theta} = \\left\\| \\theta - \\theta^ {g t} \\right\\| _ {2} ^ {2} \\qquad \\mathcal {L} _ {\\beta} = \\left\\| \\beta - \\beta^ {g t} \\right\\| _ {2} ^ {2} \\qquad \\mathcal {L} _ {c a m} = \\left\\| (s, T) - (s, T) ^ {g t} \\right\\| _ {2} ^ {2} \\quad (1)\n$$\n", + "text_format": "latex", + "bbox": [ + 256, + 554, + 787, + 575 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {k p 3 d} = \\left\\| J _ {3 D} - J _ {3 D} ^ {g t} \\right\\| _ {2} ^ {2} \\quad \\mathcal {L} _ {k p 2 d} = \\left\\| J _ {2 D} - J _ {2 D} ^ {g t} \\right\\| _ {2} ^ {2} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 256, + 577, + 787, + 599 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {m a s k}} = \\| M - M ^ {g t} \\| \\quad \\mathcal {L} _ {\\text {g r a s p}} = C E (G, G ^ {g t}) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 256, + 601, + 787, + 618 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Here, $\\mathcal{L}_{\\theta}$ is used for both $\\theta_{local}$ & $\\theta_{global}$ , $(s,T)$ are the weak perspective camera parameters and $CE$ represents cross-entropy loss. $J_{2D} = K[J_{3D} + (T,f / s)]$ , where $J_{3D}$ is the 3D hand keypoints in the MANO coordinate frame, $K$ is the camera intrinsics, $f$ is the focal length, and $s$ is the scale factor of the weak perspective camera. Note that $(.)^{gt}$ represents the ground truth quantities. 
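Before combining them, the individual terms can be written compactly. The sketch below assumes batched tensors keyed by name; it uses mean-squared error for the L2 losses of Eqs. (1)-(2) and an L1 mean for the mask term of Eq. (3), whose exact norm is not pinned down above (illustrative code, not the released implementation).

import torch.nn.functional as F

def hand_losses(pred, gt, rendered_mask, grasp_logits):
    # Eq. (1): MANO pose/shape and weak-perspective camera parameters.
    L_theta = F.mse_loss(pred["theta"], gt["theta"])
    L_beta  = F.mse_loss(pred["beta"], gt["beta"])
    L_cam   = F.mse_loss(pred["cam"], gt["cam"])
    # Eq. (2): 3D keypoints and their 2D projections.
    L_kp3d  = F.mse_loss(pred["J3D"], gt["J3D"])
    L_kp2d  = F.mse_loss(pred["J2D"], gt["J2D"])
    # Eq. (3): rendered hand mask (L1 assumed) and grasp cross-entropy.
    L_mask  = (rendered_mask - gt["mask"]).abs().mean()
    L_grasp = F.cross_entropy(grasp_logits, gt["grasp"])
    return L_theta, L_beta, L_cam, L_kp3d, L_kp2d, L_mask, L_grasp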
The total loss is:", + "bbox": [ + 212, + 628, + 787, + 705 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} = \\lambda_ {\\theta} \\mathcal {L} _ {\\theta} + \\lambda_ {\\beta} \\mathcal {L} _ {\\beta} + \\lambda_ {c a m} \\mathcal {L} _ {c a m} + \\lambda_ {k p 3 d} \\mathcal {L} _ {k p 3 d} + \\lambda_ {k p 2 d} \\mathcal {L} _ {k p 2 d} \\\\ + \\lambda_ {m a s k} \\mathcal {L} _ {m a s k} + \\lambda_ {g r a s p} \\mathcal {L} _ {g r a s p} \\tag {4} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 295, + 734, + 785, + 768 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Lab datasets: For ARCTIC, we use $\\lambda_{\\theta} = 10.0, \\lambda_{\\beta} = 0.001, \\lambda_{kp3d} = 5.0, \\lambda_{kp2d} = 5.0, \\mathcal{L}_{cam} = 1.0$ & set other loss weights to 0. AssemblyHands does not use MANO representation for hands, instead provides labels for 3D & 2D keypoints of 21 hand joints. So, we use $\\lambda_{kp3d} = 5, \\lambda_{kp2d} = 5$ & set other loss weights to 0.", + "bbox": [ + 212, + 779, + 787, + 842 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "A. Prakash et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In-the-wild data: For Epic-Kitchens & Ego4D, we use hand masks & grasp labels as auxiliary supervision. While VISOR contains hand masks, grasp labels are not available. Ego4D does not contain either hand masks or grasp labels. To extract these labels, we use predictions from off-the-shelf model [6] as pseudo ground truth. We use $\\lambda_{mask} = 10.0$ , $\\lambda_{grasp} = 0.1$ & set other loss weights to 0.", + "bbox": [ + 212, + 146, + 787, + 224 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3 Implementation Details", + "text_level": 1, + "bbox": [ + 214, + 244, + 457, + 260 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Our model takes hand crops as input. During training, we use the ground truth bounding box for the hand crop (with small perturbation), estimated using the 2D keypoints & scaled by a fixed value of 1.5 to provide additional context around the hand. At test time, we need to predict the bounding box of the hand in the image. On ARCTIC, we train a bounding box predictor on by finetuning MaskRCNN [26]. This is also used for submitting the model to the ARCTIC leaderboard. For Epic-HandKps, we use the recently released hand detector from [5]. All the ablations use ground truth bounding box for the hand crop.", + "bbox": [ + 212, + 268, + 787, + 388 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We use the training sets of ARCTIC (187K images) & AssemblyHands (360K), VISOR split (30K) of EPIC and 45K images from Ego4D kitchen videos to train our model. WildHands is trained jointly on different datasets with the input batch containing images from multiple datasets. All models are initialized from the ArcticNet-SF model trained on the allocentric split of the ARCTIC dataset [14]. All models are trained for 100 epochs with a learning rate of $1e - 5$ . The multi-dataset training is done on 2 A40 GPUs with a batch size of 144 and Adam optimizer [39]. 
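As a concrete reference for the intrinsics-aware encoding of Sec. 3.1, a minimal sketch of KPE is given below (illustrative; the frequency schedule is an assumption and the released code may differ). Each query point (the four crop corners plus the center, for the sparse variant) is mapped to viewing angles via the intrinsics and expanded into K=4 sine/cosine components, giving 4K values per point.

import torch

def kpe(xy, fx, fy, px, py, K=4):
    # xy: (N, 2) pixel locations, e.g. crop corners + center (sparse KPE).
    theta_x = torch.atan((xy[:, 0] - px) / fx)  # angle in the camera's FoV
    theta_y = torch.atan((xy[:, 1] - py) / fy)
    ang = torch.stack([theta_x, theta_y], dim=-1)          # (N, 2)
    freqs = 2.0 ** torch.arange(K, dtype=ang.dtype)        # assumed schedule
    arg = ang.unsqueeze(-1) * freqs                        # (N, 2, K)
    return torch.cat([arg.sin(), arg.cos()], dim=-1).flatten(1)  # (N, 4K)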
More details are provided in the supplementary.", + "bbox": [ + 212, + 390, + 789, + 512 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 214, + 534, + 375, + 551 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We adopt a zero-shot evaluation strategy: 3D evaluation on lab datasets (H2O, AssemblyHands), evaluation of 2D projections of 3D hand predictions on Epic-HandKps & 3D evaluation on EgoExo4D [18]. We systematically analyze the effectiveness of design choices (using crops, KPE), different terms in the loss function and different datasets used for training. We also report a system-level comparison on ARCTIC leaderboard and with FrankMocap [59] & HaMeR [52].", + "bbox": [ + 212, + 565, + 787, + 657 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1 Protocols", + "text_level": 1, + "bbox": [ + 214, + 679, + 341, + 693 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Training datasets: We consider 4 datasets for training: 2 lab datasets (ARCTIC & AssemblyHands) and 2 in-the-wild datasets (Epic-Kitchens & Ego4D).", + "bbox": [ + 212, + 703, + 785, + 734 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We select ARCTIC since it contains the largest range of hand pose variation [14] among existing datasets [4, 21, 22, 44, 67]. We use the egocentric split with more than $187\\mathrm{K}$ images in the train set. We also use AssemblyHands since it is a large-scale dataset with more than $360\\mathrm{K}$ egocentric images in the train split. Different combinations of these datasets are used for different experiments.", + "bbox": [ + 212, + 734, + 787, + 809 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We use egocentric images from Epic-Kitchens & Ego4D as in-the-wild data for training our model using auxiliary supervision. We use 30K training images", + "bbox": [ + 212, + 809, + 787, + 840 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "3D Hand Pose Estimation in Everyday Egocentric Images", + "bbox": [ + 344, + 114, + 732, + 130 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3a364e95b635b535472d2dd662ad509e958f83bbef4e525264b5dd18623393d1.jpg", + "image_caption": [ + "Fig. 3: Epic-HandKps annotations. We collect 2D joint annotations (shown in blue) for 5K in-the-wild egocentric images from Epic-Kitchens [8]. We show few annotations here with images cropped around the hand. We also have the label for the joint corresponding to each keypoint. Note the heavy occlusion & large variation in dexterous poses of hands interactiong with objects. More visualizations in supplementary." + ], + "image_footnote": [], + "bbox": [ + 217, + 143, + 785, + 311 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "available in the VISOR split of Epic-Kitchens and 45K images from Ego4D. To extract hand masks and grasp labels, we use off-the-shelf model from [6].", + "bbox": [ + 212, + 424, + 785, + 454 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Evaluation datasets: We consider 4 datasets for zero-shot generalization experiments: H2O [40], AssemblyHands, Epic-HandKps, and Ego-Exo4D. 
Note that these datasets cover large variation in inputs, H2O contains RGB images in lab settings, AssemblyHands consists of grayscale images and Epic-HandKps and Ego-Exo4D images show hands performing everyday activities in the wild.", + "bbox": [ + 212, + 455, + 785, + 532 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We use the validation splits of H2O and AssemblyHands with 29K and 32K images respectively. Since 3D hand annotations are difficult to collect for in-the-wild images, we instead collect 2D hand keypoints annotations on 5K egocentric images from validation set of VISOR split of Epic-Kitchens. We refer to this dataset as Epic-HandKps. See sample images from the dataset in Fig. 3. We also evaluate on the validation split of Ego-Exo4D hand pose dataset.", + "bbox": [ + 212, + 532, + 785, + 625 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Epic-HandKps: Epic-HandKps contains 2D annotations for the 21 hand joints to facilitate evaluation of 2D projections of the predicted 3D keypoints. We sample 5K images from the validation set of VISOR split of Epic-Kitchens and get the 21 joints annotated via Scale AI. We use the same joint convention as ARCTIC [14]. We crop the images around the hand using the segmentation masks in VISOR and provide the crops to annotators for labeling. Note that most of these images do not have all the 21 keypoints visible. Following ARCTIC, we only consider images with at least 3 visible joints for evaluation. Moreover, since the models in our experiments required hand crops as input, we only evaluate on those images for which hand bounding box is predicted by the recently released hand detector model from [6]. This leaves us with 4724 hand annotations, with 2697 right hands and 2027 left hands. We show some annotations in Fig. 3.", + "bbox": [ + 212, + 626, + 785, + 808 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Metrics: For 3D hand pose evaluation, we consider 2 metrics: (1) Mean PerJoint Position Error (MPJPE): L2 distance (mm) between the 21 predicted", + "bbox": [ + 212, + 809, + 787, + 840 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "A. Prakash et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/52ab8fd9c14230b32892be7d4fdaf4be4705839df319e97f7c8a52344fd4948b.jpg", + "table_caption": [ + "Table 1: Benefits of using crops and KPE. Zero shot generalization performance improves through the use of crops as input (HandNet uses crops vs. ArcticNet-SF uses full image) and KPE helps (WildHands uses KPE with crops vs. HandNet only uses crops). All models use the same backbone and are trained on the same data in each setting for fair comparisons. $\\mathcal{D}$ : {ARCTIC, AssemblyHands, EPIC}." + ], + "table_footnote": [], + "table_body": "
<table>
<tr><td></td><td colspan="2">H2O</td><td colspan="2">Assembly</td><td>Ego-Exo4D</td><td>Epic-HandKps</td></tr>
<tr><td></td><td>MPJPE</td><td>MRRPE</td><td>MPJPE</td><td>MRRPE</td><td>MPJPE</td><td>L2 Error</td></tr>
<tr><td>Training data</td><td colspan="2">D</td><td colspan="2">D - Assembly</td><td>D</td><td>D - EPIC</td></tr>
<tr><td>ArcticNet-SF</td><td>83.84</td><td>325.55</td><td>110.76</td><td>326.94</td><td>114.24</td><td>35.02</td></tr>
<tr><td>HandNet</td><td>38.06</td><td>141.06</td><td>109.88</td><td>317.49</td><td>89.72</td><td>31.62</td></tr>
<tr><td>WildHands</td><td>31.08</td><td>49.49</td><td>84.91</td><td>164.90</td><td>55.84</td><td>11.05</td></tr>
</table>
", + "bbox": [ + 220, + 224, + 782, + 320 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "& ground truth joints for each hand after subtracting the root joint (this captures the relative pose). (2) Mean Relative-Root Position Error (MRRPE): the metric distance between the root joints of left hand and right hand, following [13, 14, 48] (this takes the absolute pose into account). (3) For 2D evaluation on Epic-HandKps, we measure the L2 Error (in pixels for 224x224 image input) between ground truth keypoints & 2D projections of predicted 3D keypoints.", + "bbox": [ + 212, + 349, + 785, + 439 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Baselines: (1) ArcticNet-SF [14] is the single-image model released with the ARCTIC benchmark. It consists of a convolutional backbone (ResNet50 [27]) to process the input image, followed by a HMR [35]-style decoder to predict the hand and object poses. The predicted hand is represented using MANO [58] parameterization. (2) FrankMocap [59] is trained on multiple datasets collected in controlled settings and is a popular choice to apply in the wild setting [3,24,74]. It uses hand crops as input instead of the entire image, which is then processed by a convolutional backbone. The decoder is similar to HMR [35] which outputs MANO parameters for hand and training is done using 3D pose & 2D keypoints supervision. (3) HandNet: Since the training code is not available for FrankMocap, we are unable to train it in our setting. So, we implement a version of ArcticNet-SF which uses crops as input along with HMR-style decoder and train it in our setting using 3D & 2D supervision. This baseline is equivalent to WildHands without KPE and ArcticNet-SF with crops. (4) HandOccNet [51]: It takes crops as input and encodes them using a FPN [41] backbone. These are passed to transformer [71] modules to get a heatmap-based intermediate representation which is then decoded to MANO parameters. (5) HaMeR [52]: It also takes crops as input and processes them using a ViT [11] backbone. The features are then passed to a transformer decoder to predict the MANO parameters. Note that adversarial loss is not used for training any model in our setting.", + "bbox": [ + 212, + 439, + 787, + 743 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.2 Results", + "text_level": 1, + "bbox": [ + 215, + 767, + 323, + 781 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We systematically study the impact of several factors: use of crops (Tab. 1) & KPE (Tab. 1, Tab. 5), perspective distortion (Tab. 4), auxiliary supervision (Tab. 3), training datasets (Tab. 6) on both convolutional (Tab. 1) & transformer", + "bbox": [ + 212, + 794, + 785, + 842 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "3D Hand Pose Estimation in Everyday Egocentric Images", + "bbox": [ + 344, + 114, + 730, + 128 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/668a90d4ece2e84a1b1a795f1e25bc6cdd78fca793ceeda9294f9bb03ac40970.jpg", + "table_caption": [ + "Table 2: Impact on transformer models. We investigate if our insights are useful for transformer models as well, i.e. if KPE helps on top of positional encodings used in transformers & if auxiliary supervision leads to better generalization for large capacity models. All models are trained on the same data in each setting for fair comparisons." + ], + "table_footnote": [], + "table_body": "
<table>
<tr><td></td><td colspan="2">H2O</td><td colspan="2">Assembly</td><td>Ego-Exo4D</td><td>Epic-HandKps</td></tr>
<tr><td></td><td>MPJPE</td><td>MRRPE</td><td>MPJPE</td><td>MRRPE</td><td>MPJPE</td><td>L2 Error</td></tr>
<tr><td>Training data</td><td colspan="2">D</td><td colspan="2">D - Assembly</td><td>D</td><td>D - EPIC</td></tr>
<tr><td>HandOccNet [51]</td><td>60.58</td><td>187.24</td><td>110.28</td><td>293.92</td><td>80.96</td><td>32.77</td></tr>
<tr><td>HandOccNet + KPE</td><td>47.57</td><td>72.25</td><td>103.30</td><td>232.83</td><td>78.64</td><td>13.54</td></tr>
<tr><td>HaMeR [52] (ViT)</td><td>30.57</td><td>113.26</td><td>79.48</td><td>227.59</td><td>55.36</td><td>25.48</td></tr>
<tr><td>HaMeR (ViT) + KPE</td><td>24.15</td><td>62.99</td><td>71.64</td><td>184.55</td><td>47.02</td><td>9.77</td></tr>
</table>
", + "bbox": [ + 220, + 212, + 782, + 310 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/2ceaef5e4e395be0510430150288cfcd24a52c0994a91f3272959fe2a5acfaf5.jpg", + "table_caption": [ + "Table 3: Role of auxiliary supervision. We consider grasp and mask supervision from both Epic-Kitchens & Ego4D to train WildHands and show results in zero-shot generalization settings. Both grasp & mask supervision lead to improvements in 3D & 2D metrics, with hand masks providing larger gain compared to grasp labels. Even though auxiliary supervision is on Epic/Ego4D, it leads to improvements in all settings, i.e. benefits from training on broad data extend beyond datasets with auxiliary supervision." + ], + "table_footnote": [], + "table_body": "
<table>
<tr><td></td><td colspan="2">H2O</td><td colspan="2">Assembly</td><td>Ego-Exo4D</td><td>Epic-HandKps</td></tr>
<tr><td></td><td>MPJPE</td><td>MRRPE</td><td>MPJPE</td><td>MRRPE</td><td>MPJPE</td><td>L2 Error</td></tr>
<tr><td>WildHands (no aux)</td><td>39.52</td><td>77.07</td><td>93.44</td><td>208.32</td><td>70.39</td><td>17.07</td></tr>
<tr><td>+ EPIC grasp</td><td>38.34</td><td>76.04</td><td>90.23</td><td>180.85</td><td>63.30</td><td>-</td></tr>
<tr><td>+ EPIC mask</td><td>34.29</td><td>60.23</td><td>87.94</td><td>175.31</td><td>56.41</td><td>-</td></tr>
<tr><td>+ EPIC grasp + EPIC mask</td><td>31.08</td><td>49.49</td><td>84.91</td><td>164.90</td><td>55.84</td><td>-</td></tr>
<tr><td>+ Ego4D grasp</td><td>41.06</td><td>111.47</td><td>86.44</td><td>222.23</td><td>69.73</td><td>8.22</td></tr>
<tr><td>+ Ego4D mask</td><td>38.17</td><td>57.93</td><td>82.55</td><td>145.78</td><td>63.43</td><td>7.87</td></tr>
<tr><td>+ Ego4D grasp + Ego4D mask</td><td>35.62</td><td>62.10</td><td>79.08</td><td>148.12</td><td>60.80</td><td>7.20</td></tr>
</table>
", + "bbox": [ + 220, + 420, + 782, + 534 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "models (Tab. 2) through controlled experiments, i.e. all factors outside of what we want to check the affect of, are kept constant. All the results are reported in a zero-shot setting i.e. models are not trained on the evaluation dataset.", + "bbox": [ + 212, + 564, + 785, + 609 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Impact of crops: To understand the benefits due to using crops as input instead of full images, we compare ArcticNet-SF and HandNet in Tab. 1. The only difference between these two models is: ArcticNet-SF uses full image as input whereas HandNet uses crops as input. We see gains of $27.7\\%$ in MPJPE, $29.7\\%$ in MRRPE, $10.7\\%$ in PA-MPJPE, and $9.7\\%$ in 2D pose across different settings. This provides evidence for the utility of using crops as inputs [50,59].", + "bbox": [ + 212, + 611, + 787, + 702 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Benefits of KPE: In Tab. 1, HandNet & WildHands differ only in the use of KPE. This leads go improvements of $20.5\\%$ in MPJPE, $56.4\\%$ in MRRPE & $65.1\\%$ in 2D pose. Compared to impact of crops, the gains are significantly higher in MRRPE (indicating better absolute pose) and on Epic-HandKps (leading to better generalization in the wild).", + "bbox": [ + 212, + 702, + 787, + 777 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Role of auxiliary supervision: We extract hand masks & grasp labels from Epic-Kitchens & Ego4D and show their benefits in Tab. 3 in zero-shot evaluation settings. Mask supervision leads to gains of $8.5\\%$ in MPJPE, $21.5\\%$ in MRRPE and $55.5\\%$ in 2D pose. Grasp labels improve MPJPE by $2.5\\%$ , MRRPE by $7.3\\%$", + "bbox": [ + 212, + 779, + 787, + 840 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "A. Prakash et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/01d96b71dfe019b372cd0c4b086cd9be9f110a4f68c7958fd29ad3cb79f9aeba.jpg", + "table_caption": [ + "Table 4: Comparison of KPE with relevant approaches. KPE is more effective than other methods for dealing with perspective distortion, e.g. Perspective Correction [45], Perspective Crop Layers (PCL [76]), or other encodings, e.g. CamConv [12]" + ], + "table_footnote": [], + "table_body": "
<table>
<tr><td></td><td colspan="2">H2O</td><td colspan="2">Assembly</td><td>Ego-Exo4D</td><td>Epic-HandKps</td></tr>
<tr><td></td><td>MPJPE</td><td>MRRPE</td><td>MPJPE</td><td>MRRPE</td><td>MPJPE</td><td>L2 Error</td></tr>
<tr><td colspan="7">HandNet +</td></tr>
<tr><td>CamConv</td><td>36.86</td><td>67.62</td><td>96.72</td><td>180.73</td><td>60.69</td><td>17.35</td></tr>
<tr><td>Perspective Corr.</td><td>39.95</td><td>159.13</td><td>59.10</td><td>637.32</td><td>67.45</td><td>28.68</td></tr>
<tr><td>PCL [76]</td><td>36.82</td><td>158.88</td><td>45.18</td><td>483.92</td><td>63.65</td><td>28.21</td></tr>
<tr><td>KPE (WildHands)</td><td>31.08</td><td>49.49</td><td>84.91</td><td>164.90</td><td>55.84</td><td>11.05</td></tr>
</table>
", + "bbox": [ + 223, + 207, + 782, + 301 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/5856abd155a0555bb50b97510a7cdf918c4e38ae16e031cfa19aa4b68a35fb93.jpg", + "table_caption": [ + "Table 5: KPE Design Choices. We study the impact of different design choices of KPE on WildHands: adding KPE with the input instead of latent features (w/ input), removing intrinsics from KPE (no intrx), dense variant of KPE from [54]. WildHands uses sparse variant of KPE. We observe that all variants of KPE provide significant benefits compared to the model without KPE and the sparse variant performs the best." + ], + "table_footnote": [], + "table_body": "
<table>
<tr><td></td><td colspan="2">H2O</td><td colspan="2">Assembly</td><td>Ego-Exo4D</td><td>Epic-HandKps</td></tr>
<tr><td></td><td>MPJPE</td><td>MRRPE</td><td>MPJPE</td><td>MRRPE</td><td>MPJPE</td><td>L2 Error</td></tr>
<tr><td>no KPE</td><td>38.06</td><td>141.06</td><td>109.88</td><td>317.49</td><td>89.72</td><td>31.62</td></tr>
<tr><td>KPE w/ input</td><td>45.51</td><td>80.96</td><td>94.45</td><td>252.34</td><td>93.56</td><td>17.30</td></tr>
<tr><td>KPE no intrx</td><td>36.97</td><td>61.98</td><td>92.12</td><td>246.45</td><td>60.80</td><td>11.63</td></tr>
<tr><td>KPE dense</td><td>36.86</td><td>80.54</td><td>95.34</td><td>201.33</td><td>69.11</td><td>11.24</td></tr>
<tr><td>KPE sparse</td><td>31.08</td><td>49.49</td><td>84.91</td><td>164.90</td><td>55.84</td><td>11.05</td></tr>
</table>
", + "bbox": [ + 223, + 404, + 782, + 503 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "and 2D pose by $4.3\\%$ . While both sources of supervision are effective, hand masks lead to larger gains. Combining both mask and grasp supervision leads to further improvements in both 3D & 2D poses across most settings. Moreover, auxiliary supervision on in-the-wild data also aids performance on lab datasets, suggesting that generalization gains from training on broad data are not dataset specific.", + "bbox": [ + 215, + 539, + 787, + 614 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Comparison of KPE with relevant approaches: In Tab. 4, we find KPE to be more effective than other methods for dealing with perspective distortion, e.g. Perspective Correction [45], Perspective Crop Layers (PCL [76]), or different forms of positional encoding, e.g. CamConv [12].", + "bbox": [ + 215, + 619, + 787, + 680 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Impact on transformer models: We investigate if our insights are useful to transformer models as well, i.e. if KPE helps on top of positional encodings already used in transformers and if auxiliary supervision leads to better generalization for large capacity models. For this, we implement these components in HandOccNet [51] & HaMeR [52] and train these models in our settings. From the results in Tab. 2, we see consistent gains across all settings.", + "bbox": [ + 215, + 684, + 787, + 773 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "KPE design choice: We ablate different variants of KPE in Tab. 5: adding KPE with the input instead of latent features (w/ input), removing intrinsics from KPE (no intrx) and dense variant of KPE from [54]. Note that the sparse variant performs the best, so we use sparse KPE in WildHands.", + "bbox": [ + 215, + 777, + 787, + 839 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "3D Hand Pose Estimation in Everyday Egocentric Images", + "bbox": [ + 344, + 114, + 730, + 128 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 116, + 782, + 126 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/7ce10839fb9aecc289e12d8e4be106f811e43ac5dd9418f52e48a330271068c7.jpg", + "table_caption": [ + "Table 6: Effect of scaling up data. Training on more datasets leads to consistent improvements in models performance on held out datasets." + ], + "table_footnote": [], + "table_body": "
| | H2O | H2O | Ego-Exo4D | Epic-HandKps |
| | MPJPE | MRRPE | MPJPE | L2 Error |
| ARCTIC | 47.30 | 75.17 | 87.71 | 17.07 |
| ARCTIC + Assembly | 39.52 | 77.07 | 70.39 | 11.05 |
| ARCTIC + Assembly + Ego4D (aux) | 35.62 | 62.10 | 60.80 | 7.20 |
", + "bbox": [ + 220, + 184, + 785, + 263 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Intrinsics during training: Intrinsics may not always be available in in-the-wild data used to derive auxiliary supervision. To study this setting, we consider in-the-wild Ego4D data since it contains images from multiple cameras, and do not assume access to intrinsics. In this case, we replace the KPE with a sinusoidal positional encoding of normalized image coordinates w.r.t. the center. The Ego4D results in Tab. 3 follow this setting and we observe that auxiliary supervision from Ego4D provides benefits even in the absence of camera information.", + "bbox": [ + 212, + 291, + 785, + 397 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Scaling up training data: We ablate variants of WildHands trained with ARCTIC, ARCTIC + AssemblyHands, ARCTIC + Ego4D and ARCTIC + AssemblyHands + Ego4D in zero-shot settings on H2O, Ego-Exo4D, and EpicHandKps. We use 3D supervision on ARCTIC & AssemblyHands and auxiliary supervision (hand masks, grasp labels) on Ego4D. Tab. 6 shows consistent improvements in 3D and 2D metrics from both the AssemblyHands and Ego4D datasets, suggesting that scaling up the training data further can yield additional gains.", + "bbox": [ + 212, + 397, + 787, + 503 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.3 System-level Evaluation", + "text_level": 1, + "bbox": [ + 215, + 526, + 459, + 542 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "While all of our earlier experiments are conducted in controlled settings, we also present a system-level comparison to past methods, specifically to methods submitted to the ARCTIC leaderboard (as of July 13, 2024), and with the publicly released models of FrankMocap [59] and HaMeR [52].", + "bbox": [ + 212, + 551, + 545, + 657 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "ARCTIC Leaderboard: Our method achieves the best 3D hand pose on the egocentric split, compared to recent state-of-the-art convolutional (e.g. ArcticNet-SF, DIGIT-HRNet, HMR-ResNet50) and transformer (e.g.", + "bbox": [ + 212, + 657, + 545, + 734 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "JointTransformer) models (as of July 13, 2024). However, it is not possible to do a detailed comparison since most of these models are not public.", + "bbox": [ + 212, + 734, + 784, + 762 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Comparison with FrankMocap [59] and HaMeR [52]: We show results with the publicly released models in Tab. 8. Note that HaMeR uses a ViT-H backbone which is much larger and more performant than the ResNet50 backbone used in WildHands. WildHands outperforms FrankMocap across all metrics and HaMeR on 3 of 6 metrics while being $10 \times$ smaller & trained on $5 \times$ less data.", + "bbox": [ + 212, + 763, + 785, + 839 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/626eed6d1432471103a626491f1dcda765db059dc49bfc6cad3ddca1de81b237.jpg", + "table_caption": [ + "Table 7: Leaderboard results. WildHands leads in 3D hand pose on the egocentric split of the ARCTIC leaderboard (as of July 13, 2024)." + ], + "table_footnote": [], + "table_body": "
| Method | MPJPE | MRRPE |
| ArcticNet-SF | 19.18 | 28.31 |
| ArcticOccNet | 19.77 | 29.75 |
| DIGIT-HRNet | 16.74 | 25.49 |
| HMR-ResNet50 | 20.32 | 32.32 |
| JointTransformer | 16.33 | 26.07 |
| WildHands | 15.72 | 23.88 |
", + "bbox": [ + 558, + 625, + 785, + 724 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "A. Prakash et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/b63a51f382d93f665181d1e1032d2f325fc8277e34956f5503d1f02673d81d30.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 222, + 143, + 331, + 228 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/9fd386964b7762560284bb12c6249b81cafea519a2011da79922cff2b8d42782.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 143, + 395, + 229 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/0ff6ef8d1147bf804c7ce8bc1049909346692990516d11b2bd456122b1390718.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 403, + 146, + 486, + 224 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/a56a373135997bf871a72c1bb33729f3a1ec53bfe97a54b5f96dad8faed716b2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 222, + 231, + 331, + 316 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/5406c867f71f8ced4b0b9687044bd4de2bf2a960e2dff9c10dfcd6e7fd75876e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 231, + 395, + 316 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/97fe05cba454ef27842d406056da182173533e8f10bbd1f30a913758e0d394fc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 406, + 231, + 488, + 315 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/f276b3dc25b6d5ae71da2b56756af7ac95216e28ff75b635dd4c6a4e01f57ab0.jpg", + "image_caption": [ + "Fig. 4: Visualizations. We show the projection of the predicted hand in the image & a rendering of the hand mesh from 2 more views. WildHands predicts better hand poses from a single image than FrankMocap [59], HaMeR [52] and ArcticNet [14] in challenging egocentric scenarios involving occlusions and perspective distortion."
+ ], + "image_footnote": [], + "bbox": [ + 220, + 318, + 331, + 402 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/be3607737c9943517dda8294ac568933abb600cfb43bf29b255fd0f059955145.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 316, + 395, + 402 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/dd2661b40262104f4125262fe3f8449367bd01fb7ecab650e0db42276640e3ef.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 406, + 319, + 488, + 392 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/99c7713e0bb4e49090213f40ec040645fdc9afb9562a3ce89f2b5cf67c8a4dcd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 514, + 143, + 622, + 229 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/89e7f2789a3def971b2b0c89d2b7acdcf1aadd1cdb8a1a5f021420681ceffaf9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 625, + 143, + 687, + 229 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/a298318701025175b564af3834f40569ba9a2be4a9e2853afeb851d2ee0dae1c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 514, + 231, + 622, + 316 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/dbf18ce05743d49f5063ec749bb540fd70897113ed951a1dcfa7e423c7ce38a4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 625, + 231, + 687, + 316 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/8fb0cca3c4b69efae39213d8dc027f1b0b84d9e50aec9f23d47cbb494df53aa2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 513, + 319, + 622, + 402 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/596c6c2589245dbdc705e84ea48e307981b54a43f6ca77a432e21802a31e1146.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 625, + 319, + 687, + 402 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/e4620473ef14f57826760a17de33d85973dcb3dc5d9f4376dfc332d18330f772.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 694, + 147, + 782, + 229 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/c1a8fbbb48f7143b1b7b0e371ec712155de7870d08f8372dccbe099a5a378922.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 694, + 231, + 779, + 268 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/aa3ed71ed34a1f88529a84444bf12e1b3206f0dee86c1b08b4b9f4f7deffe767.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 696, + 284, + 779, + 311 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/7bcde915d03d079a91588aa90d482336e9df3b3741ab6e33f6ae6509c34e146b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 696, + 325, + 777, + 354 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/3a8157dad25e84e2cca74e1161c40f2553104fab9a80eeefe1254c0f98e29f84.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 696, + 368, + 777, + 395 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.4 Visualizations", + "text_level": 1, + "bbox": [ + 215, + 512, + 375, + 526 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We show qualitative comparisons of the hand pose, predicted by WildHands, with FrankMocap on Epic-HandKps (Fig. 4a) and ArcticNet-SF on ARCTIC (Fig. 4b). 
Looking at the projection of the mesh in the camera view and the rendering of the mesh from additional views, we observe that WildHands is able to predict hand pose better in images involving occlusion and interaction, e.g. our model curls the fingers around the object in contact (Fig. 4) whereas FrankMocap does not. We observe similar trends in ARCTIC (Fig. 4b) where our model predicts better hands in contact scenarios. More results are provided in the supplementary.", + "bbox": [ + 212, + 559, + 787, + 681 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Failure Cases: We observe that images in which the fingers are barely visible, e.g. when kneading dough in the top row (Fig. 5), or containing extreme poses, e.g. grasps in the bottom row (Fig. 5), are quite challenging for all models.", + "bbox": [ + 214, + 684, + 787, + 729 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Limitations: KPE requires camera intrinsics to be known, which may not be available in certain scenarios. However, for several in-the-wild images, the metadata often contains camera information. Also, we currently set the weights for different loss terms as hyperparameters, which may not be ideal since the sources of supervision are quite different, leading to different scales in loss values. It could be useful to use a learned weighting scheme, e.g. uncertainty-based loss weighting [2, 29, 38].", + "bbox": [ + 212, + 733, + 787, + 840 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "3D Hand Pose Estimation in Everyday Egocentric Images", + "bbox": [ + 344, + 114, + 732, + 128 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 784, + 126 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/614b7f1b8f179a2765c341cac9e3556b80da4dd914c85c25ff6146e88031b0c3.jpg", + "table_caption": [ + "Table 8: Systems comparison. We evaluate against publicly released models: FrankMocap [59] (a popular method for 3D hand pose estimation), and HaMeR [52]. FrankMocap uses a ResNet-50 backbone and is trained on 6 lab datasets. HaMeR uses a ViT-H [11] backbone and is trained on 7 lab + 3 in-the-wild + HInt datasets across nearly 3M frames. The WildHands model uses a ResNet-50 backbone and is trained on 3 datasets. WildHands outperforms FrankMocap across all metrics and HaMeR on 3 of 6 metrics while being $10 \times$ smaller & trained on $5 \times$ less data. We expect that scaling up the backbone and the datasets used to train WildHands can lead to even stronger performance." + ], + "table_footnote": [], + "table_body": "
| | H2O | H2O | Assembly | Assembly | Ego-Exo4D | Epic-HandKps |
| | MPJPE | MRRPE | MPJPE | MRRPE | MPJPE | L2 Error |
| FrankMocap [59] (ResNet-50, 6 lab) | 58.51 | - | 97.59 | - | 175.91 | 13.33 |
| HaMeR [52] (ViT-H, 7 lab + 3 wild + HInt) | 23.82 | 147.87 | 45.49 | 334.52 | 116.46 | 4.56 |
| WildHands (ResNet-50, 2 lab + 1 wild) | 31.08 | 49.49 | 80.40 | 148.12 | 55.84 | 7.20 |
", + "bbox": [ + 223, + 267, + 782, + 334 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/b273875b3f49d0d4cd075becad73149e9043941a80f71c2ba98c5d157ed2bbc2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 222, + 352, + 333, + 438 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/86fdcc4c08e0fb9cb8630670013e5e26b4cf74a7e43f17fb123c9459e18feab2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 352, + 395, + 438 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/76056f448812d1968bfc5cb72da41d04e89588d991fe1def7e6b0d05814d8ddd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 406, + 364, + 483, + 438 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/a9e08c24412fecc4e28b9325d20b72d7d7ae92172ed92877b26a7db2a0f14d24.jpg", + "image_caption": [ + "Fig. 5: Failure cases. We observe that images with (top) barely visible fingers, e.g. kneading dough or (bottom) extreme grasp poses are challenging for all models." + ], + "image_footnote": [], + "bbox": [ + 220, + 440, + 334, + 525 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/323c86de16f0947cd0a393ef2823329466a073d51f3758b1f8aa0ac6acd10c66.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 336, + 440, + 395, + 525 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/dcb1517fbbed8dfd0370c59bef0a5586003b36fa3104b5d1e26208f7d5459871.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 406, + 455, + 483, + 518 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/1aad294fd700e89579ca4a400571141d7256b7c781ac42e7bfc66f7a14ba042e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 352, + 630, + 438 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/4926c4e0dc2d6cbbe7ef6c6586d91afa2ce7e7f57d1bdd53007622e4502fb647.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 635, + 352, + 694, + 438 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/0b73a7fae6a35a73d7641b7f2beaa1341caa0f7a69b5bd763753a06eb9d3c882.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 702, + 359, + 787, + 440 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/62c03868585d8ce0d0ce0af10d50f7faf21810e79a12245894419c0acc4e86df.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 440, + 630, + 525 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/5c1857075799195991145c9ade501ea18031f123013d2a7186e4db61b49f5f2b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 635, + 440, + 694, + 525 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/87bd577a8cfc8807ff1d0e35485d2e8df67140aa35bc70b736d8395c8e6d6971.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 702, + 455, + 781, + 518 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 214, + 604, + 359, + 619 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We present WildHands, a system that adapts best practices from the literature: using crops as input, intrinsics-aware positional encoding, auxiliary sources of supervision and multi-dataset training, for robust prediction of 3D hand poses on egocentric images in the wild. Experiments on both lab datasets and in-the-wild settings show the effectiveness of WildHands. 
As a future direction, WildHands could be used to scale up learning robot policies from human interactions.", + "bbox": [ + 212, + 654, + 787, + 744 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgements: We thank Arjun Gupta, Shaowei Liu, Anand Bhattachad & Kashyap Chitta for feedback on the draft, and David Forsyth for useful discussion. This material is based upon work supported by NSF (IIS2007035), NASA (80NSSC21K1030), DARPA (Machine Common Sense program), Amazon Research Award, NVIDIA Academic Hardware Grant, and the NCSA Delta System (supported by NSF OCI 2005572 and the State of Illinois).", + "bbox": [ + 212, + 750, + 787, + 840 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "A. Prakash et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 217, + 143, + 321, + 159 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Ballan, L., Taneja, A., Gall, J., Gool, L.V., Pollefeys, M.: Motion capture of hands in action using discriminative salient points. In: Proceedings of the European Conference on Computer Vision (ECCV) (2012)", + "2. Brazil, G., Kumar, A., Straub, J., Ravi, N., Johnson, J., Gkioxari, G.: Omni3d: A large benchmark and model for 3d object detection in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 13154-13164 (2023)", + "3. Cao, Z., Radosavovic, I., Kanazawa, A., Malik, J.: Reconstructing hand-object interactions in the wild. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)", + "4. Chao, Y., Yang, W., Xiang, Y., Molchanov, P., Handa, A., Tremblay, J., Narang, Y.S., Wyk, K.V., Iqbal, U., Birchfield, S., Kautz, J., Fox, D.: Dexycb: A benchmark for capturing hand grasping of objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)", + "5. Chen, Z., Zhang, H.: Learning implicit fields for generative shape modeling. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)", + "6. Cheng, T., Shan, D., Hassen, A.S., Higgins, R.E.L., Fouhey, D.: Towards a richer 2d understanding of hands at scale. In: Advances in Neural Information Processing Systems (NeurIPS) (2023)", + "7. Damen, D., Doughty, H., Farinella, G.M., Fidler, S., Furnari, A., Kazakos, E., Moltisanti, D., Munro, J., Perrett, T., Price, W., Wray, M.: Scaling egocentric vision: The epic-kitchens dataset. In: Proceedings of the European Conference on Computer Vision (ECCV) (2018)", + "8. Damen, D., Doughty, H., Farinella, G.M., Fidler, S., Furnari, A., Kazakos, E., Moltisanti, D., Munro, J., Perrett, T., Price, W., Wray, M.: The epic-kitchen dataset: Collection, challenges and baselines. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI) (2020)", + "9. Damen, D., Doughty, H., Farinella, G.M., Fidler, S., Furnari, A., Kazakos, E., Moltisanti, D., Munro, J., Perrett, T., Price, W., et al.: Scaling egocentric vision: The epic-kitchens dataset. In: Proceedings of the European Conference on Computer Vision (ECCV) (2018)", + "10. Darkhalil, A., Shan, D., Zhu, B., Ma, J., Kar, A., Higgins, R., Fidler, S., Fouhey, D., Damen, D.: Epic-kitchen visor benchmark: Video segmentations and object relations. 
In: NeurIPS Track on Datasets and Benchmarks (2022)", + "11. Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)", + "12. Facil, J.M., Ummenhofer, B., Zhou, H., Montesano, L., Brox, T., Civera, J.: Camconvs: Camera-aware multi-scale convolutions for single-view depth. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 11826-11835 (2019)", + "13. Fan, Z., Spurr, A., Kocabas, M., Tang, S., Black, M.J., Hilliges, O.: Learning to disambiguate strongly interacting hands via probabilistic per-pixel part segmentation. In: Proceedings of the International Conference on 3D Vision (3DV) (2021)", + "14. Fan, Z., Taheri, O., Tzionas, D., Kocabas, M., Kaufmann, M., Black, M.J., Hilliges, O.: ARCTIC: A dataset for dexterous bimanual hand-object manipulation. In:" + ], + "bbox": [ + 225, + 175, + 785, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "3D Hand Pose Estimation in Everyday Egocentric Images", + "bbox": [ + 344, + 114, + 730, + 128 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)", + "15. Freeman, W.T., Roth, M.: Orientation histograms for hand gesture recognition. In: International workshop on automatic face and gesture recognition. vol. 12, pp. 296-301. Citeseer (1995)", + "16. Garcia-Hernando, G., Yuan, S., Baek, S., Kim, T.K.: First-person hand action benchmark with rgb-d videos and 3d hand pose annotations. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)", + "17. Grauman, K., Westbury, A., Byrne, E., Chavis, Z., Furnari, A., Girdhar, R., Hamburger, J., Jiang, H., Liu, M., Liu, X., et al.: Ego4d: Around the world in 3,000 hours of egocentric video. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)", + "18. Grauman, K., Westbury, A., Torresani, L., Kitani, K., Malik, J., Afouras, T., Ashutosh, K., Baiyya, V., Bansal, S., Boote, B., et al.: Ego-exo4d: Understanding skilled human activity from first- and third-person perspectives. arXiv preprint arXiv:2311.18259 (2023)", + "19. Guizilini, V., Vasiljevic, I., Chen, D., Ambrus, R., Gaidon, A.: Towards zero-shot scale-aware monocular depth estimation. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2023)", + "20. Guizilini, V., Vasiljevic, I., Fang, J., Ambru, R., Shakhnarovich, G., Walter, M.R., Gaidon, A.: Depth field networks for generalizable multi-view scene representation. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)", + "21. Hampali, S., Rad, M., Oberweger, M., Lepetit, V.: Honnotate: A method for 3d annotation of hand and object poses. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)", + "22. Hampali, S., Sarkar, S.D., Rad, M., Lepetit, V.: Keypoint transformer: Solving joint identification in challenging hands and object interactions for accurate 3d pose estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)", + "23. 
Hartley, R., Zisserman, A.: Multiple view geometry in computer vision. Cambridge university press (2003)", + "24. Hasson, Y., Tekin, B., Bogo, F., Laptev, I., Pollefeys, M., Schmid, C.: Leveraging photometric consistency over time for sparsely supervised hand-object reconstruction. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)", + "25. Hasson, Y., Varol, G., Tzionas, D., Kalevatykh, I., Black, M.J., Laptev, I., Schmid, C.: Learning joint reconstruction of hands and manipulated objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)", + "26. He, K., Gkioxari, G., Dollar, P., Girshick, R.B.: Mask R-CNN. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2017)", + "27. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2016)", + "28. Heap, T., Hogg, D.: Towards 3d hand tracking using a deformable model. In: Proceedings of the Second International Conference on Automatic Face and Gesture Recognition. pp. 140-145. IEEE (1996)", + "29. Hu, A., Murez, Z., Mohan, N., Dudas, S., Hawke, J., Badrinarayanan, V., Cipolla, R., Kendall, A.: FIERY: future instance prediction in bird's-eye view from surround monocular cameras. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)" + ], + "bbox": [ + 215, + 146, + 785, + 839 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "A. Prakash et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "30. Ioffe, S., Szegedy, C.: Batch normalization: Accelerating deep network training by reducing internal covariate shift. In: Bach, F.R., Blei, D.M. (eds.) Proceedings of the International Conference on Machine Learning (ICML) (2015)", + "31. Ivashechkin, M., Mendez, O., Bowden, R.: Denoising diffusion for 3d hand pose estimation from images. arXiv 2308.09523 (2023)", + "32. Jiang, C., Xiao, Y., Wu, C., Zhang, M., Zheng, J., Cao, Z., Zhou, J.T.: A2j-transformer: Anchor-to-joint transformer network for 3d interacting hand pose estimation from a single RGB image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)", + "33. Jiang, Z., Rahmani, H., Black, S., Williams, B.M.: A probabilistic attention model with occlusion-aware texture regression for 3d hand reconstruction from a single RGB image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)", + "34. Kanazawa, A., Black, M.J., Jacobs, D.W., Malik, J.: End-to-end recovery of human shape and pose. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)", + "35. Kanazawa, A., Tulsiani, S., Efros, A.A., Malik, J.: Learning category-specific mesh reconstruction from image collections. In: Proceedings of the European Conference on Computer Vision (ECCV) (2018)", + "36. Karunratanakul, K., Yang, J., Zhang, Y., Black, M.J., Muandet, K., Tang, S.: Grasping field: Learning implicit representations for human grasps. In: Proceedings of the International Conference on 3D Vision (3DV) (2020)", + "37. Kato, H., Ushiku, Y., Harada, T.: Neural 3d mesh renderer. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)", + "38. Kendall, A., Gal, Y., Cipolla, R.: Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)", + "39. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. In: Bengio, Y., LeCun, Y. (eds.) Proceedings of the International Conference on Learning Representations (ICLR) (2015)", + "40. Kwon, T., Tekin, B., Stühmer, J., Bogo, F., Pollefeys, M.: H2o: Two hands manipulating objects for first person interaction recognition. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)", + "41. Lin, T., Dollár, P., Girshick, R.B., He, K., Hariharan, B., Belongie, S.J.: Feature pyramid networks for object detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2017)", + "42. Liu, S., Chen, W., Li, T., Li, H.: Soft rasterizer: A differentiable renderer for image-based 3d reasoning. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2019)", + "43. Liu, S., Li, T., Chen, W., Li, H.: A general differentiable mesh renderer for image-based 3d reasoning. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI) (2020)", + "44. Liu, Y., Liu, Y., Jiang, C., Lyu, K., Wan, W., Shen, H., Liang, B., Fu, Z., Wang, H., Yi, L.: HOI4D: A 4d egocentric dataset for category-level human-object interaction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)", + "45. Mehta, D., Rhodin, H., Casas, D., Fua, P., Sotnychenko, O., Xu, W., Theobalt, C.: Monocular 3d human pose estimation in the wild using improved CNN supervision. In: Proceedings of the International Conference on 3D Vision (3DV) (2017)" + ], + "bbox": [ + 212, + 146, + 787, + 840 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "3D Hand Pose Estimation in Everyday Egocentric Images", + "bbox": [ + 344, + 114, + 730, + 128 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "46. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)", + "47. Miyato, T., Jaeger, B., Welling, M., Geiger, A.: GTA: A geometry-aware attention mechanism for multi-view transformers. arXiv (2023)", + "48. Moon, G., Yu, S., Wen, H., Shiratori, T., Lee, K.M.: Interhand2.6m: A dataset and baseline for 3d interacting hand pose estimation from a single RGB image. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)", + "49. Nair, V., Hinton, G.E.: Rectified linear units improve restricted boltzmann machines. In: Proceedings of the International Conference on Machine Learning (ICML) (2010)", + "50. Ohkawa, T., He, K., Sener, F., Hodan, T., Tran, L., Keskin, C.: Assemblyhands: Towards egocentric activity understanding via 3d hand pose estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 12999-13008 (2023)", + "51. Park, J., Oh, Y., Moon, G., Choi, H., Lee, K.M.: Handoccnet: Occlusion-robust 3d hand mesh estimation network. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)", + "52. Pavlakos, G., Shan, D., Radosavovic, I., Kanazawa, A., Fouhey, D., Malik, J.: Reconstructing hands in 3d with transformers. arXiv preprint arXiv:2312.05251 (2023)", + "53. Potamias, R.A., Ploumpis, S., Moschoglou, S., Triantafyllou, V., Zafeiriou, S.: Handy: Towards a high fidelity 3d hand shape and appearance model. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 4670-4680 (June 2023)", + "54. Prakash, A., Gupta, A., Gupta, S.: Mitigating perspective distortion-induced shape ambiguity in image crops. arXiv 2312.06594 (2023)", + "55. Ravi, N., Reizenstein, J., Novotny, D., Gordon, T., Lo, W.Y., Johnson, J., Gkioxari, G.: Accelerating 3d deep learning with pytorch3d. arXiv:2007.08501 (2020)", + "56. Rehg, J.M., Kanade, T.: Visual tracking of high dof articulated structures: an application to human hand tracking. In: Proceedings of the European Conference on Computer Vision (ECCV) (1994)", + "57. Rogez, G., Khademi, M., Supancic III, J., Montiel, J.M.M., Ramanan, D.: 3d hand pose detection in egocentric rgb-d images. In: Proceedings of the European Conference on Computer Vision (ECCV) (2014)", + "58. Romero, J., Tzionas, D., Black, M.J.: Embodied hands: Modeling and capturing hands and bodies together. ACM Transactions on Graphics (ToG) (2017)", + "59. Rong, Y., Shiratori, T., Joo, H.: Frankmocap: Fast monocular 3D hand and body motion capture by regression and integration. Proceedings of the IEEE International Conference on Computer Vision Workshops (ICCV Workshops) (2021)", + "60. Sener, F., Chatterjee, D., Shelepov, D., He, K., Singhania, D., Wang, R., Yao, A.: Assembly101: A large-scale multi-view video dataset for understanding procedural activities. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)", + "61. Shan, D., Geng, J., Shu, M., Fouhey, D.F.: Understanding human hands in contact at internet scale. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)", + "62. Sharp, T., Keskin, C., Robertson, D., Taylor, J., Shotton, J., Kim, D., Rhemann, C., Leichter, I., Vinnikov, A., Wei, Y., et al.: Accurate, robust, and flexible real-time hand tracking. In: Proceedings of the 33rd annual ACM conference on human factors in computing systems. pp. 3633-3642 (2015)" + ], + "bbox": [ + 215, + 146, + 785, + 839 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "A. Prakash et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "63. Simon, T., Joo, H., Matthews, I.A., Sheikh, Y.: Hand keypoint detection in single images using multiview bootstrapping. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2017)", + "64. Sridhar, S., Mueller, F., Zollhöfer, M., Casas, D., Oulasvirta, A., Theobalt, C.: Real-time joint tracking of a hand manipulating an object from rgb-d input. In: Proceedings of the European Conference on Computer Vision (ECCV) (2016)", + "65. Sridhar, S., Oulasvirta, A., Theobalt, C.: Interactive markerless articulated hand motion tracking using RGB and depth data. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2013)", + "66. 
Sun, X., Wei, Y., Liang, S., Tang, X., Sun, J.: Cascaded hand pose regression. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2015)", + "67. Taheri, O., Ghorbani, N., Black, M.J., Tzionas, D.: GRAB: A dataset of whole-body human grasping of objects. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)", + "68. Tompson, J., Stein, M., Lecun, Y., Perlin, K.: Real-time continuous pose recovery of human hands using convolutional networks. ACM Transactions on Graphics (ToG) 33(5), 1-10 (2014)", + "69. Tulsiani, S., Zhou, T., Efros, A.A., Malik, J.: Multi-view supervision for single-view reconstruction via differentiable ray consistency. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 2626-2634 (2017)", + "70. Tzionas, D., Gall, J.: 3d object reconstruction from hand-object interactions. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2015)", + "71. Vaswani, A., Shazeer, N.M., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. Advances in Neural Information Processing Systems (NeurIPS) (2017)", + "72. Wan, C., Yao, A., Gool, L.V.: Hand pose estimation from local surface normals. In: Proceedings of the European Conference on Computer Vision (ECCV) (2016)", + "73. Yang, L., Li, K., Zhan, X., Wu, F., Xu, A., Liu, L., Lu, C.: Oakink: A large-scale knowledge repository for understanding hand-object interaction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)", + "74. Ye, Y., Gupta, A., Tulsiani, S.: What's in your hands? 3D reconstruction of generic objects in hands. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)", + "75. Yifan, W., Doersch, C., Arandjelovic, R., Carreira, J., Zisserman, A.: Input-level inductive biases for 3d reconstruction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)", + "76. Yu, F., Salzmann, M., Fua, P., Rhodin, H.: Pcls: Geometry-aware neural reconstruction of 3d pose with perspective crop layers. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)", + "77. Zhang, X., Li, Q., Mo, H., Zhang, W., Zheng, W.: End-to-end hand mesh recovery from a monocular rgb image. In: ICCV (2019)", + "78. Zimmermann, C., Brox, T.: Learning to estimate 3d hand pose from single rgb images. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2017)", + "79. Zimmermann, C., Ceylan, D., Yang, J., Russell, B.C., Argus, M.J., Brox, T.: Freihand: A dataset for markerless capture of hand pose and shape from single RGB images. 
In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2019)" + ], + "bbox": [ + 212, + 146, + 787, + 834 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "3D Hand Pose Estimation in Everyday Egocentric Images", + "bbox": [ + 344, + 114, + 730, + 128 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 18 + } +] \ No newline at end of file diff --git a/2024/3D Hand Pose Estimation in Everyday Egocentric Images/332a64fb-af20-4857-af36-e23eeaad9f91_model.json b/2024/3D Hand Pose Estimation in Everyday Egocentric Images/332a64fb-af20-4857-af36-e23eeaad9f91_model.json new file mode 100644 index 0000000000000000000000000000000000000000..c47e2d33134568f8e4de54535e64b26fc9c1b8be --- /dev/null +++ b/2024/3D Hand Pose Estimation in Everyday Egocentric Images/332a64fb-af20-4857-af36-e23eeaad9f91_model.json @@ -0,0 +1,2988 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.338, + 0.141, + 0.668, + 0.187 + ], + "angle": 0, + "content": "3D Hand Pose Estimation in Everyday Egocentric Images" + }, + { + "type": "text", + "bbox": [ + 0.265, + 0.213, + 0.739, + 0.229 + ], + "angle": 0, + "content": "Aditya Prakash, Ruisen Tu, Matthew Chang, and Saurabh Gupta" + }, + { + "type": "text", + "bbox": [ + 0.322, + 0.24, + 0.68, + 0.282 + ], + "angle": 0, + "content": "University of Illinois Urbana-Champaign {adityap9,ruisent2,mc48,saurabhg}@illinois.edu https://bit.ly/WildHands" + }, + { + "type": "text", + "bbox": [ + 0.263, + 0.313, + 0.744, + 0.591 + ], + "angle": 0, + "content": "Abstract. 3D hand pose estimation in everyday egocentric images is challenging for several reasons: poor visual signal (occlusion from the object of interaction, low resolution & motion blur), large perspective distortion (hands are close to the camera), and lack of 3D annotations outside of controlled settings. While existing methods often use hand crops as input to focus on fine-grained visual information to deal with poor visual signal, the challenges arising from perspective distortion and lack of 3D annotations in the wild have not been systematically studied. We focus on this gap and explore the impact of different practices, i.e. crops as input, incorporating camera information, auxiliary supervision, scaling up datasets. We provide several insights that are applicable to both convolutional and transformer models, leading to better performance. Based on our findings, we also present WildHands, a system for 3D hand pose estimation in everyday egocentric images. Zero-shot evaluation on 4 diverse datasets (H2O, AssemblyHands, Epic-Kitchens, Ego-Exo4D) demonstrate the effectiveness of our approach across 2D and 3D metrics, where we beat past methods by \\(7.4\\% - 66\\%\\). In system level comparisons, WildHands achieves the best 3D hand pose on ARCTIC egocentric split, outperforms FrankMocap across all metrics and HaMeR on 3 out of 6 metrics while being \\(10\\times\\) smaller and trained on \\(5\\times\\) less data." + }, + { + "type": "text", + "bbox": [ + 0.263, + 0.603, + 0.736, + 0.617 + ], + "angle": 0, + "content": "Keywords: 3D Hand Pose \\(\\cdot\\) Egocentric Vision \\(\\cdot\\) 3D from single image" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.66, + 0.377, + 0.675 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.689, + 0.789, + 0.81 + ], + "angle": 0, + "content": "Understanding egocentric hands in 3D enables applications in AR/VR, robotics. 
While several works have studied exocentric hands [52, 59], no existing approach performs well in diverse egocentric settings outside of lab setups. We focus on this gap & study the impact of common practices, i.e. crops as input, camera information, auxiliary supervision, scaling up datasets, for predicting absolute 3D hand pose from a single egocentric image. We identify 2 important factors: a) modeling the 3D to 2D projection during imaging of the hand in egocentric views, b) scaling up training to diverse datasets by leveraging auxiliary supervision." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.811, + 0.789, + 0.84 + ], + "angle": 0, + "content": "Let's unpack each component. Existing methods often operate on image crops, assume that the image crop is located at the center of the camera's field of view" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "image", + "bbox": [ + 0.219, + 0.144, + 0.331, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.144, + 0.395, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.149, + 0.49, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.219, + 0.229, + 0.331, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.229, + 0.395, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.229, + 0.49, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.144, + 0.624, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.625, + 0.144, + 0.687, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.23, + 0.625, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.626, + 0.23, + 0.688, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.692, + 0.148, + 0.783, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.692, + 0.23, + 0.787, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.692, + 0.277, + 0.785, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.338, + 0.788, + 0.409 + ], + "angle": 0, + "content": "Fig. 1: WildHands predicts the 3D shape, 3D articulation and 3D placement of the hand in the camera frame from a single in-the-wild egocentric RGB image and camera intrinsics. It produces better 3D output compared to FrankMocap [59] in occlusion scenarios and is more adept at dealing with perspective distortion than HaMeR [52], in challenging egocentric hand-object interactions from Epic-Kitchens [9] dataset." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.451, + 0.789, + 0.528 + ], + "angle": 0, + "content": "with a made-up focal length. These choices are reasonable for exocentric settings where the location of the hand in the image does not provide any signal for the hand articulation; and perspective distortion effects are minimal as the hand is far away & occupies a relatively small part of the camera's field of view. 
However, these assumptions are sub-optimal for processing egocentric images." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.533, + 0.79, + 0.715 + ], + "angle": 0, + "content": "Due to the biomechanics of the hand, its location in egocentric images carries information about its pose. Also, as the hand is closer to the camera in egocentric settings, it undergoes a lot more perspective distortion than in exocentric images. A 3D hand pose that correctly explains the 2D hand appearance in one part of an egocentric image may not be accurate for another part of the image. Thus, the location of the hand in the image must be taken into account while making 3D predictions. This suggests feeding the 2D location of the hand in the image to the network. However, the notion of 2D location in the image frame is camera specific. The more fundamental quantity that generalizes across cameras is the angular location in the camera's field of view. We thus adopt the recent KPE embedding [54] to augment hand crop features with sinusoidal encodings of its location in the camera's field of view & find this to improve performance." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.72, + 0.789, + 0.842 + ], + "angle": 0, + "content": "However, just processing image crops the right way is not sufficient for generalization. The model also needs to be trained on broad & diverse datasets outside of lab settings. This is not easy as 3D hand pose is difficult to directly annotate in images. We thus turn to joint training on 3D supervision from lab datasets and 2D auxiliary supervision on in-the-wild data in the form of 2D hand masks [6,10] & grasp labels [6]. To absorb supervision from segmentation labels, we differentiably render [42] the predicted 3D hand into images and back-propagate the loss through the rendering. For grasp supervision, we note" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.346, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D Hand Pose Estimation in Everyday Egocentric Images" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.177 + ], + "angle": 0, + "content": "that hand pose is indicative of the grasp type and use supervision from a grasp classifier that takes the predicted 3D hand pose as input." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.178, + 0.787, + 0.327 + ], + "angle": 0, + "content": "Lack of accurate 3D annotations outside of lab settings makes it challenging to assess the generalization capabilities. To this end, we adopt a zero-shot evaluation strategy. Even though a single lab dataset has limited diversity, a model that performs well on a lab dataset without having seen any images from it likely generalizes well. Furthermore, we collect Epic-HandKps, containing 2D hand joint annotations on 5K images from the VISOR [10] split of in-the-wild Epic-Kitchens [7] to evaluate the 2D projections of the predicted 3D hand pose on everyday images. We also evaluate on the 3D hand poses provided by the concurrent Ego-Exo4D [18]. We believe that these evaluations together comprehensively test the generalization capabilities of different models." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.329, + 0.788, + 0.435 + ], + "angle": 0, + "content": "Our experiments (Sec. 4) show the utility of (1) using crops (vs. full images), (2) inputting 2D crop location (vs. 
not), (3) encoding the crop's location in the camera's field of view (vs. in the image frame), and (4) 2D mask & grasp supervision. We apply these insights to both convolutional and transformer models, leading to better performance. We also present WildHands (Fig. 1) which outperforms FrankMocap [59] on egocentric images and is competitive with the concurrent HaMeR [52] while being \(10 \times\) smaller & trained with \(5 \times\) less data." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.459, + 0.388, + 0.475 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.492, + 0.789, + 0.749 + ], + "angle": 0, + "content": "Hand pose estimation & reconstruction: Several decades of work [15,28,56] have studied different aspects: 2D pose [4,63] vs. 3D pose [40,50,68,72] vs. mesh [1, 25,65], RGB [14,22,25] vs. RGBD [57,62-64,66,68] inputs, egocentric [14,50] vs. allocentric [14,21,22], hands in isolation [48,79] vs. interaction with objects [21, 44,73], feed-forward prediction [14,22,25,60] vs. test-time optimization [3,24]. Driven by the advances in parametric hand models [53,58], recent work has moved past 3D joint estimation towards 3D mesh recovery [14,22,25,52,59,77] in 3 contexts: single hands in isolation [78], hands interacting with objects [14,70] and two hands interacting with one another [22,48]. Jointly reasoning about hands & objects has proved fruitful to improve both hand & object reconstruction [25,36, 74]. While several expressive models focus on 3D hand pose estimation in lab settings [22,31-33,60], only very few works [52] tackle the problem in everyday egocentric images as in Ego4D [17], Epic-Kitchens [7]. We focus on this setting due to challenges involving perspective distortion, dynamic interactions & heavy occlusions. We explore both convolutional [14,59] and transformer models [51,52] to study the impact of using crops, the location of the crop in the camera's field of view & auxiliary supervision in zero-shot generalization to diverse egocentric settings." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Hand datasets: Since 3D hand annotations from single images are difficult to get, most datasets are collected in controlled settings to get 3D ground truth using MoCap [14,67], multi-camera setups [21,22,40,44,50], or magnetic sensors [16]. They often include single hands in isolation [79], hand-object interactions [14, 21,22,40] & hand-hand interactions [48]. Different from these datasets with 3D poses, [6,10,61] provide annotations for segmentation masks [6,10], 2D bounding" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "image", + "bbox": [ + 0.216, + 0.144, + 0.788, + 0.292 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.301, + 0.79, + 0.442 + ], + "angle": 0, + "content": "Fig. 2: Model Overview. We crop the input images around the hand and process them using a convolutional backbone. The hand features along with the global image features (not shown above for clarity) and intrinsics-aware positional encoding (KPE [54]) for each crop are fed to the decoder to predict the 3D hand. 
The hand decoders predict MANO parameters \(\beta, \theta_{\mathrm{local}}, \theta_{\mathrm{global}}\) and camera translation, which are converted to 3D & 2D keypoints and trained using 3D supervision on lab datasets, e.g. ARCTIC [14], AssemblyHands [50]. We also use auxiliary supervision from the in-the-wild Epic-Kitchens [10] dataset via hand segmentation masks and grasp labels. The hand masks are available with the VISOR dataset [10] whereas grasp labels are estimated using the off-the-shelf model from [6]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.473, + 0.789, + 0.595 + ], + "angle": 0, + "content": "boxes [61] and grasp labels [6] on internet videos [61] and egocentric images in the wild [9, 17]. Our work combines 3D supervision from datasets [14, 50] captured in controlled settings with 2D auxiliary supervision, i.e. segmentation masks & grasp labels, from datasets outside the lab [6, 10] to learn models that perform well in challenging everyday images. We collect the Epic-HandKps dataset with 2D hand keypoints on 5K images from Epic-Kitchens for evaluation in everyday images outside of lab settings. We also use the concurrent Ego-Exo4D [18], which annotates 2D keypoints in paired ego & exo views to get 3D hand annotations." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.597, + 0.789, + 0.718 + ], + "angle": 0, + "content": "Auxiliary supervision: Several works on 3D shape prediction from a single image [34,69] often use auxiliary supervision to deal with the lack of 3D annotations. [34] uses keypoint supervision for 3D human mesh recovery, while [69] uses multi-view consistency cues for 3D object reconstruction. Aided by differentiable rendering [37,43], segmentation and depth prediction have been used to provide supervision for 3D reconstruction [3,24,35]. We adopt this use of segmentation as an auxiliary cue for 3D poses. In addition, we use supervision from hand grasp labels based on the insight that hand grasp is indicative of the hand pose." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.72, + 0.789, + 0.843 + ], + "angle": 0, + "content": "Ambiguity: 3D estimation from a single image is ill-posed due to ambiguities arising from scale-depth confusion [23] and cropping [54]. Recent work [54] points out the presence of perspective distortion-induced shape ambiguity in image crops and uses camera intrinsic-based location encodings to mitigate it. We investigate the presence of this ambiguity for hand crops in egocentric images and adopt the proposed embedding to mitigate it. Similar embeddings have been used before in the literature, primarily from the point of view of training models on images from different cameras [12, 19], to encode extrinsic information [20, 47, 75]." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.346, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D Hand Pose Estimation in Everyday Egocentric Images" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.145, + 0.331, + 0.161 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.178, + 0.788, + 0.314 + ], + "angle": 0, + "content": "We present WildHands, a new system for 3D hand pose estimation from egocentric images in the wild. We build on top of ArcticNet-SF [14] and FrankMocap [59]. 
Given a crop around a hand and associated camera intrinsics, WildHands predicts the 3D hand shape as MANO [58] parameters, shape \(\beta\) and pose \(\theta\). \(\theta\) consists of angles of articulation \(\theta_{\mathrm{local}}\) for 15 hand joints and the global pose \(\theta_{\mathrm{global}}\) of the root joint in the camera coordinate system. WildHands is trained using both lab (ARCTIC, AssemblyHands) and in-the-wild (Epic-Kitchens, Ego4D) datasets with different sources of supervision. Fig. 2 provides an overview of our model. Next, we describe each component of WildHands in detail." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.338, + 0.367, + 0.352 + ], + "angle": 0, + "content": "3.1 Architecture" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.363, + 0.789, + 0.629 + ], + "angle": 0, + "content": "Hand encoder: Our models use hand crops as input (resized to \(224 \times 224\) resolution), which are processed by a ResNet50 [27] backbone to get \(7 \times 7 \times 2048\) feature maps. The left and right hand crops are processed separately but the parameters are shared. We also use global image features in our model, computed by average pooling the \(7 \times 7 \times 2048\) feature map to get a 2048-dimensional vector. Incorporating KPE: Recent work [54] has shown that estimating 3D quantities from image crops suffers from perspective distortion-induced shape ambiguity [54]. This raises concerns about whether this ambiguity is also present when using hand crops for predicting 3D pose and how to deal with it. Following the study in [54], we analyze the hands in the ARCTIC dataset (details in the supplementary) and find evidence of this ambiguity in hand crops as well. Thus, we adopt the intrinsics-aware positional encoding (KPE) proposed in [54] to mitigate this ambiguity. Specifically, we provide the network with information about the location of the hand crop in the field of view of the camera. Consider the principal point as \((p_x, p_y)\) & focal length as \((f_x, f_y)\). For each pixel \((x, y)\), we compute \(\theta_x = \tan^{-1}\left(\frac{x - p_x}{f_x}\right)\), \(\theta_y = \tan^{-1}\left(\frac{y - p_y}{f_y}\right)\) & convert them into a sinusoidal encoding [46]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.63, + 0.788, + 0.81 + ], + "angle": 0, + "content": "We add KPE to the \(7 \times 7 \times 2048\) feature map. KPE comprises a sinusoidal encoding of the angles \(\theta_{x}\) and \(\theta_{y}\) (Sec. 4.1 in the main paper), resulting in a \(5 * 4 * K\)-dimensional sparse encoding (5 locations: 4 crop corners and 1 center pixel, each with a \(4 * K\)-dimensional encoding) and an \(H \times W \times 4 * K\) resolution dense encoding, where \(K\) is the number of frequency components (set to 4). For the sparse KPE variant, we broadcast it to \(7 \times 7\) resolution whereas for the dense KPE variant, we interpolate it to \(7 \times 7\) resolution and concatenate it to the feature map. This concatenated feature is passed to 3 convolutional layers (with 1024, 512, 256 channels respectively, each with kernel size of \(3 \times 3\) and ReLU [49] non-linearity) to get a \(3 \times 3 \times 256\) feature map. This is flattened to a 2304-dimensional vector and passed through a 1-layer MLP to get a 2048-dimensional feature vector. We do not use batchnorm [30] here since we want to preserve the spatial information in KPE."
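To make the KPE computation described above concrete, here is a minimal Python sketch (our own illustration, not the authors' released code): the helper name kpe_sparse is hypothetical, and the 2^k spacing of the K frequency components is an assumption about the sinusoidal scheme of [46]; the angle formulas and the 5-location sparse layout follow the text.

```python
import numpy as np

def kpe_sparse(crop_box, fx, fy, px, py, K=4):
    # Sparse KPE: encode the 4 corners and the center pixel of a hand crop
    # by their angular location in the camera's field of view.
    x0, y0, x1, y1 = crop_box  # crop corners in full-image pixel coordinates
    locations = [(x0, y0), (x1, y0), (x0, y1), (x1, y1),  # 4 corners
                 ((x0 + x1) / 2.0, (y0 + y1) / 2.0)]      # 1 center pixel
    enc = []
    for x, y in locations:
        theta_x = np.arctan((x - px) / fx)  # horizontal viewing angle
        theta_y = np.arctan((y - py) / fy)  # vertical viewing angle
        for k in range(K):                  # K frequency components (K = 4)
            freq = 2.0 ** k                 # assumed frequency spacing
            enc += [np.sin(freq * theta_x), np.cos(freq * theta_x),
                    np.sin(freq * theta_y), np.cos(freq * theta_y)]
    return np.asarray(enc)  # 5 locations x 4K values = 5 * 4 * K (80 for K = 4)

# Example: a 300 x 300 crop from a hypothetical 1280 x 720 egocentric camera.
kpe = kpe_sparse((500, 200, 800, 500), fx=900.0, fy=900.0, px=640.0, py=360.0)
```

As described above, this vector would then be broadcast over the 7 x 7 feature map for the sparse variant; the dense variant instead computes the angles for every pixel of the crop before interpolating to 7 x 7.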
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Hand decoder: It consists of an iterative architecture, similar to the decoder in HMR [34]. The inputs are the 2048-dimensional feature vector and initial" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.298 + ], + "angle": 0, + "content": "MANO [58] (shape \(\beta\), articulation \(\theta_{\mathrm{local}}\) and global pose \(\theta_{\mathrm{global}}\), all initialized as 0-vectors) & weak perspective camera parameters (initialized from the 2048-dimensional feature vector). Each of these parameters is predicted using a separate decoder head. The rotation parameters \(\theta_{\mathrm{local}}\), \(\theta_{\mathrm{global}}\) are predicted in matrix form and converted to axis-angle representation to feed to the MANO model. Each decoder is a 3-layer MLP with the 2 intermediate layers having 1024 channels and the output layer having the same number of channels as the predicted parameter. The output of each decoder is added to the initial parameters to get the updated parameters. This process is repeated for 3 iterations. The output of the last iteration is used for the final prediction." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.299, + 0.789, + 0.373 + ], + "angle": 0, + "content": "Differentiable rendering for mask prediction: The outputs from the decoder, \(\beta\), \(\theta_{\mathrm{local}}\), and \(\theta_{\mathrm{global}}\) for the predicted hand, are passed to a differentiable MANO layer [25, 58] to get the hand mesh. This is used to differentiably render a soft segmentation mask, \(M\), using SoftRasterizer [43, 55]. Using a differentiable hand model (MANO) and differentiable rendering lets us train our model end-to-end." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.374, + 0.789, + 0.451 + ], + "angle": 0, + "content": "Grasp classifier: We use the insight that grasp type during interaction with objects is indicative of hand pose. We train a grasp prediction head on \(\theta_{\mathrm{local}}\), \(\theta_{\mathrm{global}}\) & \(\beta\) (predicted by WildHands) via a 4-layer MLP (with 1024, 1024, 512, 128 nodes & ReLU non-linearity after each). The MLP predicts logits for the 8 grasp classes defined in [6], which are converted into probabilities \(G\) via softmax." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.472, + 0.432, + 0.487 + ], + "angle": 0, + "content": "3.2 Training supervision" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.498, + 0.787, + 0.543 + ], + "angle": 0, + "content": "We train WildHands using: (1) 3D supervision on \(\beta\), \(\theta_{\mathrm{local}}\), \(\theta_{\mathrm{global}}\), 3D hand keypoints & 2D projections of 3D keypoints in the image on lab datasets, and (2) hand masks and grasp labels on in-the-wild datasets."
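Before the loss definitions below, here is a minimal PyTorch sketch of the grasp classifier described in Sec. 3.1 (our own illustration: the class name GraspHead is hypothetical, and the input dimensionality assumes axis-angle theta_local for 15 joints, a 3-dimensional theta_global and a 10-dimensional beta, which the text does not state explicitly).

```python
import torch
import torch.nn as nn

class GraspHead(nn.Module):
    # 4-layer MLP (1024, 1024, 512, 128 nodes, ReLU after each) over the
    # predicted MANO parameters, followed by logits for the 8 grasp classes of [6].
    def __init__(self, in_dim=45 + 3 + 10, num_classes=8):
        # in_dim = 45 (theta_local) + 3 (theta_global) + 10 (beta): an assumption.
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(in_dim, 1024), nn.ReLU(),
            nn.Linear(1024, 1024), nn.ReLU(),
            nn.Linear(1024, 512), nn.ReLU(),
            nn.Linear(512, 128), nn.ReLU(),
            nn.Linear(128, num_classes),  # grasp logits
        )

    def forward(self, theta_local, theta_global, beta):
        z = torch.cat([theta_local, theta_global, beta], dim=-1)
        return self.mlp(z).softmax(dim=-1)  # grasp probabilities G
```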
+ }, + { + "type": "equation", + "bbox": [ + 0.258, + 0.555, + 0.788, + 0.576 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\theta} = \\left\\| \\theta - \\theta^{gt} \\right\\|_{2}^{2} \\qquad \\mathcal{L}_{\\beta} = \\left\\| \\beta - \\beta^{gt} \\right\\|_{2}^{2} \\qquad \\mathcal{L}_{cam} = \\left\\| (s, T) - (s, T)^{gt} \\right\\|_{2}^{2} \\tag{1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.258, + 0.578, + 0.788, + 0.6 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{kp3d} = \\left\\| J_{3D} - J_{3D}^{gt} \\right\\|_{2}^{2} \\qquad \\mathcal{L}_{kp2d} = \\left\\| J_{2D} - J_{2D}^{gt} \\right\\|_{2}^{2} \\tag{2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.258, + 0.602, + 0.788, + 0.619 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{mask} = \\left\\| M - M^{gt} \\right\\| \\qquad \\mathcal{L}_{grasp} = CE(G, G^{gt}) \\tag{3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.63, + 0.788, + 0.707 + ], + "angle": 0, + "content": "Here, \\(\\mathcal{L}_{\\theta}\\) is used for both \\(\\theta_{local}\\) & \\(\\theta_{global}\\), \\((s,T)\\) are the weak perspective camera parameters and \\(CE\\) represents the cross-entropy loss. \\(J_{2D} = K[J_{3D} + (T, f/s)]\\), where \\(J_{3D}\\) denotes the 3D hand keypoints in the MANO coordinate frame, \\(K\\) is the camera intrinsics, \\(f\\) is the focal length, and \\(s\\) is the scale factor of the weak perspective camera. Note that \\((\\cdot)^{gt}\\) represents the ground truth quantities. The total loss is:" + }, + { + "type": "equation", + "bbox": [ + 0.297, + 0.735, + 0.787, + 0.769 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L} = \\lambda_{\\theta} \\mathcal{L}_{\\theta} + \\lambda_{\\beta} \\mathcal{L}_{\\beta} + \\lambda_{cam} \\mathcal{L}_{cam} + \\lambda_{kp3d} \\mathcal{L}_{kp3d} + \\lambda_{kp2d} \\mathcal{L}_{kp2d} + \\lambda_{mask} \\mathcal{L}_{mask} + \\lambda_{grasp} \\mathcal{L}_{grasp} \\tag{4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.78, + 0.788, + 0.843 + ], + "angle": 0, + "content": "Lab datasets: For ARCTIC, we use \\(\\lambda_{\\theta} = 10.0, \\lambda_{\\beta} = 0.001, \\lambda_{kp3d} = 5.0, \\lambda_{kp2d} = 5.0, \\lambda_{cam} = 1.0\\) & set other loss weights to 0. AssemblyHands does not use the MANO representation for hands; instead, it provides labels for the 3D & 2D keypoints of 21 hand joints. So, we use \\(\\lambda_{kp3d} = 5, \\lambda_{kp2d} = 5\\) & set other loss weights to 0." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.346, + 0.115, + 0.733, + 0.131 + ], + "angle": 0, + "content": "3D Hand Pose Estimation in Everyday Egocentric Images" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.225 + ], + "angle": 0, + "content": "In-the-wild data: For Epic-Kitchens & Ego4D, we use hand masks & grasp labels as auxiliary supervision. While VISOR contains hand masks, grasp labels are not available. Ego4D does not contain either hand masks or grasp labels. To extract these labels, we use predictions from an off-the-shelf model [6] as pseudo ground truth. We use \\(\\lambda_{mask} = 10.0\\), \\(\\lambda_{grasp} = 0.1\\) & set other loss weights to 0."
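The per-dataset supervision above can be summarized in a small, hypothetical weight table; the numeric values mirror the text (Ego4D shares the Epic-Kitchens mask/grasp weights per the in-the-wild paragraph), while the dictionary keys are our naming.

```python
# Hypothetical per-dataset loss weights for Eq. (4); terms without
# supervision on a dataset are simply weighted by 0.
LOSS_WEIGHTS = {
    "arctic":        dict(theta=10.0, beta=0.001, cam=1.0, kp3d=5.0, kp2d=5.0, mask=0.0, grasp=0.0),
    "assemblyhands": dict(theta=0.0, beta=0.0, cam=0.0, kp3d=5.0, kp2d=5.0, mask=0.0, grasp=0.0),
    "epic_kitchens": dict(theta=0.0, beta=0.0, cam=0.0, kp3d=0.0, kp2d=0.0, mask=10.0, grasp=0.1),
    "ego4d":         dict(theta=0.0, beta=0.0, cam=0.0, kp3d=0.0, kp2d=0.0, mask=10.0, grasp=0.1),
}

def total_loss(terms, dataset):
    """Eq. (4): weighted sum of per-term losses (e.g. terms["kp3d"]) for one sample."""
    weights = LOSS_WEIGHTS[dataset]
    return sum(weights[name] * terms[name] for name in weights)
```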
+ }, + { + "type": "title", + "bbox": [ + 0.215, + 0.245, + 0.458, + 0.261 + ], + "angle": 0, + "content": "3.3 Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.27, + 0.788, + 0.39 + ], + "angle": 0, + "content": "Our model takes hand crops as input. During training, we use the ground truth bounding box for the hand crop (with small perturbation), estimated using the 2D keypoints & scaled by a fixed value of 1.5 to provide additional context around the hand (see the crop sketch below). At test time, we need to predict the bounding box of the hand in the image. On ARCTIC, we train a bounding box predictor by finetuning MaskRCNN [26]. This is also used for submitting the model to the ARCTIC leaderboard. For Epic-HandKps, we use the recently released hand detector from [6]. All the ablations use the ground truth bounding box for the hand crop." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.391, + 0.79, + 0.513 + ], + "angle": 0, + "content": "We use the training sets of ARCTIC (187K images) & AssemblyHands (360K), the VISOR split (30K) of EPIC, and 45K images from Ego4D kitchen videos to train our model. WildHands is trained jointly on different datasets with the input batch containing images from multiple datasets. All models are initialized from the ArcticNet-SF model trained on the allocentric split of the ARCTIC dataset [14]. All models are trained for 100 epochs with a learning rate of \\(1e-5\\). The multi-dataset training is done on 2 A40 GPUs with a batch size of 144 and the Adam optimizer [39]. More details are provided in the supplementary." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.535, + 0.377, + 0.552 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.566, + 0.789, + 0.658 + ], + "angle": 0, + "content": "We adopt a zero-shot evaluation strategy: 3D evaluation on lab datasets (H2O, AssemblyHands), evaluation of 2D projections of 3D hand predictions on Epic-HandKps & 3D evaluation on Ego-Exo4D [18]. We systematically analyze the effectiveness of design choices (using crops, KPE), different terms in the loss function, and different datasets used for training. We also report a system-level comparison on the ARCTIC leaderboard and with FrankMocap [59] & HaMeR [52]." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.68, + 0.342, + 0.694 + ], + "angle": 0, + "content": "4.1 Protocols" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.704, + 0.787, + 0.735 + ], + "angle": 0, + "content": "Training datasets: We consider 4 datasets for training: 2 lab datasets (ARCTIC & AssemblyHands) and 2 in-the-wild datasets (Epic-Kitchens & Ego4D)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.735, + 0.788, + 0.81 + ], + "angle": 0, + "content": "We select ARCTIC since it contains the largest range of hand pose variation [14] among existing datasets [4, 21, 22, 44, 67]. We use the egocentric split with more than \\(187\\mathrm{K}\\) images in the train set. We also use AssemblyHands since it is a large-scale dataset with more than \\(360\\mathrm{K}\\) egocentric images in the train split. Different combinations of these datasets are used for different experiments." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.788, + 0.842 + ], + "angle": 0, + "content": "We use egocentric images from Epic-Kitchens & Ego4D as in-the-wild data for training our model using auxiliary supervision. 
We use 30K training images" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.145, + 0.787, + 0.312 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.322, + 0.788, + 0.392 + ], + "angle": 0, + "content": "Fig. 3: Epic-HandKps annotations. We collect 2D joint annotations (shown in blue) for 5K in-the-wild egocentric images from Epic-Kitchens [8]. We show a few annotations here with images cropped around the hand. We also have the label for the joint corresponding to each keypoint. Note the heavy occlusion & large variation in dexterous poses of hands interacting with objects. More visualizations are in the supplementary." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.425, + 0.786, + 0.455 + ], + "angle": 0, + "content": "available in the VISOR split of Epic-Kitchens and 45K images from Ego4D. To extract hand masks and grasp labels, we use the off-the-shelf model from [6]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.457, + 0.787, + 0.533 + ], + "angle": 0, + "content": "Evaluation datasets: We consider 4 datasets for zero-shot generalization experiments: H2O [40], AssemblyHands, Epic-HandKps, and Ego-Exo4D. Note that these datasets cover a large variation in inputs: H2O contains RGB images in lab settings, AssemblyHands consists of grayscale images, and Epic-HandKps and Ego-Exo4D images show hands performing everyday activities in the wild." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.534, + 0.787, + 0.626 + ], + "angle": 0, + "content": "We use the validation splits of H2O and AssemblyHands with 29K and 32K images respectively. Since 3D hand annotations are difficult to collect for in-the-wild images, we instead collect 2D hand keypoint annotations on 5K egocentric images from the validation set of the VISOR split of Epic-Kitchens. We refer to this dataset as Epic-HandKps. See sample images from the dataset in Fig. 3. We also evaluate on the validation split of the Ego-Exo4D hand pose dataset." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.627, + 0.787, + 0.809 + ], + "angle": 0, + "content": "Epic-HandKps: Epic-HandKps contains 2D annotations for the 21 hand joints to facilitate evaluation of 2D projections of the predicted 3D keypoints. We sample 5K images from the validation set of the VISOR split of Epic-Kitchens and get the 21 joints annotated via Scale AI. We use the same joint convention as ARCTIC [14]. We crop the images around the hand using the segmentation masks in VISOR and provide the crops to annotators for labeling. Note that most of these images do not have all 21 keypoints visible. Following ARCTIC, we only consider images with at least 3 visible joints for evaluation. Moreover, since the models in our experiments require hand crops as input, we only evaluate on those images for which a hand bounding box is predicted by the recently released hand detector model from [6]. This leaves us with 4724 hand annotations, with 2697 right hands and 2027 left hands. We show some annotations in Fig. 3."
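The crop sketch referenced in Sec. 3.3: a minimal NumPy sketch of the training-time hand crop derived from 2D keypoints and enlarged by the fixed factor of 1.5. The square, center-plus-size parameterization is our assumption; the paper only specifies the keypoint-derived box and the scaling.

```python
import numpy as np

def hand_crop_box(kps2d, scale=1.5):
    """kps2d: (21, 2) array of 2D joint locations in pixels.
    Returns a square crop as (center_x, center_y, size)."""
    x0, y0 = np.min(kps2d, axis=0)        # tight box around the keypoints
    x1, y1 = np.max(kps2d, axis=0)
    cx, cy = (x0 + x1) / 2.0, (y0 + y1) / 2.0
    size = scale * max(x1 - x0, y1 - y0)  # enlarge for context around the hand
    return cx, cy, size                   # crop is later resized to 224 x 224
```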
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.789, + 0.842 + ], + "angle": 0, + "content": "Metrics: For 3D hand pose evaluation, we consider 2 metrics: (1) Mean Per-Joint Position Error (MPJPE): L2 distance (mm) between the 21 predicted" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.346, + 0.115, + 0.732, + 0.13 + ], + "angle": 0, + "content": "3D Hand Pose Estimation in Everyday Egocentric Images" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.145, + 0.788, + 0.215 + ], + "angle": 0, + "content": "Table 1: Benefits of using crops and KPE. Zero-shot generalization performance improves through the use of crops as input (HandNet uses crops vs. ArcticNet-SF uses the full image) and KPE helps (WildHands uses KPE with crops vs. HandNet only uses crops). All models use the same backbone and are trained on the same data in each setting for fair comparisons. \\(\\mathcal{D}\\): {ARCTIC, AssemblyHands, EPIC}." + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.226, + 0.784, + 0.321 + ], + "angle": 0, + "content": "
H2OAssemblyEgo-Exo4DEpic-HandKps
MPJPEMRRPEMPJPEMRRPEMPJPEL2 Error
Training dataDD - AssemblyDD - EPIC
ArcticNet-SF83.84325.55110.76326.94114.2435.02
HandNet38.06141.06109.88317.4989.7231.62
WildHands31.0849.4984.91164.9055.8411.05
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.35, + 0.787, + 0.44 + ], + "angle": 0, + "content": "& ground truth joints for each hand after subtracting the root joint (this captures the relative pose). (2) Mean Relative-Root Position Error (MRRPE): the metric distance between the root joints of left hand and right hand, following [13, 14, 48] (this takes the absolute pose into account). (3) For 2D evaluation on Epic-HandKps, we measure the L2 Error (in pixels for 224x224 image input) between ground truth keypoints & 2D projections of predicted 3D keypoints." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.44, + 0.788, + 0.744 + ], + "angle": 0, + "content": "Baselines: (1) ArcticNet-SF [14] is the single-image model released with the ARCTIC benchmark. It consists of a convolutional backbone (ResNet50 [27]) to process the input image, followed by a HMR [35]-style decoder to predict the hand and object poses. The predicted hand is represented using MANO [58] parameterization. (2) FrankMocap [59] is trained on multiple datasets collected in controlled settings and is a popular choice to apply in the wild setting [3,24,74]. It uses hand crops as input instead of the entire image, which is then processed by a convolutional backbone. The decoder is similar to HMR [35] which outputs MANO parameters for hand and training is done using 3D pose & 2D keypoints supervision. (3) HandNet: Since the training code is not available for FrankMocap, we are unable to train it in our setting. So, we implement a version of ArcticNet-SF which uses crops as input along with HMR-style decoder and train it in our setting using 3D & 2D supervision. This baseline is equivalent to WildHands without KPE and ArcticNet-SF with crops. (4) HandOccNet [51]: It takes crops as input and encodes them using a FPN [41] backbone. These are passed to transformer [71] modules to get a heatmap-based intermediate representation which is then decoded to MANO parameters. (5) HaMeR [52]: It also takes crops as input and processes them using a ViT [11] backbone. The features are then passed to a transformer decoder to predict the MANO parameters. Note that adversarial loss is not used for training any model in our setting." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.768, + 0.325, + 0.782 + ], + "angle": 0, + "content": "4.2 Results" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.787, + 0.843 + ], + "angle": 0, + "content": "We systematically study the impact of several factors: use of crops (Tab. 1) & KPE (Tab. 1, Tab. 5), perspective distortion (Tab. 4), auxiliary supervision (Tab. 3), training datasets (Tab. 6) on both convolutional (Tab. 1) & transformer" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.145, + 0.788, + 0.203 + ], + "angle": 0, + "content": "Table 2: Impact on transformer models. We investigate if our insights are useful for transformer models as well, i.e. if KPE helps on top of positional encodings used in transformers & if auxiliary supervision leads to better generalization for large capacity models. All models are trained on the same data in each setting for fair comparisons." + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.213, + 0.784, + 0.311 + ], + "angle": 0, + "content": "
H2OAssemblyEgo-Exo4DEpic-HandKps
MPJPEMRRPEMPJPEMRRPEMPJPEL2 Error
Training dataDD - AssemblyDD - EPIC
HandOccNet [51]60.58187.24110.28293.9280.9632.77
HandOccNet + KPE47.5772.25103.30232.8378.6413.54
HaMeR [52] (ViT)30.57113.2679.48227.5955.3625.48
HaMeR (ViT) + KPE24.1562.9971.64184.5547.029.77
" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.325, + 0.788, + 0.41 + ], + "angle": 0, + "content": "Table 3: Role of auxiliary supervision. We consider grasp and mask supervision from both Epic-Kitchens & Ego4D to train WildHands and show results in zero-shot generalization settings. Both grasp & mask supervision lead to improvements in 3D & 2D metrics, with hand masks providing larger gain compared to grasp labels. Even though auxiliary supervision is on Epic/Ego4D, it leads to improvements in all settings, i.e. benefits from training on broad data extend beyond datasets with auxiliary supervision." + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.421, + 0.784, + 0.535 + ], + "angle": 0, + "content": "
H2OAssemblyEgo-Exo4DEpic-HandKps
MPJPEMRRPEMPJPEMRRPEMPJPEL2 Error
WildHands (no aux)39.5277.0793.44208.3270.3917.07
+ EPIC grasp38.3476.0490.23180.8563.30-
+ EPIC mask34.2960.2387.94175.3156.41-
+ EPIC grasp + EPIC mask31.0849.4984.91164.9055.84-
+ Ego4D grasp41.06111.4786.44222.2369.738.22
+ Ego4D mask38.1757.9382.55145.7863.437.87
+ Ego4D grasp + Ego4D mask35.6262.1079.08148.1260.807.20
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.565, + 0.787, + 0.611 + ], + "angle": 0, + "content": "models (Tab. 2) through controlled experiments, i.e. all factors outside of what we want to check the affect of, are kept constant. All the results are reported in a zero-shot setting i.e. models are not trained on the evaluation dataset." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.612, + 0.788, + 0.703 + ], + "angle": 0, + "content": "Impact of crops: To understand the benefits due to using crops as input instead of full images, we compare ArcticNet-SF and HandNet in Tab. 1. The only difference between these two models is: ArcticNet-SF uses full image as input whereas HandNet uses crops as input. We see gains of \\(27.7\\%\\) in MPJPE, \\(29.7\\%\\) in MRRPE, \\(10.7\\%\\) in PA-MPJPE, and \\(9.7\\%\\) in 2D pose across different settings. This provides evidence for the utility of using crops as inputs [50,59]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.703, + 0.788, + 0.779 + ], + "angle": 0, + "content": "Benefits of KPE: In Tab. 1, HandNet & WildHands differ only in the use of KPE. This leads go improvements of \\(20.5\\%\\) in MPJPE, \\(56.4\\%\\) in MRRPE & \\(65.1\\%\\) in 2D pose. Compared to impact of crops, the gains are significantly higher in MRRPE (indicating better absolute pose) and on Epic-HandKps (leading to better generalization in the wild)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.78, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Role of auxiliary supervision: We extract hand masks & grasp labels from Epic-Kitchens & Ego4D and show their benefits in Tab. 3 in zero-shot evaluation settings. Mask supervision leads to gains of \\(8.5\\%\\) in MPJPE, \\(21.5\\%\\) in MRRPE and \\(55.5\\%\\) in 2D pose. Grasp labels improve MPJPE by \\(2.5\\%\\), MRRPE by \\(7.3\\%\\)" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.346, + 0.115, + 0.732, + 0.13 + ], + "angle": 0, + "content": "3D Hand Pose Estimation in Everyday Egocentric Images" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "table_caption", + "bbox": [ + 0.217, + 0.145, + 0.788, + 0.188 + ], + "angle": 0, + "content": "Table 4: Comparison of KPE with relevant approaches. KPE is more effective than other methods for dealing with perspective distortion, e.g. Perspective Correction [45], Perspective Crop Layers (PCL [76]), or other encodings, e.g. CamConv [12]" + }, + { + "type": "table", + "bbox": [ + 0.224, + 0.208, + 0.784, + 0.303 + ], + "angle": 0, + "content": "
H2OAssemblyEgo-Exo4DEpic-HandKps
MPJPEMRRPEMPJPEMRRPEMPJPEL2 Error
HandNet +
CamConv36.8667.6296.72180.7360.6917.35
Perspective Corr.39.95159.1359.10637.3267.4528.68
PCL [76]36.82158.8845.18483.9263.6528.21
KPE (WildHands)31.0849.4984.91164.9055.8411.05
" + }, + { + "type": "table_caption", + "bbox": [ + 0.217, + 0.323, + 0.788, + 0.392 + ], + "angle": 0, + "content": "Table 5: KPE Design Choices. We study the impact of different design choices of KPE on WildHands: adding KPE with the input instead of latent features (w/ input), removing intrinsics from KPE (no intrx), dense variant of KPE from [54]. WildHands uses sparse variant of KPE. We observe that all variants of KPE provide significant benefits compared to the model without KPE and the sparse variant performs the best." + }, + { + "type": "table", + "bbox": [ + 0.225, + 0.405, + 0.784, + 0.504 + ], + "angle": 0, + "content": "
H2OAssemblyEgo-Exo4DEpic-HandKps
MPJPEMRRPEMPJPEMRRPEMPJPEL2 Error
no KPE38.06141.06109.88317.4989.7231.62
KPE w/ input45.5180.9694.45252.3493.5617.30
KPE no intrx36.9761.9892.12246.4560.8011.63
KPE dense36.8680.5495.34201.3369.1111.24
KPE sparse31.0849.4984.91164.9055.8411.05
" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.54, + 0.788, + 0.616 + ], + "angle": 0, + "content": "and 2D pose by \\(4.3\\%\\). While both sources of supervision are effective, hand masks lead to larger gains. Combining both mask and grasp supervision leads to further improvements in both 3D & 2D poses across most settings. Moreover, auxiliary supervision on in-the-wild data also aids performance on lab datasets, suggesting that generalization gains from training on broad data are not dataset specific." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.62, + 0.788, + 0.681 + ], + "angle": 0, + "content": "Comparison of KPE with relevant approaches: In Tab. 4, we find KPE to be more effective than other methods for dealing with perspective distortion, e.g. Perspective Correction [45], Perspective Crop Layers (PCL [76]), or different forms of positional encoding, e.g. CamConv [12]." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.685, + 0.788, + 0.775 + ], + "angle": 0, + "content": "Impact on transformer models: We investigate if our insights are useful to transformer models as well, i.e. if KPE helps on top of positional encodings already used in transformers and if auxiliary supervision leads to better generalization for large capacity models. For this, we implement these components in HandOccNet [51] & HaMeR [52] and train these models in our settings. From the results in Tab. 2, we see consistent gains across all settings." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.779, + 0.788, + 0.84 + ], + "angle": 0, + "content": "KPE design choice: We ablate different variants of KPE in Tab. 5: adding KPE with the input instead of latent features (w/ input), removing intrinsics from KPE (no intrx) and dense variant of KPE from [54]. Note that the sparse variant performs the best, so we use sparse KPE in WildHands." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.788, + 0.173 + ], + "angle": 0, + "content": "Table 6: Effect of scaling up data. Training on more datasets leads to consistent improvements in models performance on held out datasets." + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.185, + 0.787, + 0.264 + ], + "angle": 0, + "content": "
H2OEgo-Exo4DEpic-HandKps
MPJPEMRRPEMPJPEL2 Error
ARCTIC47.3075.1787.7117.07
ARCTIC + Assembly39.5277.0770.3911.05
ARCTIC + Assembly + Ego4D (aux)35.6262.1060.807.20
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.292, + 0.787, + 0.398 + ], + "angle": 0, + "content": "Intrinsics during training: Intrinsics may not always be available in in-the-wild data used to derive auxiliary supervision. To study this setting, we consider in-the-wild Ego4D data since it contains images from multiple cameras, and do not assume access to intrinsics. In this case, we replace the KPE with a sinusoidal positional encoding of normalized image coordinates w.r.t. center. The Ego4D results in Tab. 3 follow this setting and we observe that auxiliary supervision from Ego4D provides benefits even in the absence of camera information." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.398, + 0.789, + 0.504 + ], + "angle": 0, + "content": "Scaling up training data: We ablate variants of WildHands trained with ARCTIC, ARCTIC + AssemblyHands, ARCTIC + Ego4D and ARCTIC + AssemblyHands + Ego4D in zero-shot settings on H2O, Ego-Exo4D, and EpicHandKps. We use 3D supervision on ARCTIC & AssemblyHands and auxiliary supervision (hand masks, grasp labels) on Ego4D. Tab. 6 shows consistent improvements in 3D and 2D metrics from both AssemblyHands and Ego4D datasets, suggesting that further scaling can improve performance further." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.527, + 0.46, + 0.543 + ], + "angle": 0, + "content": "4.3 System-level Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.553, + 0.546, + 0.658 + ], + "angle": 0, + "content": "While all of our earlier experiments are conducted in controlled settings, we also present a system-level comparison to other past methods, specifically to methods submitted to the ARC-TIC leaderboard (as of July 13, 2024), and with the publicly released models of FrankMo-cap [59] and HaMeR [52]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.659, + 0.546, + 0.735 + ], + "angle": 0, + "content": "ARCTIC Leaderboard: Our method achieves the best 3D hand pose on the ego-centric split, compared to recent state-of-the-art convolutional (e.g. ArcticNet-SF, DIGIT-HRNet, HMR-ResNet50) and transformer (e.g." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.735, + 0.785, + 0.763 + ], + "angle": 0, + "content": "JointTransformer) models (as of July 13, 2024). However, it is not possible to do a detailed comparison since most of these models are not public." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Comparison with FrankMocap [59] and HaMeR [52]: We show results with the publicly released models in Tab. 8. Note that HaMeR uses a ViT-H backbone which is much larger and more performant than the ResNet50 backbone used in WildHands. WildHands outperforms FrankMocap across all metrics and HaMeR on 3 of 6 metrics while being \\(10 \\times\\) smaller & trained on \\(5 \\times\\) less data." + }, + { + "type": "table_caption", + "bbox": [ + 0.554, + 0.569, + 0.788, + 0.624 + ], + "angle": 0, + "content": "Table 7: Leaderboard results. WildHands leads the 3D hand pose on the egocentric split of ARCTIC leaderboard (as of July 13, 2024)." + }, + { + "type": "table", + "bbox": [ + 0.56, + 0.626, + 0.787, + 0.726 + ], + "angle": 0, + "content": "
MethodMPJPEMRRPE
ArcticNet-SF19.1828.31
ArcticOccNet19.7729.75
DIGIT-HRNet16.7425.49
HMR-ResNet5020.3232.32
JointTransformer16.3326.07
WildHands15.7223.88
" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.346, + 0.115, + 0.733, + 0.13 + ], + "angle": 0, + "content": "3D Hand Pose Estimation in Everyday Egocentric Images" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.144, + 0.333, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.144, + 0.397, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.405, + 0.147, + 0.487, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.232, + 0.333, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.232, + 0.397, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.232, + 0.489, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.222, + 0.319, + 0.333, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.318, + 0.397, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.32, + 0.489, + 0.393 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.515, + 0.144, + 0.624, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.627, + 0.144, + 0.688, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.515, + 0.232, + 0.624, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.627, + 0.232, + 0.688, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.32, + 0.624, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.627, + 0.32, + 0.688, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.696, + 0.148, + 0.784, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.696, + 0.232, + 0.781, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.697, + 0.285, + 0.781, + 0.312 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.697, + 0.326, + 0.779, + 0.355 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.697, + 0.369, + 0.779, + 0.396 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.418, + 0.788, + 0.475 + ], + "angle": 0, + "content": "Fig. 4: Visualizations. We show projection of the predicted hand in the image & rendering of the hand mesh from 2 more views. WildHands predicts better hand poses from a single image than FrankMocap [59], HaMeR [14] and ArcticNet [14] in challenging egocentric scenarios involving occlusions and perspective distortion." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.513, + 0.377, + 0.527 + ], + "angle": 0, + "content": "4.4 Visualizations" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.56, + 0.788, + 0.682 + ], + "angle": 0, + "content": "We show qualitative comparisons of the hand pose, predicted by WildHands, with FrankMocap on Epic-HandKps (Fig. 4a) and ArcticNet-SF on ARCTIC (Fig. 4b). 
Looking at the projection of the mesh in the camera view and renderings of the mesh from additional views, we observe that WildHands is able to predict hand poses better in images involving occlusion and interaction, e.g. our model curls the fingers around the object in contact (Fig. 4), which is not the case for FrankMocap. We observe similar trends in ARCTIC (Fig. 4b), where our model predicts better hands in contact scenarios. More results are in the supplementary." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.685, + 0.788, + 0.731 + ], + "angle": 0, + "content": "Failure Cases: We observe that images in which the fingers are barely visible, e.g. when kneading dough in the top row (Fig. 5), or containing extreme poses, e.g. grasps in the bottom row (Fig. 5), are quite challenging for all models." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.734, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Limitations: KPE requires camera intrinsics to be known, which may not be available in certain scenarios. However, for many in-the-wild images, the metadata contains camera information. Also, we currently set the weights for different loss terms as hyperparameters, which may not be ideal since the sources of supervision are quite different, leading to different scales in loss values. It could be useful to use a learned weighting scheme, e.g. uncertainty-based loss weighting [2, 29, 38]." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.145, + 0.788, + 0.256 + ], + "angle": 0, + "content": "Table 8: Systems comparison. We evaluate against publicly released models: FrankMocap [59] (a popular method for 3D hand pose estimation), and HaMeR [52]. FrankMocap uses a ResNet-50 backbone and is trained on 6 lab datasets. HaMeR uses a ViT-H [11] backbone and is trained on 7 lab + 3 in-the-wild + HInt datasets across nearly 3M frames. The WildHands model uses a ResNet-50 backbone and is trained on 3 datasets. WildHands outperforms FrankMocap across all metrics and HaMeR on 3 of 6 metrics while being \\(10 \\times\\) smaller & trained on \\(5 \\times\\) less data. We expect that scaling up the backbone and the datasets used to train WildHands can lead to even stronger performance." + }, + { + "type": "table", + "bbox": [ + 0.224, + 0.268, + 0.784, + 0.335 + ], + "angle": 0, + "content": "
H2OAssemblyEgo-Exo4DEpic-HandKps
MPJPEMRRPEMPJPEMRRPEMPJPEL2 Error
FrankMocap [59] (ResNet-50, 6 lab)58.51-97.59-175.9113.33
HaMeR [52] (ViT-H, 7 lab+3 wild+HInt)23.82147.8745.49334.52116.464.56
WildHands (ResNet-50, 2 lab + 1 wild)31.0849.4980.40148.1255.847.20
" + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.353, + 0.334, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.353, + 0.397, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.366, + 0.485, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.222, + 0.441, + 0.335, + 0.526 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.441, + 0.397, + 0.526 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.456, + 0.485, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.353, + 0.632, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.353, + 0.695, + 0.439 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.703, + 0.361, + 0.789, + 0.441 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.441, + 0.632, + 0.526 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.441, + 0.695, + 0.526 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.703, + 0.456, + 0.782, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.541, + 0.788, + 0.571 + ], + "angle": 0, + "content": "Fig. 5: Failure cases. We observe that images with (top) barely visible fingers, e.g. kneading dough or (bottom) extreme grasp poses are challenging for all models." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.605, + 0.36, + 0.621 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.655, + 0.788, + 0.746 + ], + "angle": 0, + "content": "We present WildHands, a system that adapts best practices from the literature: using crops as input, intrinsics-aware positional encoding, auxiliary sources of supervision and multi-dataset training, for robust prediction of 3D hand poses on egocentric images in the wild. Experiments on both lab datasets and in-the-wild settings show the effectiveness of WildHands. As future direction, WildHands could be used to scale up learning robot policies from human interactions." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Acknowledgements: We thank Arjun Gupta, Shaowei Liu, Anand Bhattachad & Kashyap Chitta for feedback on the draft, and David Forsyth for useful discussion. This material is based upon work supported by NSF (IIS2007035), NASA (80NSSC21K1030), DARPA (Machine Common Sense program), Amazon Research Award, NVIDIA Academic Hardware Grant, and the NCSA Delta System (supported by NSF OCI 2005572 and the State of Illinois)." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.346, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D Hand Pose Estimation in Everyday Egocentric Images" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.145, + 0.323, + 0.16 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.176, + 0.785, + 0.218 + ], + "angle": 0, + "content": "1. Ballan, L., Taneja, A., Gall, J., Gool, L.V., Pollefeys, M.: Motion capture of hands in action using discriminative salient points. 
In: Proceedings of the European Conference on Computer Vision (ECCV) (2012)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.219, + 0.787, + 0.272 + ], + "angle": 0, + "content": "2. Brazil, G., Kumar, A., Straub, J., Ravi, N., Johnson, J., Gkioxari, G.: Omni3d: A large benchmark and model for 3d object detection in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 13154-13164 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.273, + 0.785, + 0.314 + ], + "angle": 0, + "content": "3. Cao, Z., Radosavovic, I., Kanazawa, A., Malik, J.: Reconstructing hand-object interactions in the wild. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.315, + 0.785, + 0.37 + ], + "angle": 0, + "content": "4. Chao, Y., Yang, W., Xiang, Y., Molchanov, P., Handa, A., Tremblay, J., Narang, Y.S., Wyk, K.V., Iqbal, U., Birchfield, S., Kautz, J., Fox, D.: Dexycb: A benchmark for capturing hand grasping of objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.371, + 0.785, + 0.412 + ], + "angle": 0, + "content": "5. Chen, Z., Zhang, H.: Learning implicit fields for generative shape modeling. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.412, + 0.785, + 0.453 + ], + "angle": 0, + "content": "6. Cheng, T., Shan, D., Hassen, A.S., Higgins, R.E.L., Fouhey, D.: Towards a richer 2d understanding of hands at scale. In: Advances in Neural Information Processing Systems (NeurIPS) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.453, + 0.785, + 0.508 + ], + "angle": 0, + "content": "7. Damen, D., Doughty, H., Farinella, G.M., Fidler, S., Furnari, A., Kazakos, E., Moltisanti, D., Munro, J., Perrett, T., Price, W., Wray, M.: Scaling egocentric vision: The epic-kitchens dataset. In: Proceedings of the European Conference on Computer Vision (ECCV) (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.509, + 0.785, + 0.564 + ], + "angle": 0, + "content": "8. Damen, D., Doughty, H., Farinella, G.M., Fidler, S., Furnari, A., Kazakos, E., Moltisanti, D., Munro, J., Perrett, T., Price, W., Wray, M.: The epic-kitchens dataset: Collection, challenges and baselines. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.565, + 0.785, + 0.619 + ], + "angle": 0, + "content": "9. Damen, D., Doughty, H., Farinella, G.M., Fidler, S., Furnari, A., Kazakos, E., Moltisanti, D., Munro, J., Perrett, T., Price, W., et al.: Scaling egocentric vision: The epic-kitchens dataset. In: Proceedings of the European Conference on Computer Vision (ECCV) (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.62, + 0.785, + 0.66 + ], + "angle": 0, + "content": "10. Darkhalil, A., Shan, D., Zhu, B., Ma, J., Kar, A., Higgins, R., Fidler, S., Fouhey, D., Damen, D.: Epic-kitchens visor benchmark: Video segmentations and object relations. In: NeurIPS Track on Datasets and Benchmarks (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.661, + 0.785, + 0.715 + ], + "angle": 0, + "content": "11. 
Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.716, + 0.785, + 0.77 + ], + "angle": 0, + "content": "12. Facil, J.M., Ummenhofer, B., Zhou, H., Montesano, L., Brox, T., Civera, J.: Camconvs: Camera-aware multi-scale convolutions for single-view depth. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 11826-11835 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.771, + 0.785, + 0.813 + ], + "angle": 0, + "content": "13. Fan, Z., Spurr, A., Kocabas, M., Tang, S., Black, M.J., Hilliges, O.: Learning to disambiguate strongly interacting hands via probabilistic per-pixel part segmentation. In: Proceedings of the International Conference on 3D Vision (3DV) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.813, + 0.785, + 0.84 + ], + "angle": 0, + "content": "14. Fan, Z., Taheri, O., Tzionas, D., Kocabas, M., Kaufmann, M., Black, M.J., Hilliges, O.: ARCTIC: A dataset for dexterous bimanual hand-object manipulation. In:" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.176, + 0.787, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.241, + 0.147, + 0.786, + 0.175 + ], + "angle": 0, + "content": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.175, + 0.787, + 0.215 + ], + "angle": 0, + "content": "15. Freeman, W.T., Roth, M.: Orientation histograms for hand gesture recognition. In: International workshop on automatic face and gesture recognition. vol. 12, pp. 296-301. Citeseer (1995)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.216, + 0.786, + 0.257 + ], + "angle": 0, + "content": "16. Garcia-Hernando, G., Yuan, S., Baek, S., Kim, T.K.: First-person hand action benchmark with rgb-d videos and 3d hand pose annotations. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.257, + 0.787, + 0.311 + ], + "angle": 0, + "content": "17. Grauman, K., Westbury, A., Byrne, E., Chavis, Z., Furnari, A., Girdhar, R., Hamburger, J., Jiang, H., Liu, M., Liu, X., et al.: Ego4d: Around the world in 3,000 hours of egocentric video. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.311, + 0.787, + 0.365 + ], + "angle": 0, + "content": "18. Grauman, K., Westbury, A., Torresani, L., Kitani, K., Malik, J., Afouras, T., Ashutosh, K., Baiyya, V., Bansal, S., Boote, B., et al.: Ego-exo4d: Understanding skilled human activity from first-and third-person perspectives. arXiv preprint arXiv:2311.18259 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.365, + 0.787, + 0.406 + ], + "angle": 0, + "content": "19. Guizilini, V., Vasiljevic, I., Chen, D., Ambrus, R., Gaidon, A.: Towards zero-shot scale-aware monocular depth estimation. 
In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.406, + 0.787, + 0.447 + ], + "angle": 0, + "content": "20. Guizilini, V., Vasiljevic, I., Fang, J., Ambrus, R., Shakhnarovich, G., Walter, M.R., Gaidon, A.: Depth field networks for generalizable multi-view scene representation. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.447, + 0.787, + 0.488 + ], + "angle": 0, + "content": "21. Hampali, S., Rad, M., Oberweger, M., Lepetit, V.: Honnotate: A method for 3d annotation of hand and object poses. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.488, + 0.787, + 0.542 + ], + "angle": 0, + "content": "22. Hampali, S., Sarkar, S.D., Rad, M., Lepetit, V.: Keypoint transformer: Solving joint identification in challenging hands and object interactions for accurate 3d pose estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.542, + 0.787, + 0.569 + ], + "angle": 0, + "content": "23. Hartley, R., Zisserman, A.: Multiple view geometry in computer vision. Cambridge university press (2003)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.569, + 0.787, + 0.623 + ], + "angle": 0, + "content": "24. Hasson, Y., Tekin, B., Bogo, F., Laptev, I., Pollefeys, M., Schmid, C.: Leveraging photometric consistency over time for sparsely supervised hand-object reconstruction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.623, + 0.787, + 0.678 + ], + "angle": 0, + "content": "25. Hasson, Y., Varol, G., Tzionas, D., Kalevatykh, I., Black, M.J., Laptev, I., Schmid, C.: Learning joint reconstruction of hands and manipulated objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.678, + 0.787, + 0.704 + ], + "angle": 0, + "content": "26. He, K., Gkioxari, G., Dollar, P., Girshick, R.B.: Mask R-CNN. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.704, + 0.787, + 0.745 + ], + "angle": 0, + "content": "27. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.745, + 0.787, + 0.786 + ], + "angle": 0, + "content": "28. Heap, T., Hogg, D.: Towards 3d hand tracking using a deformable model. In: Proceedings of the Second International Conference on Automatic Face and Gesture Recognition. pp. 140-145. IEEE (1996)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.786, + 0.787, + 0.84 + ], + "angle": 0, + "content": "29. Hu, A., Murez, Z., Mohan, N., Dudas, S., Hawke, J., Badrinarayanan, V., Cipolla, R., Kendall, A.: FIERY: future instance prediction in bird's-eye view from surround monocular cameras. 
In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.346, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D Hand Pose Estimation in Everyday Egocentric Images" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.19 + ], + "angle": 0, + "content": "30. Ioffe, S., Szegedy, C.: Batch normalization: Accelerating deep network training by reducing internal covariate shift. In: Bach, F.R., Blei, D.M. (eds.) Proceedings of the International Conference on Machine Learning (ICML) (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.19, + 0.787, + 0.218 + ], + "angle": 0, + "content": "31. Ivashechkin, M., Mendez, O., Bowden, R.: Denoising diffusion for 3d hand pose estimation from images. arXiv 2308.09523 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.218, + 0.787, + 0.275 + ], + "angle": 0, + "content": "32. Jiang, C., Xiao, Y., Wu, C., Zhang, M., Zheng, J., Cao, Z., Zhou, J.T.: A2j-transformer: Anchor-to-joint transformer network for 3d interacting hand pose estimation from a single RGB image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.276, + 0.787, + 0.331 + ], + "angle": 0, + "content": "33. Jiang, Z., Rahmani, H., Black, S., Williams, B.M.: A probabilistic attention model with occlusion-aware texture regression for 3d hand reconstruction from a single RGB image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.332, + 0.787, + 0.374 + ], + "angle": 0, + "content": "34. Kanazawa, A., Black, M.J., Jacobs, D.W., Malik, J.: End-to-end recovery of human shape and pose. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.374, + 0.787, + 0.416 + ], + "angle": 0, + "content": "35. Kanazawa, A., Tulsiani, S., Efros, A.A., Malik, J.: Learning category-specific mesh reconstruction from image collections. In: Proceedings of the European Conference on Computer Vision (ECCV) (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.416, + 0.787, + 0.459 + ], + "angle": 0, + "content": "36. Karunratanakul, K., Yang, J., Zhang, Y., Black, M.J., Muandet, K., Tang, S.: Grasping field: Learning implicit representations for human grasps. In: Proceedings of the International Conference on 3D Vision (3DV) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.459, + 0.787, + 0.487 + ], + "angle": 0, + "content": "37. Kato, H., Ushiku, Y., Harada, T.: Neural 3d mesh renderer. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.487, + 0.787, + 0.53 + ], + "angle": 0, + "content": "38. Kendall, A., Gal, Y., Cipolla, R.: Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.53, + 0.787, + 0.572 + ], + "angle": 0, + "content": "39. 
Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. In: Bengio, Y., LeCun, Y. (eds.) Proceedings of the International Conference on Learning Representations (ICLR) (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.572, + 0.787, + 0.614 + ], + "angle": 0, + "content": "40. Kwon, T., Tekin, B., Stühmer, J., Bogo, F., Pollefeys, M.: H2o: Two hands manipulating objects for first person interaction recognition. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.614, + 0.787, + 0.657 + ], + "angle": 0, + "content": "41. Lin, T., Dollár, P., Girshick, R.B., He, K., Hariharan, B., Belongie, S.J.: Feature pyramid networks for object detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.657, + 0.787, + 0.699 + ], + "angle": 0, + "content": "42. Liu, S., Chen, W., Li, T., Li, H.: Soft rasterizer: A differentiable renderer for image-based 3d reasoning. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.7, + 0.787, + 0.742 + ], + "angle": 0, + "content": "43. Liu, S., Li, T., Chen, W., Li, H.: A general differentiable mesh renderer for image-based 3d reasoning. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.742, + 0.787, + 0.798 + ], + "angle": 0, + "content": "44. Liu, Y., Liu, Y., Jiang, C., Lyu, K., Wan, W., Shen, H., Liang, B., Fu, Z., Wang, H., Yi, L.: HOI4D: A 4d egocentric dataset for category-level human-object interaction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.799, + 0.787, + 0.842 + ], + "angle": 0, + "content": "45. Mehta, D., Rhodin, H., Casas, D., Fua, P., Sotnychenko, O., Xu, W., Theobalt, C.: Monocular 3d human pose estimation in the wild using improved CNN supervision. In: Proceedings of the International Conference on 3D Vision (3DV) (2017)" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.842 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "18" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.147, + 0.787, + 0.189 + ], + "angle": 0, + "content": "46. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.19, + 0.786, + 0.217 + ], + "angle": 0, + "content": "47. Miyato, T., Jaeger, B., Welling, M., Geiger, A.: GTA: A geometry-aware attention mechanism for multi-view transformers. arXiv (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.218, + 0.787, + 0.259 + ], + "angle": 0, + "content": "48. Moon, G., Yu, S., Wen, H., Shiratori, T., Lee, K.M.: Interhand2.6m: A dataset and baseline for 3d interacting hand pose estimation from a single RGB image. 
In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.26, + 0.787, + 0.286 + ], + "angle": 0, + "content": "49. Nair, V., Hinton, G.E.: Rectified linear units improve restricted boltzmann machines. In: Proceedings of the International Conference on Machine Learning (ICML) (2010)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.287, + 0.787, + 0.342 + ], + "angle": 0, + "content": "50. Ohkawa, T., He, K., Sener, F., Hodan, T., Tran, L., Keskin, C.: Assemblyhands: Towards egocentric activity understanding via 3d hand pose estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 12999-13008 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.343, + 0.787, + 0.383 + ], + "angle": 0, + "content": "51. Park, J., Oh, Y., Moon, G., Choi, H., Lee, K.M.: Handoccnet: Occlusion-robust 3d hand mesh estimation network. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.384, + 0.787, + 0.424 + ], + "angle": 0, + "content": "52. Pavlakos, G., Shan, D., Radosavovic, I., Kanazawa, A., Fouhey, D., Malik, J.: Reconstructing hands in 3d with transformers. arXiv preprint arXiv:2312.05251 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.425, + 0.787, + 0.48 + ], + "angle": 0, + "content": "53. Potamias, R.A., Ploumpis, S., Moschoglou, S., Triantafyllou, V., Zafeiriou, S.: Handy: Towards a high fidelity 3d hand shape and appearance model. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 4670-4680 (June 2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.481, + 0.787, + 0.508 + ], + "angle": 0, + "content": "54. Prakash, A., Gupta, A., Gupta, S.: Mitigating perspective distortion-induced shape ambiguity in image crops. arXiv 2312.06594 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.509, + 0.787, + 0.536 + ], + "angle": 0, + "content": "55. Ravi, N., Reizenstein, J., Novotny, D., Gordon, T., Lo, W.Y., Johnson, J., Gkioxari, G.: Accelerating 3d deep learning with pytorch3d. arXiv:2007.08501 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.537, + 0.787, + 0.577 + ], + "angle": 0, + "content": "56. Rehg, J.M., Kanade, T.: Visual tracking of high dof articulated structures: an application to human hand tracking. In: Proceedings of the European Conference on Computer Vision (ECCV) (1994)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.578, + 0.787, + 0.619 + ], + "angle": 0, + "content": "57. Rogez, G., Khademi, M., Supancic III, J., Montiel, J.M.M., Ramanan, D.: 3d hand pose detection in egocentric rgb-d images. In: Proceedings of the European Conference on Computer Vision (ECCV) (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.62, + 0.787, + 0.646 + ], + "angle": 0, + "content": "58. Romero, J., Tzionas, D., Black, M.J.: Embodied hands: Modeling and capturing hands and bodies together. ACM Transactions on Graphics (ToG) (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.647, + 0.787, + 0.688 + ], + "angle": 0, + "content": "59. Rong, Y., Shiratori, T., Joo, H.: Frankmocap: Fast monocular 3D hand and body motion capture by regression and integration. 
Proceedings of the IEEE International Conference on Computer Vision Workshops (ICCV Workshops) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.689, + 0.787, + 0.743 + ], + "angle": 0, + "content": "60. Sener, F., Chatterjee, D., Shelepov, D., He, K., Singhania, D., Wang, R., Yao, A.: Assembly101: A large-scale multi-view video dataset for understanding procedural activities. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.744, + 0.787, + 0.785 + ], + "angle": 0, + "content": "61. Shan, D., Geng, J., Shu, M., Fouhey, D.F.: Understanding human hands in contact at internet scale. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.786, + 0.787, + 0.84 + ], + "angle": 0, + "content": "62. Sharp, T., Keskin, C., Robertson, D., Taylor, J., Shotton, J., Kim, D., Rhemann, C., Leichter, I., Vinnikov, A., Wei, Y., et al.: Accurate, robust, and flexible real-time hand tracking. In: Proceedings of the 33rd annual ACM conference on human factors in computing systems. pp. 3633-3642 (2015)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.346, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D Hand Pose Estimation in Everyday Egocentric Images" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "19" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.189 + ], + "angle": 0, + "content": "63. Simon, T., Joo, H., Matthews, I.A., Sheikh, Y.: Hand keypoint detection in single images using multiview bootstrapping. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.189, + 0.788, + 0.23 + ], + "angle": 0, + "content": "64. Sridhar, S., Mueller, F., Zollhöfer, M., Casas, D., Oulasvirta, A., Theobalt, C.: Real-time joint tracking of a hand manipulating an object from rgb-d input. In: Proceedings of the European Conference on Computer Vision (ECCV) (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.23, + 0.788, + 0.27 + ], + "angle": 0, + "content": "65. Sridhar, S., Oulasvirta, A., Theobalt, C.: Interactive markerless articulated hand motion tracking using RGB and depth data. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2013)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.27, + 0.788, + 0.31 + ], + "angle": 0, + "content": "66. Sun, X., Wei, Y., Liang, S., Tang, X., Sun, J.: Cascaded hand pose regression. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.31, + 0.788, + 0.351 + ], + "angle": 0, + "content": "67. Taheri, O., Ghorbani, N., Black, M.J., Tzionas, D.: GRAB: A dataset of whole-body human grasping of objects. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.351, + 0.788, + 0.391 + ], + "angle": 0, + "content": "68. Tompson, J., Stein, M., Lecun, Y., Perlin, K.: Real-time continuous pose recovery of human hands using convolutional networks. 
ACM Transactions on Graphics (ToG) 33(5), 1-10 (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.391, + 0.788, + 0.445 + ], + "angle": 0, + "content": "69. Tulsiani, S., Zhou, T., Efros, A.A., Malik, J.: Multi-view supervision for single-view reconstruction via differentiable ray consistency. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 2626-2634 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.445, + 0.788, + 0.485 + ], + "angle": 0, + "content": "70. Tzionas, D., Gall, J.: 3d object reconstruction from hand-object interactions. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.484, + 0.788, + 0.525 + ], + "angle": 0, + "content": "71. Vaswani, A., Shazeer, N.M., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. Advances in Neural Information Processing Systems (NeurIPS) (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.525, + 0.788, + 0.553 + ], + "angle": 0, + "content": "72. Wan, C., Yao, A., Gool, L.V.: Hand pose estimation from local surface normals. In: Proceedings of the European Conference on Computer Vision (ECCV) (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.553, + 0.788, + 0.592 + ], + "angle": 0, + "content": "73. Yang, L., Li, K., Zhan, X., Wu, F., Xu, A., Liu, L., Lu, C.: Oakink: A large-scale knowledge repository for understanding hand-object interaction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.592, + 0.788, + 0.632 + ], + "angle": 0, + "content": "74. Ye, Y., Gupta, A., Tulsiani, S.: What's in your hands? 3D reconstruction of generic objects in hands. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.632, + 0.788, + 0.673 + ], + "angle": 0, + "content": "75. Yifan, W., Doersch, C., Arandjelovic, R., Carreira, J., Zisserman, A.: Input-level inductive biases for 3d reconstruction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.673, + 0.788, + 0.712 + ], + "angle": 0, + "content": "76. Yu, F., Salzmann, M., Fua, P., Rhodin, H.: Pcls: Geometry-aware neural reconstruction of 3d pose with perspective crop layers. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.712, + 0.788, + 0.739 + ], + "angle": 0, + "content": "77. Zhang, X., Li, Q., Mo, H., Zhang, W., Zheng, W.: End-to-end hand mesh recovery from a monocular rgb image. In: ICCV (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.739, + 0.788, + 0.779 + ], + "angle": 0, + "content": "78. Zimmermann, C., Brox, T.: Learning to estimate 3d hand pose from single rgb images. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.779, + 0.788, + 0.835 + ], + "angle": 0, + "content": "79. Zimmermann, C., Ceylan, D., Yang, J., Russell, B.C., Argus, M.J., Brox, T.: Freihand: A dataset for markerless capture of hand pose and shape from single RGB images. 
In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2019)" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.835 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/3D Hand Pose Estimation in Everyday Egocentric Images/332a64fb-af20-4857-af36-e23eeaad9f91_origin.pdf b/2024/3D Hand Pose Estimation in Everyday Egocentric Images/332a64fb-af20-4857-af36-e23eeaad9f91_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d2a694605e1e46865cceacf4ce6fc27eb2c00f5a --- /dev/null +++ b/2024/3D Hand Pose Estimation in Everyday Egocentric Images/332a64fb-af20-4857-af36-e23eeaad9f91_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1051e6a84652634dd1f4f7151d9fb031510d2da940bef53b16a7bd36e3c6402 +size 2078203 diff --git a/2024/3D Hand Pose Estimation in Everyday Egocentric Images/full.md b/2024/3D Hand Pose Estimation in Everyday Egocentric Images/full.md new file mode 100644 index 0000000000000000000000000000000000000000..7d205d673262e422e301536a22fc2597c782a9d9 --- /dev/null +++ b/2024/3D Hand Pose Estimation in Everyday Egocentric Images/full.md @@ -0,0 +1,381 @@ +# 3D Hand Pose Estimation in Everyday Egocentric Images + +Aditya Prakash, Ruisen Tu, Matthew Chang, and Saurabh Gupta + +University of Illinois Urbana-Champaign {adityap9,ruisent2,mc48,saurabhg}@illinois.edu https://bit.ly/WildHands + +Abstract. 3D hand pose estimation in everyday egocentric images is challenging for several reasons: poor visual signal (occlusion from the object of interaction, low resolution & motion blur), large perspective distortion (hands are close to the camera), and lack of 3D annotations outside of controlled settings. While existing methods often use hand crops as input to focus on fine-grained visual information to deal with poor visual signal, the challenges arising from perspective distortion and lack of 3D annotations in the wild have not been systematically studied. We focus on this gap and explore the impact of different practices, i.e. crops as input, incorporating camera information, auxiliary supervision, scaling up datasets. We provide several insights that are applicable to both convolutional and transformer models, leading to better performance. Based on our findings, we also present WildHands, a system for 3D hand pose estimation in everyday egocentric images. Zero-shot evaluation on 4 diverse datasets (H2O, AssemblyHands, Epic-Kitchens, Ego-Exo4D) demonstrate the effectiveness of our approach across 2D and 3D metrics, where we beat past methods by $7.4\% - 66\%$ . In system level comparisons, WildHands achieves the best 3D hand pose on ARCTIC egocentric split, outperforms FrankMocap across all metrics and HaMeR on 3 out of 6 metrics while being $10\times$ smaller and trained on $5\times$ less data. + +Keywords: 3D Hand Pose $\cdot$ Egocentric Vision $\cdot$ 3D from single image + +# 1 Introduction + +Understanding egocentric hands in 3D enables applications in AR/VR, robotics. While several works have studied exocentric hands [52, 59], no existing approach performs well in diverse egocentric settings outside of lab setups. We focus on this gap & study the impact of common practices, i.e. crops as input, camera information, auxiliary supervision, scaling up datasets, for predicting absolute 3D hand pose from a single egocentric image. 
We identify 2 important factors: a) modeling the 3D to 2D projection during imaging of the hand in egocentric views, b) scaling up training to diverse datasets by leveraging auxiliary supervision. + +Let's unpack each component. Existing methods often operate on image crops, assume that the image crop is located at the center of the camera's field of view + +![](images/c8a983734fa0ef16e6c36a0732437bebbeeaba8f713b1adf3b594cc895116ebc.jpg) + +![](images/555546651e1c4816f4b6ff6e9cf099150f687df5a3e07e212e2248e7b139d81a.jpg) + +![](images/61164706a037dad281dc7502b352a79e52a76c99510920b454c54f1fa8b52440.jpg) + +![](images/8be37e6ffabc3340f1e52de99851bb95e6214c52b556679afb04c699685b2d2c.jpg) +Fig. 1: WildHands predicts the 3D shape, 3D articulation and 3D placement of the hand in the camera frame from a single in-the-wild egocentric RGB image and camera intrinsics. It produces better 3D output compared to FrankMocap [59] in occlusion scenarios and is more adept at dealing with perspective distortion than HaMeR [52], in challenging egocentric hand-object interactions from Epic-Kitchens [9] dataset. + +![](images/0c315029780fb05164e7405d7ff604662bb8ee8e77baf134fd84057d9d43afda.jpg) + +![](images/5966712a01e6aa30dcacaf998c6a2b8d726d7d52a09a6f846d752d5d7d2b9302.jpg) + +![](images/9e13b709292a6fe0d5cde2e92cbeb29c9dd31bb4817ed30da070773203a666ce.jpg) + +![](images/8561fecb9e3b3ac613d0b6c554d14a7855a980738fd0d67c9bd8c73c744a255e.jpg) + +![](images/96a5cdc4b68d0090e5638904945dcb220486759e955b8cb29d0f277835e3758f.jpg) + +![](images/7effda99691b5585b7dd0befbd10e688d59b53ef2b5821e1b62c281d6789beb4.jpg) + +![](images/3dfd1dca45072040d1f7982366cd2405e978ed375a8db8424c25670f2c6b8099.jpg) + +![](images/ea74be9ec6806b3031ad0a2e623ca017f9369bc239879d729b7561080cc8f420.jpg) + +![](images/a8c77955346ec46f4b78f8bc8da5822802ff64ab449087f889c0268071df89fd.jpg) + +with a made-up focal length. These choices are reasonable for exocentric settings where the location of the hand in the image does not provide any signal for the hand articulation; and perspective distortion effects are minimal as the hand is far away & occupies a relatively small part of the camera's field of view. However, these assumptions are sub-optimal for processing egocentric images. + +Due to the biomechanics of the hand, its location in egocentric images carries information about its pose. Also, as the hand is closer to the camera in egocentric settings, it undergoes a lot more perspective distortion than in exocentric images. 3D hand pose that correctly explains the 2D hand appearance in one part of an egocentric image, may not be accurate for another part of the image. Thus, the location of the hand in the image must be taken into account while making 3D predictions. This suggests feeding the 2D location of the hand in the image to the network. However, the notion of 2D location in the image frame is camera specific. The more fundamental quantity that generalizes across cameras, is the angular location in the camera's field of view. We thus adopt the recent KPE embedding [54] to augment hand crop features with sinusoidal encodings of its location in the camera's field of view & find this to improve performance. + +However, just processing image crops the right way is not sufficient for generalization. The model also needs to be trained on broad & diverse datasets outside of lab settings. This is not easy as 3D hand pose is difficult to directly annotate in images. 
We thus turn to joint training on 3D supervision from lab datasets and 2D auxiliary supervision on in-the-wild data in the form of 2D hand masks [6,10] & grasp labels [6]. To absorb supervision from segmentation labels, we differentiably render [42] the predicted 3D hand into images and back-propagate the loss through the rendering. For grasp supervision, we note + +that hand pose is indicative of the grasp type and use supervision from a grasp classifier that takes the predicted 3D hand pose as input. + +Lack of accurate 3D annotations outside of lab settings makes it challenging to assess the generalization capabilities. To this end, we adopt a zero-shot evaluation strategy. Even though a single lab dataset has limited diversity, a model that performs well on a lab dataset without having seen any images from it likely generalizes well. Furthermore, we collect Epic-HandKps, containing 2D hand joint annotations on 5K images from the VISOR [10] split of in-the-wild Epic-Kitchens [7] to evaluate the 2D projections of the predicted 3D hand pose on everyday images. We also consider the 3D hand poses provided evaluate on the concurrent Ego-Exo4D [18]. We believe that these evaluations together comprehensively test the generalization capabilities of different models. + +Our experiments (Sec. 4) show the utility of (1) using crops (vs. full images), (2) inputting 2D crop location (vs. not), (3) encoding the crop's location in camera's field of view (vs. in the image frame), and (4) 2D mask & grasp supervision. We apply these insights to both convolutional and transformer models, leading to better performance. We also present WildHands (Fig. 1) which outperforms FrankMocap [59] on egocentric images and is competitive to concurrent HaMeR [52] while being $10 \times$ smaller & trained with $5 \times$ less data. + +# 2 Related Work + +Hand pose estimation & reconstruction: Several decades of work [15,28,56] have studied different aspects: 2D pose [4,63] vs. 3D pose [40,50,68,72] vs. mesh [1, 25,65], RGB [14,22,25] vs. RGBD [57,62-64,66,68] inputs, egocentric [14,50] vs. allocentric [14,21,22], hands in isolation [48,79] vs. interaction with objects [21, 44,73], feed-forward prediction [14,22,25,60] vs. test-time optimization [3,24]. Driven by the advances in parametric hand models [53,58], recent work has moved past 3D joint estimation towards 3D mesh recovery [14,22,25,52,59,77] in 3 contexts: single hands in isolation [78], hands interacting with objects [14,70] and two hands interacting with one another [22,48]. Jointly reasoning about hands & objects has proved fruitful to improve both hand & object reconstruction [25,36, 74]. While several expressive models focus on 3D hand pose estimation in lab settings [22,31-33,60], only a very few works [52] tackle the problem in everyday egocentric images as in Ego4D [17], Epic-Kitchen [7]. We focus on this setting due to challenges involving perspective distortion, dynamic interactions & heavy occlusions. We explore both convolutional [14,59] and transformer models [51,52] to study the impact of using crops, location of the crop in camera's field of view & auxiliary supervision in zero-shot generalization to diverse egocentric settings. + +Hand datasets: Since 3D hand annotations from single images is difficult to get, most datasets are collected in controlled settings to get 3D ground truth using MoCap [14,67], multi-camera setups [21,22,40,44,50], or magnetic sensors [16]. 
They often include single hands in isolation [79], hand-object interactions [14, 21,22,40] & hand-hand interactions [48]. Different from these datasets with 3D poses, [6,10,61] provide annotations for segmentation masks [6,10], 2D bounding + +![](images/b793bd7f39854e1a3b493108fe9afabb514f73bfc64615ad4150dfb4c2595a23.jpg) +Fig. 2: Model Overview. We crop the input images around the hand and process them using a convolutional backbone. The hand features along with the global image features (not shown above for clarity) and intrinsics-aware positional encoding (KPE [54]) for each crop are fed to the decoder to predict the 3D hand. The hand decoders predict MANO parameters $\beta, \theta_{\mathrm{local}}, \theta_{\mathrm{global}}$ and camera translation which are converted to 3D keypoints & 2D keypoints and trained using 3D supervision on lab datasets, e.g. ARCTIC [14], AssemblyHands [50]. We also use auxiliary supervision from in-the-wild Epic-Kitchens [10] dataset via hand segmentation masks and grasp labels. The hand masks are available with the VISOR dataset [10] whereas grasp labels are estimated using off-the-shelf model from [6]. + +boxes [61] and grasp labels [6] on internet videos [61] and egocentric images in the wild [9, 17]. Our work combines 3D supervision from datasets [14, 50] captured in controlled settings with 2D auxiliary supervision, i.e. segmentation masks & grasp labels, from datasets outside the lab [6, 10] to learn models that perform well in challenging everyday images. We collect Epic-HandKps dataset with 2D hand keypoints on 5K images from Epic-Kitchens for evaluation in everyday images outside of lab settings. We also use concurrent Ego-Exo4D [18] that annotates 2D keypoints in paired ego & exo views to get 3D hand annotations. + +Auxiliary supervision: Several works on 3D shape prediction from a single image [34,69] often use auxiliary supervision to deal with lack of 3D annotations. [34] uses keypoint supervision for 3D human mesh recovery, while [69] uses multi-view consistency cues for 3D object reconstruction. Aided by differentiable rendering [37,43], segmentation and depth prediction have been used to provide supervision for 3D reconstruction [3,24,35]. We adopt this use of segmentation as an auxiliary cue for 3D poses. In addition, we use supervision from hand grasp labels based on the insight that hand grasp is indicative of the hand pose. + +Ambiguity: 3D estimation from a single image is ill-posed due to ambiguities arising from scale-depth confusion [23] and cropping [54]. Recent work [54] points out the presence of perspective distortion-induced shape ambiguity in image crops and uses camera intrinsic-based location encodings to mitigate it. We investigate the presence of this ambiguity for hand crops in egocentric images and adopt the proposed embedding to mitigate it. Similar embeddings have been used before in literature, primarily from the point of view of training models on images from different cameras [12, 19], to encode extrinsic information [20, 47, 75]. + +# 3 Method + +We present WildHands, a new system for 3D hand pose estimation from egocentric images in the wild. We build on top of ArcticNet-SF [14] and FrankMocap [59]. Given a crop around a hand and associated camera intrinsics, WildHands predicts the 3D hand shape as MANO [58] parameters, shape $\beta$ and pose $\theta$ . 
$\theta$ consists of angles of articulation $\theta_{\mathrm{local}}$ for 15 hand joints and the global pose $\theta_{\mathrm{global}}$ of the root joint in the camera coordinate system. WildHands is trained using both lab (ARCTIC, AssemblyHands) and in-the-wild (Epic-Kitchens, Ego4D) datasets with different sources of supervision. Fig. 2 provides an overview of our model. Next, we describe each component of WildHands in detail. + +# 3.1 Architecture + +Hand encoder: Our models use hand crops as input (resized to $224 \times 224$ resolution), which are processed by a ResNet50 [27] backbone to get $7 \times 7 \times 2048$ feature maps. The left and right hand crops are processed separately but the parameters are shared. We also use global image features in our model, computed by average pooling the $7 \times 7 \times 2048$ feature map to get a 2048-dimensional vector. Incorporating KPE: Recent work [54] has shown that estimating 3D quantities from image crops suffers from perspective distortion-induced shape ambiguity [54]. This raises concerns about whether this ambiguity is also present when using hand crops for predicting 3D pose and how to deal with it. Following the study in [54], we analyze the hands in the ARCTIC dataset (details in the supplementary) and find evidence of this ambiguity in hand crops as well. Thus, we adopt the intrinsics-aware positional encoding (KPE) proposed in [54] to mitigate this ambiguity. Specifically, we provide the network with information about the location of the hand crop in the field of view of the camera. Consider the principal point as $(p_x, p_y)$ & focal length as $(f_x, f_y)$ . For each pixel $(x, y)$ , we compute $\theta_x = \tan^{-1}\left(\frac{x - p_x}{f_x}\right)$ , $\theta_y = \tan^{-1}\left(\frac{y - p_y}{f_y}\right)$ & convert them into sinusoidal encoding [46]. + +We add KPE to the $7 \times 7 \times 2048$ feature map. KPE comprises sinusoidal encoding of the angles $\theta_{x}$ and $\theta_{y}$ (Sec. 4.1 in the main paper), resulting in $5 * 4 * K$ dimensional sparse encoding (4 for corners and 1 for center pixel) and $H \times W \times 4 * K$ resolution dense encoding, where $K$ is the number of frequency components (set to 4). For the sparse KPE variant, we broadcast it to $7 \times 7$ resolution whereas for the dense KPE variant, we interpolate it to $7 \times 7$ resolution and concatenate to the feature map. This concatenated feature is passed to a 3 convolutional layers (with 1024, 512, 256 channels respectively, each with kernel size of $3 \times 3$ and ReLU [49] non-linearity) to get a $3 \times 3 \times 256$ feature map. This is flattened to 2304-dimensional vector and passed through a 1-layer MLP to get a 2048-dimensional feature vector. We do not use batchnorm [30] here since we want to preserve the spatial information in KPE. + +Hand decoder: It consists of an iterative architecture, similar to decoder in HMR [34]. The inputs are the 2048-dimensional feature vector and initial + +MANO [58] (shape $\beta$ , articulation $\theta_{\mathrm{local}}$ and global pose $\theta_{\mathrm{global}}$ , all initialized as 0-vectors) & weak perspective camera parameters (initialized from the 2048-dimensional feature vector). Each of these parameters are predicted using a separate decoder head. The rotation parameters $\theta_{\mathrm{local}}$ , $\theta_{\mathrm{global}}$ are predicted in matrix form and converted to axis-angle representation to feed to MANO model. 
Each decoder is a 3-layer MLP with the 2 intermediate layers having 1024 channels and the output layer having the same number of channels as the predicted parameter. The output of each decoder is added to the initial parameters to get the updated parameters. This process is repeated for 3 iterations. The output of the last iteration is used for the final prediction. + +Differentiable rendering for mask prediction: The outputs from the decoder, $\beta$ , $\theta_{\mathrm{local}}$ , and $\theta_{\mathrm{global}}$ for the predicted hand, are passed to a differentiable MANO layer [25, 58] to get the hand mesh. This is used to differentiably render a soft segmentation mask, $M$ , using SoftRasterizer [43, 55]. Using a differentiable hand model (MANO) and differentiable rendering lets us train our model end-to-end. + +Grasp classifier: We use the insight that grasp type during interaction with objects is indicative of hand pose. We train a grasp prediction head on $\theta_{\mathrm{local}}$ , $\theta_{\mathrm{global}}$ & $\beta$ (predicted by WildHands) via a 4-layer MLP (with 1024, 1024, 512, 128 nodes & ReLU non-linearity after each). The MLP predicts logits for the 8 grasp classes defined in [6] which are converted into probabilities, $G$ via softmax. + +# 3.2 Training supervision + +We train WildHands using: (1) 3D supervision on $\beta$ , $\theta_{\mathrm{local}}$ , $\theta_{\mathrm{global}}$ , 3D hand keypoints & 2D projections of 3D keypoints in the image on lab datasets, and (2) hand masks and grasp labels on in-the-wild datasets. + +$$ +\mathcal {L} _ {\theta} = \left\| \theta - \theta^ {g t} \right\| _ {2} ^ {2} \qquad \mathcal {L} _ {\beta} = \left\| \beta - \beta^ {g t} \right\| _ {2} ^ {2} \qquad \mathcal {L} _ {c a m} = \left\| (s, T) - (s, T) ^ {g t} \right\| _ {2} ^ {2} \quad (1) +$$ + +$$ +\mathcal {L} _ {k p 3 d} = \left\| J _ {3 D} - J _ {3 D} ^ {g t} \right\| _ {2} ^ {2} \quad \mathcal {L} _ {k p 2 d} = \left\| J _ {2 D} - J _ {2 D} ^ {g t} \right\| _ {2} ^ {2} \tag {2} +$$ + +$$ +\mathcal {L} _ {\text {m a s k}} = \| M - M ^ {g t} \| \quad \mathcal {L} _ {\text {g r a s p}} = C E (G, G ^ {g t}) \tag {3} +$$ + +Here, $\mathcal{L}_{\theta}$ is used for both $\theta_{local}$ & $\theta_{global}$ , $(s,T)$ are the weak perspective camera parameters and $CE$ represents cross-entropy loss. $J_{2D} = K[J_{3D} + (T,f / s)]$ , where $J_{3D}$ is the 3D hand keypoints in the MANO coordinate frame, $K$ is the camera intrinsics, $f$ is the focal length, and $s$ is the scale factor of the weak perspective camera. Note that $(.)^{gt}$ represents the ground truth quantities. The total loss is: + +$$ +\begin{array}{l} \mathcal {L} = \lambda_ {\theta} \mathcal {L} _ {\theta} + \lambda_ {\beta} \mathcal {L} _ {\beta} + \lambda_ {c a m} \mathcal {L} _ {c a m} + \lambda_ {k p 3 d} \mathcal {L} _ {k p 3 d} + \lambda_ {k p 2 d} \mathcal {L} _ {k p 2 d} \\ + \lambda_ {m a s k} \mathcal {L} _ {m a s k} + \lambda_ {g r a s p} \mathcal {L} _ {g r a s p} \tag {4} \\ \end{array} +$$ + +Lab datasets: For ARCTIC, we use $\lambda_{\theta} = 10.0, \lambda_{\beta} = 0.001, \lambda_{kp3d} = 5.0, \lambda_{kp2d} = 5.0, \mathcal{L}_{cam} = 1.0$ & set other loss weights to 0. AssemblyHands does not use MANO representation for hands, instead provides labels for 3D & 2D keypoints of 21 hand joints. So, we use $\lambda_{kp3d} = 5, \lambda_{kp2d} = 5$ & set other loss weights to 0. + +In-the-wild data: For Epic-Kitchens & Ego4D, we use hand masks & grasp labels as auxiliary supervision. 
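To make the mask-supervision path concrete, the sketch below renders a soft silhouette with PyTorch3D [55] and applies $\mathcal{L}_{mask}$ from Eq. (3). It is a minimal illustration rather than the paper's actual pipeline: a placeholder icosphere stands in for the posed MANO mesh, and the camera pose, image size, and target mask are made up.

```python
import math
import torch
from pytorch3d.utils import ico_sphere
from pytorch3d.renderer import (
    BlendParams, FoVPerspectiveCameras, MeshRasterizer, MeshRenderer,
    RasterizationSettings, SoftSilhouetteShader, look_at_view_transform,
)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Placeholder geometry: an icosphere stands in for the posed MANO mesh.
# Vertex offsets with requires_grad=True make the shape differentiable,
# the way the MANO parameters are in the real model.
base = ico_sphere(level=2, device=device)
offsets = torch.zeros(base.verts_packed().shape, device=device, requires_grad=True)
mesh = base.offset_verts(offsets)

# Made-up camera; the real model uses the egocentric camera and intrinsics.
R, T = look_at_view_transform(dist=2.7, elev=10.0, azim=30.0)
cameras = FoVPerspectiveCameras(R=R, T=T, device=device)

# Soft rasterization following the SoftRasterizer formulation [43, 55].
blend = BlendParams(sigma=1e-4, gamma=1e-4)
raster = RasterizationSettings(
    image_size=128,
    blur_radius=math.log(1.0 / 1e-4 - 1.0) * blend.sigma,
    faces_per_pixel=50,
)
renderer = MeshRenderer(
    rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster),
    shader=SoftSilhouetteShader(blend_params=blend),
)

# The alpha channel of the rendering is the soft mask M.
soft_mask = renderer(mesh)[..., 3]              # (1, 128, 128)
gt_mask = torch.zeros_like(soft_mask)           # placeholder for M^gt
loss_mask = (soft_mask - gt_mask).abs().mean()  # L_mask = ||M - M^gt||
loss_mask.backward()                            # gradients reach the vertices
```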
While VISOR contains hand masks, grasp labels are not available. Ego4D does not contain either hand masks or grasp labels. To extract these labels, we use predictions from off-the-shelf model [6] as pseudo ground truth. We use $\lambda_{mask} = 10.0$ , $\lambda_{grasp} = 0.1$ & set other loss weights to 0. + +# 3.3 Implementation Details + +Our model takes hand crops as input. During training, we use the ground truth bounding box for the hand crop (with small perturbation), estimated using the 2D keypoints & scaled by a fixed value of 1.5 to provide additional context around the hand. At test time, we need to predict the bounding box of the hand in the image. On ARCTIC, we train a bounding box predictor on by finetuning MaskRCNN [26]. This is also used for submitting the model to the ARCTIC leaderboard. For Epic-HandKps, we use the recently released hand detector from [5]. All the ablations use ground truth bounding box for the hand crop. + +We use the training sets of ARCTIC (187K images) & AssemblyHands (360K), VISOR split (30K) of EPIC and 45K images from Ego4D kitchen videos to train our model. WildHands is trained jointly on different datasets with the input batch containing images from multiple datasets. All models are initialized from the ArcticNet-SF model trained on the allocentric split of the ARCTIC dataset [14]. All models are trained for 100 epochs with a learning rate of $1e - 5$ . The multi-dataset training is done on 2 A40 GPUs with a batch size of 144 and Adam optimizer [39]. More details are provided in the supplementary. + +# 4 Experiments + +We adopt a zero-shot evaluation strategy: 3D evaluation on lab datasets (H2O, AssemblyHands), evaluation of 2D projections of 3D hand predictions on Epic-HandKps & 3D evaluation on EgoExo4D [18]. We systematically analyze the effectiveness of design choices (using crops, KPE), different terms in the loss function and different datasets used for training. We also report a system-level comparison on ARCTIC leaderboard and with FrankMocap [59] & HaMeR [52]. + +# 4.1 Protocols + +Training datasets: We consider 4 datasets for training: 2 lab datasets (ARCTIC & AssemblyHands) and 2 in-the-wild datasets (Epic-Kitchens & Ego4D). + +We select ARCTIC since it contains the largest range of hand pose variation [14] among existing datasets [4, 21, 22, 44, 67]. We use the egocentric split with more than $187\mathrm{K}$ images in the train set. We also use AssemblyHands since it is a large-scale dataset with more than $360\mathrm{K}$ egocentric images in the train split. Different combinations of these datasets are used for different experiments. + +We use egocentric images from Epic-Kitchens & Ego4D as in-the-wild data for training our model using auxiliary supervision. We use 30K training images + +![](images/3a364e95b635b535472d2dd662ad509e958f83bbef4e525264b5dd18623393d1.jpg) +Fig. 3: Epic-HandKps annotations. We collect 2D joint annotations (shown in blue) for 5K in-the-wild egocentric images from Epic-Kitchens [8]. We show few annotations here with images cropped around the hand. We also have the label for the joint corresponding to each keypoint. Note the heavy occlusion & large variation in dexterous poses of hands interactiong with objects. More visualizations in supplementary. + +available in the VISOR split of Epic-Kitchens and 45K images from Ego4D. To extract hand masks and grasp labels, we use off-the-shelf model from [6]. 
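Putting the supervision scheme together: since batches mix datasets and each dataset activates a different subset of the loss terms in Eqs. (1)-(3), the bookkeeping reduces to a per-dataset weight table. The sketch below shows one way this could be implemented; the weight values follow Sec. 3.2, while the per-sample loss tensors and dataset tags are hypothetical placeholders.

```python
import torch

# Loss weights per dataset, as specified in Sec. 3.2; terms not listed get
# weight 0, so each dataset only supervises what it actually annotates.
LOSS_WEIGHTS = {
    "arctic":        {"theta": 10.0, "beta": 0.001, "kp3d": 5.0, "kp2d": 5.0, "cam": 1.0},
    "assemblyhands": {"kp3d": 5.0, "kp2d": 5.0},
    "epic_ego4d":    {"mask": 10.0, "grasp": 0.1},
}

def total_loss(per_term, dataset_names):
    """per_term maps a loss name to a (B,) tensor of per-sample losses;
    dataset_names tags each sample in the mixed batch with its source."""
    loss = 0.0
    for i, name in enumerate(dataset_names):
        for term, weight in LOSS_WEIGHTS[name].items():
            loss = loss + weight * per_term[term][i]
    return loss / len(dataset_names)

# Dummy mixed batch of 3 samples, one from each training source.
terms = ["theta", "beta", "kp3d", "kp2d", "cam", "mask", "grasp"]
dummy = {t: torch.rand(3) for t in terms}
print(total_loss(dummy, ["arctic", "assemblyhands", "epic_ego4d"]))
```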
+ +Evaluation datasets: We consider 4 datasets for zero-shot generalization experiments: H2O [40], AssemblyHands, Epic-HandKps, and Ego-Exo4D. Note that these datasets cover large variation in inputs, H2O contains RGB images in lab settings, AssemblyHands consists of grayscale images and Epic-HandKps and Ego-Exo4D images show hands performing everyday activities in the wild. + +We use the validation splits of H2O and AssemblyHands with 29K and 32K images respectively. Since 3D hand annotations are difficult to collect for in-the-wild images, we instead collect 2D hand keypoints annotations on 5K egocentric images from validation set of VISOR split of Epic-Kitchens. We refer to this dataset as Epic-HandKps. See sample images from the dataset in Fig. 3. We also evaluate on the validation split of Ego-Exo4D hand pose dataset. + +Epic-HandKps: Epic-HandKps contains 2D annotations for the 21 hand joints to facilitate evaluation of 2D projections of the predicted 3D keypoints. We sample 5K images from the validation set of VISOR split of Epic-Kitchens and get the 21 joints annotated via Scale AI. We use the same joint convention as ARCTIC [14]. We crop the images around the hand using the segmentation masks in VISOR and provide the crops to annotators for labeling. Note that most of these images do not have all the 21 keypoints visible. Following ARCTIC, we only consider images with at least 3 visible joints for evaluation. Moreover, since the models in our experiments required hand crops as input, we only evaluate on those images for which hand bounding box is predicted by the recently released hand detector model from [6]. This leaves us with 4724 hand annotations, with 2697 right hands and 2027 left hands. We show some annotations in Fig. 3. + +Metrics: For 3D hand pose evaluation, we consider 2 metrics: (1) Mean PerJoint Position Error (MPJPE): L2 distance (mm) between the 21 predicted + +Table 1: Benefits of using crops and KPE. Zero shot generalization performance improves through the use of crops as input (HandNet uses crops vs. ArcticNet-SF uses full image) and KPE helps (WildHands uses KPE with crops vs. HandNet only uses crops). All models use the same backbone and are trained on the same data in each setting for fair comparisons. $\mathcal{D}$ : {ARCTIC, AssemblyHands, EPIC}. + +
| Method | H2O MPJPE | H2O MRRPE | Assembly MPJPE | Assembly MRRPE | Ego-Exo4D MPJPE | Epic-HandKps L2 Error |
| --- | --- | --- | --- | --- | --- | --- |
| Training data | D | D | D - Assembly | D - Assembly | D | D - EPIC |
| ArcticNet-SF | 83.84 | 325.55 | 110.76 | 326.94 | 114.24 | 35.02 |
| HandNet | 38.06 | 141.06 | 109.88 | 317.49 | 89.72 | 31.62 |
| WildHands | 31.08 | 49.49 | 84.91 | 164.90 | 55.84 | 11.05 |
+ +& ground truth joints for each hand after subtracting the root joint (this captures the relative pose). (2) Mean Relative-Root Position Error (MRRPE): the metric distance between the root joints of left hand and right hand, following [13, 14, 48] (this takes the absolute pose into account). (3) For 2D evaluation on Epic-HandKps, we measure the L2 Error (in pixels for 224x224 image input) between ground truth keypoints & 2D projections of predicted 3D keypoints. + +Baselines: (1) ArcticNet-SF [14] is the single-image model released with the ARCTIC benchmark. It consists of a convolutional backbone (ResNet50 [27]) to process the input image, followed by a HMR [35]-style decoder to predict the hand and object poses. The predicted hand is represented using MANO [58] parameterization. (2) FrankMocap [59] is trained on multiple datasets collected in controlled settings and is a popular choice to apply in the wild setting [3,24,74]. It uses hand crops as input instead of the entire image, which is then processed by a convolutional backbone. The decoder is similar to HMR [35] which outputs MANO parameters for hand and training is done using 3D pose & 2D keypoints supervision. (3) HandNet: Since the training code is not available for FrankMocap, we are unable to train it in our setting. So, we implement a version of ArcticNet-SF which uses crops as input along with HMR-style decoder and train it in our setting using 3D & 2D supervision. This baseline is equivalent to WildHands without KPE and ArcticNet-SF with crops. (4) HandOccNet [51]: It takes crops as input and encodes them using a FPN [41] backbone. These are passed to transformer [71] modules to get a heatmap-based intermediate representation which is then decoded to MANO parameters. (5) HaMeR [52]: It also takes crops as input and processes them using a ViT [11] backbone. The features are then passed to a transformer decoder to predict the MANO parameters. Note that adversarial loss is not used for training any model in our setting. + +# 4.2 Results + +We systematically study the impact of several factors: use of crops (Tab. 1) & KPE (Tab. 1, Tab. 5), perspective distortion (Tab. 4), auxiliary supervision (Tab. 3), training datasets (Tab. 6) on both convolutional (Tab. 1) & transformer + +Table 2: Impact on transformer models. We investigate if our insights are useful for transformer models as well, i.e. if KPE helps on top of positional encodings used in transformers & if auxiliary supervision leads to better generalization for large capacity models. All models are trained on the same data in each setting for fair comparisons. + +
| Method | H2O MPJPE | H2O MRRPE | Assembly MPJPE | Assembly MRRPE | Ego-Exo4D MPJPE | Epic-HandKps L2 Error |
| --- | --- | --- | --- | --- | --- | --- |
| Training data | D | D | D - Assembly | D - Assembly | D | D - EPIC |
| HandOccNet [51] | 60.58 | 187.24 | 110.28 | 293.92 | 80.96 | 32.77 |
| HandOccNet + KPE | 47.57 | 72.25 | 103.30 | 232.83 | 78.64 | 13.54 |
| HaMeR [52] (ViT) | 30.57 | 113.26 | 79.48 | 227.59 | 55.36 | 25.48 |
| HaMeR (ViT) + KPE | 24.15 | 62.99 | 71.64 | 184.55 | 47.02 | 9.77 |
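The paper states that KPE helps the transformer baselines in Tab. 2 on top of their own positional encodings, but does not spell out the wiring. The sketch below is one plausible reading, with all shapes, dimensions, and module names assumed for illustration: a dense KPE map is resized to the patch-token grid and concatenated to the tokens before a linear projection.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class KPEFuse(nn.Module):
    """Hypothetical fusion of a dense KPE map with ViT patch tokens.

    Not the authors' code: one plausible way to attach KPE to a
    transformer backbone such as the ViT in HaMeR [52].
    """

    def __init__(self, dim=1024, kpe_dim=16):  # kpe_dim = 4K with K = 4
        super().__init__()
        self.proj = nn.Linear(dim + kpe_dim, dim)

    def forward(self, tokens, kpe_map, grid_hw):
        # tokens: (B, N, dim) patch tokens; kpe_map: (B, 4K, H, W) dense KPE.
        h, w = grid_hw
        kpe = F.interpolate(kpe_map, size=(h, w), mode="bilinear",
                            align_corners=False)
        kpe = kpe.flatten(2).transpose(1, 2)    # (B, h * w, 4K)
        return self.proj(torch.cat([tokens, kpe], dim=-1))

# Example: 14x14 token grid from a 224x224 crop with 16x16 patches.
fuse = KPEFuse()
out = fuse(torch.rand(2, 196, 1024), torch.rand(2, 16, 56, 56), (14, 14))
print(out.shape)  # torch.Size([2, 196, 1024])
```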
+ +Table 3: Role of auxiliary supervision. We consider grasp and mask supervision from both Epic-Kitchens & Ego4D to train WildHands and show results in zero-shot generalization settings. Both grasp & mask supervision lead to improvements in 3D & 2D metrics, with hand masks providing larger gain compared to grasp labels. Even though auxiliary supervision is on Epic/Ego4D, it leads to improvements in all settings, i.e. benefits from training on broad data extend beyond datasets with auxiliary supervision. + +
| Method | H2O MPJPE | H2O MRRPE | Assembly MPJPE | Assembly MRRPE | Ego-Exo4D MPJPE | Epic-HandKps L2 Error |
| --- | --- | --- | --- | --- | --- | --- |
| WildHands (no aux) | 39.52 | 77.07 | 93.44 | 208.32 | 70.39 | 17.07 |
| + EPIC grasp | 38.34 | 76.04 | 90.23 | 180.85 | 63.30 | - |
| + EPIC mask | 34.29 | 60.23 | 87.94 | 175.31 | 56.41 | - |
| + EPIC grasp + EPIC mask | 31.08 | 49.49 | 84.91 | 164.90 | 55.84 | - |
| + Ego4D grasp | 41.06 | 111.47 | 86.44 | 222.23 | 69.73 | 8.22 |
| + Ego4D mask | 38.17 | 57.93 | 82.55 | 145.78 | 63.43 | 7.87 |
| + Ego4D grasp + Ego4D mask | 35.62 | 62.10 | 79.08 | 148.12 | 60.80 | 7.20 |
+ +models (Tab. 2) through controlled experiments, i.e. all factors outside of what we want to check the affect of, are kept constant. All the results are reported in a zero-shot setting i.e. models are not trained on the evaluation dataset. + +Impact of crops: To understand the benefits due to using crops as input instead of full images, we compare ArcticNet-SF and HandNet in Tab. 1. The only difference between these two models is: ArcticNet-SF uses full image as input whereas HandNet uses crops as input. We see gains of $27.7\%$ in MPJPE, $29.7\%$ in MRRPE, $10.7\%$ in PA-MPJPE, and $9.7\%$ in 2D pose across different settings. This provides evidence for the utility of using crops as inputs [50,59]. + +Benefits of KPE: In Tab. 1, HandNet & WildHands differ only in the use of KPE. This leads go improvements of $20.5\%$ in MPJPE, $56.4\%$ in MRRPE & $65.1\%$ in 2D pose. Compared to impact of crops, the gains are significantly higher in MRRPE (indicating better absolute pose) and on Epic-HandKps (leading to better generalization in the wild). + +Role of auxiliary supervision: We extract hand masks & grasp labels from Epic-Kitchens & Ego4D and show their benefits in Tab. 3 in zero-shot evaluation settings. Mask supervision leads to gains of $8.5\%$ in MPJPE, $21.5\%$ in MRRPE and $55.5\%$ in 2D pose. Grasp labels improve MPJPE by $2.5\%$ , MRRPE by $7.3\%$ + +Table 4: Comparison of KPE with relevant approaches. KPE is more effective than other methods for dealing with perspective distortion, e.g. Perspective Correction [45], Perspective Crop Layers (PCL [76]), or other encodings, e.g. CamConv [12] + +
| Method | H2O MPJPE | H2O MRRPE | Assembly MPJPE | Assembly MRRPE | Ego-Exo4D MPJPE | Epic-HandKps L2 Error |
| --- | --- | --- | --- | --- | --- | --- |
| HandNet + CamConv | 36.86 | 67.62 | 96.72 | 180.73 | 60.69 | 17.35 |
| HandNet + Perspective Corr. | 39.95 | 159.13 | 59.10 | 637.32 | 67.45 | 28.68 |
| HandNet + PCL [76] | 36.82 | 158.88 | 45.18 | 483.92 | 63.65 | 28.21 |
| HandNet + KPE (WildHands) | 31.08 | 49.49 | 84.91 | 164.90 | 55.84 | 11.05 |
Table 5: KPE Design Choices. We study the impact of different design choices for KPE on WildHands: adding KPE to the input instead of the latent features (w/ input), removing intrinsics from KPE (no intrx), and the dense variant of KPE from [54]. WildHands uses the sparse variant of KPE. All variants of KPE provide significant benefits over the model without KPE, and the sparse variant performs best.
| Variant | H2O MPJPE | H2O MRRPE | Assembly MPJPE | Assembly MRRPE | Ego-Exo4D MPJPE | Epic-HandKps L2 Error |
| --- | --- | --- | --- | --- | --- | --- |
| no KPE | 38.06 | 141.06 | 109.88 | 317.49 | 89.72 | 31.62 |
| KPE w/ input | 45.51 | 80.96 | 94.45 | 252.34 | 93.56 | 17.30 |
| KPE no intrx | 36.97 | 61.98 | 92.12 | 246.45 | 60.80 | 11.63 |
| KPE dense | 36.86 | 80.54 | 95.34 | 201.33 | 69.11 | 11.24 |
| KPE sparse | 31.08 | 49.49 | 84.91 | 164.90 | 55.84 | 11.05 |
+ +and 2D pose by $4.3\%$ . While both sources of supervision are effective, hand masks lead to larger gains. Combining both mask and grasp supervision leads to further improvements in both 3D & 2D poses across most settings. Moreover, auxiliary supervision on in-the-wild data also aids performance on lab datasets, suggesting that generalization gains from training on broad data are not dataset specific. + +Comparison of KPE with relevant approaches: In Tab. 4, we find KPE to be more effective than other methods for dealing with perspective distortion, e.g. Perspective Correction [45], Perspective Crop Layers (PCL [76]), or different forms of positional encoding, e.g. CamConv [12]. + +Impact on transformer models: We investigate if our insights are useful to transformer models as well, i.e. if KPE helps on top of positional encodings already used in transformers and if auxiliary supervision leads to better generalization for large capacity models. For this, we implement these components in HandOccNet [51] & HaMeR [52] and train these models in our settings. From the results in Tab. 2, we see consistent gains across all settings. + +KPE design choice: We ablate different variants of KPE in Tab. 5: adding KPE with the input instead of latent features (w/ input), removing intrinsics from KPE (no intrx) and dense variant of KPE from [54]. Note that the sparse variant performs the best, so we use sparse KPE in WildHands. + +Table 6: Effect of scaling up data. Training on more datasets leads to consistent improvements in models performance on held out datasets. + +
| Training data | H2O MPJPE | H2O MRRPE | Ego-Exo4D MPJPE | Epic-HandKps L2 Error |
| --- | --- | --- | --- | --- |
| ARCTIC | 47.30 | 75.17 | 87.71 | 17.07 |
| ARCTIC + Assembly | 39.52 | 77.07 | 70.39 | 11.05 |
| ARCTIC + Assembly + Ego4D (aux) | 35.62 | 62.10 | 60.80 | 7.20 |
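For reference, the sparse KPE variant that performs best in Tab. 5 can be sketched as follows. The angle computation and the 5-point (4 corners + center) layout follow Sec. 3.1; the power-of-two frequency schedule and the box/intrinsics in the example are assumptions made for illustration.

```python
import torch

def kpe_sparse(box, fx, fy, px, py, K=4):
    """Sparse intrinsics-aware positional encoding (KPE [54]) for a hand crop.

    box = (x1, y1, x2, y2) in pixels. The 4 crop corners and the center are
    mapped to angular locations in the camera's field of view and encoded
    with K sinusoidal frequencies, giving a 5 * 4K vector
    (2 angles x {sin, cos} x K per point).
    """
    x1, y1, x2, y2 = box
    pts = torch.tensor([
        [x1, y1], [x2, y1], [x1, y2], [x2, y2],   # crop corners
        [(x1 + x2) / 2.0, (y1 + y2) / 2.0],       # crop center
    ])
    theta_x = torch.atan((pts[:, 0] - px) / fx)   # horizontal viewing angle
    theta_y = torch.atan((pts[:, 1] - py) / fy)   # vertical viewing angle
    freqs = 2.0 ** torch.arange(K, dtype=torch.float32)  # assumed schedule
    ang = torch.stack([theta_x, theta_y], dim=-1)[..., None] * freqs  # (5, 2, K)
    enc = torch.cat([torch.sin(ang), torch.cos(ang)], dim=-1)         # (5, 2, 2K)
    return enc.reshape(-1)                        # 80-dim for K = 4

# Example: a 224x224 crop in the upper-left of a 1280x720 egocentric frame.
enc = kpe_sparse((100, 80, 324, 304), fx=600.0, fy=600.0, px=640.0, py=360.0)
print(enc.shape)  # torch.Size([80])
```

In the model, this vector is broadcast over the $7 \times 7$ feature grid and concatenated to the backbone features, as described in Sec. 3.1.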
+ +Intrinsics during training: Intrinsics may not always be available in in-the-wild data used to derive auxiliary supervision. To study this setting, we consider in-the-wild Ego4D data since it contains images from multiple cameras, and do not assume access to intrinsics. In this case, we replace the KPE with a sinusoidal positional encoding of normalized image coordinates w.r.t. center. The Ego4D results in Tab. 3 follow this setting and we observe that auxiliary supervision from Ego4D provides benefits even in the absence of camera information. + +Scaling up training data: We ablate variants of WildHands trained with ARCTIC, ARCTIC + AssemblyHands, ARCTIC + Ego4D and ARCTIC + AssemblyHands + Ego4D in zero-shot settings on H2O, Ego-Exo4D, and EpicHandKps. We use 3D supervision on ARCTIC & AssemblyHands and auxiliary supervision (hand masks, grasp labels) on Ego4D. Tab. 6 shows consistent improvements in 3D and 2D metrics from both AssemblyHands and Ego4D datasets, suggesting that further scaling can improve performance further. + +# 4.3 System-level Evaluation + +While all of our earlier experiments are conducted in controlled settings, we also present a system-level comparison to other past methods, specifically to methods submitted to the ARC-TIC leaderboard (as of July 13, 2024), and with the publicly released models of FrankMo-cap [59] and HaMeR [52]. + +ARCTIC Leaderboard: Our method achieves the best 3D hand pose on the ego-centric split, compared to recent state-of-the-art convolutional (e.g. ArcticNet-SF, DIGIT-HRNet, HMR-ResNet50) and transformer (e.g. + +JointTransformer) models (as of July 13, 2024). However, it is not possible to do a detailed comparison since most of these models are not public. + +Comparison with FrankMocap [59] and HaMeR [52]: We show results with the publicly released models in Tab. 8. Note that HaMeR uses a ViT-H backbone which is much larger and more performant than the ResNet50 backbone used in WildHands. WildHands outperforms FrankMocap across all metrics and HaMeR on 3 of 6 metrics while being $10 \times$ smaller & trained on $5 \times$ less data. + +Table 7: Leaderboard results. WildHands leads the 3D hand pose on the egocentric split of ARCTIC leaderboard (as of July 13, 2024). + +
| Method | MPJPE | MRRPE |
| --- | --- | --- |
| ArcticNet-SF | 19.18 | 28.31 |
| ArcticOccNet | 19.77 | 29.75 |
| DIGIT-HRNet | 16.74 | 25.49 |
| HMR-ResNet50 | 20.32 | 32.32 |
| JointTransformer | 16.33 | 26.07 |
| WildHands | 15.72 | 23.88 |
+ +![](images/b63a51f382d93f665181d1e1032d2f325fc8277e34956f5503d1f02673d81d30.jpg) + +![](images/9fd386964b7762560284bb12c6249b81cafea519a2011da79922cff2b8d42782.jpg) + +![](images/0ff6ef8d1147bf804c7ce8bc1049909346692990516d11b2bd456122b1390718.jpg) + +![](images/a56a373135997bf871a72c1bb33729f3a1ec53bfe97a54b5f96dad8faed716b2.jpg) + +![](images/5406c867f71f8ced4b0b9687044bd4de2bf2a960e2dff9c10dfcd6e7fd75876e.jpg) + +![](images/97fe05cba454ef27842d406056da182173533e8f10bbd1f30a913758e0d394fc.jpg) + +![](images/f276b3dc25b6d5ae71da2b56756af7ac95216e28ff75b635dd4c6a4e01f57ab0.jpg) +Fig. 4: Visualizations. We show projection of the predicted hand in the image & rendering of the hand mesh from 2 more views. WildHands predicts better hand poses from a single image than FrankMocap [59], HaMeR [14] and ArcticNet [14] in challenging egocentric scenarios involving occlusions and perspective distortion. + +![](images/be3607737c9943517dda8294ac568933abb600cfb43bf29b255fd0f059955145.jpg) + +![](images/dd2661b40262104f4125262fe3f8449367bd01fb7ecab650e0db42276640e3ef.jpg) + +![](images/99c7713e0bb4e49090213f40ec040645fdc9afb9562a3ce89f2b5cf67c8a4dcd.jpg) + +![](images/89e7f2789a3def971b2b0c89d2b7acdcf1aadd1cdb8a1a5f021420681ceffaf9.jpg) + +![](images/a298318701025175b564af3834f40569ba9a2be4a9e2853afeb851d2ee0dae1c.jpg) + +![](images/dbf18ce05743d49f5063ec749bb540fd70897113ed951a1dcfa7e423c7ce38a4.jpg) + +![](images/8fb0cca3c4b69efae39213d8dc027f1b0b84d9e50aec9f23d47cbb494df53aa2.jpg) + +![](images/596c6c2589245dbdc705e84ea48e307981b54a43f6ca77a432e21802a31e1146.jpg) + +![](images/e4620473ef14f57826760a17de33d85973dcb3dc5d9f4376dfc332d18330f772.jpg) + +![](images/c1a8fbbb48f7143b1b7b0e371ec712155de7870d08f8372dccbe099a5a378922.jpg) + +![](images/aa3ed71ed34a1f88529a84444bf12e1b3206f0dee86c1b08b4b9f4f7deffe767.jpg) + +![](images/7bcde915d03d079a91588aa90d482336e9df3b3741ab6e33f6ae6509c34e146b.jpg) + +![](images/3a8157dad25e84e2cca74e1161c40f2553104fab9a80eeefe1254c0f98e29f84.jpg) + +# 4.4 Visualizations + +We show qualitative comparisons of the hand pose, predicted by WildHands, with FrankMocap on Epic-HandKps (Fig. 4a) and ArcticNet-SF on ARCTIC (Fig. 4b). Looking at the projection of the mesh in the camera view and rendering of the mesh from additional views, we observe that WildHands is able to predict hand pose better in images involving occlusion and interaction, e.g. fingers are curled around the object in contact (Fig. 4) for our model but this is not the case for FrankMocap. We observe similar trends in ARCTIC (Fig. 4b) where our model predicts better hands in contact scenarios. More results in supplementary. + +Failure Cases: We observe that images in which the fingers are barely visible, e.g. when kneading a dough in top row (Fig. 5), or containing extreme poses, e.g. grasps in bottom row (Fig. 5), are quite challenging for all models. + +Limitations: The KPE encoding requires camera intrinsics to be known, which may not be available in certain scenarios. However, in several in-the-wild images, the metadata often contains camera information. Also, we currently set the weights for different loss terms as hyperparameters which may not be ideal since the sources of supervision are quite different leading to different scales in loss values. It could be useful to use a learned weighing scheme, e.g. uncertainty-based loss weighting [2, 29, 38]. + +Table 8: Systems comparison. We evaluate against publicly released models: FrankMocap [59] (a popular method for 3D hand pose estimation), and HaMeR [52]. 
FrankMocap uses a ResNet-50 backbone and is trained on 6 lab datasets. HaMeR uses a ViT-H [11] backbone and is trained on 7 lab + 3 in-the-wild + HInt datasets, spanning nearly 3M frames. WildHands uses a ResNet-50 backbone and is trained on 3 datasets. WildHands outperforms FrankMocap across all metrics and HaMeR on 3 of 6 metrics while being $10 \times$ smaller & trained on $5 \times$ less data. We expect that scaling up the backbone and training data for WildHands would lead to even stronger performance.
| Method | H2O MPJPE | H2O MRRPE | Assembly MPJPE | Assembly MRRPE | Ego-Exo4D MPJPE | Epic-HandKps L2 Error |
| --- | --- | --- | --- | --- | --- | --- |
| FrankMocap [59] (ResNet-50, 6 lab) | 58.51 | - | 97.59 | - | 175.91 | 13.33 |
| HaMeR [52] (ViT-H, 7 lab + 3 wild + HInt) | 23.82 | 147.87 | 45.49 | 334.52 | 116.46 | 4.56 |
| WildHands (ResNet-50, 2 lab + 1 wild) | 31.08 | 49.49 | 80.40 | 148.12 | 55.84 | 7.20 |
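For completeness, the two 3D metrics reported throughout these tables can be written down compactly. A minimal sketch assuming (B, 21, 3) keypoint tensors in millimeters; the MRRPE form follows the relative-root convention of [13, 14, 48].

```python
import torch

def mpjpe(pred, gt, root=0):
    """Mean per-joint position error (mm) after root subtraction,
    i.e. the root-relative pose error over the 21 joints."""
    pred_rel = pred - pred[:, root:root + 1]
    gt_rel = gt - gt[:, root:root + 1]
    return (pred_rel - gt_rel).norm(dim=-1).mean()

def mrrpe(pred_left, pred_right, gt_left, gt_right, root=0):
    """Mean relative-root position error (mm): error of the predicted
    left-to-right root translation against the ground truth."""
    rel_pred = pred_left[:, root] - pred_right[:, root]
    rel_gt = gt_left[:, root] - gt_right[:, root]
    return (rel_pred - rel_gt).norm(dim=-1).mean()

# Dummy example with a batch of 2 samples of left/right hand predictions.
pl, pr, gl, gr = (torch.rand(2, 21, 3) for _ in range(4))
print(mpjpe(pl, gl), mrrpe(pl, pr, gl, gr))
```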
+ +![](images/b273875b3f49d0d4cd075becad73149e9043941a80f71c2ba98c5d157ed2bbc2.jpg) + +![](images/86fdcc4c08e0fb9cb8630670013e5e26b4cf74a7e43f17fb123c9459e18feab2.jpg) + +![](images/76056f448812d1968bfc5cb72da41d04e89588d991fe1def7e6b0d05814d8ddd.jpg) + +![](images/a9e08c24412fecc4e28b9325d20b72d7d7ae92172ed92877b26a7db2a0f14d24.jpg) +Fig. 5: Failure cases. We observe that images with (top) barely visible fingers, e.g. kneading dough or (bottom) extreme grasp poses are challenging for all models. + +![](images/323c86de16f0947cd0a393ef2823329466a073d51f3758b1f8aa0ac6acd10c66.jpg) + +![](images/dcb1517fbbed8dfd0370c59bef0a5586003b36fa3104b5d1e26208f7d5459871.jpg) + +![](images/1aad294fd700e89579ca4a400571141d7256b7c781ac42e7bfc66f7a14ba042e.jpg) + +![](images/4926c4e0dc2d6cbbe7ef6c6586d91afa2ce7e7f57d1bdd53007622e4502fb647.jpg) + +![](images/0b73a7fae6a35a73d7641b7f2beaa1341caa0f7a69b5bd763753a06eb9d3c882.jpg) + +![](images/62c03868585d8ce0d0ce0af10d50f7faf21810e79a12245894419c0acc4e86df.jpg) + +![](images/5c1857075799195991145c9ade501ea18031f123013d2a7186e4db61b49f5f2b.jpg) + +![](images/87bd577a8cfc8807ff1d0e35485d2e8df67140aa35bc70b736d8395c8e6d6971.jpg) + +# 5 Conclusion + +We present WildHands, a system that adapts best practices from the literature: using crops as input, intrinsics-aware positional encoding, auxiliary sources of supervision and multi-dataset training, for robust prediction of 3D hand poses on egocentric images in the wild. Experiments on both lab datasets and in-the-wild settings show the effectiveness of WildHands. As future direction, WildHands could be used to scale up learning robot policies from human interactions. + +Acknowledgements: We thank Arjun Gupta, Shaowei Liu, Anand Bhattachad & Kashyap Chitta for feedback on the draft, and David Forsyth for useful discussion. This material is based upon work supported by NSF (IIS2007035), NASA (80NSSC21K1030), DARPA (Machine Common Sense program), Amazon Research Award, NVIDIA Academic Hardware Grant, and the NCSA Delta System (supported by NSF OCI 2005572 and the State of Illinois). + +# References + +1. Ballan, L., Taneja, A., Gall, J., Gool, L.V., Pollefeys, M.: Motion capture of hands in action using discriminative salient points. In: Proceedings of the European Conference on Computer Vision (ECCV) (2012) +2. Brazil, G., Kumar, A., Straub, J., Ravi, N., Johnson, J., Gkioxari, G.: Omni3d: A large benchmark and model for 3d object detection in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 13154-13164 (2023) +3. Cao, Z., Radosavovic, I., Kanazawa, A., Malik, J.: Reconstructing hand-object interactions in the wild. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021) +4. Chao, Y., Yang, W., Xiang, Y., Molchanov, P., Handa, A., Tremblay, J., Narang, Y.S., Wyk, K.V., Iqbal, U., Birchfield, S., Kautz, J., Fox, D.: Dexycb: A benchmark for capturing hand grasping of objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021) +5. Chen, Z., Zhang, H.: Learning implicit fields for generative shape modeling. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019) +6. Cheng, T., Shan, D., Hassen, A.S., Higgins, R.E.L., Fouhey, D.: Towards a richer 2d understanding of hands at scale. In: Advances in Neural Information Processing Systems (NeurIPS) (2023) +7. 
Damen, D., Doughty, H., Farinella, G.M., Fidler, S., Furnari, A., Kazakos, E., Moltisanti, D., Munro, J., Perrett, T., Price, W., Wray, M.: Scaling egocentric vision: The epic-kitchens dataset. Proceedings of the European Conference on Computer Vision (ECCV) (2018) +8. Damen, D., Doughty, H., Farinella, G.M., Fidler, S., Furnari, A., Kazakos, E., Moltisanti, D., Munro, J., Perrett, T., Price, W., Wray, M.: The epic-kitchen dataset: Collection, challenges and baselines. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI) (2020) +9. Damen, D., Doughty, H., Farinella, G.M., Fidler, S., Furnari, A., Kazakos, E., Moltisanti, D., Munro, J., Perrett, T., Price, W., et al.: Scaling egocentric vision: The epic-kitchens dataset. In: Proceedings of the European Conference on Computer Vision (ECCV) (2018) +10. Darkhalil, A., Shan, D., Zhu, B., Ma, J., Kar, A., Higgins, R., Fidler, S., Fouhey, D., Damen, D.: Epic-kitchen visor benchmark: Video segmentations and object relations. In: NeurIPS Track on Datasets and Benchmarks (2022) +1. Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020) +2. Facil, J.M., Ummenhofer, B., Zhou, H., Montesano, L., Brox, T., Civera, J.: Camconvs: Camera-aware multi-scale convolutions for single-view depth. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp 11826-11835 (2019) +3. Fan, Z., Spurr, A., Kocabas, M., Tang, S., Black, M.J., Hilliges, O.: Learning to disambiguate strongly interacting hands via probabilistic per-pixel part segmentation In: Proceedings of the International Conference on 3D Vision (3DV) (2021) +4. Fan, Z., Taheri, O., Tzionas, D., Kocabas, M., Kaufmann, M., Black, M.J., Hilliges, O.: ARCTIC: A dataset for dexterous bimanual hand-object manipulation. In: + +Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023) +15. Freeman, W.T., Roth, M.: Orientation histograms for hand gesture recognition. In: International workshop on automatic face and gesture recognition. vol. 12, pp. 296-301. Citeseer (1995) +16. Garcia-Hernando, G., Yuan, S., Baek, S., Kim, T.K.: First-person hand action benchmark with rgb-d videos and 3d hand pose annotations. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018) +17. Grauman, K., Westbury, A., Byrne, E., Chavis, Z., Furnari, A., Girdhar, R., Hamburger, J., Jiang, H., Liu, M., Liu, X., et al.: Ego4d: Around the world in 3,000 hours of egocentric video. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022) +18. Grauman, K., Westbury, A., Torresani, L., Kitani, K., Malik, J., Afouras, T., Ashutosh, K., Baiyya, V., Bansal, S., Boote, B., et al.: Ego-exo4d: Understanding skilled human activity from first-and third-person perspectives. arXiv preprint arXiv:2311.18259 (2023) +19. Guizilini, V., Vasiljevic, I., Chen, D., Ambrus, R., Gaidon, A.: Towards zero-shot scale-aware monocular depth estimation. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2023) +20. Guizilini, V., Vasiljevic, I., Fang, J., Ambru, R., Shakhnarovich, G., Walter, M.R., Gaidon, A.: Depth field networks for generalizable multi-view scene representation. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022) +21. 
Hampali, S., Rad, M., Oberweger, M., Lepetit, V.: Honnotate: A method for 3d annotation of hand and object poses. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020) +22. Hampali, S., Sarkar, S.D., Rad, M., Lepetit, V.: Keypoint transformer: Solving joint identification in challenging hands and object interactions for accurate 3d pose estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022) +23. Hartley, R., Zisserman, A.: Multiple view geometry in computer vision. Cambridge university press (2003) +24. Hasson, Y., Tekin, B., Bogo, F., Laptev, I., Pollefeys, M., Schmid, C.: Leveraging photometric consistency over time for sparsely supervised hand-object reconstruction. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020) +25. Hasson, Y., Varol, G., Tzionas, D., Kalevatykh, I., Black, M.J., Laptev, I., Schmid, C.: Learning joint reconstruction of hands and manipulated objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019) +26. He, K., Gkioxari, G., Dollar, P., Girshick, R.B.: Mask R-CNN. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2017) +27. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2016) +28. Heap, T., Hogg, D.: Towards 3d hand tracking using a deformable model. In: Proceedings of the Second International Conference on Automatic Face and Gesture Recognition. pp. 140-145. IEEE (1996) +29. Hu, A., Murez, Z., Mohan, N., Dudas, S., Hawke, J., Badrinarayanan, V., Cipolla, R., Kendall, A.: FIERY: future instance prediction in bird's-eye view from surround monocular cameras. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021) + +30. Ioffe, S., Szegedy, C.: Batch normalization: Accelerating deep network training by reducing internal covariate shift. In: Bach, F.R., Blei, D.M. (eds.) Proceedings of the International Conference on Machine Learning (ICML) (2015) +31. Ivashechkin, M., Mendez, O., Bowden, R.: Denoising diffusion for 3d hand pose estimation from images. arXiv 2308.09523 (2023) +32. Jiang, C., Xiao, Y., Wu, C., Zhang, M., Zheng, J., Cao, Z., Zhou, J.T.: A2j-transformer: Anchor-to-joint transformer network for 3d interacting hand pose estimation from a single RGB image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023) +33. Jiang, Z., Rahmani, H., Black, S., Williams, B.M.: A probabilistic attention model with occlusion-aware texture regression for 3d hand reconstruction from a single RGB image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023) +34. Kanazawa, A., Black, M.J., Jacobs, D.W., Malik, J.: End-to-end recovery of human shape and pose. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018) +35. Kanazawa, A., Tulsiani, S., Efros, A.A., Malik, J.: Learning category-specific mesh reconstruction from image collections. In: Proceedings of the European Conference on Computer Vision (ECCV) (2018) +36. Karunratanakul, K., Yang, J., Zhang, Y., Black, M.J., Muandet, K., Tang, S.: Grasping field: Learning implicit representations for human grasps. In: Proceedings of the International Conference on 3D Vision (3DV) (2020) +37. Kato, H., Ushiku, Y., Harada, T.: Neural 3d mesh renderer. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018) +38. Kendall, A., Gal, Y., Cipolla, R.: Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018) +39. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. In: Bengio, Y., LeCun, Y. (eds.) Proceedings of the International Conference on Learning Representations (ICLR) (2015) +40. Kwon, T., Tekin, B., Stühmer, J., Bogo, F., Pollefeys, M.: H2o: Two hands manipulating objects for first person interaction recognition. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021) +41. Lin, T., Dollár, P., Girshick, R.B., He, K., Hariharan, B., Belongie, S.J.: Feature pyramid networks for object detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2017) +42. Liu, S., Chen, W., Li, T., Li, H.: Soft rasterizer: A differentiable renderer for image-based 3d reasoning. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2019) +43. Liu, S., Li, T., Chen, W., Li, H.: A general differentiable mesh renderer for image-based 3d reasoning. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI) (2020) +44. Liu, Y., Liu, Y., Jiang, C., Lyu, K., Wan, W., Shen, H., Liang, B., Fu, Z., Wang, H., Yi, L.: HOI4D: A 4d egocentric dataset for category-level human-object interaction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022) +45. Mehta, D., Rhodin, H., Casas, D., Fua, P., Sotnychenko, O., Xu, W., Theobalt, C.: Monocular 3d human pose estimation in the wild using improved CNN supervision. In: Proceedings of the International Conference on 3D Vision (3DV) (2017) + +46. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020) +47. Miyato, T., Jaeger, B., Welling, M., Geiger, A.: GTA: A geometry-aware attention mechanism for multi-view transformers. arXiv (2023) +48. Moon, G., Yu, S., Wen, H., Shiratori, T., Lee, K.M.: Interhand2.6m: A dataset and baseline for 3d interacting hand pose estimation from a single RGB image. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020) +49. Nair, V., Hinton, G.E.: Rectified linear units improve restricted boltzmann machines. In: Proceedings of the International Conference on Machine Learning (ICML) (2010) +50. Ohkawa, T., He, K., Sener, F., Hodan, T., Tran, L., Keskin, C.: Assemblyhands: Towards egocentric activity understanding via 3d hand pose estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 12999-13008 (2023) +51. Park, J., Oh, Y., Moon, G., Choi, H., Lee, K.M.: Handoccnet: Occlusion-robust 3d hand mesh estimation network. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022) +52. Pavlakos, G., Shan, D., Radosavovic, I., Kanazawa, A., Fouhey, D., Malik, J.: Reconstructing hands in 3d with transformers. arXiv preprint arXiv:2312.05251 (2023) +53. Potamias, R.A., Ploumpis, S., Moschoglou, S., Triantafyllou, V., Zafeiriou, S.: Handy: Towards a high fidelity 3d hand shape and appearance model. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 
4670-4680 (2023) +54. Prakash, A., Gupta, A., Gupta, S.: Mitigating perspective distortion-induced shape ambiguity in image crops. arXiv:2312.06594 (2023) +55. Ravi, N., Reizenstein, J., Novotny, D., Gordon, T., Lo, W.Y., Johnson, J., Gkioxari, G.: Accelerating 3d deep learning with pytorch3d. arXiv:2007.08501 (2020) +56. Rehg, J.M., Kanade, T.: Visual tracking of high dof articulated structures: an application to human hand tracking. In: Proceedings of the European Conference on Computer Vision (ECCV) (1994) +57. Rogez, G., Khademi, M., Supancic III, J., Montiel, J.M.M., Ramanan, D.: 3d hand pose detection in egocentric rgb-d images. In: Proceedings of the European Conference on Computer Vision (ECCV) (2014) +58. Romero, J., Tzionas, D., Black, M.J.: Embodied hands: Modeling and capturing hands and bodies together. ACM Transactions on Graphics (ToG) (2017) +59. Rong, Y., Shiratori, T., Joo, H.: FrankMocap: Fast monocular 3D hand and body motion capture by regression and integration. In: Proceedings of the IEEE International Conference on Computer Vision Workshops (ICCV Workshops) (2021) +60. Sener, F., Chatterjee, D., Shelepov, D., He, K., Singhania, D., Wang, R., Yao, A.: Assembly101: A large-scale multi-view video dataset for understanding procedural activities. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022) +61. Shan, D., Geng, J., Shu, M., Fouhey, D.F.: Understanding human hands in contact at internet scale. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020) +62. Sharp, T., Keskin, C., Robertson, D., Taylor, J., Shotton, J., Kim, D., Rhemann, C., Leichter, I., Vinnikov, A., Wei, Y., et al.: Accurate, robust, and flexible real-time hand tracking. In: Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems. pp. 3633-3642 (2015) + +63. Simon, T., Joo, H., Matthews, I.A., Sheikh, Y.: Hand keypoint detection in single images using multiview bootstrapping. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2017) +64. Sridhar, S., Mueller, F., Zollhöfer, M., Casas, D., Oulasvirta, A., Theobalt, C.: Real-time joint tracking of a hand manipulating an object from rgb-d input. In: Proceedings of the European Conference on Computer Vision (ECCV) (2016) +65. Sridhar, S., Oulasvirta, A., Theobalt, C.: Interactive markerless articulated hand motion tracking using RGB and depth data. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2013) +66. Sun, X., Wei, Y., Liang, S., Tang, X., Sun, J.: Cascaded hand pose regression. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2015) +67. Taheri, O., Ghorbani, N., Black, M.J., Tzionas, D.: GRAB: A dataset of whole-body human grasping of objects. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020) +68. Tompson, J., Stein, M., Lecun, Y., Perlin, K.: Real-time continuous pose recovery of human hands using convolutional networks. ACM Transactions on Graphics (ToG) 33(5), 1-10 (2014) +69. Tulsiani, S., Zhou, T., Efros, A.A., Malik, J.: Multi-view supervision for single-view reconstruction via differentiable ray consistency. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 2626-2634 (2017) +70. Tzionas, D., Gall, J.: 3d object reconstruction from hand-object interactions. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2015) +71.
Vaswani, A., Shazeer, N.M., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. Advances in Neural Information Processing Systems (NeurIPS) (2017) +72. Wan, C., Yao, A., Gool, L.V.: Hand pose estimation from local surface normals. In: Proceedings of the European Conference on Computer Vision (ECCV) (2016) +73. Yang, L., Li, K., Zhan, X., Wu, F., Xu, A., Liu, L., Lu, C.: OakInk: A large-scale knowledge repository for understanding hand-object interaction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022) +74. Ye, Y., Gupta, A., Tulsiani, S.: What's in your hands? 3D reconstruction of generic objects in hands. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022) +75. Yifan, W., Doersch, C., Arandjelovic, R., Carreira, J., Zisserman, A.: Input-level inductive biases for 3d reconstruction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022) +76. Yu, F., Salzmann, M., Fua, P., Rhodin, H.: PCLs: Geometry-aware neural reconstruction of 3d pose with perspective crop layers. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021) +77. Zhang, X., Li, Q., Mo, H., Zhang, W., Zheng, W.: End-to-end hand mesh recovery from a monocular rgb image. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2019) +78. Zimmermann, C., Brox, T.: Learning to estimate 3d hand pose from single rgb images. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2017) +79. Zimmermann, C., Ceylan, D., Yang, J., Russell, B.C., Argus, M.J., Brox, T.: FreiHAND: A dataset for markerless capture of hand pose and shape from single RGB images. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2019) \ No newline at end of file diff --git a/2024/3D Hand Pose Estimation in Everyday Egocentric Images/images.zip b/2024/3D Hand Pose Estimation in Everyday Egocentric Images/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..4e3a8799a9af91e3264740bced74b65f36a4830d --- /dev/null +++ b/2024/3D Hand Pose Estimation in Everyday Egocentric Images/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28885831d483fa6b98128391c9ebab540ca61bb6e98f77c24783ea1a539cdc30 +size 585046 diff --git a/2024/3D Hand Pose Estimation in Everyday Egocentric Images/layout.json b/2024/3D Hand Pose Estimation in Everyday Egocentric Images/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ed597fbe8e70fb9291cc6d051c8e1fa0296a355c --- /dev/null +++ b/2024/3D Hand Pose Estimation in Everyday Egocentric Images/layout.json @@ -0,0 +1,11285 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 206, + 111, + 408, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 111, + 408, + 148 + ], + "spans": [ + { + "bbox": [ + 206, + 111, + 408, + 148 + ], + "type": "text", + "content": "3D Hand Pose Estimation in Everyday Egocentric Images" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 162, + 168, + 452, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 168, + 452, + 181 + ], + "spans": [ + { + "bbox": [ + 162, + 168, + 452, + 181 + ], + "type": "text", + "content": "Aditya Prakash, Ruisen Tu, Matthew Chang, and Saurabh Gupta" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 197, + 190, + 416, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 190,
+ 416, + 223 + ], + "spans": [ + { + "bbox": [ + 197, + 190, + 416, + 223 + ], + "type": "text", + "content": "University of Illinois Urbana-Champaign {adityap9,ruisent2,mc48,saurabhg}@illinois.edu https://bit.ly/WildHands" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 160, + 247, + 455, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 247, + 455, + 468 + ], + "spans": [ + { + "bbox": [ + 160, + 247, + 455, + 468 + ], + "type": "text", + "content": "Abstract. 3D hand pose estimation in everyday egocentric images is challenging for several reasons: poor visual signal (occlusion from the object of interaction, low resolution & motion blur), large perspective distortion (hands are close to the camera), and lack of 3D annotations outside of controlled settings. While existing methods often use hand crops as input to focus on fine-grained visual information to deal with poor visual signal, the challenges arising from perspective distortion and lack of 3D annotations in the wild have not been systematically studied. We focus on this gap and explore the impact of different practices, i.e. crops as input, incorporating camera information, auxiliary supervision, scaling up datasets. We provide several insights that are applicable to both convolutional and transformer models, leading to better performance. Based on our findings, we also present WildHands, a system for 3D hand pose estimation in everyday egocentric images. Zero-shot evaluation on 4 diverse datasets (H2O, AssemblyHands, Epic-Kitchens, Ego-Exo4D) demonstrates the effectiveness of our approach across 2D and 3D metrics, where we beat past methods by " + }, + { + "bbox": [ + 160, + 247, + 455, + 468 + ], + "type": "inline_equation", + "content": "7.4\\% - 66\\%" + }, + { + "bbox": [ + 160, + 247, + 455, + 468 + ], + "type": "text", + "content": ". In system-level comparisons, WildHands achieves the best 3D hand pose on the ARCTIC egocentric split, outperforms FrankMocap across all metrics and HaMeR on 3 out of 6 metrics while being " + }, + { + "bbox": [ + 160, + 247, + 455, + 468 + ], + "type": "inline_equation", + "content": "10\\times" + }, + { + "bbox": [ + 160, + 247, + 455, + 468 + ], + "type": "text", + "content": " smaller and trained on " + }, + { + "bbox": [ + 160, + 247, + 455, + 468 + ], + "type": "inline_equation", + "content": "5\\times" + }, + { + "bbox": [ + 160, + 247, + 455, + 468 + ], + "type": "text", + "content": " less data."
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 160, + 477, + 450, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 477, + 450, + 488 + ], + "spans": [ + { + "bbox": [ + 160, + 477, + 450, + 488 + ], + "type": "text", + "content": "Keywords: 3D Hand Pose " + }, + { + "bbox": [ + 160, + 477, + 450, + 488 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 477, + 450, + 488 + ], + "type": "text", + "content": " Egocentric Vision " + }, + { + "bbox": [ + 160, + 477, + 450, + 488 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 477, + 450, + 488 + ], + "type": "text", + "content": " 3D from single image" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 133, + 522, + 230, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 522, + 230, + 534 + ], + "spans": [ + { + "bbox": [ + 133, + 522, + 230, + 534 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 545, + 482, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 545, + 482, + 641 + ], + "spans": [ + { + "bbox": [ + 130, + 545, + 482, + 641 + ], + "type": "text", + "content": "Understanding egocentric hands in 3D enables applications in AR/VR, robotics. While several works have studied exocentric hands [52, 59], no existing approach performs well in diverse egocentric settings outside of lab setups. We focus on this gap & study the impact of common practices, i.e. crops as input, camera information, auxiliary supervision, scaling up datasets, for predicting absolute 3D hand pose from a single egocentric image. We identify 2 important factors: a) modeling the 3D to 2D projection during imaging of the hand in egocentric views, b) scaling up training to diverse datasets by leveraging auxiliary supervision." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 642, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 642, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 642, + 482, + 665 + ], + "type": "text", + "content": "Let's unpack each component. 
Existing methods often operate on image crops, assume that the image crop is located at the center of the camera's field of view" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 134, + 114, + 202, + 181 + ], + "blocks": [ + { + "bbox": [ + 134, + 114, + 202, + 181 + ], + "lines": [ + { + "bbox": [ + 134, + 114, + 202, + 181 + ], + "spans": [ + { + "bbox": [ + 134, + 114, + 202, + 181 + ], + "type": "image", + "image_path": "c8a983734fa0ef16e6c36a0732437bebbeeaba8f713b1adf3b594cc895116ebc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 203, + 114, + 241, + 182 + ], + "blocks": [ + { + "bbox": [ + 203, + 114, + 241, + 182 + ], + "lines": [ + { + "bbox": [ + 203, + 114, + 241, + 182 + ], + "spans": [ + { + "bbox": [ + 203, + 114, + 241, + 182 + ], + "type": "image", + "image_path": "555546651e1c4816f4b6ff6e9cf099150f687df5a3e07e212e2248e7b139d81a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 242, + 118, + 299, + 182 + ], + "blocks": [ + { + "bbox": [ + 242, + 118, + 299, + 182 + ], + "lines": [ + { + "bbox": [ + 242, + 118, + 299, + 182 + ], + "spans": [ + { + "bbox": [ + 242, + 118, + 299, + 182 + ], + "type": "image", + "image_path": "61164706a037dad281dc7502b352a79e52a76c99510920b454c54f1fa8b52440.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 134, + 181, + 202, + 248 + ], + "blocks": [ + { + "bbox": [ + 134, + 181, + 202, + 248 + ], + "lines": [ + { + "bbox": [ + 134, + 181, + 202, + 248 + ], + "spans": [ + { + "bbox": [ + 134, + 181, + 202, + 248 + ], + "type": "image", + "image_path": "8be37e6ffabc3340f1e52de99851bb95e6214c52b556679afb04c699685b2d2c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 267, + 482, + 323 + ], + "lines": [ + { + "bbox": [ + 131, + 267, + 482, + 323 + ], + "spans": [ + { + "bbox": [ + 131, + 267, + 482, + 323 + ], + "type": "text", + "content": "Fig. 1: WildHands predicts the 3D shape, 3D articulation and 3D placement of the hand in the camera frame from a single in-the-wild egocentric RGB image and camera intrinsics. It produces better 3D output compared to FrankMocap [59] in occlusion scenarios and is more adept at dealing with perspective distortion than HaMeR [52], in challenging egocentric hand-object interactions from Epic-Kitchens [9] dataset." 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 203, + 181, + 241, + 248 + ], + "blocks": [ + { + "bbox": [ + 203, + 181, + 241, + 248 + ], + "lines": [ + { + "bbox": [ + 203, + 181, + 241, + 248 + ], + "spans": [ + { + "bbox": [ + 203, + 181, + 241, + 248 + ], + "type": "image", + "image_path": "0c315029780fb05164e7405d7ff604662bb8ee8e77baf134fd84057d9d43afda.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 242, + 181, + 299, + 248 + ], + "blocks": [ + { + "bbox": [ + 242, + 181, + 299, + 248 + ], + "lines": [ + { + "bbox": [ + 242, + 181, + 299, + 248 + ], + "spans": [ + { + "bbox": [ + 242, + 181, + 299, + 248 + ], + "type": "image", + "image_path": "5966712a01e6aa30dcacaf998c6a2b8d726d7d52a09a6f846d752d5d7d2b9302.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 313, + 114, + 381, + 182 + ], + "blocks": [ + { + "bbox": [ + 313, + 114, + 381, + 182 + ], + "lines": [ + { + "bbox": [ + 313, + 114, + 381, + 182 + ], + "spans": [ + { + "bbox": [ + 313, + 114, + 381, + 182 + ], + "type": "image", + "image_path": "9e13b709292a6fe0d5cde2e92cbeb29c9dd31bb4817ed30da070773203a666ce.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 382, + 114, + 420, + 182 + ], + "blocks": [ + { + "bbox": [ + 382, + 114, + 420, + 182 + ], + "lines": [ + { + "bbox": [ + 382, + 114, + 420, + 182 + ], + "spans": [ + { + "bbox": [ + 382, + 114, + 420, + 182 + ], + "type": "image", + "image_path": "8561fecb9e3b3ac613d0b6c554d14a7855a980738fd0d67c9bd8c73c744a255e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 313, + 182, + 382, + 248 + ], + "blocks": [ + { + "bbox": [ + 313, + 182, + 382, + 248 + ], + "lines": [ + { + "bbox": [ + 313, + 182, + 382, + 248 + ], + "spans": [ + { + "bbox": [ + 313, + 182, + 382, + 248 + ], + "type": "image", + "image_path": "96a5cdc4b68d0090e5638904945dcb220486759e955b8cb29d0f277835e3758f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 383, + 182, + 421, + 248 + ], + "blocks": [ + { + "bbox": [ + 383, + 182, + 421, + 248 + ], + "lines": [ + { + "bbox": [ + 383, + 182, + 421, + 248 + ], + "spans": [ + { + "bbox": [ + 383, + 182, + 421, + 248 + ], + "type": "image", + "image_path": "7effda99691b5585b7dd0befbd10e688d59b53ef2b5821e1b62c281d6789beb4.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 423, + 117, + 479, + 182 + ], + "blocks": [ + { + "bbox": [ + 423, + 117, + 479, + 182 + ], + "lines": [ + { + "bbox": [ + 423, + 117, + 479, + 182 + ], + "spans": [ + { + "bbox": [ + 423, + 117, + 479, + 182 + ], + "type": "image", + "image_path": "3dfd1dca45072040d1f7982366cd2405e978ed375a8db8424c25670f2c6b8099.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 423, + 182, + 481, + 215 + ], + "blocks": [ + { + "bbox": [ + 423, + 182, + 481, + 215 + ], + "lines": [ + { + "bbox": [ + 423, + 182, + 481, + 215 + ], + "spans": [ + { + "bbox": [ + 423, + 182, + 481, + 215 + ], + "type": "image", + "image_path": 
"ea74be9ec6806b3031ad0a2e623ca017f9369bc239879d729b7561080cc8f420.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 423, + 219, + 480, + 248 + ], + "blocks": [ + { + "bbox": [ + 423, + 219, + 480, + 248 + ], + "lines": [ + { + "bbox": [ + 423, + 219, + 480, + 248 + ], + "spans": [ + { + "bbox": [ + 423, + 219, + 480, + 248 + ], + "type": "image", + "image_path": "a8c77955346ec46f4b78f8bc8da5822802ff64ab449087f889c0268071df89fd.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 357, + 482, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 357, + 482, + 418 + ], + "spans": [ + { + "bbox": [ + 130, + 357, + 482, + 418 + ], + "type": "text", + "content": "with a made-up focal length. These choices are reasonable for exocentric settings where the location of the hand in the image does not provide any signal for the hand articulation; and perspective distortion effects are minimal as the hand is far away & occupies a relatively small part of the camera's field of view. However, these assumptions are sub-optimal for processing egocentric images." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 130, + 422, + 483, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 422, + 483, + 566 + ], + "spans": [ + { + "bbox": [ + 130, + 422, + 483, + 566 + ], + "type": "text", + "content": "Due to the biomechanics of the hand, its location in egocentric images carries information about its pose. Also, as the hand is closer to the camera in egocentric settings, it undergoes a lot more perspective distortion than in exocentric images. 3D hand pose that correctly explains the 2D hand appearance in one part of an egocentric image, may not be accurate for another part of the image. Thus, the location of the hand in the image must be taken into account while making 3D predictions. This suggests feeding the 2D location of the hand in the image to the network. However, the notion of 2D location in the image frame is camera specific. The more fundamental quantity that generalizes across cameras, is the angular location in the camera's field of view. We thus adopt the recent KPE embedding [54] to augment hand crop features with sinusoidal encodings of its location in the camera's field of view & find this to improve performance." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 130, + 570, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 570, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 570, + 482, + 666 + ], + "type": "text", + "content": "However, just processing image crops the right way is not sufficient for generalization. The model also needs to be trained on broad & diverse datasets outside of lab settings. This is not easy as 3D hand pose is difficult to directly annotate in images. We thus turn to joint training on 3D supervision from lab datasets and 2D auxiliary supervision on in-the-wild data in the form of 2D hand masks [6,10] & grasp labels [6]. To absorb supervision from segmentation labels, we differentiably render [42] the predicted 3D hand into images and back-propagate the loss through the rendering. 
For grasp supervision, we note" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "A. Prakash et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": "that hand pose is indicative of the grasp type and use supervision from a grasp classifier that takes the predicted 3D hand pose as input." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 140, + 481, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 140, + 481, + 258 + ], + "spans": [ + { + "bbox": [ + 130, + 140, + 481, + 258 + ], + "type": "text", + "content": "Lack of accurate 3D annotations outside of lab settings makes it challenging to assess the generalization capabilities. To this end, we adopt a zero-shot evaluation strategy. Even though a single lab dataset has limited diversity, a model that performs well on a lab dataset without having seen any images from it likely generalizes well. Furthermore, we collect Epic-HandKps, containing 2D hand joint annotations on 5K images from the VISOR [10] split of in-the-wild Epic-Kitchens [7] to evaluate the 2D projections of the predicted 3D hand pose on everyday images. We also evaluate on the concurrent Ego-Exo4D [18] using the 3D hand poses it provides. We believe that these evaluations together comprehensively test the generalization capabilities of different models." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 260, + 482, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 260, + 482, + 344 + ], + "spans": [ + { + "bbox": [ + 130, + 260, + 482, + 344 + ], + "type": "text", + "content": "Our experiments (Sec. 4) show the utility of (1) using crops (vs. full images), (2) inputting 2D crop location (vs. not), (3) encoding the crop's location in the camera's field of view (vs. in the image frame), and (4) 2D mask & grasp supervision. We apply these insights to both convolutional and transformer models, leading to better performance. We also present WildHands (Fig. 1), which outperforms FrankMocap [59] on egocentric images and is competitive with the concurrent HaMeR [52] while being " + }, + { + "bbox": [ + 130, + 260, + 482, + 344 + ], + "type": "inline_equation", + "content": "10 \\times" + }, + { + "bbox": [ + 130, + 260, + 482, + 344 + ], + "type": "text", + "content": " smaller & trained with " + }, + { + "bbox": [ + 130, + 260, + 482, + 344 + ], + "type": "inline_equation", + "content": "5 \\times" + }, + { + "bbox": [ + 130, + 260, + 482, + 344 + ], + "type": "text", + "content": " less data."
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 363, + 237, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 363, + 237, + 376 + ], + "spans": [ + { + "bbox": [ + 132, + 363, + 237, + 376 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 389, + 482, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 389, + 482, + 593 + ], + "spans": [ + { + "bbox": [ + 130, + 389, + 482, + 593 + ], + "type": "text", + "content": "Hand pose estimation & reconstruction: Several decades of work [15,28,56] have studied different aspects: 2D pose [4,63] vs. 3D pose [40,50,68,72] vs. mesh [1, 25,65], RGB [14,22,25] vs. RGBD [57,62-64,66,68] inputs, egocentric [14,50] vs. allocentric [14,21,22], hands in isolation [48,79] vs. interaction with objects [21, 44,73], feed-forward prediction [14,22,25,60] vs. test-time optimization [3,24]. Driven by the advances in parametric hand models [53,58], recent work has moved past 3D joint estimation towards 3D mesh recovery [14,22,25,52,59,77] in 3 contexts: single hands in isolation [78], hands interacting with objects [14,70] and two hands interacting with one another [22,48]. Jointly reasoning about hands & objects has proved fruitful to improve both hand & object reconstruction [25,36, 74]. While several expressive models focus on 3D hand pose estimation in lab settings [22,31-33,60], only a very few works [52] tackle the problem in everyday egocentric images as in Ego4D [17], Epic-Kitchen [7]. We focus on this setting due to challenges involving perspective distortion, dynamic interactions & heavy occlusions. We explore both convolutional [14,59] and transformer models [51,52] to study the impact of using crops, location of the crop in camera's field of view & auxiliary supervision in zero-shot generalization to diverse egocentric settings." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": "Hand datasets: Since 3D hand annotations from single images is difficult to get, most datasets are collected in controlled settings to get 3D ground truth using MoCap [14,67], multi-camera setups [21,22,40,44,50], or magnetic sensors [16]. They often include single hands in isolation [79], hand-object interactions [14, 21,22,40] & hand-hand interactions [48]. 
Different from these datasets with 3D poses, [6,10,61] provide annotations for segmentation masks [6,10], 2D bounding" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 211, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 211, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Hand Pose Estimation in Everyday Egocentric Images" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 132, + 114, + 482, + 231 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 482, + 231 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 482, + 231 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 482, + 231 + ], + "type": "image", + "image_path": "b793bd7f39854e1a3b493108fe9afabb514f73bfc64615ad4150dfb4c2595a23.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 238, + 483, + 350 + ], + "lines": [ + { + "bbox": [ + 130, + 238, + 483, + 350 + ], + "spans": [ + { + "bbox": [ + 130, + 238, + 483, + 350 + ], + "type": "text", + "content": "Fig. 2: Model Overview. We crop the input images around the hand and process them using a convolutional backbone. The hand features along with the global image features (not shown above for clarity) and intrinsics-aware positional encoding (KPE [54]) for each crop are fed to the decoder to predict the 3D hand. The hand decoders predict MANO parameters " + }, + { + "bbox": [ + 130, + 238, + 483, + 350 + ], + "type": "inline_equation", + "content": "\\beta, \\theta_{\\mathrm{local}}, \\theta_{\\mathrm{global}}" + }, + { + "bbox": [ + 130, + 238, + 483, + 350 + ], + "type": "text", + "content": " and camera translation which are converted to 3D keypoints & 2D keypoints and trained using 3D supervision on lab datasets, e.g. ARCTIC [14], AssemblyHands [50]. We also use auxiliary supervision from in-the-wild Epic-Kitchens [10] dataset via hand segmentation masks and grasp labels. The hand masks are available with the VISOR dataset [10] whereas grasp labels are estimated using off-the-shelf model from [6]." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 374, + 482, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 374, + 482, + 471 + ], + "spans": [ + { + "bbox": [ + 130, + 374, + 482, + 471 + ], + "type": "text", + "content": "boxes [61] and grasp labels [6] on internet videos [61] and egocentric images in the wild [9, 17]. Our work combines 3D supervision from datasets [14, 50] captured in controlled settings with 2D auxiliary supervision, i.e. segmentation masks & grasp labels, from datasets outside the lab [6, 10] to learn models that perform well in challenging everyday images. We collect Epic-HandKps dataset with 2D hand keypoints on 5K images from Epic-Kitchens for evaluation in everyday images outside of lab settings. We also use concurrent Ego-Exo4D [18] that annotates 2D keypoints in paired ego & exo views to get 3D hand annotations." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 472, + 482, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 472, + 482, + 568 + ], + "spans": [ + { + "bbox": [ + 130, + 472, + 482, + 568 + ], + "type": "text", + "content": "Auxiliary supervision: Several works on 3D shape prediction from a single image [34,69] often use auxiliary supervision to deal with lack of 3D annotations. [34] uses keypoint supervision for 3D human mesh recovery, while [69] uses multi-view consistency cues for 3D object reconstruction. Aided by differentiable rendering [37,43], segmentation and depth prediction have been used to provide supervision for 3D reconstruction [3,24,35]. We adopt this use of segmentation as an auxiliary cue for 3D poses. In addition, we use supervision from hand grasp labels based on the insight that hand grasp is indicative of the hand pose." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 570, + 482, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 570, + 482, + 667 + ], + "spans": [ + { + "bbox": [ + 130, + 570, + 482, + 667 + ], + "type": "text", + "content": "Ambiguity: 3D estimation from a single image is ill-posed due to ambiguities arising from scale-depth confusion [23] and cropping [54]. Recent work [54] points out the presence of perspective distortion-induced shape ambiguity in image crops and uses camera intrinsic-based location encodings to mitigate it. We investigate the presence of this ambiguity for hand crops in egocentric images and adopt the proposed embedding to mitigate it. Similar embeddings have been used before in literature, primarily from the point of view of training models on images from different cameras [12, 19], to encode extrinsic information [20, 47, 75]." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "A. Prakash et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 114, + 202, + 127 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 114, + 202, + 127 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 202, + 127 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 140, + 482, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 140, + 482, + 248 + ], + "spans": [ + { + "bbox": [ + 130, + 140, + 482, + 248 + ], + "type": "text", + "content": "We present WildHands, a new system for 3D hand pose estimation from egocentric images in the wild. We build on top of ArcticNet-SF [14] and FrankMocap [59]. 
Given a crop around a hand and associated camera intrinsics, WildHands predicts the 3D hand shape as MANO [58] parameters, shape " + }, + { + "bbox": [ + 130, + 140, + 482, + 248 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 130, + 140, + 482, + 248 + ], + "type": "text", + "content": " and pose " + }, + { + "bbox": [ + 130, + 140, + 482, + 248 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 130, + 140, + 482, + 248 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 130, + 140, + 482, + 248 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 130, + 140, + 482, + 248 + ], + "type": "text", + "content": " consists of angles of articulation " + }, + { + "bbox": [ + 130, + 140, + 482, + 248 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathrm{local}}" + }, + { + "bbox": [ + 130, + 140, + 482, + 248 + ], + "type": "text", + "content": " for 15 hand joints and the global pose " + }, + { + "bbox": [ + 130, + 140, + 482, + 248 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathrm{global}}" + }, + { + "bbox": [ + 130, + 140, + 482, + 248 + ], + "type": "text", + "content": " of the root joint in the camera coordinate system. WildHands is trained using both lab (ARCTIC, AssemblyHands) and in-the-wild (Epic-Kitchens, Ego4D) datasets with different sources of supervision. Fig. 2 provides an overview of our model. Next, we describe each component of WildHands in detail." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 267, + 224, + 278 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 267, + 224, + 278 + ], + "spans": [ + { + "bbox": [ + 132, + 267, + 224, + 278 + ], + "type": "text", + "content": "3.1 Architecture" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "spans": [ + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "text", + "content": "Hand encoder: Our models use hand crops as input (resized to " + }, + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "text", + "content": " resolution), which are processed by a ResNet50 [27] backbone to get " + }, + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "inline_equation", + "content": "7 \\times 7 \\times 2048" + }, + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "text", + "content": " feature maps. The left and right hand crops are processed separately but the parameters are shared. We also use global image features in our model, computed by average pooling the " + }, + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "inline_equation", + "content": "7 \\times 7 \\times 2048" + }, + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "text", + "content": " feature map to get a 2048-dimensional vector. Incorporating KPE: Recent work [54] has shown that estimating 3D quantities from image crops suffers from perspective distortion-induced shape ambiguity [54]. This raises concerns about whether this ambiguity is also present when using hand crops for predicting 3D pose and how to deal with it. Following the study in [54], we analyze the hands in the ARCTIC dataset (details in the supplementary) and find evidence of this ambiguity in hand crops as well. 
Thus, we adopt the intrinsics-aware positional encoding (KPE) proposed in [54] to mitigate this ambiguity. Specifically, we provide the network with information about the location of the hand crop in the field of view of the camera. Consider the principal point as " + }, + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "inline_equation", + "content": "(p_x, p_y)" + }, + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "text", + "content": " & focal length as " + }, + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "inline_equation", + "content": "(f_x, f_y)" + }, + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "text", + "content": ". For each pixel " + }, + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "inline_equation", + "content": "(x, y)" + }, + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "text", + "content": ", we compute " + }, + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "inline_equation", + "content": "\\theta_x = \\tan^{-1}\\left(\\frac{x - p_x}{f_x}\\right)" + }, + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "inline_equation", + "content": "\\theta_y = \\tan^{-1}\\left(\\frac{y - p_y}{f_y}\\right)" + }, + { + "bbox": [ + 130, + 287, + 482, + 498 + ], + "type": "text", + "content": " & convert them into sinusoidal encoding [46]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "spans": [ + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "text", + "content": "We add KPE to the " + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "inline_equation", + "content": "7 \\times 7 \\times 2048" + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "text", + "content": " feature map. KPE comprises sinusoidal encoding of the angles " + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "inline_equation", + "content": "\\theta_{x}" + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "inline_equation", + "content": "\\theta_{y}" + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "text", + "content": " (Sec. 4.1 in the main paper), resulting in " + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "inline_equation", + "content": "5 * 4 * K" + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "text", + "content": " dimensional sparse encoding (4 for corners and 1 for center pixel) and " + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "inline_equation", + "content": "H \\times W \\times 4 * K" + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "text", + "content": " resolution dense encoding, where " + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "text", + "content": " is the number of frequency components (set to 4). 
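A minimal sketch of the dense KPE computed from these angles, assuming PyTorch; the geometric (powers-of-two) frequency schedule is our assumption, as the exact frequencies of [54] are not spelled out in this text.

```python
# Sketch of the dense intrinsics-aware positional encoding (KPE): sinusoidal
# features of each pixel's angular location in the camera's field of view.
# The base-2 frequency schedule is an assumption; see [54] for the original.
import torch

def dense_kpe(H, W, fx, fy, px, py, K=4):
    ys, xs = torch.meshgrid(
        torch.arange(H, dtype=torch.float32),
        torch.arange(W, dtype=torch.float32),
        indexing="ij",
    )
    theta_x = torch.atan((xs - px) / fx)  # angular x-location, shape (H, W)
    theta_y = torch.atan((ys - py) / fy)  # angular y-location, shape (H, W)
    feats = []
    for k in range(K):  # K frequency components -> 4K channels in total
        for theta in (theta_x, theta_y):
            feats.append(torch.sin((2.0 ** k) * theta))
            feats.append(torch.cos((2.0 ** k) * theta))
    # (H, W, 4K); interpolated to the 7x7 feature-map resolution downstream
    return torch.stack(feats, dim=-1)
```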
For the sparse KPE variant, we broadcast it to " + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "inline_equation", + "content": "7 \\times 7" + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "text", + "content": " resolution whereas for the dense KPE variant, we interpolate it to " + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "inline_equation", + "content": "7 \\times 7" + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "text", + "content": " resolution and concatenate it to the feature map. This concatenated feature is passed through 3 convolutional layers (with 1024, 512, 256 channels respectively, each with kernel size of " + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "text", + "content": " and ReLU [49] non-linearity) to get a " + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "inline_equation", + "content": "3 \\times 3 \\times 256" + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "text", + "content": " feature map. This is flattened to a 2304-dimensional vector and passed through a 1-layer MLP to get a 2048-dimensional feature vector. We do not use batchnorm [30] here since we want to preserve the spatial information in KPE." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "content": "Hand decoder: It consists of an iterative architecture, similar to the decoder in HMR [34]. The inputs are the 2048-dimensional feature vector and initial" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 211, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 211, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Hand Pose Estimation in Everyday Egocentric Images" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 236 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 236 + ], + "type": "text", + "content": "
Each of these parameters are predicted using a separate decoder head. The rotation parameters " + }, + { + "bbox": [ + 130, + 116, + 482, + 236 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathrm{local}}" + }, + { + "bbox": [ + 130, + 116, + 482, + 236 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 116, + 482, + 236 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathrm{global}}" + }, + { + "bbox": [ + 130, + 116, + 482, + 236 + ], + "type": "text", + "content": " are predicted in matrix form and converted to axis-angle representation to feed to MANO model. Each decoder is a 3-layer MLP with the 2 intermediate layers having 1024 channels and the output layer having the same number of channels as the predicted parameter. The output of each decoder is added to the initial parameters to get the updated parameters. This process is repeated for 3 iterations. The output of the last iteration is used for the final prediction." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 236, + 482, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 236, + 482, + 295 + ], + "spans": [ + { + "bbox": [ + 130, + 236, + 482, + 295 + ], + "type": "text", + "content": "Differentiable rendering for mask prediction: The outputs from the decoder, " + }, + { + "bbox": [ + 130, + 236, + 482, + 295 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 130, + 236, + 482, + 295 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 236, + 482, + 295 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathrm{local}}" + }, + { + "bbox": [ + 130, + 236, + 482, + 295 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 236, + 482, + 295 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathrm{global}}" + }, + { + "bbox": [ + 130, + 236, + 482, + 295 + ], + "type": "text", + "content": " for the predicted hand, are passed to a differentiable MANO layer [25, 58] to get the hand mesh. This is used to differentiably render a soft segmentation mask, " + }, + { + "bbox": [ + 130, + 236, + 482, + 295 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 130, + 236, + 482, + 295 + ], + "type": "text", + "content": ", using SoftRasterizer [43, 55]. Using a differentiable hand model (MANO) and differentiable rendering lets us train our model end-to-end." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 296, + 482, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 296, + 482, + 357 + ], + "spans": [ + { + "bbox": [ + 130, + 296, + 482, + 357 + ], + "type": "text", + "content": "Grasp classifier: We use the insight that grasp type during interaction with objects is indicative of hand pose. 
We train a grasp prediction head on " + }, + { + "bbox": [ + 130, + 296, + 482, + 357 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathrm{local}}" + }, + { + "bbox": [ + 130, + 296, + 482, + 357 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 296, + 482, + 357 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathrm{global}}" + }, + { + "bbox": [ + 130, + 296, + 482, + 357 + ], + "type": "text", + "content": " & " + }, + { + "bbox": [ + 130, + 296, + 482, + 357 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 130, + 296, + 482, + 357 + ], + "type": "text", + "content": " (predicted by WildHands) via a 4-layer MLP (with 1024, 1024, 512, 128 nodes & ReLU non-linearity after each). The MLP predicts logits for the 8 grasp classes defined in [6] which are converted into probabilities, " + }, + { + "bbox": [ + 130, + 296, + 482, + 357 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 130, + 296, + 482, + 357 + ], + "type": "text", + "content": " via softmax." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 373, + 264, + 385 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 373, + 264, + 385 + ], + "spans": [ + { + "bbox": [ + 132, + 373, + 264, + 385 + ], + "type": "text", + "content": "3.2 Training supervision" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 394, + 481, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 394, + 481, + 430 + ], + "spans": [ + { + "bbox": [ + 130, + 394, + 481, + 430 + ], + "type": "text", + "content": "We train WildHands using: (1) 3D supervision on " + }, + { + "bbox": [ + 130, + 394, + 481, + 430 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 130, + 394, + 481, + 430 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 394, + 481, + 430 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathrm{local}}" + }, + { + "bbox": [ + 130, + 394, + 481, + 430 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 394, + 481, + 430 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathrm{global}}" + }, + { + "bbox": [ + 130, + 394, + 481, + 430 + ], + "type": "text", + "content": ", 3D hand keypoints & 2D projections of 3D keypoints in the image on lab datasets, and (2) hand masks and grasp labels on in-the-wild datasets." 
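A sketch of the grasp classifier head under the stated layer widths; the final 128-to-8 projection producing the logits is our reading of the description, not an explicitly stated layer.

```python
# Sketch of the grasp classifier: a 4-layer MLP (1024, 1024, 512, 128 units,
# ReLU after each) over the predicted MANO parameters, followed by an assumed
# linear projection to logits for the 8 grasp classes of [6].
import torch.nn as nn

def make_grasp_head(in_dim):  # in_dim = size of concatenated (theta, beta)
    return nn.Sequential(
        nn.Linear(in_dim, 1024), nn.ReLU(),
        nn.Linear(1024, 1024), nn.ReLU(),
        nn.Linear(1024, 512), nn.ReLU(),
        nn.Linear(512, 128), nn.ReLU(),
        nn.Linear(128, 8),  # logits; softmax / cross-entropy applied in the loss
    )
```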
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 157, + 439, + 482, + 456 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 439, + 482, + 456 + ], + "spans": [ + { + "bbox": [ + 157, + 439, + 482, + 456 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\theta} = \\left\\| \\theta - \\theta^ {g t} \\right\\| _ {2} ^ {2} \\qquad \\mathcal {L} _ {\\beta} = \\left\\| \\beta - \\beta^ {g t} \\right\\| _ {2} ^ {2} \\qquad \\mathcal {L} _ {c a m} = \\left\\| (s, T) - (s, T) ^ {g t} \\right\\| _ {2} ^ {2} \\quad (1)", + "image_path": "d81d0c5d7e9e61e6838386d2f054a76d5ad5b2f0dc7e6efa9553396abea90a1f.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 157, + 457, + 482, + 475 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 457, + 482, + 475 + ], + "spans": [ + { + "bbox": [ + 157, + 457, + 482, + 475 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {k p 3 d} = \\left\\| J _ {3 D} - J _ {3 D} ^ {g t} \\right\\| _ {2} ^ {2} \\quad \\mathcal {L} _ {k p 2 d} = \\left\\| J _ {2 D} - J _ {2 D} ^ {g t} \\right\\| _ {2} ^ {2} \\tag {2}", + "image_path": "6648c066e7b7a94d0554414524b749af70b918d99bf4e98db4661d1c216d67cd.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 157, + 476, + 482, + 490 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 476, + 482, + 490 + ], + "spans": [ + { + "bbox": [ + 157, + 476, + 482, + 490 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {m a s k}} = \\| M - M ^ {g t} \\| \\quad \\mathcal {L} _ {\\text {g r a s p}} = C E (G, G ^ {g t}) \\tag {3}", + "image_path": "d42f9a3efdda82b6fdd1065965f14578b36287bbf0e856bdfecd4b0c90fb91a0.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "spans": [ + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\theta}" + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "text", + "content": " is used for both " + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "inline_equation", + "content": "\\theta_{local}" + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "text", + "content": " & " + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "inline_equation", + "content": "\\theta_{global}" + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "inline_equation", + "content": "(s,T)" + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "text", + "content": " are the weak perspective camera parameters and " + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "inline_equation", + "content": "CE" + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "text", + "content": " represents cross-entropy loss. 
" + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "inline_equation", + "content": "J_{2D} = K[J_{3D} + (T,f / s)]" + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "inline_equation", + "content": "J_{3D}" + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "text", + "content": " is the 3D hand keypoints in the MANO coordinate frame, " + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "text", + "content": " is the camera intrinsics, " + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "text", + "content": " is the focal length, and " + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "text", + "content": " is the scale factor of the weak perspective camera. Note that " + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "inline_equation", + "content": "(.)^{gt}" + }, + { + "bbox": [ + 130, + 498, + 482, + 559 + ], + "type": "text", + "content": " represents the ground truth quantities. The total loss is:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 181, + 582, + 481, + 609 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 582, + 481, + 609 + ], + "spans": [ + { + "bbox": [ + 181, + 582, + 481, + 609 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} = \\lambda_ {\\theta} \\mathcal {L} _ {\\theta} + \\lambda_ {\\beta} \\mathcal {L} _ {\\beta} + \\lambda_ {c a m} \\mathcal {L} _ {c a m} + \\lambda_ {k p 3 d} \\mathcal {L} _ {k p 3 d} + \\lambda_ {k p 2 d} \\mathcal {L} _ {k p 2 d} \\\\ + \\lambda_ {m a s k} \\mathcal {L} _ {m a s k} + \\lambda_ {g r a s p} \\mathcal {L} _ {g r a s p} \\tag {4} \\\\ \\end{array}", + "image_path": "f9af3228c61d521facdeb07276307b6d904bd06afc7fa722c72c2b32c72bb6b0.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 617, + 482, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 617, + 482, + 667 + ], + "spans": [ + { + "bbox": [ + 130, + 617, + 482, + 667 + ], + "type": "text", + "content": "Lab datasets: For ARCTIC, we use " + }, + { + "bbox": [ + 130, + 617, + 482, + 667 + ], + "type": "inline_equation", + "content": "\\lambda_{\\theta} = 10.0, \\lambda_{\\beta} = 0.001, \\lambda_{kp3d} = 5.0, \\lambda_{kp2d} = 5.0, \\mathcal{L}_{cam} = 1.0" + }, + { + "bbox": [ + 130, + 617, + 482, + 667 + ], + "type": "text", + "content": " & set other loss weights to 0. AssemblyHands does not use MANO representation for hands, instead provides labels for 3D & 2D keypoints of 21 hand joints. So, we use " + }, + { + "bbox": [ + 130, + 617, + 482, + 667 + ], + "type": "inline_equation", + "content": "\\lambda_{kp3d} = 5, \\lambda_{kp2d} = 5" + }, + { + "bbox": [ + 130, + 617, + 482, + 667 + ], + "type": "text", + "content": " & set other loss weights to 0." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "A. Prakash et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 178 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 178 + ], + "type": "text", + "content": "In-the-wild data: For Epic-Kitchens & Ego4D, we use hand masks & grasp labels as auxiliary supervision. While VISOR contains hand masks, grasp labels are not available. Ego4D does not contain either hand masks or grasp labels. To extract these labels, we use predictions from off-the-shelf model [6] as pseudo ground truth. We use " + }, + { + "bbox": [ + 130, + 116, + 482, + 178 + ], + "type": "inline_equation", + "content": "\\lambda_{mask} = 10.0" + }, + { + "bbox": [ + 130, + 116, + 482, + 178 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 116, + 482, + 178 + ], + "type": "inline_equation", + "content": "\\lambda_{grasp} = 0.1" + }, + { + "bbox": [ + 130, + 116, + 482, + 178 + ], + "type": "text", + "content": " & set other loss weights to 0." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 131, + 194, + 280, + 206 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 194, + 280, + 206 + ], + "spans": [ + { + "bbox": [ + 131, + 194, + 280, + 206 + ], + "type": "text", + "content": "3.3 Implementation Details" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 213, + 482, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 213, + 482, + 308 + ], + "spans": [ + { + "bbox": [ + 130, + 213, + 482, + 308 + ], + "type": "text", + "content": "Our model takes hand crops as input. During training, we use the ground truth bounding box for the hand crop (with small perturbation), estimated using the 2D keypoints & scaled by a fixed value of 1.5 to provide additional context around the hand. At test time, we need to predict the bounding box of the hand in the image. On ARCTIC, we train a bounding box predictor on by finetuning MaskRCNN [26]. This is also used for submitting the model to the ARCTIC leaderboard. For Epic-HandKps, we use the recently released hand detector from [5]. All the ablations use ground truth bounding box for the hand crop." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 309, + 483, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 309, + 483, + 406 + ], + "spans": [ + { + "bbox": [ + 130, + 309, + 483, + 406 + ], + "type": "text", + "content": "We use the training sets of ARCTIC (187K images) & AssemblyHands (360K), VISOR split (30K) of EPIC and 45K images from Ego4D kitchen videos to train our model. WildHands is trained jointly on different datasets with the input batch containing images from multiple datasets. 
We use the training sets of ARCTIC (187K images) & AssemblyHands (360K), the VISOR split (30K) of EPIC, and 45K images from Ego4D kitchen videos to train our model. WildHands is trained jointly on the different datasets, with each input batch containing images from multiple datasets. All models are initialized from the ArcticNet-SF model trained on the allocentric split of the ARCTIC dataset [14] and trained for 100 epochs with a learning rate of $1e{-}5$. The multi-dataset training is done on 2 A40 GPUs with a batch size of 144 and the Adam optimizer [39]. More details are provided in the supplementary.
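A sketch of how such mixed batches could be formed with standard PyTorch utilities; the dataset stand-ins and model are placeholders, and only the batch size, learning rate, and optimizer follow the text.

```python
import torch
from torch.utils.data import ConcatDataset, DataLoader, TensorDataset

# Stand-ins for ARCTIC / AssemblyHands / EPIC-VISOR / Ego4D (sizes scaled down).
datasets = [TensorDataset(torch.randn(n, 3, 224, 224)) for n in (187, 360, 30, 45)]

# Shuffling over the concatenation yields batches that mix images from all
# datasets, matching the joint multi-dataset training described above.
loader = DataLoader(ConcatDataset(datasets), batch_size=144, shuffle=True)

model = torch.nn.Conv2d(3, 8, 3)  # placeholder for the actual network
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)  # optimizer & lr from the text
```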
## 4 Experiments

We adopt a zero-shot evaluation strategy: 3D evaluation on lab datasets (H2O, AssemblyHands), evaluation of 2D projections of the 3D hand predictions on Epic-HandKps, & 3D evaluation on Ego-Exo4D [18]. We systematically analyze the effectiveness of design choices (using crops, KPE), different terms in the loss function, and different datasets used for training. We also report a system-level comparison on the ARCTIC leaderboard and with FrankMocap [59] & HaMeR [52].

### 4.1 Protocols

Training datasets: We consider 4 datasets for training: 2 lab datasets (ARCTIC & AssemblyHands) and 2 in-the-wild datasets (Epic-Kitchens & Ego4D).

We select ARCTIC since it contains the largest range of hand pose variation [14] among existing datasets [4, 21, 22, 44, 67]. We use the egocentric split with more than 187K images in the train set. We also use AssemblyHands since it is a large-scale dataset with more than 360K egocentric images in the train split. Different combinations of these datasets are used for different experiments.

We use egocentric images from Epic-Kitchens & Ego4D as in-the-wild data for training our model with auxiliary supervision: the 30K training images available in the VISOR split of Epic-Kitchens and 45K images from Ego4D. To extract hand masks and grasp labels, we use the off-the-shelf model from [6].

Fig. 3: Epic-HandKps annotations. We collect 2D joint annotations (shown in blue) for 5K in-the-wild egocentric images from Epic-Kitchens [8]. We show a few annotations here with images cropped around the hand. We also have the label for the joint corresponding to each keypoint. Note the heavy occlusion & large variation in the dexterous poses of hands interacting with objects. More visualizations in the supplementary.

Evaluation datasets: We consider 4 datasets for zero-shot generalization experiments: H2O [40], AssemblyHands, Epic-HandKps, and Ego-Exo4D. These datasets cover a large variation in inputs: H2O contains RGB images in lab settings, AssemblyHands consists of grayscale images, and Epic-HandKps & Ego-Exo4D images show hands performing everyday activities in the wild.
We use the validation splits of H2O and AssemblyHands, with 29K and 32K images respectively. Since 3D hand annotations are difficult to collect for in-the-wild images, we instead collect 2D hand keypoint annotations on 5K egocentric images from the validation set of the VISOR split of Epic-Kitchens; we refer to this dataset as Epic-HandKps (see sample images in Fig. 3). We also evaluate on the validation split of the Ego-Exo4D hand pose dataset.

Epic-HandKps: Epic-HandKps contains 2D annotations for the 21 hand joints to facilitate evaluation of 2D projections of the predicted 3D keypoints. We sample 5K images from the validation set of the VISOR split of Epic-Kitchens and get the 21 joints annotated via Scale AI, using the same joint convention as ARCTIC [14]. We crop the images around the hand using the segmentation masks in VISOR and provide the crops to annotators for labeling. Most of these images do not have all 21 keypoints visible; following ARCTIC, we only consider images with at least 3 visible joints for evaluation. Moreover, since the models in our experiments require hand crops as input, we only evaluate on those images for which a hand bounding box is predicted by the recently released hand detector model from [6]. This leaves us with 4724 hand annotations: 2697 right hands and 2027 left hands. We show some annotations in Fig. 3.

Metrics: For 3D hand pose evaluation, we consider 2 metrics. (1) Mean Per-Joint Position Error (MPJPE): L2 distance (mm) between the 21 predicted & ground truth joints for each hand after subtracting the root joint (this captures the relative pose). (2) Mean Relative-Root Position Error (MRRPE): the metric distance between the root joints of the left hand and right hand, following [13, 14, 48] (this takes the absolute pose into account). (3) For 2D evaluation on Epic-HandKps, we measure the L2 error (in pixels, for 224x224 image input) between ground truth keypoints & 2D projections of the predicted 3D keypoints.
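The three metrics are straightforward to compute; a minimal numpy sketch (function names are ours):

```python
import numpy as np

def mpjpe(pred, gt):
    """Mean Per-Joint Position Error (mm): root-relative L2 over 21 joints."""
    pred = pred - pred[:1]   # subtract the root joint -> relative pose
    gt = gt - gt[:1]
    return np.linalg.norm(pred - gt, axis=-1).mean()

def mrrpe(pred_root_l, pred_root_r, gt_root_l, gt_root_r):
    """Mean Relative-Root Position Error: error of the left-to-right root offset."""
    return np.linalg.norm((pred_root_r - pred_root_l) - (gt_root_r - gt_root_l))

def l2_2d(proj2d, gt2d):
    """2D error (pixels, 224x224 input) between projected and annotated keypoints."""
    return np.linalg.norm(proj2d - gt2d, axis=-1).mean()
```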
Table 1: Benefits of using crops and KPE. Zero-shot generalization performance improves through the use of crops as input (HandNet uses crops vs. ArcticNet-SF uses the full image) and KPE helps (WildHands uses KPE with crops vs. HandNet only uses crops). All models use the same backbone and are trained on the same data in each setting for fair comparisons. $\mathcal{D}$: {ARCTIC, AssemblyHands, EPIC}.

| | H2O MPJPE | H2O MRRPE | Assembly MPJPE | Assembly MRRPE | Ego-Exo4D MPJPE | Epic-HandKps L2 Error |
|---|---|---|---|---|---|---|
| Training data | $\mathcal{D}$ | $\mathcal{D}$ | $\mathcal{D}$ - Assembly | $\mathcal{D}$ - Assembly | $\mathcal{D}$ | $\mathcal{D}$ - EPIC |
| ArcticNet-SF | 83.84 | 325.55 | 110.76 | 326.94 | 114.24 | 35.02 |
| HandNet | 38.06 | 141.06 | 109.88 | 317.49 | 89.72 | 31.62 |
| WildHands | 31.08 | 49.49 | 84.91 | 164.90 | 55.84 | 11.05 |

Baselines: (1) ArcticNet-SF [14] is the single-image model released with the ARCTIC benchmark. It consists of a convolutional backbone (ResNet50 [27]) to process the input image, followed by an HMR [35]-style decoder to predict the hand and object poses; the predicted hand is represented using the MANO [58] parameterization. (2) FrankMocap [59] is trained on multiple datasets collected in controlled settings and is a popular choice for application in the wild [3, 24, 74]. It uses hand crops as input instead of the entire image, processed by a convolutional backbone; the decoder is similar to HMR [35] and outputs MANO parameters for the hand, and training uses 3D pose & 2D keypoint supervision. (3) HandNet: Since the training code for FrankMocap is not available, we are unable to train it in our setting. So, we implement a version of ArcticNet-SF which uses crops as input along with an HMR-style decoder, and train it in our setting using 3D & 2D supervision. This baseline is equivalent to WildHands without KPE, or ArcticNet-SF with crops. (4) HandOccNet [51] takes crops as input and encodes them using an FPN [41] backbone. The features are passed to transformer [71] modules to get a heatmap-based intermediate representation, which is then decoded to MANO parameters. (5) HaMeR [52] also takes crops as input and processes them using a ViT [11] backbone. The features are then passed to a transformer decoder to predict the MANO parameters. Note that no adversarial loss is used for training any model in our setting.

### 4.2 Results

We systematically study the impact of several factors: use of crops (Tab. 1) & KPE (Tab. 1, Tab. 5), perspective distortion (Tab. 4), auxiliary supervision (Tab. 3), and training datasets (Tab. 6), on both convolutional (Tab. 1) & transformer (Tab. 2) models through controlled experiments, i.e. all factors other than the one under study are kept constant. All results are reported in a zero-shot setting, i.e. models are not trained on the evaluation dataset.

Table 2: Impact on transformer models. We investigate whether our insights are useful for transformer models as well, i.e. whether KPE helps on top of the positional encodings used in transformers & whether auxiliary supervision leads to better generalization for large-capacity models. All models are trained on the same data in each setting for fair comparisons.
| | H2O MPJPE | H2O MRRPE | Assembly MPJPE | Assembly MRRPE | Ego-Exo4D MPJPE | Epic-HandKps L2 Error |
|---|---|---|---|---|---|---|
| Training data | $\mathcal{D}$ | $\mathcal{D}$ | $\mathcal{D}$ - Assembly | $\mathcal{D}$ - Assembly | $\mathcal{D}$ | $\mathcal{D}$ - EPIC |
| HandOccNet [51] | 60.58 | 187.24 | 110.28 | 293.92 | 80.96 | 32.77 |
| HandOccNet + KPE | 47.57 | 72.25 | 103.30 | 232.83 | 78.64 | 13.54 |
| HaMeR [52] (ViT) | 30.57 | 113.26 | 79.48 | 227.59 | 55.36 | 25.48 |
| HaMeR (ViT) + KPE | 24.15 | 62.99 | 71.64 | 184.55 | 47.02 | 9.77 |

Table 3: Role of auxiliary supervision. We consider grasp and mask supervision from both Epic-Kitchens & Ego4D to train WildHands and show results in zero-shot generalization settings. Both grasp & mask supervision lead to improvements in the 3D & 2D metrics, with hand masks providing a larger gain than grasp labels. Even though the auxiliary supervision is on Epic/Ego4D, it leads to improvements in all settings, i.e. the benefits of training on broad data extend beyond the datasets with auxiliary supervision.
| | H2O MPJPE | H2O MRRPE | Assembly MPJPE | Assembly MRRPE | Ego-Exo4D MPJPE | Epic-HandKps L2 Error |
|---|---|---|---|---|---|---|
| WildHands (no aux) | 39.52 | 77.07 | 93.44 | 208.32 | 70.39 | 17.07 |
| + EPIC grasp | 38.34 | 76.04 | 90.23 | 180.85 | 63.30 | - |
| + EPIC mask | 34.29 | 60.23 | 87.94 | 175.31 | 56.41 | - |
| + EPIC grasp + EPIC mask | 31.08 | 49.49 | 84.91 | 164.90 | 55.84 | - |
| + Ego4D grasp | 41.06 | 111.47 | 86.44 | 222.23 | 69.73 | 8.22 |
| + Ego4D mask | 38.17 | 57.93 | 82.55 | 145.78 | 63.43 | 7.87 |
| + Ego4D grasp + Ego4D mask | 35.62 | 62.10 | 79.08 | 148.12 | 60.80 | 7.20 |

Impact of crops: To understand the benefit of using crops as input instead of full images, we compare ArcticNet-SF and HandNet in Tab. 1. The only difference between these two models is that ArcticNet-SF uses the full image as input whereas HandNet uses crops. We see gains of 27.7% in MPJPE, 29.7% in MRRPE, 10.7% in PA-MPJPE, and 9.7% in 2D pose across different settings. This provides evidence for the utility of using crops as inputs [50, 59].

Benefits of KPE: In Tab. 1, HandNet & WildHands differ only in the use of KPE. This leads to improvements of 20.5% in MPJPE, 56.4% in MRRPE & 65.1% in 2D pose. Compared to the impact of crops, the gains are significantly higher in MRRPE (indicating better absolute pose) and on Epic-HandKps (indicating better generalization in the wild).
Role of auxiliary supervision: We extract hand masks & grasp labels from Epic-Kitchens & Ego4D and show their benefits in Tab. 3 in zero-shot evaluation settings. Mask supervision leads to gains of 8.5% in MPJPE, 21.5% in MRRPE and 55.5% in 2D pose. Grasp labels improve MPJPE by 2.5%, MRRPE by 7.3% and 2D pose by 4.3%. While both sources of supervision are effective, hand masks lead to larger gains. Combining mask and grasp supervision leads to further improvements in both 3D & 2D pose across most settings. Moreover, auxiliary supervision on in-the-wild data also aids performance on lab datasets, suggesting that the generalization gains from training on broad data are not dataset specific.

Table 4: Comparison of KPE with relevant approaches. KPE is more effective than other methods for dealing with perspective distortion, e.g. Perspective Correction [45] and Perspective Crop Layers (PCL [76]), or other encodings, e.g. CamConv [12].
| | H2O MPJPE | H2O MRRPE | Assembly MPJPE | Assembly MRRPE | Ego-Exo4D MPJPE | Epic-HandKps L2 Error |
|---|---|---|---|---|---|---|
| HandNet + CamConv | 36.86 | 67.62 | 96.72 | 180.73 | 60.69 | 17.35 |
| HandNet + Perspective Corr. | 39.95 | 159.13 | 59.10 | 637.32 | 67.45 | 28.68 |
| HandNet + PCL [76] | 36.82 | 158.88 | 45.18 | 483.92 | 63.65 | 28.21 |
| KPE (WildHands) | 31.08 | 49.49 | 84.91 | 164.90 | 55.84 | 11.05 |

Table 5: KPE design choices. We study the impact of different design choices for KPE on WildHands: adding KPE to the input instead of the latent features (w/ input), removing intrinsics from KPE (no intrx), and the dense variant of KPE from [54]. WildHands uses the sparse variant of KPE. All variants of KPE provide significant benefits compared to the model without KPE, and the sparse variant performs the best.
| | H2O MPJPE | H2O MRRPE | Assembly MPJPE | Assembly MRRPE | Ego-Exo4D MPJPE | Epic-HandKps L2 Error |
|---|---|---|---|---|---|---|
| no KPE | 38.06 | 141.06 | 109.88 | 317.49 | 89.72 | 31.62 |
| KPE w/ input | 45.51 | 80.96 | 94.45 | 252.34 | 93.56 | 17.30 |
| KPE no intrx | 36.97 | 61.98 | 92.12 | 246.45 | 60.80 | 11.63 |
| KPE dense | 36.86 | 80.54 | 95.34 | 201.33 | 69.11 | 11.24 |
| KPE sparse | 31.08 | 49.49 | 84.91 | 164.90 | 55.84 | 11.05 |

Comparison of KPE with relevant approaches: In Tab. 4, we find KPE to be more effective than other methods for dealing with perspective distortion, e.g. Perspective Correction [45] and Perspective Crop Layers (PCL [76]), or different forms of positional encoding, e.g. CamConv [12].

Impact on transformer models: We investigate whether our insights are useful for transformer models as well, i.e. whether KPE helps on top of the positional encodings already used in transformers and whether auxiliary supervision leads to better generalization for large-capacity models. For this, we implement these components in HandOccNet [51] & HaMeR [52] and train those models in our setting. From the results in Tab. 2, we see consistent gains across all settings.

KPE design choice: We ablate different variants of KPE in Tab. 5: adding KPE to the input instead of the latent features (w/ input), removing intrinsics from KPE (no intrx), and the dense variant of KPE from [54]. The sparse variant performs the best, so we use sparse KPE in WildHands.
Table 6: Effect of scaling up data. Training on more datasets leads to consistent improvements in model performance on held-out datasets.
| Training data | H2O MPJPE | H2O MRRPE | Ego-Exo4D MPJPE | Epic-HandKps L2 Error |
|---|---|---|---|---|
| ARCTIC | 47.30 | 75.17 | 87.71 | 17.07 |
| ARCTIC + Assembly | 39.52 | 77.07 | 70.39 | 11.05 |
| ARCTIC + Assembly + Ego4D (aux) | 35.62 | 62.10 | 60.80 | 7.20 |

Scaling up training data: We ablate variants of WildHands trained with ARCTIC, ARCTIC + AssemblyHands, ARCTIC + Ego4D, and ARCTIC + AssemblyHands + Ego4D in zero-shot settings on H2O, Ego-Exo4D, and Epic-HandKps. We use 3D supervision on ARCTIC & AssemblyHands and auxiliary supervision (hand masks, grasp labels) on Ego4D. Tab. 6 shows consistent improvements in the 3D and 2D metrics from adding both AssemblyHands and Ego4D, suggesting that scaling up further can improve performance.

Intrinsics during training: Intrinsics may not always be available for the in-the-wild data used to derive auxiliary supervision. To study this setting, we consider the in-the-wild Ego4D data, since it contains images from multiple cameras, and do not assume access to intrinsics. In this case, we replace KPE with a sinusoidal positional encoding of image coordinates normalized w.r.t. the image center. The Ego4D results in Tab. 3 follow this setting, and we observe that auxiliary supervision from Ego4D provides benefits even in the absence of camera information.
### 4.3 System-level Evaluation

While all of our earlier experiments are conducted in controlled settings, we also present a system-level comparison to past methods, specifically to the methods submitted to the ARCTIC leaderboard (as of July 13, 2024) and to the publicly released models of FrankMocap [59] and HaMeR [52].

ARCTIC leaderboard: Our method achieves the best 3D hand pose on the egocentric split, compared to recent state-of-the-art convolutional (e.g. ArcticNet-SF, DIGIT-HRNet, HMR-ResNet50) and transformer (e.g. JointTransformer) models (as of July 13, 2024). However, a detailed comparison is not possible since most of these models are not public.

Comparison with FrankMocap [59] and HaMeR [52]: We show results with the publicly released models in Tab. 8. Note that HaMeR uses a ViT-H backbone, which is much larger and more performant than the ResNet50 backbone used in WildHands. WildHands outperforms FrankMocap across all metrics and HaMeR on 3 of 6 metrics while being 10x smaller & trained on 5x less data.

Table 7: Leaderboard results. WildHands leads in 3D hand pose on the egocentric split of the ARCTIC leaderboard (as of July 13, 2024).
| Method | MPJPE | MRRPE |
|---|---|---|
| ArcticNet-SF | 19.18 | 28.31 |
| ArcticOccNet | 19.77 | 29.75 |
| DIGIT-HRNet | 16.74 | 25.49 |
| HMR-ResNet50 | 20.32 | 32.32 |
| JointTransformer | 16.33 | 26.07 |
| WildHands | 15.72 | 23.88 |
], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 135, + 252, + 203, + 319 + ], + "blocks": [ + { + "bbox": [ + 135, + 252, + 203, + 319 + ], + "lines": [ + { + "bbox": [ + 135, + 252, + 203, + 319 + ], + "spans": [ + { + "bbox": [ + 135, + 252, + 203, + 319 + ], + "type": "image", + "image_path": "f276b3dc25b6d5ae71da2b56756af7ac95216e28ff75b635dd4c6a4e01f57ab0.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 331, + 482, + 376 + ], + "lines": [ + { + "bbox": [ + 131, + 331, + 482, + 376 + ], + "spans": [ + { + "bbox": [ + 131, + 331, + 482, + 376 + ], + "type": "text", + "content": "Fig. 4: Visualizations. We show projection of the predicted hand in the image & rendering of the hand mesh from 2 more views. WildHands predicts better hand poses from a single image than FrankMocap [59], HaMeR [14] and ArcticNet [14] in challenging egocentric scenarios involving occlusions and perspective distortion." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 206, + 251, + 242, + 319 + ], + "blocks": [ + { + "bbox": [ + 206, + 251, + 242, + 319 + ], + "lines": [ + { + "bbox": [ + 206, + 251, + 242, + 319 + ], + "spans": [ + { + "bbox": [ + 206, + 251, + 242, + 319 + ], + "type": "image", + "image_path": "be3607737c9943517dda8294ac568933abb600cfb43bf29b255fd0f059955145.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 249, + 253, + 299, + 311 + ], + "blocks": [ + { + "bbox": [ + 249, + 253, + 299, + 311 + ], + "lines": [ + { + "bbox": [ + 249, + 253, + 299, + 311 + ], + "spans": [ + { + "bbox": [ + 249, + 253, + 299, + 311 + ], + "type": "image", + "image_path": "dd2661b40262104f4125262fe3f8449367bd01fb7ecab650e0db42276640e3ef.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 315, + 114, + 381, + 182 + ], + "blocks": [ + { + "bbox": [ + 315, + 114, + 381, + 182 + ], + "lines": [ + { + "bbox": [ + 315, + 114, + 381, + 182 + ], + "spans": [ + { + "bbox": [ + 315, + 114, + 381, + 182 + ], + "type": "image", + "image_path": "99c7713e0bb4e49090213f40ec040645fdc9afb9562a3ce89f2b5cf67c8a4dcd.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 383, + 114, + 421, + 182 + ], + "blocks": [ + { + "bbox": [ + 383, + 114, + 421, + 182 + ], + "lines": [ + { + "bbox": [ + 383, + 114, + 421, + 182 + ], + "spans": [ + { + "bbox": [ + 383, + 114, + 421, + 182 + ], + "type": "image", + "image_path": "89e7f2789a3def971b2b0c89d2b7acdcf1aadd1cdb8a1a5f021420681ceffaf9.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 315, + 183, + 381, + 251 + ], + "blocks": [ + { + "bbox": [ + 315, + 183, + 381, + 251 + ], + "lines": [ + { + "bbox": [ + 315, + 183, + 381, + 251 + ], + "spans": [ + { + "bbox": [ + 315, + 183, + 381, + 251 + ], + "type": "image", + "image_path": "a298318701025175b564af3834f40569ba9a2be4a9e2853afeb851d2ee0dae1c.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 383, + 183, + 421, + 251 + ], + "blocks": [ + { + "bbox": [ + 383, + 183, + 421, + 251 + ], + "lines": [ + { + "bbox": [ + 383, + 183, + 421, + 251 + ], + "spans": [ + { + 
"bbox": [ + 383, + 183, + 421, + 251 + ], + "type": "image", + "image_path": "dbf18ce05743d49f5063ec749bb540fd70897113ed951a1dcfa7e423c7ce38a4.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 314, + 253, + 381, + 319 + ], + "blocks": [ + { + "bbox": [ + 314, + 253, + 381, + 319 + ], + "lines": [ + { + "bbox": [ + 314, + 253, + 381, + 319 + ], + "spans": [ + { + "bbox": [ + 314, + 253, + 381, + 319 + ], + "type": "image", + "image_path": "8fb0cca3c4b69efae39213d8dc027f1b0b84d9e50aec9f23d47cbb494df53aa2.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 383, + 253, + 421, + 319 + ], + "blocks": [ + { + "bbox": [ + 383, + 253, + 421, + 319 + ], + "lines": [ + { + "bbox": [ + 383, + 253, + 421, + 319 + ], + "spans": [ + { + "bbox": [ + 383, + 253, + 421, + 319 + ], + "type": "image", + "image_path": "596c6c2589245dbdc705e84ea48e307981b54a43f6ca77a432e21802a31e1146.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 425, + 117, + 479, + 182 + ], + "blocks": [ + { + "bbox": [ + 425, + 117, + 479, + 182 + ], + "lines": [ + { + "bbox": [ + 425, + 117, + 479, + 182 + ], + "spans": [ + { + "bbox": [ + 425, + 117, + 479, + 182 + ], + "type": "image", + "image_path": "e4620473ef14f57826760a17de33d85973dcb3dc5d9f4376dfc332d18330f772.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 425, + 183, + 477, + 213 + ], + "blocks": [ + { + "bbox": [ + 425, + 183, + 477, + 213 + ], + "lines": [ + { + "bbox": [ + 425, + 183, + 477, + 213 + ], + "spans": [ + { + "bbox": [ + 425, + 183, + 477, + 213 + ], + "type": "image", + "image_path": "c1a8fbbb48f7143b1b7b0e371ec712155de7870d08f8372dccbe099a5a378922.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 426, + 225, + 477, + 247 + ], + "blocks": [ + { + "bbox": [ + 426, + 225, + 477, + 247 + ], + "lines": [ + { + "bbox": [ + 426, + 225, + 477, + 247 + ], + "spans": [ + { + "bbox": [ + 426, + 225, + 477, + 247 + ], + "type": "image", + "image_path": "aa3ed71ed34a1f88529a84444bf12e1b3206f0dee86c1b08b4b9f4f7deffe767.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 426, + 258, + 476, + 281 + ], + "blocks": [ + { + "bbox": [ + 426, + 258, + 476, + 281 + ], + "lines": [ + { + "bbox": [ + 426, + 258, + 476, + 281 + ], + "spans": [ + { + "bbox": [ + 426, + 258, + 476, + 281 + ], + "type": "image", + "image_path": "7bcde915d03d079a91588aa90d482336e9df3b3741ab6e33f6ae6509c34e146b.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 426, + 292, + 476, + 313 + ], + "blocks": [ + { + "bbox": [ + 426, + 292, + 476, + 313 + ], + "lines": [ + { + "bbox": [ + 426, + 292, + 476, + 313 + ], + "spans": [ + { + "bbox": [ + 426, + 292, + 476, + 313 + ], + "type": "image", + "image_path": "3a8157dad25e84e2cca74e1161c40f2553104fab9a80eeefe1254c0f98e29f84.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 132, + 406, + 230, + 417 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 406, + 230, 
### 4.4 Visualizations

We show qualitative comparisons of the hand pose predicted by WildHands with FrankMocap on Epic-HandKps (Fig. 4a) and with ArcticNet-SF on ARCTIC (Fig. 4b). Looking at the projection of the mesh in the camera view and renderings of the mesh from additional views, we observe that WildHands predicts hand pose better in images involving occlusion and interaction: e.g., our model's fingers curl around the object in contact (Fig. 4), which is not the case for FrankMocap. We observe similar trends on ARCTIC (Fig. 4b), where our model predicts better hands in contact scenarios. More results are in the supplementary.

Failure cases: Images in which the fingers are barely visible, e.g. when kneading dough (top row of Fig. 5), or which contain extreme poses, e.g. the grasps in the bottom row of Fig. 5, are quite challenging for all models.

Limitations: KPE requires the camera intrinsics to be known, which may not be available in certain scenarios; however, for many in-the-wild images, the metadata contains camera information. Also, we currently set the weights of the different loss terms as hyperparameters, which may not be ideal since the sources of supervision are quite different, leading to loss values of different scales. It could be useful to use a learned weighting scheme, e.g. uncertainty-based loss weighting [2, 29, 38].
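As a pointer, here is a minimal Kendall-style uncertainty-weighting module of the kind cited above; this is our illustration, not part of WildHands.

```python
import torch

class UncertaintyWeighting(torch.nn.Module):
    """Learned loss weighting: L = sum_i exp(-s_i) * L_i + s_i.

    Each task keeps a learnable log-variance s_i, so noisier supervision
    (e.g. pseudo ground-truth masks/grasps) is automatically down-weighted.
    """
    def __init__(self, names):
        super().__init__()
        self.log_vars = torch.nn.ParameterDict(
            {n: torch.nn.Parameter(torch.zeros(())) for n in names})

    def forward(self, losses):
        return sum(torch.exp(-self.log_vars[n]) * l + self.log_vars[n]
                   for n, l in losses.items())
```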
Table 8: Systems comparison. We evaluate against the publicly released models of FrankMocap [59] (a popular method for 3D hand pose estimation) and HaMeR [52]. FrankMocap uses a ResNet-50 backbone and is trained on 6 lab datasets. HaMeR uses a ViT-H [11] backbone and is trained on 7 lab + 3 in-the-wild + HInt datasets, across nearly 3M frames. The WildHands model uses a ResNet-50 backbone and is trained on 3 datasets. WildHands outperforms FrankMocap across all metrics and HaMeR on 3 of 6 metrics while being 10x smaller & trained on 5x less data. We expect that scaling up the backbone and the datasets used to train WildHands can lead to even stronger performance.
| | H2O MPJPE | H2O MRRPE | Assembly MPJPE | Assembly MRRPE | Ego-Exo4D MPJPE | Epic-HandKps L2 Error |
|---|---|---|---|---|---|---|
| FrankMocap [59] (ResNet-50, 6 lab) | 58.51 | - | 97.59 | - | 175.91 | 13.33 |
| HaMeR [52] (ViT-H, 7 lab + 3 wild + HInt) | 23.82 | 147.87 | 45.49 | 334.52 | 116.46 | 4.56 |
| WildHands (ResNet-50, 2 lab + 1 wild) | 31.08 | 49.49 | 80.40 | 148.12 | 55.84 | 7.20 |

Fig. 5: Failure cases. Images with (top) barely visible fingers, e.g. kneading dough, or (bottom) extreme grasp poses are challenging for all models.
"image_path": "87bd577a8cfc8807ff1d0e35485d2e8df67140aa35bc70b736d8395c8e6d6971.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 131, + 479, + 220, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 479, + 220, + 491 + ], + "spans": [ + { + "bbox": [ + 131, + 479, + 220, + 491 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 130, + 518, + 482, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 518, + 482, + 590 + ], + "spans": [ + { + "bbox": [ + 130, + 518, + 482, + 590 + ], + "type": "text", + "content": "We present WildHands, a system that adapts best practices from the literature: using crops as input, intrinsics-aware positional encoding, auxiliary sources of supervision and multi-dataset training, for robust prediction of 3D hand poses on egocentric images in the wild. Experiments on both lab datasets and in-the-wild settings show the effectiveness of WildHands. As future direction, WildHands could be used to scale up learning robot policies from human interactions." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": "Acknowledgements: We thank Arjun Gupta, Shaowei Liu, Anand Bhattachad & Kashyap Chitta for feedback on the draft, and David Forsyth for useful discussion. This material is based upon work supported by NSF (IIS2007035), NASA (80NSSC21K1030), DARPA (Machine Common Sense program), Amazon Research Award, NVIDIA Academic Hardware Grant, and the NCSA Delta System (supported by NSF OCI 2005572 and the State of Illinois)." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "A. Prakash et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 114, + 197, + 126 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 114, + 197, + 126 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 197, + 126 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 139, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 138, + 139, + 480, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 139, + 480, + 172 + ], + "spans": [ + { + "bbox": [ + 138, + 139, + 480, + 172 + ], + "type": "text", + "content": "1. Ballan, L., Taneja, A., Gall, J., Gool, L.V., Pollefeys, M.: Motion capture of hands in action using discriminative salient points. 
In: Proceedings of the European Conference on Computer Vision (ECCV) (2012)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 173, + 481, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 173, + 481, + 215 + ], + "spans": [ + { + "bbox": [ + 138, + 173, + 481, + 215 + ], + "type": "text", + "content": "2. Brazil, G., Kumar, A., Straub, J., Ravi, N., Johnson, J., Gkioxari, G.: Omni3d: A large benchmark and model for 3d object detection in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 13154-13164 (2023)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 216, + 480, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 216, + 480, + 248 + ], + "spans": [ + { + "bbox": [ + 138, + 216, + 480, + 248 + ], + "type": "text", + "content": "3. Cao, Z., Radosavovic, I., Kanazawa, A., Malik, J.: Reconstructing hand-object interactions in the wild. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 249, + 480, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 249, + 480, + 293 + ], + "spans": [ + { + "bbox": [ + 138, + 249, + 480, + 293 + ], + "type": "text", + "content": "4. Chao, Y., Yang, W., Xiang, Y., Molchanov, P., Handa, A., Tremblay, J., Narang, Y.S., Wyk, K.V., Iqbal, U., Birchfield, S., Kautz, J., Fox, D.: Dexycb: A benchmark for capturing hand grasping of objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 293, + 480, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 293, + 480, + 326 + ], + "spans": [ + { + "bbox": [ + 138, + 293, + 480, + 326 + ], + "type": "text", + "content": "5. Chen, Z., Zhang, H.: Learning implicit fields for generative shape modeling. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 326, + 480, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 326, + 480, + 358 + ], + "spans": [ + { + "bbox": [ + 138, + 326, + 480, + 358 + ], + "type": "text", + "content": "6. Cheng, T., Shan, D., Hassen, A.S., Higgins, R.E.L., Fouhey, D.: Towards a richer 2d understanding of hands at scale. In: Advances in Neural Information Processing Systems (NeurIPS) (2023)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 358, + 480, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 358, + 480, + 402 + ], + "spans": [ + { + "bbox": [ + 138, + 358, + 480, + 402 + ], + "type": "text", + "content": "7. Damen, D., Doughty, H., Farinella, G.M., Fidler, S., Furnari, A., Kazakos, E., Moltisanti, D., Munro, J., Perrett, T., Price, W., Wray, M.: Scaling egocentric vision: The epic-kitchens dataset. Proceedings of the European Conference on Computer Vision (ECCV) (2018)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 403, + 480, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 403, + 480, + 446 + ], + "spans": [ + { + "bbox": [ + 138, + 403, + 480, + 446 + ], + "type": "text", + "content": "8. 
Damen, D., Doughty, H., Farinella, G.M., Fidler, S., Furnari, A., Kazakos, E., Moltisanti, D., Munro, J., Perrett, T., Price, W., Wray, M.: The epic-kitchens dataset: Collection, challenges and baselines. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI) (2020)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 447, + 480, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 447, + 480, + 490 + ], + "spans": [ + { + "bbox": [ + 138, + 447, + 480, + 490 + ], + "type": "text", + "content": "9. Damen, D., Doughty, H., Farinella, G.M., Fidler, S., Furnari, A., Kazakos, E., Moltisanti, D., Munro, J., Perrett, T., Price, W., et al.: Scaling egocentric vision: The epic-kitchens dataset. In: Proceedings of the European Conference on Computer Vision (ECCV) (2018)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 491, + 480, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 491, + 480, + 522 + ], + "spans": [ + { + "bbox": [ + 138, + 491, + 480, + 522 + ], + "type": "text", + "content": "10. Darkhalil, A., Shan, D., Zhu, B., Ma, J., Kar, A., Higgins, R., Fidler, S., Fouhey, D., Damen, D.: Epic-kitchens visor benchmark: Video segmentations and object relations. In: NeurIPS Track on Datasets and Benchmarks (2022)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 523, + 480, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 523, + 480, + 566 + ], + "spans": [ + { + "bbox": [ + 138, + 523, + 480, + 566 + ], + "type": "text", + "content": "11. Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 567, + 480, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 567, + 480, + 609 + ], + "spans": [ + { + "bbox": [ + 138, + 567, + 480, + 609 + ], + "type": "text", + "content": "12. Facil, J.M., Ummenhofer, B., Zhou, H., Montesano, L., Brox, T., Civera, J.: Camconvs: Camera-aware multi-scale convolutions for single-view depth. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 11826-11835 (2019)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 610, + 480, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 610, + 480, + 643 + ], + "spans": [ + { + "bbox": [ + 138, + 610, + 480, + 643 + ], + "type": "text", + "content": "13. Fan, Z., Spurr, A., Kocabas, M., Tang, S., Black, M.J., Hilliges, O.: Learning to disambiguate strongly interacting hands via probabilistic per-pixel part segmentation. In: Proceedings of the International Conference on 3D Vision (3DV) (2021)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 643, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 643, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 643, + 480, + 665 + ], + "type": "text", + "content": "14. Fan, Z., Taheri, O., Tzionas, D., Kocabas, M., Kaufmann, M., Black, M.J., Hilliges, O.: ARCTIC: A dataset for dexterous bimanual hand-object manipulation.
In:" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 211, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 211, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Hand Pose Estimation in Everyday Egocentric Images" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 147, + 116, + 481, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 116, + 481, + 138 + ], + "spans": [ + { + "bbox": [ + 147, + 116, + 481, + 138 + ], + "type": "text", + "content": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 138, + 481, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 138, + 481, + 170 + ], + "spans": [ + { + "bbox": [ + 133, + 138, + 481, + 170 + ], + "type": "text", + "content": "15. Freeman, W.T., Roth, M.: Orientation histograms for hand gesture recognition. In: International workshop on automatic face and gesture recognition. vol. 12, pp. 296-301. Citeseer (1995)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 171, + 481, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 171, + 481, + 203 + ], + "spans": [ + { + "bbox": [ + 133, + 171, + 481, + 203 + ], + "type": "text", + "content": "16. Garcia-Hernando, G., Yuan, S., Baek, S., Kim, T.K.: First-person hand action benchmark with rgb-d videos and 3d hand pose annotations. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 133, + 203, + 481, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 203, + 481, + 246 + ], + "spans": [ + { + "bbox": [ + 133, + 203, + 481, + 246 + ], + "type": "text", + "content": "17. Grauman, K., Westbury, A., Byrne, E., Chavis, Z., Furnari, A., Girdhar, R., Hamburger, J., Jiang, H., Liu, M., Liu, X., et al.: Ego4d: Around the world in 3,000 hours of egocentric video. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 133, + 246, + 481, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 246, + 481, + 289 + ], + "spans": [ + { + "bbox": [ + 133, + 246, + 481, + 289 + ], + "type": "text", + "content": "18. Grauman, K., Westbury, A., Torresani, L., Kitani, K., Malik, J., Afouras, T., Ashutosh, K., Baiyya, V., Bansal, S., Boote, B., et al.: Ego-exo4d: Understanding skilled human activity from first-and third-person perspectives. 
arXiv preprint arXiv:2311.18259 (2023)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 133, + 289, + 481, + 321 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 289, + 481, + 321 + ], + "spans": [ + { + "bbox": [ + 133, + 289, + 481, + 321 + ], + "type": "text", + "content": "19. Guizilini, V., Vasiljevic, I., Chen, D., Ambrus, R., Gaidon, A.: Towards zero-shot scale-aware monocular depth estimation. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2023)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 321, + 481, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 321, + 481, + 354 + ], + "spans": [ + { + "bbox": [ + 132, + 321, + 481, + 354 + ], + "type": "text", + "content": "20. Guizilini, V., Vasiljevic, I., Fang, J., Ambrus, R., Shakhnarovich, G., Walter, M.R., Gaidon, A.: Depth field networks for generalizable multi-view scene representation. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 354, + 481, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 354, + 481, + 386 + ], + "spans": [ + { + "bbox": [ + 132, + 354, + 481, + 386 + ], + "type": "text", + "content": "21. Hampali, S., Rad, M., Oberweger, M., Lepetit, V.: Honnotate: A method for 3d annotation of hand and object poses. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 386, + 481, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 386, + 481, + 429 + ], + "spans": [ + { + "bbox": [ + 132, + 386, + 481, + 429 + ], + "type": "text", + "content": "22. Hampali, S., Sarkar, S.D., Rad, M., Lepetit, V.: Keypoint transformer: Solving joint identification in challenging hands and object interactions for accurate 3d pose estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 429, + 481, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 429, + 481, + 450 + ], + "spans": [ + { + "bbox": [ + 132, + 429, + 481, + 450 + ], + "type": "text", + "content": "23. Hartley, R., Zisserman, A.: Multiple view geometry in computer vision. Cambridge university press (2003)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 450, + 481, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 450, + 481, + 493 + ], + "spans": [ + { + "bbox": [ + 132, + 450, + 481, + 493 + ], + "type": "text", + "content": "24. Hasson, Y., Tekin, B., Bogo, F., Laptev, I., Pollefeys, M., Schmid, C.: Leveraging photometric consistency over time for sparsely supervised hand-object reconstruction. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 493, + 481, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 493, + 481, + 536 + ], + "spans": [ + { + "bbox": [ + 132, + 493, + 481, + 536 + ], + "type": "text", + "content": "25. Hasson, Y., Varol, G., Tzionas, D., Kalevatykh, I., Black, M.J., Laptev, I., Schmid, C.: Learning joint reconstruction of hands and manipulated objects.
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 536, + 481, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 536, + 481, + 557 + ], + "spans": [ + { + "bbox": [ + 132, + 536, + 481, + 557 + ], + "type": "text", + "content": "26. He, K., Gkioxari, G., Dollar, P., Girshick, R.B.: Mask R-CNN. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2017)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 557, + 481, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 557, + 481, + 590 + ], + "spans": [ + { + "bbox": [ + 132, + 557, + 481, + 590 + ], + "type": "text", + "content": "27. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2016)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 590, + 481, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 590, + 481, + 622 + ], + "spans": [ + { + "bbox": [ + 132, + 590, + 481, + 622 + ], + "type": "text", + "content": "28. Heap, T., Hogg, D.: Towards 3d hand tracking using a deformable model. In: Proceedings of the Second International Conference on Automatic Face and Gesture Recognition. pp. 140-145. IEEE (1996)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 622, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 622, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 622, + 481, + 665 + ], + "type": "text", + "content": "29. Hu, A., Murez, Z., Mohan, N., Dudas, S., Hawke, J., Badrinarayanan, V., Cipolla, R., Kendall, A.: FIERY: future instance prediction in bird's-eye view from surround monocular cameras. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "A. Prakash et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 666 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 150 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 150 + ], + "type": "text", + "content": "30. Ioffe, S., Szegedy, C.: Batch normalization: Accelerating deep network training by reducing internal covariate shift. In: Bach, F.R., Blei, D.M. (eds.) 
Proceedings of the International Conference on Machine Learning (ICML) (2015)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 150, + 481, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 150, + 481, + 172 + ], + "spans": [ + { + "bbox": [ + 132, + 150, + 481, + 172 + ], + "type": "text", + "content": "31. Ivashechkin, M., Mendez, O., Bowden, R.: Denoising diffusion for 3d hand pose estimation from images. arXiv 2308.09523 (2023)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 172, + 481, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 172, + 481, + 217 + ], + "spans": [ + { + "bbox": [ + 132, + 172, + 481, + 217 + ], + "type": "text", + "content": "32. Jiang, C., Xiao, Y., Wu, C., Zhang, M., Zheng, J., Cao, Z., Zhou, J.T.: A2j-transformer: Anchor-to-joint transformer network for 3d interacting hand pose estimation from a single RGB image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 218, + 481, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 218, + 481, + 262 + ], + "spans": [ + { + "bbox": [ + 132, + 218, + 481, + 262 + ], + "type": "text", + "content": "33. Jiang, Z., Rahmani, H., Black, S., Williams, B.M.: A probabilistic attention model with occlusion-aware texture regression for 3d hand reconstruction from a single RGB image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 262, + 481, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 262, + 481, + 296 + ], + "spans": [ + { + "bbox": [ + 132, + 262, + 481, + 296 + ], + "type": "text", + "content": "34. Kanazawa, A., Black, M.J., Jacobs, D.W., Malik, J.: End-to-end recovery of human shape and pose. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 296, + 481, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 296, + 481, + 329 + ], + "spans": [ + { + "bbox": [ + 132, + 296, + 481, + 329 + ], + "type": "text", + "content": "35. Kanazawa, A., Tulsiani, S., Efros, A.A., Malik, J.: Learning category-specific mesh reconstruction from image collections. In: Proceedings of the European Conference on Computer Vision (ECCV) (2018)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 329, + 481, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 329, + 481, + 363 + ], + "spans": [ + { + "bbox": [ + 132, + 329, + 481, + 363 + ], + "type": "text", + "content": "36. Karunratanakul, K., Yang, J., Zhang, Y., Black, M.J., Muandet, K., Tang, S.: Grasping field: Learning implicit representations for human grasps. In: Proceedings of the International Conference on 3D Vision (3DV) (2020)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 363, + 481, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 363, + 481, + 385 + ], + "spans": [ + { + "bbox": [ + 132, + 363, + 481, + 385 + ], + "type": "text", + "content": "37. Kato, H., Ushiku, Y., Harada, T.: Neural 3d mesh renderer. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 385, + 481, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 385, + 481, + 419 + ], + "spans": [ + { + "bbox": [ + 132, + 385, + 481, + 419 + ], + "type": "text", + "content": "38. Kendall, A., Gal, Y., Cipolla, R.: Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 419, + 481, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 419, + 481, + 453 + ], + "spans": [ + { + "bbox": [ + 132, + 419, + 481, + 453 + ], + "type": "text", + "content": "39. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. In: Bengio, Y., LeCun, Y. (eds.) Proceedings of the International Conference on Learning Representations (ICLR) (2015)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 453, + 481, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 453, + 481, + 486 + ], + "spans": [ + { + "bbox": [ + 132, + 453, + 481, + 486 + ], + "type": "text", + "content": "40. Kwon, T., Tekin, B., Stühmer, J., Bogo, F., Pollefeys, M.: H2o: Two hands manipulating objects for first person interaction recognition. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 486, + 481, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 486, + 481, + 520 + ], + "spans": [ + { + "bbox": [ + 132, + 486, + 481, + 520 + ], + "type": "text", + "content": "41. Lin, T., Dollár, P., Girshick, R.B., He, K., Hariharan, B., Belongie, S.J.: Feature pyramid networks for object detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2017)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 520, + 481, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 520, + 481, + 553 + ], + "spans": [ + { + "bbox": [ + 132, + 520, + 481, + 553 + ], + "type": "text", + "content": "42. Liu, S., Chen, W., Li, T., Li, H.: Soft rasterizer: A differentiable renderer for image-based 3d reasoning. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2019)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 554, + 481, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 554, + 481, + 587 + ], + "spans": [ + { + "bbox": [ + 132, + 554, + 481, + 587 + ], + "type": "text", + "content": "43. Liu, S., Li, T., Chen, W., Li, H.: A general differentiable mesh renderer for image-based 3d reasoning. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI) (2020)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 587, + 481, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 587, + 481, + 632 + ], + "spans": [ + { + "bbox": [ + 132, + 587, + 481, + 632 + ], + "type": "text", + "content": "44. Liu, Y., Liu, Y., Jiang, C., Lyu, K., Wan, W., Shen, H., Liang, B., Fu, Z., Wang, H., Yi, L.: HOI4D: A 4d egocentric dataset for category-level human-object interaction. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 632, + 481, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 632, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 132, + 632, + 481, + 666 + ], + "type": "text", + "content": "45. Mehta, D., Rhodin, H., Casas, D., Fua, P., Sotnychenko, O., Xu, W., Theobalt, C.: Monocular 3d human pose estimation in the wild using improved CNN supervision. In: Proceedings of the International Conference on 3D Vision (3DV) (2017)" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 211, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 211, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Hand Pose Estimation in Everyday Egocentric Images" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 133, + 116, + 481, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 116, + 481, + 149 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 481, + 149 + ], + "type": "text", + "content": "46. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 150, + 481, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 150, + 481, + 171 + ], + "spans": [ + { + "bbox": [ + 132, + 150, + 481, + 171 + ], + "type": "text", + "content": "47. Miyato, T., Jaeger, B., Welling, M., Geiger, A.: GTA: A geometry-aware attention mechanism for multi-view transformers. arXiv (2023)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 172, + 481, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 172, + 481, + 205 + ], + "spans": [ + { + "bbox": [ + 132, + 172, + 481, + 205 + ], + "type": "text", + "content": "48. Moon, G., Yu, S., Wen, H., Shiratori, T., Lee, K.M.: Interhand2.6m: A dataset and baseline for 3d interacting hand pose estimation from a single RGB image. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 205, + 481, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 205, + 481, + 226 + ], + "spans": [ + { + "bbox": [ + 132, + 205, + 481, + 226 + ], + "type": "text", + "content": "49. Nair, V., Hinton, G.E.: Rectified linear units improve restricted boltzmann machines. 
In: Proceedings of the International Conference on Machine Learning (ICML) (2010)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 227, + 481, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 227, + 481, + 270 + ], + "spans": [ + { + "bbox": [ + 132, + 227, + 481, + 270 + ], + "type": "text", + "content": "50. Ohkawa, T., He, K., Sener, F., Hodan, T., Tran, L., Keskin, C.: Assemblyhands: Towards egocentric activity understanding via 3d hand pose estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 12999-13008 (2023)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 271, + 481, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 271, + 481, + 303 + ], + "spans": [ + { + "bbox": [ + 132, + 271, + 481, + 303 + ], + "type": "text", + "content": "51. Park, J., Oh, Y., Moon, G., Choi, H., Lee, K.M.: Handoccnet: Occlusion-robust 3d hand mesh estimation network. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 304, + 481, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 304, + 481, + 335 + ], + "spans": [ + { + "bbox": [ + 132, + 304, + 481, + 335 + ], + "type": "text", + "content": "52. Pavlakos, G., Shan, D., Radosavovic, I., Kanazawa, A., Fouhey, D., Malik, J.: Reconstructing hands in 3d with transformers. arXiv preprint arXiv:2312.05251 (2023)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 336, + 481, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 336, + 481, + 380 + ], + "spans": [ + { + "bbox": [ + 132, + 336, + 481, + 380 + ], + "type": "text", + "content": "53. Potamias, R.A., Ploumpis, S., Moschoglou, S., Triantafyllou, V., Zafeiriou, S.: Handy: Towards a high fidelity 3d hand shape and appearance model. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 4670-4680 (June 2023)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 380, + 481, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 380, + 481, + 402 + ], + "spans": [ + { + "bbox": [ + 132, + 380, + 481, + 402 + ], + "type": "text", + "content": "54. Prakash, A., Gupta, A., Gupta, S.: Mitigating perspective distortion-induced shape ambiguity in image crops. arXiv 2312.06594 (2023)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 403, + 481, + 424 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 403, + 481, + 424 + ], + "spans": [ + { + "bbox": [ + 132, + 403, + 481, + 424 + ], + "type": "text", + "content": "55. Ravi, N., Reizenstein, J., Novotny, D., Gordon, T., Lo, W.Y., Johnson, J., Gkioxari, G.: Accelerating 3d deep learning with pytorch3d. arXiv:2007.08501 (2020)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 425, + 481, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 425, + 481, + 456 + ], + "spans": [ + { + "bbox": [ + 132, + 425, + 481, + 456 + ], + "type": "text", + "content": "56. Rehg, J.M., Kanade, T.: Visual tracking of high dof articulated structures: an application to human hand tracking. 
In: Proceedings of the European Conference on Computer Vision (ECCV) (1994)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 457, + 481, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 457, + 481, + 490 + ], + "spans": [ + { + "bbox": [ + 132, + 457, + 481, + 490 + ], + "type": "text", + "content": "57. Rogez, G., Khademi, M., Supancic III, J., Montiel, J.M.M., Ramanan, D.: 3d hand pose detection in egocentric rgb-d images. In: Proceedings of the European Conference on Computer Vision (ECCV) (2014)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 491, + 481, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 491, + 481, + 511 + ], + "spans": [ + { + "bbox": [ + 132, + 491, + 481, + 511 + ], + "type": "text", + "content": "58. Romero, J., Tzionas, D., Black, M.J.: Embodied hands: Modeling and capturing hands and bodies together. ACM Transactions on Graphics (ToG) (2017)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 512, + 481, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 512, + 481, + 544 + ], + "spans": [ + { + "bbox": [ + 132, + 512, + 481, + 544 + ], + "type": "text", + "content": "59. Rong, Y., Shiratori, T., Joo, H.: Frankmocap: Fast monocular 3D hand and body motion capture by regression and integration. Proceedings of the IEEE International Conference on Computer Vision Workshops (ICCV Workshops) (2021)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 545, + 481, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 545, + 481, + 588 + ], + "spans": [ + { + "bbox": [ + 132, + 545, + 481, + 588 + ], + "type": "text", + "content": "60. Sener, F., Chatterjee, D., Shelepov, D., He, K., Singhania, D., Wang, R., Yao, A.: Assembly101: A large-scale multi-view video dataset for understanding procedural activities. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 589, + 481, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 589, + 481, + 621 + ], + "spans": [ + { + "bbox": [ + 132, + 589, + 481, + 621 + ], + "type": "text", + "content": "61. Shan, D., Geng, J., Shu, M., Fouhey, D.F.: Understanding human hands in contact at internet scale. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 622, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 622, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 622, + 481, + 665 + ], + "type": "text", + "content": "62. Sharp, T., Keskin, C., Robertson, D., Taylor, J., Shotton, J., Kim, D., Rhemann, C., Leichter, I., Vinnikov, A., Wei, Y., et al.: Accurate, robust, and flexible real-time hand tracking. In: Proceedings of the 33rd annual ACM conference on human factors in computing systems. pp. 
3633-3642 (2015)" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "A. Prakash et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 661 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "type": "text", + "content": "63. Simon, T., Joo, H., Matthews, I.A., Sheikh, Y.: Hand keypoint detection in single images using multiview bootstrapping. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2017)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 149, + 482, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 149, + 482, + 182 + ], + "spans": [ + { + "bbox": [ + 130, + 149, + 482, + 182 + ], + "type": "text", + "content": "64. Sridhar, S., Mueller, F., Zollhöfer, M., Casas, D., Oulasvirta, A., Theobalt, C.: Real-time joint tracking of a hand manipulating an object from rgb-d input. In: Proceedings of the European Conference on Computer Vision (ECCV) (2016)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 182, + 482, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 182, + 482, + 213 + ], + "spans": [ + { + "bbox": [ + 130, + 182, + 482, + 213 + ], + "type": "text", + "content": "65. Sridhar, S., Oulasvirta, A., Theobalt, C.: Interactive markerless articulated hand motion tracking using RGB and depth data. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2013)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 213, + 482, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 213, + 482, + 245 + ], + "spans": [ + { + "bbox": [ + 130, + 213, + 482, + 245 + ], + "type": "text", + "content": "66. Sun, X., Wei, Y., Liang, S., Tang, X., Sun, J.: Cascaded hand pose regression. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2015)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 245, + 482, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 245, + 482, + 277 + ], + "spans": [ + { + "bbox": [ + 130, + 245, + 482, + 277 + ], + "type": "text", + "content": "67. Taheri, O., Ghorbani, N., Black, M.J., Tzionas, D.: GRAB: A dataset of whole-body human grasping of objects. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 277, + 482, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 277, + 482, + 309 + ], + "spans": [ + { + "bbox": [ + 130, + 277, + 482, + 309 + ], + "type": "text", + "content": "68. 
Tompson, J., Stein, M., Lecun, Y., Perlin, K.: Real-time continuous pose recovery of human hands using convolutional networks. ACM Transactions on Graphics (ToG) 33(5), 1-10 (2014)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 309, + 482, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 309, + 482, + 352 + ], + "spans": [ + { + "bbox": [ + 130, + 309, + 482, + 352 + ], + "type": "text", + "content": "69. Tulsiani, S., Zhou, T., Efros, A.A., Malik, J.: Multi-view supervision for single-view reconstruction via differentiable ray consistency. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 2626-2634 (2017)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 352, + 482, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 352, + 482, + 384 + ], + "spans": [ + { + "bbox": [ + 130, + 352, + 482, + 384 + ], + "type": "text", + "content": "70. Tzionas, D., Gall, J.: 3d object reconstruction from hand-object interactions. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2015)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 383, + 482, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 383, + 482, + 415 + ], + "spans": [ + { + "bbox": [ + 130, + 383, + 482, + 415 + ], + "type": "text", + "content": "71. Vaswani, A., Shazeer, N.M., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. Advances in Neural Information Processing Systems (NeurIPS) (2017)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 415, + 482, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 415, + 482, + 437 + ], + "spans": [ + { + "bbox": [ + 130, + 415, + 482, + 437 + ], + "type": "text", + "content": "72. Wan, C., Yao, A., Gool, L.V.: Hand pose estimation from local surface normals. In: Proceedings of the European Conference on Computer Vision (ECCV) (2016)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 437, + 482, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 437, + 482, + 468 + ], + "spans": [ + { + "bbox": [ + 130, + 437, + 482, + 468 + ], + "type": "text", + "content": "73. Yang, L., Li, K., Zhan, X., Wu, F., Xu, A., Liu, L., Lu, C.: Oakink: A large-scale knowledge repository for understanding hand-object interaction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 468, + 482, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 468, + 482, + 500 + ], + "spans": [ + { + "bbox": [ + 130, + 468, + 482, + 500 + ], + "type": "text", + "content": "74. Ye, Y., Gupta, A., Tulsiani, S.: What's in your hands? 3D reconstruction of generic objects in hands. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 130, + 500, + 482, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 500, + 482, + 533 + ], + "spans": [ + { + "bbox": [ + 130, + 500, + 482, + 533 + ], + "type": "text", + "content": "75. Yifan, W., Doersch, C., Arandjelovic, R., Carreira, J., Zisserman, A.: Input-level inductive biases for 3d reconstruction. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 533, + 482, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 563 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 563 + ], + "type": "text", + "content": "76. Yu, F., Salzmann, M., Fua, P., Rhodin, H.: Pcls: Geometry-aware neural reconstruction of 3d pose with perspective crop layers. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 130, + 563, + 482, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 563, + 482, + 585 + ], + "spans": [ + { + "bbox": [ + 130, + 563, + 482, + 585 + ], + "type": "text", + "content": "77. Zhang, X., Li, Q., Mo, H., Zhang, W., Zheng, W.: End-to-end hand mesh recovery from a monocular rgb image. In: ICCV (2019)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 130, + 585, + 482, + 616 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 585, + 482, + 616 + ], + "spans": [ + { + "bbox": [ + 130, + 585, + 482, + 616 + ], + "type": "text", + "content": "78. Zimmermann, C., Brox, T.: Learning to estimate 3d hand pose from single rgb images. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2017)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 130, + 616, + 482, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 616, + 482, + 661 + ], + "spans": [ + { + "bbox": [ + 130, + 616, + 482, + 661 + ], + "type": "text", + "content": "79. Zimmermann, C., Ceylan, D., Yang, J., Russell, B.C., Argus, M.J., Brox, T.: Freihand: A dataset for markerless capture of hand pose and shape from single RGB images. 
In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2019)" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 211, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 211, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Hand Pose Estimation in Everyday Egocentric Images" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/c806d671-8f45-4954-8ba0-1ca55bf4fc0d_content_list.json b/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/c806d671-8f45-4954-8ba0-1ca55bf4fc0d_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..a7089d6c481c649d45a0d816ab4ea3ee1b2eaad6 --- /dev/null +++ b/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/c806d671-8f45-4954-8ba0-1ca55bf4fc0d_content_list.json @@ -0,0 +1,1941 @@ +[ + { + "type": "text", + "text": "3D Hand Sequence Recovery from Real Blurry Images and Event Stream", + "text_level": 1, + "bbox": [ + 282, + 141, + 722, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Joonkyu Park1, Gyeongsik Moon3,4, Weipeng Xu4, Evan Kaseman4, Takaaki Shiratori4, and Kyoung Mu Lee1,2", + "bbox": [ + 243, + 210, + 759, + 244 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Dept. of ECE&ASRI, $^{2}$ IPAI, Seoul National University, Korea $^{3}$ DGIST", + "bbox": [ + 290, + 253, + 712, + 280 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{4}$ Codec Avatars Lab, Meta", + "bbox": [ + 411, + 282, + 591, + 296 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{jkpark0825, kyoungmu}@snu.ac.kr, mks0601@gmail.com,", + "bbox": [ + 300, + 297, + 700, + 310 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{xuweipeng, ekaseman, tshiratori}@meta.com", + "bbox": [ + 338, + 311, + 663, + 324 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://jkpark08.github.io/EBH", + "bbox": [ + 392, + 325, + 609, + 338 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d2b17a2bdf9cc07f82963459c864da272144348600f5f02a417f0368f5d65d9c.jpg", + "image_caption": [ + "Fig. 1: Blurry hand to 3D hand sequences. We address 3D hand sequence recovery with real motion blur, showing proficiency in predicting 3D hands at novel time steps." + ], + "image_footnote": [], + "bbox": [ + 261, + 359, + 405, + 455 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d361cb97edfb193a40e6d119186db73008f45f94798a8dc42ddebd799b458427.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 406, + 359, + 580, + 450 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4666be7000d8cfc471df6840bfb1d0ca6a737ea3e6d355969a7332998d609b1d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 581, + 359, + 738, + 452 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. 
Although hands frequently exhibit motion blur due to their dynamic nature, existing approaches for 3D hand recovery often disregard the impact of motion blur in hand images. Blurry hand images contain hands from multiple time steps, lack precise hand location at a specific time step, and introduce temporal ambiguity, leading to multiple possible hand trajectories. To address this issue and in the absence of datasets with real blur, we introduce the EBH dataset, which provides 1) hand images with real motion blur and 2) event data for authentic representation of fast hand movements. In conjunction with our new dataset, we present EBHNet, a novel network capable of recovering 3D hands from diverse input combinations, including blurry hand images, events, or both. Here, the event stream enhances motion understanding in blurry hands, addressing temporal ambiguity. Recognizing that blurry hand images include not only a single 3D hand at a specific time step but also multiple hands along the motion trajectory, we design EBHNet to generate 3D hand sequences in motion. Moreover, to enable our EBHNet to predict 3D hands at novel, unsupervised time steps using a single shared module, we incorporate a Transformer-based module, the temporal splitter, into EBHNet. Our experiments show the superior performance of EBH and EBHNet, especially in handling blurry hand images, making them valuable in real-world applications.", + "bbox": [ + 259, + 521, + 743, + 815 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: 3D hand sequence recovery $\cdot$ Blurry hands $\cdot$ Event stream", + "bbox": [ + 261, + 825, + 736, + 839 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d78fdff3c51bac024426b61aa935ffd6c74c49c2c2d02ffdfdc2a417f0368f5d65d9c.jpg", + "image_caption": [ + "(a) Blurry image" + ], + "image_footnote": [], + "bbox": [ + 217, + 143, + 312, + 215 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/67c106cc55eb268194d38a21adc381bf55a3dfd164d3f494da935a6955d00aa2.jpg", + "image_caption": [ + "(b) Mesh" + ], + "image_footnote": [], + "bbox": [ + 313, + 143, + 406, + 215 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/2d8833f3e8127159f9a356b79309c4c54994b145aa8d41ee3ec625a676012f4c.jpg", + "image_caption": [ + "(c) Event", + "Fig. 2: Examples of the proposed EBH dataset. Our EBH consists of real motion-blurred hand images with the corresponding meshes derived from sharp images captured from three different viewpoints during motion. Also, we offer an event stream."
+ ], + "image_footnote": [], + "bbox": [ + 408, + 143, + 500, + 215 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/d30f090a6fb9baa4ff7fc257c8296e4db75dbd1bd576c2eb236129cb70ec9aca.jpg", + "image_caption": [ + "(d) Blurry image" + ], + "image_footnote": [], + "bbox": [ + 501, + 143, + 596, + 215 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e428866d5c4bac2a1e442c8a764afdb0e27a3c115bb3f2d9716ebfd.jpg", + "image_caption": [ + "(e) Mesh" + ], + "image_footnote": [], + "bbox": [ + 598, + 143, + 691, + 215 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/5e228bdb5e7338373c4dee15fbdf3ffe8598f8d0806375eb9e798e4b0efcd02a.jpg", + "image_caption": [ + "(f) Event" + ], + "image_footnote": [], + "bbox": [ + 694, + 143, + 785, + 215 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 311, + 374, + 328 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The dynamic nature of hands leads to prevalent blurriness during rapid movements (e.g., sports, typing, and dancing), posing a challenge for accurately capturing hand information. Moreover, isolating a single hand during motion is impractical because blurry hands convey information about multiple hand poses during the motion. To address this, there is a need for a robust framework that can effectively recover 3D hand sequences even in the presence of motion blur. However, existing methods [3, 5, 10, 12, 13, 22] have exhibited subpar performance when faced with motion blur present in hand images.", + "bbox": [ + 212, + 351, + 784, + 472 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The main issue arises from the lack of datasets containing both blurry hand images and accurate 3D ground truth. Although Oh et al. [27] introduced the BlurHand dataset, it has limitations. First, their motion blur is artificially generated by averaging sequential frames [25, 36], which does not accurately reflect real-world blur. Second, BlurHand [27] provides only images, which can lead to challenges in accurately capturing the trajectory due to temporal ambiguity. The images display the hand's position over time, lacking exact information about its location at particular time steps and about its overall motion. As a result, a single blurry image can exhibit multiple potential motion trajectories, leading to temporal ambiguity. While BlurHand includes corresponding sharp image pairs to assist in determining the motion trajectory, relying solely on these sharp images may not provide an accurate trajectory beyond the frame rates of the sharp images.", + "bbox": [ + 212, + 474, + 785, + 655 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address these issues, we propose the EBH dataset, which captures real motion blur in hand images using cameras with different exposure times, with examples shown in Fig. 2. Specifically, we use an RGB camera with extended exposure times to generate blurry hand images with real motion blur. At the same time, multiple RGB cameras with shorter exposure times are employed to capture sharp images. The 3D annotations (i.e., meshes and keypoints) are derived from pairs of these multi-view sharp images. Also, we incorporate event data to compensate for the temporal ambiguity in the blurry hand images. 
This event data captures instantaneous changes in brightness, helping find the hand's position and track its movement in the blurry image.", + "bbox": [ + 212, + 657, + 787, + 808 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Building upon the EBH dataset, we present EBHNet, a flexible network designed to predict a 3D hand sequence from diverse input combinations: 1)", + "bbox": [ + 214, + 809, + 785, + 840 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "JK. Park et al.", + "bbox": [ + 271, + 114, + 372, + 127 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "only a single blurry image, 2) only event data, and 3) both a blurry image and event data. In each case, we adapt feature extraction based on the input types.", + "bbox": [ + 212, + 146, + 782, + 176 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "With the features extracted according to the input types, our goal is to predict 3D hand sequences, which consist of 3D hands at different time steps. Here, using a dedicated 3D hand recovery module for each time step, as in BlurHandNet [27], can inflate model parameters, especially with more 3D hands at different time steps. To address this, we propose a Transformer-based module, the temporal splitter, in our EBHNet. This enables the generation of multiple 3D hands at novel time steps using a single shared module, with the temporal splitter estimating each hand based on its temporal embedding. Additionally, our temporal embeddings enable EBHNet to generate 3D hands at novel time steps without requiring supervision for those steps.", + "bbox": [ + 212, + 176, + 785, + 327 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Using our newly introduced dataset, EBH, and the baseline network, EBHNet, we tackle the task of recovering 3D hands from blurry hand images. Our experiments show the efficacy of our dataset when applied to real-world blurry hand scenarios and the robustness of EBHNet in handling such cases. We summarize our contributions as follows:", + "bbox": [ + 212, + 328, + 785, + 402 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We introduce the EBH dataset for 3D hand recovery from motion-blurred images, providing real-world motion blur and event information.", + "- We introduce EBHNet, a novel network for 3D hand sequence recovery from blurry hands, capable of handling diverse input combinations.", + "- The temporal splitter in EBHNet allows our network to produce 3D hands at novel time steps using a single shared module." + ], + "bbox": [ + 225, + 417, + 782, + 508 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related works", + "text_level": 1, + "bbox": [ + 215, + 534, + 390, + 549 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3D hand recovery. Since the introduction of RGB-based hand benchmark datasets [5, 24, 32, 43], several methods [1, 3, 22, 42] for hand estimation have emerged. Although several approaches [6, 17, 32] have focused on addressing hand occlusion, especially in cases where objects are being held, others have taken on the task of handling interacting hands [24, 29], which are frequently encountered in real-world scenarios. 
However, there is a notable research gap on the challenge of fast-moving hands despite its prevalence in real-world situations.", + "bbox": [ + 212, + 566, + 785, + 672 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recently, there have been several attempts to make a 3D hand recovery system robust to motion blur. FrankMocap [34] synthetically generated motion blur for data augmentation. BlurHand [27] presented synthetic blurry hand data in conjunction with their baseline network, BlurHandNet, with the goal of recovering three temporal 3D hands. However, both rely on artificially generated motion blur, which has a domain gap from real-world blur [41]. In particular, the motion detail of the BlurHand dataset is limited by the frame rate of their RGB cameras. To address these shortcomings, we introduce the EBH dataset, which features real blur-hand images with an event stream.", + "bbox": [ + 212, + 674, + 785, + 808 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Event stream. An event stream comprises a sequence of data points that chronicle changes within a scene over time. Specialized sensors capture these changes", + "bbox": [ + 212, + 809, + 785, + 839 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "EBH Dataset and Network", + "bbox": [ + 550, + 114, + 730, + 126 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/585d10ab9586d5b6b17a9ac14d1627949587580061fee12aa23fe5359d6e3a6c.jpg", + "image_caption": [ + "Fig. 3: Overview of our EBH dataset. We capture blurry hand images with one camera using a longer exposure time and obtain sharp hand images with six additional cameras, three triggered simultaneously for the blurry image and three with delayed triggers for different time steps. Additionally, we enrich our dataset with event stream. The right figure depicts the cameras, color-circled for their respective groups." + ], + "image_footnote": [], + "bbox": [ + 230, + 143, + 488, + 268 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/d825495986d77b5dc941fb475fa7a91a9fe9e36bb01c1d259aa00cf777b7be1a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 511, + 161, + 764, + 268 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "per-pixel basis, including attributes such as location, timestamp, and polarity, which indicate brightness changes. These data prove valuable in mitigating image blurriness by facilitating precise motion estimation, making them applicable in various tasks, including object tracking [18,40], and visual odometry for accurate camera trajectory [28]. More recently, inspired by the advantages of using event data, several works [26,35] have used event information for 3D hand recovery. Specifically, EventHand [35] introduced synthetic event data and trained their model on these events. Furthermore, [26] introduced an event stream simulator designed to generate event information for hands, simulating data from real event cameras. Based on this, we incorporate event information into our EBH dataset, offering a comprehensive view of overall motion and providing more data than limited 3D annotations at specific time intervals.", + "bbox": [ + 212, + 390, + 787, + 571 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 EBH dataset", + "text_level": 1, + "bbox": [ + 215, + 595, + 377, + 611 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Fig. 
3 shows the process of constructing our EBH dataset. Instead of artificially generating blur [27], we employ RGB cameras (Microsoft Kinects) with different exposure times for real motion blur. First, we configure a single camera (blue in Fig. 3) with an 80 millisecond (ms) exposure time and a 5 fps frame rate to capture the blurry hand image. Next, we employ two groups of three cameras (six cameras) with a 2.5 ms exposure time and a 30 fps frame rate. These camera groups are triggered differently: one group (red in Fig. 3) shares the same trigger as the camera used for the blurry image, while the other (orange in Fig. 3) has a trigger signal shifted by 16 ms. As a result, we obtain a total of 18 sharp images (6-time steps $\\times$ 3 cameras), as indicated by the red and orange boxed hand images in Fig. 3. Motivated by recent neural network-based annotators [8,11,21, 23], we train a MANO parameter [33] estimation network on our dataset and test it on the training set, where the output becomes the 3D ground truth. Given the additional depth information from our RGB cameras (Microsoft Kinects),", + "bbox": [ + 212, + 628, + 787, + 840 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "JK. Park et al.", + "bbox": [ + 271, + 114, + 372, + 126 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "our model processes RGB images from three viewpoints and the corresponding depth maps to estimate MANO parameters. The network is self-supervised by minimizing loss functions between 1) 2D joints projected from the estimated hand mesh and those obtained by an off-the-shelf 2D joint detection model [39], 2) differentiable rendered silhouettes and masks from a matting model [15], and 3) rendered depth and their corresponding ground truth depth map.", + "bbox": [ + 212, + 146, + 782, + 236 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Furthermore, since obtaining 3D annotations from sharp images is done discretely due to the limited shutter speed of the RGB cameras, we introduce event stream (green in Fig. 3) to supplement the limited information obtained from the discrete 3D annotations. To this end, we employ an event camera (Prophesee EVK4) with an exceptionally high frame rate of more than $10\\mathrm{K}$ fps, which is calibrated to the RGB camera that captures the blurry hand image. We calibrate the event camera and RGB camera using a blinking checkerboard, with additional details that can be found in our supplementary material. Also, the event camera is synchronized with the RGB camera to capture the blurry hand image using the same external trigger. The event stream from the event camera provides continuous pixel-wise information, but it can be data-intensive. To address this, we accumulate event data at specific intervals, reducing the overall volume of the data. Specifically, to prevent information loss, we employ a sliding window approach to accumulate light intensity over predefined time intervals (e.g., 1ms). Then, we utilize color encoding to differentiate event occurrences at various time steps, assigning different colors to events happening at different times. 
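To make the accumulation scheme concrete, here is a minimal sketch (our illustration, not the authors' released code) of binning an event stream into fixed 1 ms windows; the (x, y, t, polarity) array layout, the microsecond timestamps, and the frame size are assumptions for the example.

```python
import numpy as np

def accumulate_events(events, t_start, t_end, bin_ms=1.0, hw=(256, 256)):
    """Accumulate an event stream into per-interval frames.

    events: (N, 4) array of (x, y, t_us, polarity in {-1, +1}) with
    timestamps in microseconds. Returns (num_bins, H, W) signed counts.
    """
    h, w = hw
    bin_us = bin_ms * 1000.0
    num_bins = int(np.ceil((t_end - t_start) / bin_us))
    frames = np.zeros((num_bins, h, w), dtype=np.float32)
    x = events[:, 0].astype(int)
    y = events[:, 1].astype(int)
    b = ((events[:, 2] - t_start) / bin_us).astype(int)
    p = events[:, 3]
    ok = (b >= 0) & (b < num_bins) & (x >= 0) & (x < w) & (y >= 0) & (y < h)
    # Signed accumulation: positive/negative polarity adds/subtracts intensity.
    np.add.at(frames, (b[ok], y[ok], x[ok]), p[ok])
    return frames

# Toy usage: an 80 ms exposure yields up to 80 one-millisecond frames.
rng = np.random.default_rng(0)
ev = np.stack([rng.integers(0, 256, 1000),    # x
               rng.integers(0, 256, 1000),    # y
               rng.uniform(0, 80_000, 1000),  # t in microseconds
               rng.choice([-1.0, 1.0], 1000)  # polarity
               ], axis=1)
print(accumulate_events(ev, 0.0, 80_000.0).shape)  # (80, 256, 256)
```

Each accumulated frame can then be normalized and mapped to a color that identifies its time bin, which is the color-encoding step described next.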
This color-encoding process entails converting accumulated light intensity into a grayscale image, which is then mapped to an RGB representation using a light blue palette provided by the event camera's SDK.", + "bbox": [ + 212, + 238, + 787, + 525 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Finally, our EBH dataset includes hand images from 10 individuals displaying various commonly occurring gestures, totaling 40,213 annotated blurry hand images with corresponding event streams. The dataset is divided into training (32,482) and test sets (7,731). Please note that each blurry hand image in our dataset has six 3D annotations from the different time steps, represented as three in red and three in orange in Fig. 3. Consequently, our EBH dataset comprises a total of 241,278 $(40,213 \\times 6)$ 3D annotations. Furthermore, unlike the synthetic blurry hand dataset BlurHand [27], which mainly comprises hands with minimal or no blur, our EBH dataset includes more dynamic blur, as shown in Fig. 4. We measure the length of hand trajectory by summing the distances between 3D keypoints for each blurry hand image using ground-truth data, providing an indication of the hand blur level. For the details (e.g., camera ID, pose examples, and camera calibration), please refer to the supplementary material.", + "bbox": [ + 212, + 527, + 787, + 724 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 EBHNet", + "text_level": 1, + "bbox": [ + 215, + 753, + 336, + 768 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Fig. 5 shows an overview of our EBHNet framework. Our goal is to recover sequences of 3D hands from a given input, changing the output number to match the time steps. To accommodate the diverse cases encountered in practical ap", + "bbox": [ + 212, + 794, + 785, + 840 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "EBH Dataset and Network", + "bbox": [ + 550, + 114, + 730, + 127 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/b67bcc046a94ee0c69f1196b044181428fa7034085c2a7f68827582a7fa30a3f.jpg", + "image_caption": [ + "(a) training set", + "Fig.4: Statistics on blur strength of EBH dataset compared to Blur-Hand (BH). $x$ -axis denotes the length of hand trajectory, showing 3D joint movement in a single blurry hand image. $y$ -axis shows the image proportion in the dataset." + ], + "image_footnote": [], + "bbox": [ + 264, + 154, + 491, + 321 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/495468db9416b1e8512ca14af6512de2f8102c6dba65deb0c562d03b97588946.jpg", + "image_caption": [ + "(b) testing set" + ], + "image_footnote": [], + "bbox": [ + 509, + 154, + 730, + 321 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "plications, we introduce various variants of EBHNet, each designed to utilize either blurry hand image alone, event data alone, or a combination of both.", + "bbox": [ + 212, + 436, + 785, + 468 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Feature Extraction from Various Inputs", + "text_level": 1, + "bbox": [ + 214, + 508, + 589, + 523 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To facilitate understanding, let us consider a scenario where both a blurry image $\\mathbf{I} \\in \\mathbb{R}^{H \\times W \\times 3}$ and an event $\\mathbf{E}$ are available. 
Here, $H = 256$ and $W = 256$ correspond to the height and width of the input image, respectively. Initially, we extract two types of features: the hand feature $\\mathbf{F}_{\\mathrm{I}}$ from the blurry hand image $\\mathbf{I}$ and the event feature $\\mathbf{F}_{\\mathrm{E}}$ from the event data $\\mathbf{E}$. We achieve this by utilizing a Feature Pyramid Network (FPN) [16] pre-trained on ImageNet [4] for the image feature and shallow convolutional networks for the event feature. Here, unlike a single image that can be used to recover multiple 3D hands at different time steps, each event feature corresponds to a single 3D hand. For example, with an exposure time of $80~\\mathrm{ms}$ for blurry images and accumulating events within 1 ms intervals in our EBH dataset, a single blurry image can be aligned with up to 80 event frames. Therefore, we design our event feature extraction network to be shallow for efficiency. Also, note that both of these features, $\\mathbf{F}_{\\mathrm{I}}$ and $\\mathbf{F}_{\\mathrm{E}}$, share the same dimensions, which are $\\mathbb{R}^{h \\times w \\times c}$, where $h = \\frac{H}{8}$, $w = \\frac{W}{8}$, and $c = 256$ represent the height, width, and number of channels of the extracted features. Subsequently, we concatenate these two features and pass the combined feature through five residual blocks to obtain a fused feature $\\mathbf{F}$. However, when only one of the image and the event is available, we do not perform the concatenation step. Instead, we feed either $\\mathbf{F}_{\\mathrm{I}}$ or $\\mathbf{F}_{\\mathrm{E}}$ into the residual blocks to obtain the feature $\\mathbf{F}$.", + "bbox": [ + 212, + 553, + 787, + 840 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "JK. Park et al.", + "bbox": [ + 271, + 114, + 372, + 126 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/58faa59bf870ca72203aef8bd4a1628711022aad6446d9a948c2e255bf153be5.jpg", + "image_caption": [ + "Fig. 5: The overall architecture of EBHNet. Our EBHNet first extracts feature $\\mathbf{F}$ in three scenarios: image only, event only, and both. During training and evaluation, we employ a temporal splitter with different temporal embedding strategies. Specifically, during the training phase, we utilize temporal embedding values when corresponding ground truths are available. Conversely, during the testing phase, we adopt novel temporal embedding values to generate 3D hand meshes at novel time steps." + ], + "image_footnote": [], + "bbox": [ + 222, + 145, + 781, + 256 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 Temporal Splitter", + "text_level": 1, + "bbox": [ + 215, + 366, + 408, + 382 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Unlike event data, which can determine a single 3D hand for each time step, a blurry image contains a hand's trajectory, making 3D hand recovery from it much more challenging. Hence, we introduce our temporal splitter, particularly useful for blurry image input. Our temporal splitter splits the 3D hand trajectory in the blurry image into a single 3D hand of a given query time step $\\mathbf{t}$. When an event is included in the input, our temporal splitter is trained to produce a single 3D hand that corresponds to the time step of the input event data.", + "bbox": [ + 212, + 397, + 787, + 503 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Network architecture.
The architectural design of our temporal splitter is depicted in Fig. 5. Starting with the feature $\\mathbf{F}$ that contains information related to the hand across the entire temporal axis, we introduce our temporal splitter to refine this feature and obtain the feature of the hand at a specific time step $\\mathbf{t}$ , denoted as $\\mathbf{F}_{\\mathbf{t}}$ . To acquire the hand feature at a particular time step, we incorporate a temporal embedding $\\mathbf{t}$ . Here, $\\mathbf{t}$ ranges from 0 to 1 and represents a normalized value within the exposure time, where $\\mathbf{t} = 0$ indicates the motion's initiation, $\\mathbf{t} = 0.5$ corresponds to the middle, and $\\mathbf{t} = 1$ marks its conclusion. An important difference from the frequently used frequency encoding method [19] is our straightforward decision to append the temporal embedding $\\mathbf{t}$ to the channel dimension of the feature $\\mathbf{F}$ , and then consolidate these features by applying a $1 \\times 1$ convolution. The justification for our temporal embedding can be found in our experimental section.", + "bbox": [ + 212, + 505, + 787, + 700 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Then, the final output of the temporal splitter $\\mathbf{F}_{\\mathrm{t}}$ is generated using a conventional self-attention Transformer [38]. To do this, we first derive the query $\\mathbf{q}_{\\mathrm{t}}$ and the key-value pairs $\\mathbf{k}_{\\mathrm{t}} - \\mathbf{v}_{\\mathrm{t}}$ from the feature maps after applying both positional and temporal embedding. This extraction is accomplished through the use of three separate $1\\times 1$ convolutions. Subsequently, these query and key-value pairs are input into self-attention-based Transformer blocks:", + "bbox": [ + 212, + 702, + 787, + 792 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {R} _ {\\mathrm {t}} = \\mathbf {q} _ {\\mathrm {t}} + \\operatorname {s o f t m a x} \\left(\\frac {\\mathbf {q} _ {\\mathrm {t}} \\mathbf {k} _ {\\mathrm {t}} ^ {T}}{\\sqrt {d _ {\\mathbf {k} _ {\\mathrm {t}}}}} \\right) \\mathbf {v} _ {\\mathrm {t}}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 805, + 785, + 844 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "EBH Dataset and Network", + "bbox": [ + 550, + 114, + 730, + 126 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 114, + 784, + 126 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {F} _ {\\mathrm {t}} = \\mathbf {R} _ {\\mathrm {t}} + \\operatorname {M L P} \\left(\\mathbf {R} _ {\\mathrm {t}}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 419, + 161, + 785, + 178 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $d_{\\mathbf{k}_{\\mathrm{t}}} = 256$ is the feature dimension of the key $\\mathbf{k}_{\\mathrm{t}}$ , and $\\mathbf{R}_{\\mathrm{t}}$ is the residual feature of Transformer. In addition, MLP refers to a multi-layer perceptron, which is responsible for increasing the dimension of the input feature by a factor of 2 and subsequently reducing it back to its original dimension using two separate $1 \\times 1$ convolution layers.", + "bbox": [ + 212, + 184, + 784, + 258 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Novel time step prediction. To enable our temporal splitter to predict 3D hands at novel, unsupervised time steps, we adjust the temporal embedding $\\mathbf{t}$ between the training and testing phases. 
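As a concrete reading of the temporal splitter, the following PyTorch sketch implements the concatenated temporal embedding and the self-attention block of Eqs. (1)-(2), with the stated $c = d_{k_t} = 256$ and the 2x MLP expansion; the module names and remaining layer choices are our assumptions rather than the released implementation, and the positional embedding is omitted for brevity.

```python
import torch
import torch.nn as nn

class TemporalSplitterBlock(nn.Module):
    """Append a scalar time t to the channel dim of F, fuse with a 1x1
    conv, then apply one self-attention block as in Eqs. (1)-(2)."""

    def __init__(self, c=256):
        super().__init__()
        self.fuse = nn.Conv2d(c + 1, c, 1)            # consolidate F and t
        self.to_q = nn.Conv2d(c, c, 1)
        self.to_k = nn.Conv2d(c, c, 1)
        self.to_v = nn.Conv2d(c, c, 1)
        # MLP: expand channels by 2x, then project back (two 1x1 convs).
        self.mlp = nn.Sequential(
            nn.Conv2d(c, 2 * c, 1), nn.ReLU(), nn.Conv2d(2 * c, c, 1))
        self.scale = c ** -0.5                        # 1 / sqrt(d_kt)

    def forward(self, feat, t):
        # feat: (B, C, h, w); t: (B,) normalized time in [0, 1].
        b, c, h, w = feat.shape
        t_map = t.view(b, 1, 1, 1).expand(b, 1, h, w)
        x = self.fuse(torch.cat([feat, t_map], dim=1))
        q = self.to_q(x).flatten(2).transpose(1, 2)   # (B, hw, C)
        k = self.to_k(x).flatten(2).transpose(1, 2)
        v = self.to_v(x).flatten(2).transpose(1, 2)
        attn = torch.softmax(q @ k.transpose(1, 2) * self.scale, dim=-1)
        r = q + attn @ v                              # Eq. (1)
        r = r.transpose(1, 2).reshape(b, c, h, w)
        return r + self.mlp(r)                        # Eq. (2)

# The same shared block is queried at different time steps.
block = TemporalSplitterBlock()
f = torch.randn(2, 256, 32, 32)
print(block(f, torch.full((2,), 0.5)).shape)  # torch.Size([2, 256, 32, 32])
```

Because t enters only as a query value, 3D hands at arbitrary time steps can be produced by this one shared module, which is what the train/test embedding strategy below exploits.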
During training, where we have access to ground truth 3D hands at specific time points, we restrict the values of our temporal embedding $\\mathbf{t}$ to correspond to these ground truth time steps. For example, in the case of the BlurHand dataset [27], which provides 3D annotations at the motion's start, middle, and end, we set $\\mathbf{t} = \\{0,0.5,1\\}$ . This configuration guarantees the desired generation of 3D hands at specific time steps, ensuring alignment with the available ground-truth data during the training phase.", + "bbox": [ + 212, + 260, + 785, + 395 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Conversely, during testing, we use an unconstrained approach, allowing continuous values for temporal embedding $\\mathbf{t}$ within the range of 0 to 1. This differs from the training phase, where we restrict the temporal embedding values to time steps with ground truth. This flexibility enables our model to generate 3D hands at novel time steps that were not explicitly provided in the training data. For example, we can produce 3D hands at time steps like 0.25 or 0.75, enhancing the versatility of our model's output when confronted with unseen time steps.", + "bbox": [ + 212, + 396, + 787, + 502 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3 Obtaining Final Outputs", + "text_level": 1, + "bbox": [ + 215, + 523, + 465, + 540 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "From the hand feature $\\mathbf{F}_{\\mathrm{t}}$ , we extract the joint features by projecting $\\mathbf{F}_{\\mathrm{t}}$ into a $dJ$ -dimensional feature through a $1\\times 1$ convolution layer [20]. These features are then reshaped into 3D heatmaps $\\mathbf{H}_{\\mathrm{t}}\\in \\mathbb{R}^{J\\times h\\times w\\times d}$ , where $d = 32$ represents predefined depth discretization, and $J = 21$ is the number of hand joints. Subsequently, a soft-argmax operation [37] is applied to the heatmap to obtain 3D joint coordinates $\\mathbf{J}_{\\mathrm{t}}\\in \\mathbb{R}^{J\\times 3}$ . From $\\mathbf{F}_{\\mathrm{t}}$ and $\\mathbf{J}_{\\mathrm{t}}$ , we derive the MANO pose $\\theta_{\\mathrm{t}}$ and shape $\\beta_{\\mathrm{t}}$ parameters. Specifically, the shape parameter $\\beta_{\\mathrm{t}}$ is obtained through a fully connected layer applied to $\\mathbf{F}_{\\mathrm{t}}$ after global average pooling [14]. For the pose parameter $\\theta_{\\mathrm{t}}$ , grid-sampling [7,20] is conducted on $\\mathbf{F}_{\\mathrm{t}}$ with $\\mathbf{J}_{\\mathrm{t}}$ to obtain joint features $\\mathbf{F}_{\\mathrm{J}_{\\mathrm{t}}}$ , and then the pose parameter $\\theta_{\\mathrm{t}}$ is obtained by feeding $\\mathbf{F}_{\\mathrm{J}_{\\mathrm{t}}}$ into a fully connected layer after flattening. Subsequently, the MANO parameters are passed to the MANO layer to produce 3D hand meshes $\\mathbf{V}_{\\mathrm{t}}$ .", + "bbox": [ + 212, + 549, + 787, + 731 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4 Objective Functions", + "text_level": 1, + "bbox": [ + 215, + 753, + 428, + 768 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We minimize objective functions, defined as a weighted sum of L1 distances between estimated values $(\\theta_{\\mathrm{t}},\\beta_{\\mathrm{t}},\\mathbf{J}_{\\mathrm{t}},$ and $\\mathbf{V}_{\\mathrm{t}})$ and their respective ground truth. 
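For illustration, here is a minimal sketch (ours; layer sizes beyond the stated $d = 32$ and $J = 21$ are assumptions) of the Sec. 4.3 output head and the weighted-L1 objective just described.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

J, D = 21, 32  # hand joints and depth discretization, as in the paper

class JointHead(nn.Module):
    """Project F_t to J*D channels, reshape into 3D heatmaps, and apply a
    soft-argmax (expectation of coordinates) to obtain 3D joints."""

    def __init__(self, c=256, h=32, w=32):
        super().__init__()
        self.proj = nn.Conv2d(c, J * D, 1)
        self.h, self.w = h, w

    def forward(self, f_t):
        b = f_t.shape[0]
        hm = self.proj(f_t).view(b, J, D, self.h, self.w)
        hm = F.softmax(hm.flatten(2), dim=2).view(b, J, D, self.h, self.w)
        zs = torch.arange(D, dtype=f_t.dtype)
        ys = torch.arange(self.h, dtype=f_t.dtype)
        xs = torch.arange(self.w, dtype=f_t.dtype)
        z = (hm.sum(dim=(3, 4)) * zs).sum(-1)   # expectation over depth
        y = (hm.sum(dim=(2, 4)) * ys).sum(-1)
        x = (hm.sum(dim=(2, 3)) * xs).sum(-1)
        return torch.stack([x, y, z], dim=-1)   # (B, J, 3) joint coordinates

def weighted_l1(preds, gts, weights=(1.0, 1.0, 1.0, 1.0)):
    """Weighted sum of L1 terms over the (theta, beta, J, V) pairs;
    the weights here are placeholders, not the paper's values."""
    return sum(w * F.l1_loss(p, g) for w, (p, g) in zip(weights, zip(preds, gts)))

head = JointHead()
print(head(torch.randn(2, 256, 32, 32)).shape)  # torch.Size([2, 21, 3])
```

For the image-only case described below, one would evaluate this loss against both the original and the time-reversed ground-truth ordering and keep the smaller of the two, e.g. via torch.minimum over the two summed losses.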
Among our input combinations (e.g., only image, only event, and both), the event stream effectively reduces temporal ambiguity, while using only the image", + "bbox": [ + 212, + 779, + 785, + 840 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "JK. Park et al.", + "bbox": [ + 271, + 114, + 372, + 127 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/1e80e5b78cf72f2ed339c60e5bfa69dad07d93fb1f43bdfd243a874b1d95eddf.jpg", + "table_caption": [ + "Table 1: Accuracy of the GT. We measure metrics by comparing predicted keypoints, masks, and their depth maps with their ground truth counterparts, which are obtained from an off-the-shelf 2D keypoint estimator [39], an off-the-shelf matting model [15], and a Kinect camera." + ], + "table_footnote": [], + "table_body": "
Evaluation onMetricValue
2D keypointsdistance0.18 (pixel)
MaskIoU84 (%)
Depth mapdistance8 (mm)
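As a side note on how such numbers can be computed, below is a small sketch (ours, not the released evaluation code) of the three consistency checks reported in Table 1; the array shapes and function names are assumptions.

```python
import numpy as np

def keypoint_distance(projected, detected):
    """Mean Euclidean distance (pixels) between the 21 joints projected
    from the predicted mesh and those from the 2D detector [39]."""
    return float(np.linalg.norm(projected - detected, axis=1).mean())

def mask_iou(rendered, matted):
    """IoU between the rendered hand silhouette and the matting mask [15];
    both inputs are boolean arrays of the same shape."""
    inter = np.logical_and(rendered, matted).sum()
    union = np.logical_or(rendered, matted).sum()
    return float(inter) / max(float(union), 1e-6)

def depth_l1(rendered_depth, kinect_depth, valid):
    """Pixel-wise L1 distance (mm) on pixels with valid sensor depth."""
    return float(np.abs(rendered_depth - kinect_depth)[valid].mean())
```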
", + "bbox": [ + 348, + 212, + 661, + 292 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "may struggle. Therefore, we employ different objective functions for each case. For the image-only input, we calculate losses for both the original and reversed ground truth, obtained by converting the ground truth in the reverse temporal order. The model is then updated by selecting the smaller loss between them. On the other hand, when the event stream is used as input (event-only or combined with images), we supervise EBHNet with a loss obtained from our prediction and the original ground truth without considering temporal ambiguity.", + "bbox": [ + 215, + 320, + 785, + 426 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 Experiments", + "text_level": 1, + "bbox": [ + 217, + 450, + 374, + 467 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.1 Datasets and Evaluation Metrics", + "text_level": 1, + "bbox": [ + 217, + 481, + 529, + 496 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "InterHand2.6M. InterHand2.6M [24] is a large-scale dataset, which consists of sharp hand images and 3D annotations. We use the BlurHand [27]'s splits for the training and evaluation on InterHand2.6M.", + "bbox": [ + 215, + 507, + 785, + 551 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "BlurHand. BlurHand [27] consists of blurry hand images created by synthetically averaging consecutive 5 frames from the InterHand2.6M dataset [24] (30fps). This process imitates motion blur typically seen with a 6fps (30/5) shutter speed. Note that BlurHand is derived from the InterHand2.6M dataset, which contains a substantial amount of static hand images; hence, many images in BlurHand do not have large blur, as shown in Fig. 4. We follow their protocol for the training and evaluation on BlurHand.", + "bbox": [ + 215, + 551, + 785, + 657 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "EBH. Among the six ground truths for each blurry hand image (red and orange in Fig. 3), we use only a subset during training to assess our model's ability to generate 3D hands at both learned and novel time steps. During training, we use four of the ground truth samples. For example, in Fig. 3, out of the ground truth for six-time steps (depicted as red and orange steps), we use four for training (the first and last of red and the first two orange). During testing, we evaluate models on the remaining two ground truth samples (middle red and the last orange in Fig. 3) to check the generalizability to unseen time steps. We also evaluate on the trained time steps to assess the model's proficiency in recovering 3D hands at the seen time steps.", + "bbox": [ + 215, + 657, + 785, + 809 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Evaluation metrics. 
To assess the accuracy of predicted hands, we employ two metrics: the Mean Per Joint Position Error (MPJPE) and the Mean Per Vertex", + "bbox": [ + 217, + 809, + 785, + 839 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "EBH Dataset and Network", + "bbox": [ + 550, + 114, + 730, + 127 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/321bbab1bed0095e519d467609a5f32dd947fa6843098ad87a211d91958f1bdd.jpg", + "image_caption": [ + "(a) Input I" + ], + "image_footnote": [], + "bbox": [ + 217, + 143, + 292, + 200 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/857c88f58b97558498095c23da5cd485492a5c7b7e9f8a54d742fb6f7cc3ad41.jpg", + "image_caption": [ + "(b) From I" + ], + "image_footnote": [], + "bbox": [ + 297, + 150, + 357, + 200 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/efca711afac477bc32089ac3e911c75b7c4ac426e40eb33e999fad5257ba6ddb.jpg", + "image_caption": [ + "(c) Deblur $\\mathbf{I}_{\\mathrm{D}}$", + "(d) From $\\mathbf{I}_{\\mathrm{D}}$", + "Fig. 6 & Table 2: (Left) Visual comparison with deblurring. (b) and (d) show the estimated 3D hands using $\\mathbf{I}$ and $\\mathbf{I}_{\\mathrm{D}}$ , respectively. (Right) Efficacy of EBHNet compared to deblurring baseline. Results are reported when only images are used as input. For the EBH dataset, we employ $\\mathbf{t} = 0.6$ for evaluating the Mid." + ], + "image_footnote": [], + "bbox": [ + 367, + 143, + 442, + 200 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/78baca8b44dce9563952fe87e8d691e8437cce35e2ad560ad40848d8211ba4d9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 446, + 148, + 511, + 195 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/72bfe4e37fd8ad5c4a57a8c2fc51cd165087109fb4b221101d98fbb8b7cd90ed.jpg", + "image_caption": [ + "(e) GT" + ], + "image_footnote": [], + "bbox": [ + 524, + 151, + 586, + 193 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/dc74ea62e5f637bbee8d8d2c0b976559942d972a7bb0b13b619774cd498833e0.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DataDeblurMPJPE↓
Init.Mid.Final
BH-16.79-
17.2316.45-
EBH-16.14-
16.1015.23-
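To make the reported numbers concrete, here is a small sketch (ours, for illustration) of the two error measures used throughout the experiments: root-aligned MPJPE (MPVPE is computed identically over mesh vertices) and the HMMR-style acceleration error [9].

```python
import numpy as np

def mpjpe(pred, gt, root=0):
    """Root-aligned MPJPE in mm: subtract the wrist (root) joint from
    both prediction and ground truth, then average the per-joint
    Euclidean error. pred/gt: (J, 3) arrays of 3D joint coordinates."""
    pred = pred - pred[root:root + 1]
    gt = gt - gt[root:root + 1]
    return float(np.linalg.norm(pred - gt, axis=-1).mean())

def accel_error(pred_seq, gt_seq):
    """Acceleration error: second finite difference of a (T, J, 3) joint
    sequence, compared between prediction and ground truth."""
    pa = pred_seq[2:] - 2 * pred_seq[1:-1] + pred_seq[:-2]
    ga = gt_seq[2:] - 2 * gt_seq[1:-1] + gt_seq[:-2]
    return float(np.linalg.norm(pa - ga, axis=-1).mean())
```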
", + "bbox": [ + 614, + 152, + 754, + 212 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Position Error (MPVPE). These metrics gauge the Euclidean distance (mm) between the estimated coordinates and the ground truth coordinates. Here, we measure the metrics after aligning the translation of the root joint (i.e., wrist), following the prior researches [3, 27]. Also, to evaluate the temporal consistency of hand motion, we use the acceleration error proposed in HMMR [9]. Here, the acceleration error calculates the average difference between the predicted and ground truth accelerations of each joint of hands.", + "bbox": [ + 212, + 316, + 787, + 425 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5.2 Ablation Studies", + "text_level": 1, + "bbox": [ + 215, + 449, + 401, + 462 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Validation of EBH Dataset Accuracy To validate the accuracy of our EBH dataset, Tab. 1 shows metrics for predicted 2D keypoints, masks, and depth maps against their ground truth counterparts. The predicted 2D keypoints, masks, and depth maps are either projected (2D keypoints) or rendered (masks and depth maps) from the predicted 3D meshes. Ground truth 2D keypoints, masks, and depth maps are obtained from a 2D keypoint detection model [39], a matting model [15], and the output of a Kinect camera. For 2D keypoints, we calculate the mean distance between 21 joints from the projected and detected keypoints. For masks, we compute the Intersection over Union (IoU) between the rendered mask and the mask from the matting model. For the depth map, we measure the pixel-wise L1 distance between the predicted and camera-derived depth maps. Here, all metrics are calculated after cropping and resizing the sharp hand images to $\\mathbb{R}^{256\\times 256\\times 3}$ . For additional details, refer to our supplementary material.", + "bbox": [ + 212, + 476, + 787, + 672 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Comparison with deblurring. In assessing EBHNet's performance in producing 3D hands from blurry hand images, we compare it with deblurring methods from prior works [2, 30, 31]. To this end, we integrate a state-of-the-art deblurring network [2] before applying EBHNet. Tab. 2 reveals that ours significantly outperforms the one that employs deblurring before 3D hand recovery. The performance decline can be attributed to several factors.", + "bbox": [ + 212, + 672, + 787, + 763 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "First, as shown in Fig. 6a, even advanced deblurring networks struggle to restore sharp hand images from blurry ones. Second, deblurring processes often eliminate valuable temporal information for 3D hand recovery. For example, Fig. 6c exhibits the absence of the middle finger, leading to inaccurate 3D hand mesh in Fig. 6d. Also, deblurring can restrict networks from producing single", + "bbox": [ + 212, + 763, + 787, + 840 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "JK. Park et al.", + "bbox": [ + 271, + 114, + 372, + 126 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/cf8abb563101a359ca692799f5cb286ab89a6f0b6e70b178e077e9c98470f8be.jpg", + "table_caption": [ + "Table 3: Comparison of models with various temporal encoding. Results are reported using images as input. Freq. indicates frequency embedding [19]." + ], + "table_footnote": [], + "table_body": "
DataMethodsMPJPE↓
Init.Mid.Final
BHFreq.18.0917.0718.27
Ours17.2316.4517.17
EBHFreq.16.7116.2117.98
Ours16.1015.2317.15
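The frequency-encoding baseline in this table can be sketched as follows (our reconstruction from the description in the text: a sinusoidal encoding of t is lifted to the channel width with a 1x1 convolution and added to F, in contrast to the concatenation scheme of Sec. 4.2; the number of frequency bands is an assumption).

```python
import math
import torch
import torch.nn as nn

class FrequencyEmbedding(nn.Module):
    """Sinusoidal temporal embedding in the style of [19]: encode t with
    sines/cosines at growing frequencies, lift to C channels, add to F."""

    def __init__(self, c=256, n_freqs=8):
        super().__init__()
        self.register_buffer("freqs", (2.0 ** torch.arange(n_freqs)) * math.pi)
        self.lift = nn.Conv2d(2 * n_freqs, c, 1)

    def forward(self, feat, t):
        b, _, h, w = feat.shape
        phase = t.view(b, 1) * self.freqs               # (B, n_freqs)
        code = torch.cat([phase.sin(), phase.cos()], dim=1)
        code = code.view(b, -1, 1, 1).expand(-1, -1, h, w)
        return feat + self.lift(code)                   # added, not concatenated

f = torch.randn(2, 256, 32, 32)
print(FrequencyEmbedding()(f, torch.tensor([0.0, 0.5])).shape)
```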
", + "bbox": [ + 241, + 212, + 475, + 301 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/a2b54b3d13ba2107a575e5d9ce27b27cd7a5ee86427cbeff8c43f1157819b117.jpg", + "table_caption": [ + "Table 4: Comparison of EBHNet with various input combinations. We use four GTs in training and evaluate the model on the corresponding time steps." + ], + "table_footnote": [], + "table_body": "
InputMPJPE↓
t=0t=0.2t=0.6t=0.8
I16.1014.2915.2316.18
E28.9129.3528.5930.80
I & E12.7312.0014.2416.35
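For reference, below is a minimal sketch of the input-conditional feature extraction behind these three rows (Sec. 4.1); the two branches are simplified stand-ins for the ImageNet-pretrained FPN [16] and the shallow event network, and every layer choice here is an assumption.

```python
import torch
import torch.nn as nn

class InputAdaptiveExtractor(nn.Module):
    """Image branch and/or event branch -> concat -> 1x1 reduce ->
    five residual blocks -> fused feature F at 1/8 resolution."""

    def __init__(self, c=256):
        super().__init__()
        self.img_branch = nn.Sequential(  # stand-in for the FPN backbone
            nn.Conv2d(3, c, 7, stride=8, padding=3), nn.ReLU())
        self.ev_branch = nn.Sequential(   # shallow: one event frame = one hand
            nn.Conv2d(3, c, 7, stride=8, padding=3), nn.ReLU())
        self.reduce = nn.Conv2d(2 * c, c, 1)
        self.res_blocks = nn.ModuleList(
            nn.Sequential(nn.Conv2d(c, c, 3, padding=1), nn.ReLU(),
                          nn.Conv2d(c, c, 3, padding=1)) for _ in range(5))

    def forward(self, img=None, ev=None):
        feats = [branch(x) for branch, x in
                 ((self.img_branch, img), (self.ev_branch, ev)) if x is not None]
        assert feats, "at least one of image / event must be given"
        f = self.reduce(torch.cat(feats, 1)) if len(feats) == 2 else feats[0]
        for block in self.res_blocks:
            f = f + block(f)
        return f

net = InputAdaptiveExtractor()
print(net(img=torch.randn(1, 3, 256, 256)).shape)  # image only: (1, 256, 32, 32)
print(net(img=torch.randn(1, 3, 256, 256), ev=torch.randn(1, 3, 256, 256)).shape)
```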
", + "bbox": [ + 516, + 212, + 785, + 301 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/5f96b90fd9ea1cbed99e36a345d3d40257cb29f227f01aa5d34f453423041549.jpg", + "image_caption": [ + "(a) Image I" + ], + "image_footnote": [], + "bbox": [ + 217, + 314, + 303, + 380 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/f7cb76269bb1a18991c68d91d552945c90f6ddd0e5d2972f50c707c573d8298a.jpg", + "image_caption": [ + "(b) Event E" + ], + "image_footnote": [], + "bbox": [ + 313, + 314, + 400, + 380 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/e4aff721f6d1d4d7e6bb7130091f0f3991aa8962422bb712177689925ec01f5a.jpg", + "image_caption": [ + "(c) On $\\mathbf{E}$" + ], + "image_footnote": [], + "bbox": [ + 424, + 316, + 478, + 375 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/028ff2548d4bf0d37c1ddb1a72c18a5c0208ebf3f7f89ef7dc5f03ae98334c03.jpg", + "image_caption": [ + "(d) On $\\mathbf{I}$" + ], + "image_footnote": [], + "bbox": [ + 522, + 320, + 576, + 375 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/7902e7b36cef40a32316e7ab9a607fc098bad619636a39a074d9446bc5e31a23.jpg", + "image_caption": [ + "(e) On both" + ], + "image_footnote": [], + "bbox": [ + 617, + 318, + 673, + 376 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/cfae70374d49965e73dfa987f57abb9ab62adb6f136e952603a9f733981c4431.jpg", + "image_caption": [ + "(f) GT", + "Fig.7: Comparison of different input combinations. Event information in severely blurry hand images can offer valuable complementary data, validating the effectiveness of incorporating event streams in our approach to address blurry hands." + ], + "image_footnote": [], + "bbox": [ + 715, + 316, + 769, + 377 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "outputs instead of sequences. Conversely, EBHNet excels at using the image's temporal information, producing multiple 3D hands at different time steps.", + "bbox": [ + 212, + 476, + 784, + 507 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Comparison with frequency encoding. Tab. 3 compares our temporal embedding with conventional frequency encoding [19]. For frequency encoding, following BlurHandNet [27], we apply a sinusoidal operation on $\\mathbf{t}$ , expand the dimension to match feature $\\mathbf{F}$ using $1 \\times 1$ convolution layers, and then add the output to $\\mathbf{F}$ . The table shows that our more straightforward approach, which concatenates the temporal encoding $\\mathbf{t}$ across the channel of feature $\\mathbf{F}$ , and processes them through $1 \\times 1$ convolution layers, consistently outperforms the frequency encoding on both BlurHand [27] and our EBH datasets.", + "bbox": [ + 212, + 508, + 785, + 627 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Various input combinations for EBHNet. Tab. 4 shows the results for different input combinations, including images only, events only, and both. Among six available ground truths, note that we employ only four ground truths at $t = \\{0,0.2,0.6,0.8\\}$ for supervision and evaluate the model on these four time steps. As shown, when using event stream as an input (second row in Tab. 4), metric shows significant degradation. This is because events lack information for static hands, leading to a failure in recovering 3D hands. On the other hand, when both image and event streams are utilized (third row in Tab. 
4), events complement the image in capturing 3D hands across time steps, particularly in blurry regions, yielding the best performance. Furthermore, Fig. 7 demonstrates that combining event and image input (Fig. 7e) produces results closest to the ground truth (Fig. 7f), consistent with Tab. 4.", + "bbox": [ + 212, + 628, + 787, + 809 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Novel time step reconstruction. Tab. 5 evaluates the metrics at novel time steps, not included in the training phase, to further assess EBHNet's capacity to", + "bbox": [ + 214, + 809, + 785, + 839 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "EBH Dataset and Network", + "bbox": [ + 550, + 114, + 730, + 126 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 116, + 782, + 126 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/c5a86776204449c5368751b3152a73bf026c11fa3045a933b301bbc371092f79.jpg", + "image_caption": [ + "(a) Input I", + "(b) Interpolate", + "Fig. 8 & Table 5: (Left) Visual comparison between linear interpolation and temporal embedding. We generate middle hand from two predicted 3D hands (red and white) in two ways: (b) linear interpolation, and (c) applying the temporal embedding value between temporal embeddings to obtain neighboring 3D hands. $\\times$ , $\\times$ , and $\\times$ show the same joint (tip of middle finger) at different time steps. (Right) Comparison of models at a novel time step. For BH, we show the metrics at time steps $t = 1.0$ , while for EBH, we present the metrics at time steps $t = \\{0.4, 1.0\\}$ ." + ], + "image_footnote": [], + "bbox": [ + 217, + 143, + 295, + 205 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/15553539fee7d02e74a3b1d9b542b686e1f6b4a74d34e39235af8f4eb5d8f24e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 297, + 143, + 375, + 202 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/ba113c5a7c8c0c6b9f2015cfeac3438ff74670dd6e363038813474876d97cb3e.jpg", + "image_caption": [ + "(c) Ours" + ], + "image_footnote": [], + "bbox": [ + 377, + 143, + 457, + 202 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/d9e088dff0d2c5e1f5ac9e2c9487a8a9bb7f5fa104eefdd61617e78da0e5bd2f.jpg", + "image_caption": [ + "(d) GT" + ], + "image_footnote": [], + "bbox": [ + 459, + 143, + 537, + 202 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/90e2e48a441d93a9b235979c9016e6c95e7aeeacdc64538e8fbfde25e2154ca1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DataMethods\\( MPJPE^{\\downarrow} \\) t = 0.4 t = 1.0\\( MPVPE^{\\downarrow} \\) t = 0.4 t = 1.0
BHLinear-18.12-16.81
Ours (I)-17.87-16.30
EBHLinear14.6617.5319.2017.95
Ours (I)14.3817.1518.7317.58
Ours (E)28.7431.1225.4125.92
Ours (I & E)14.0316.7616.8817.09
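To clarify the baseline in this table, the interpolation reference can be written in a few lines, whereas the paper's approach simply queries the shared temporal splitter at the new embedding value; the MANO vertex count below (778) is standard for that model [33].

```python
import torch

def interpolate_baseline(v_prev, v_next, t, t_prev, t_next):
    """Linear-interpolation reference of Tab. 5: blend two predicted
    meshes, e.g. V_0.4 = 0.5 * V_0.2 + 0.5 * V_0.6."""
    a = (t_next - t) / (t_next - t_prev)
    return a * v_prev + (1.0 - a) * v_next

v02, v06 = torch.randn(778, 3), torch.randn(778, 3)
v04 = interpolate_baseline(v02, v06, 0.4, 0.2, 0.6)
print(v04.shape)  # torch.Size([778, 3])
```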
", + "bbox": [ + 560, + 148, + 764, + 218 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "generate 3D hands at untrained time steps. For EBH, we evaluate 3D hand recovery at two novel time steps not included in the training phase. For BlurHand, our model is trained only on initial and middle hands ( $t = \\{0,0.5\\}$ ), and we evaluate its performance in recovering hands located at the last time step ( $t = \\{1\\}$ ). Tab. 5 compares the model's performance using three different input combinations with linear interpolation for reference. To clarify, linear interpolation outputs a hand at $t = 0.4$ by linearly interpolating the neighboring hands, which are obtained by our EBHNet trained on $\\mathbf{I}$ (e.g., $\\mathbf{V}_{0.4} = \\frac{1}{2}\\mathbf{V}_{0.2} + \\frac{1}{2}\\mathbf{V}_{0.6}$ ). The table shows the superior performance of our approach with both image and event and with only image. This is attributed to our temporal splitter and event stream, guiding the network to predict 3D hands at specific time steps.", + "bbox": [ + 212, + 359, + 787, + 525 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Fig. 8 visually compares linear interpolation and our temporal embedding for generating novel hand sequences. In Fig. 8b, while linear interpolation restricts all articulation movements in a linear way (see purple line in Fig. 8), our method produces more plausible results by using the corresponding temporal embedding values, exhibiting motion trajectories similar to GT.", + "bbox": [ + 212, + 526, + 787, + 602 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5.3 Comparisons with Previous Works", + "text_level": 1, + "bbox": [ + 214, + 621, + 547, + 636 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Performance comparison. Tabs. 6 and 7 clearly show that our EBHNet surpasses previous 3D hand mesh estimation methods. In Tab. 6, the most prior approaches [12, 20, 22] do not account for motion blur in hand images, resulting in inaccuracies. While BlurHandNet [27] performs admirably by considering three-hand time steps, it may overlook crucial hand information between those time steps, as it extracts features based on supervision with 3D hands at those specific time steps. Moreover, BlurHandNet consists of modules dedicated to each time step; thus, it cannot generate hands at novel time steps that were not included in the training phase. In contrast, our approach does not constrain the extracted feature $\\mathbf{F}$ to 3D hand at particular time steps, outperforming prior methods by leveraging temporal information from the entire motion trajectory. Also, our EBHNet can generate 3D hands at novel time steps without requiring training data for those specific time points, distinguishing it from BlurHandNet.", + "bbox": [ + 212, + 643, + 787, + 840 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "JK. Park et al.", + "bbox": [ + 271, + 114, + 372, + 126 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/739d296e568e24d90ffa87bced605e82949ea7356a8abda51136580b3aa3e62a.jpg", + "table_caption": [ + "Table 6: Comparison with SoTA methods on BH [27]. For MPVPE, we evaluate metrics at the midpoint of the motion. As BH provides only images, our results are based on image inputs." + ], + "table_footnote": [], + "table_body": "
MethodsMPJPE↓MPVPE↓Accel↓
Init.Mid.Final
I2L-MeshNet [22]-24.32-23.07-
Pose2Pose [20]-18.80-17.42-
METRO [12]-20.54-27.03-
BlurHandNet [27]18.0816.8018.2115.303.94
EBHNet (Ours)17.2316.4517.1715.023.37
", + "bbox": [ + 240, + 226, + 470, + 290 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/60b429a3ba48094c78ca7e277648874387fd444e5c41ed80faf255282d67b0f0.jpg", + "table_caption": [ + "Table 7: Comparison with SoTA methods on our proposed EBH. For MPVPE, we assess metrics for the hand at the midpoint of the motion (t=0.6)." + ], + "table_footnote": [], + "table_body": "
InputMethods\\( MPJPE^{\\ddagger} \\)\\( MPVPE^{\\ddagger} \\)\\( Accel^{\\ddagger} \\)
t=0t=0.2t=0.6t=0.8
II2L-MeshNet [22]--28.12-30.86-
Pose2Pose [20]--17.28-20.41-
BlurHandNet [27]17.0815.5316.1317.5417.995.78
EBHNet (Ours)16.1014.2915.2316.1817.894.69
EEventHands [35]28.8029.8128.9130.9725.7010.75
EBHNet (Ours)28.9129.3528.5930.8025.309.25
I & EEBHNet (Ours)12.7312.0014.2416.3516.933.19
", + "bbox": [ + 513, + 212, + 785, + 290 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/039b8b95277ed331e33037a1d4fa9ca3bc532af0a0ed625c5adefa712de48210.jpg", + "image_caption": [ + "(a) Event E" + ], + "image_footnote": [], + "bbox": [ + 217, + 303, + 289, + 358 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/c72ed01e6b65d1d4b540f09f7b10102ff78d9d2a4387e363223933d99a363e5e.jpg", + "image_caption": [ + "(b) [35]" + ], + "image_footnote": [], + "bbox": [ + 297, + 304, + 349, + 356 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/70ced0da09006ab91bf5e141dc1f3de08986df2c22c6b859cdc6ec8b6f3d6f83.jpg", + "image_caption": [ + "(c) EBHNet" + ], + "image_footnote": [], + "bbox": [ + 362, + 306, + 424, + 354 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/25c137b8b3710a1fe2fe74a219a7be74023311904e303e426dbfd128875b414b.jpg", + "image_caption": [ + "(d) GT" + ], + "image_footnote": [], + "bbox": [ + 434, + 308, + 496, + 356 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/c7f3f1f62de32c53a85ecf3ed167a3393275da4009c755251133067fdc6bdc19.jpg", + "image_caption": [ + "(e) Event E", + "Fig. 9: Visual comparison with ours and EventHand [35]. When only the event is provided, ours yields results that closely resemble the corresponding ground truth." + ], + "image_footnote": [], + "bbox": [ + 501, + 303, + 573, + 357 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/5482e82d7d5f52a4040ea21eb3a5011be13afade62e3caeaf388ae6f0aa56613.jpg", + "image_caption": [ + "(f) [35]" + ], + "image_footnote": [], + "bbox": [ + 581, + 306, + 637, + 349 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/22802507b6651d0fd17d0f636c81c17a5f4dbcf7105ed10d7f758e97d58468ed.jpg", + "image_caption": [ + "(g) EBHNet" + ], + "image_footnote": [], + "bbox": [ + 647, + 306, + 709, + 349 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/99d96a7b2e6ea9bff54a4df2f07eb210e44bf5f8e760bf84b6cbb0f35d33e08a.jpg", + "image_caption": [ + "(h) GT" + ], + "image_footnote": [], + "bbox": [ + 720, + 308, + 777, + 348 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Moreover, Tab. 7 compares image-only, event-only, and both inputs in the EBH dataset. Modifying BlurHandNet [27] to predict four outputs, our EBHNet consistently outperforms prior methods, excelling against an event-based 3D recovery method [35]. The best results of our EBHNet are obtained with both event and image inputs. Also, Tabs. 6 and 7 compare the acceleration error (Accel), showing that EBHNet produces more temporally consistent outcomes by successfully addressing the real motion. Here, acceleration error is computed only for hand sequence generation methods [27,35]. Fig. 9 shows a visual comparison between our EBHNet and EventHand [35] when only event inputs are used.", + "bbox": [ + 212, + 446, + 784, + 582 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Furthermore, Fig. 10 visually compares EBHNet with previous methods [20, 22,27] when using only image inputs. As shown, Pose2Pose [20] and I2L-MeshNet [22] even struggle to capture the hand pose, while BlurHandNet [27] fails to capture motion information, resulting in consistent outputs across time steps. In contrast, EBHNet successfully generates 3D hands at different time steps.", + "bbox": [ + 212, + 582, + 784, + 657 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Efficiency comparison. Tab. 
8 shows a comparison of the efficiency of our EBHNet with previous methods. Since all previous methods use images as input, we evaluate the efficiency of our EBHNet when using images only as input. We first compare our EBHNet with Pose2Pose [20] and I2L-MeshNet [22], which produce a single output, by considering EBHNet (1) generating only one output. Here, EBHNet (1) exhibits comparable computational efficiency while showing quantitatively superior results compared to Pose2Pose and I2L-MeshNet in Tables 6 and 7. Compared to BlurHandNet, we configure EBHNet to generate the same number of 3D hands for a fair comparison in EBHNet (3). In contrast to BlurHandNet [27], which requires additional layers for predicting multiple hands and results in a larger model size, EBHNet can generate 3D hands at different time steps using varied temporal embeddings, leading to a reduced model size.", + "bbox": [ + 212, + 657, + 785, + 839 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "EBH Dataset and Network", + "bbox": [ + 550, + 114, + 730, + 126 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 784, + 126 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/0805e304eed22aeb094d56656e007e964f22371ea2c5203329e787f51a585b89.jpg", + "image_caption": [ + "Fig. 10: Visual comparison with previous methods on BH [27] and our EBH. Unlike other methods that generate a fixed number of 3D hands, EBHNet can produce a variable number of 3D hands. The red circles show severely blurry regions." + ], + "image_footnote": [], + "bbox": [ + 225, + 143, + 782, + 271 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/194148debb20b7632bb01b567738edc69b4fd3034c14526ac184d578761b7c13.jpg", + "table_caption": [ + "Table 8: Efficiency comparison with previous methods. Our EBHNet doesn't require extra parameters for recovering 3D hands at additional time steps, maintaining the same number of parameters whether predicting a single hand or three hands." + ], + "table_footnote": [], + "table_body": "
MetricsPose2Pose [20]I2L-MeshNet [22]BlurHandNet [27]EBHNet (1)EBHNet (3)
Num of params (MB)↓77141202146146
Latency (fps)↑26.3125.7614.4325.1215.45
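The two rows of this table correspond to measurements one could reproduce roughly as sketched below (our utility code; it assumes 4-byte float32 parameters when converting counts to MB, and plain wall-clock timing without GPU synchronization).

```python
import time
import torch

def param_size_mb(model):
    """Parameter count expressed in MB, assuming float32 weights."""
    return sum(p.numel() for p in model.parameters()) * 4 / 1024 ** 2

@torch.no_grad()
def latency_fps(model, inp, iters=50):
    """Rough wall-clock throughput in forward passes per second."""
    model.eval()
    model(inp)  # warm-up pass
    start = time.perf_counter()
    for _ in range(iters):
        model(inp)
    return iters / (time.perf_counter() - start)
```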
", + "bbox": [ + 276, + 405, + 730, + 439 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "6 Limitations", + "text_level": 1, + "bbox": [ + 215, + 465, + 362, + 482 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Our EBH dataset includes hands with various real motion blur and corresponding 3D annotations, marking a crucial step in addressing key community challenges. However, since our data were captured from 10 individuals, it may lack shape diversity. We will address this issue by designing additional modules for shape generalization or constructing a large-scale dataset from various people.", + "bbox": [ + 212, + 497, + 787, + 575 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "7 Conclusion", + "text_level": 1, + "bbox": [ + 215, + 595, + 359, + 612 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This work tackles the challenging problem of recovering a 3D hand sequence from a blurry hand. To this end, we introduce the EBH, the first dataset that includes real blurry hand images, their corresponding 3D ground truths, and continuous temporal information from an event stream. In conjunction with the EBH dataset, we propose EBHNet, a method for generating 3D hand sequences from a single blurry hand input in diverse combinations. Our experiments show the efficacy of the EBH and EBHNet in enhancing 3D hand sequence recovery from blurry hands, with the ability to generate hand sequences at novel time steps. Our contributions offer future insights for addressing motion blur in 3D hand recovery.", + "bbox": [ + 212, + 628, + 787, + 779 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgments. This work was supported in part by the IITP grants [No. 2021-0-01343, Artificial Intelligence Graduate School Program (Seoul National University), No.2021-0-02068, and No.2023-0-00156], the NRF grant [No. 2021M3A9E4080782] funded by the Korean government (MSIT).", + "bbox": [ + 214, + 779, + 787, + 840 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "JK. Park et al.", + "bbox": [ + 271, + 114, + 372, + 126 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 217, + 143, + 321, + 159 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Boukhayma, A., Bem, R.d., Torr, P.H.: 3D hand shape and pose from images in the wild. In: CVPR (2019)", + "2. Chen, L., Chu, X., Zhang, X., Sun, J.: Simple baselines for image restoration. In: ECCV (2022)", + "3. Choi, H., Moon, G., Lee, K.M.: Pose2Mesh: Graph convolutional network for 3D human pose and mesh recovery from a 2D human pose. In: ECCV (2020)", + "4. Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: A large-scale hierarchical image database. In: CVPR (2009)", + "5. Hampali, S., Rad, M., Oberweger, M., Lepetit, V.: Honnotate: A method for 3D annotation of hand and object poses. In: CVPR (2020)", + "6. Hampali, S., Sarkar, S.D., Rad, M., Lepetit, V.: Keypoint Transformer: Solving joint identification in challenging hands and object interactions for accurate 3D pose estimation. In: CVPR (2022)", + "7. Jaderberg, M., Simonyan, K., Zisserman, A., et al.: Spatial transformer networks. In: NeurIPS (2015)", + "8. Joo, H., Neverova, N., Vedaldi, A.: Exemplar fine-tuning for 3D human model fitting towards in-the-wild 3D human pose estimation. In: 3DV (2021)", + "9. 
Kanazawa, A., Zhang, J.Y., Felsen, P., Malik, J.: Learning 3D human dynamics from video. In: CVPR (2019)", + "10. Kulon, D., Guler, R.A., Kokkinos, I., Bronstein, M.M., Zafeiriou, S.: Weakly-supervised mesh-convolutional hand reconstruction in the wild. In: CVPR (2020)", + "11. Li, Z., Liu, J., Zhang, Z., Xu, S., Yan, Y.: CLIFF: Carrying location information in full frames into human pose and shape estimation. In: ECCV (2022)", + "12. Lin, K., Wang, L., Liu, Z.: End-to-end human pose and mesh reconstruction with transformers. In: CVPR (2021)", + "13. Lin, K., Wang, L., Liu, Z.: Mesh graphormer. In: ICCV (2021)", + "14. Lin, M., Chen, Q., Yan, S.: Network in network. arXiv preprint arXiv:1312.4400 (2013)", + "15. Lin, S., Yang, L., Saleemi, I., Sengupta, S.: Robust high-resolution video matting with temporal guidance. In: WACV (2022)", + "16. Lin, T.Y., Dollár, P., Girshick, R., He, K., Hariharan, B., Belongie, S.: Feature pyramid networks for object detection. In: CVPR (2017)", + "17. Liu, S., Jiang, H., Xu, J., Liu, S., Wang, X.: Semi-supervised 3D hand-object poses estimation with interactions in time. In: CVPR (2021)", + "18. Messikommer, N., Fang, C., Gehrig, M., Scaramuzza, D.: Data-driven feature tracking for event cameras. In: CVPR (2023)", + "19. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: Representing scenes as neural radiance fields for view synthesis. Commun. ACM (2021)", + "20. Moon, G., Choi, H., Lee, K.M.: Accurate 3D hand pose estimation for whole-body 3D human mesh estimation. In: CVPRW (2022)", + "21. Moon, G., Choi, H., Lee, K.M.: NeuralAnnot: Neural annotator for 3D human mesh training sets. In: CVPR (2022)", + "22. Moon, G., Lee, K.M.: I2L-MeshNet: Image-to-lixel prediction network for accurate 3D human pose and mesh estimation from a single RGB image. In: ECCV (2020)", + "23. Moon, G., Saito, S., Xu, W., Joshi, R., Buffalini, J., Bellan, H., Rosen, N., Richardson, J., Mize, M., De Bree, P., et al.: A dataset of relighted 3D interacting hands. In: NeurIPS (2023)" + ], + "bbox": [ + 225, + 176, + 784, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "EBH Dataset and Network", + "bbox": [ + 550, + 114, + 730, + 126 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "24. Moon, G., Yu, S.I., Wen, H., Shiratori, T., Lee, K.M.: InterHand2.6M: A dataset and baseline for 3D interacting hand pose estimation from a single RGB image. In: ECCV (2020)", + "25. Nah, S., Kim, T.H., Lee, K.M.: Deep multi-scale convolutional neural network for dynamic scene deblurring. In: CVPR (2017)", + "26. Nehvi, J., Golyanik, V., Mueller, F., Seidel, H.P., Elgharib, M., Theobalt, C.: Differentiable event stream simulator for non-rigid 3D tracking. In: CVPR (2021)", + "27. Oh, Y., Park, J., Kim, J., Moon, G., Lee, K.M.: Recovering 3D hand mesh sequence from a single blurry image: A new dataset and temporal unfolding. In: CVPR (2023)", + "28. Ozawa, T., Sekikawa, Y., Saito, H.: Accuracy and speed improvement of event camera motion estimation using a bird's-eye view transformation. Sensors (2022)", + "29. Park, J., Jung, D.S., Moon, G., Lee, K.M.: Extract-and-adaptation network for 3D interacting hand mesh recovery. In: ICCVW (2023)", + "30. Park, J., Nah, S., Lee, K.M.: Pay attention to hidden states for video deblurring: Ping-pong recurrent neural networks and selective non-local attention.
arXiv preprint arXiv:2203.16063 (2022)", + "31. Park, J., Nah, S., Lee, K.M.: Recurrence-in-recurrence networks for video deblurring. In: BMVC (2022)", + "32. Park, J., Oh, Y., Moon, G., Choi, H., Lee, K.M.: Handoccnet: Occlusion-robust 3D hand mesh estimation network. In: CVPR (2022)", + "33. Romero, J., Tzionas, D., Black, M.J.: Embodied hands: Modeling and capturing hands and bodies together. SIGGRAPH Asia (2017)", + "34. Rong, Y., Shiratori, T., Joo, H.: FrankMocap: A monocular 3D whole-body pose estimation system via regression and integration. In: ICCVW (2021)", + "35. Rudnev, V., Golyanik, V., Wang, J., Seidel, H.P., Mueller, F., Elgharib, M., Theobalt, C.: Eventhands: Real-time neural 3d hand pose estimation from an event stream. In: ICCV (2021)", + "36. Shen, Z., Wang, W., Shen, J., Ling, H., Xu, T., Shao, L.: Human-aware motion deblurring. In: ICCV (2019)", + "37. Sun, X., Xiao, B., Wei, F., Liang, S., Wei, Y.: Integral human pose regression. In: ECCV (2018)", + "38. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: NeurIPS (2017)", + "39. Zhang, F., Bazarevsky, V., Vakunov, A., Tkachenka, A., Sung, G., Chang, C.L., Grundmann, M.: Mediapipe hands: On-device real-time hand tracking. arXiv preprint arXiv:2006.10214 (2020)", + "40. Zhang, S., Wang, W., Li, H., Zhang, S.: Evtracker: An event-driven spatiotemporal method for dynamic object tracking. Sensors (2022)", + "41. Zhong, Z., Gao, Y., Zheng, Y., Zheng, B.: Efficient spatio-temporal recurrent neural network for video deblurring. In: ECCV (2020)", + "42. Zimmermann, C., Brox, T.: Learning to estimate 3D hand pose from single RGB images. In: ICCV (2017)", + "43. Zimmermann, C., Ceylan, D., Yang, J., Russell, B., Argus, M., Brox, T.: Freihand: A dataset for markerless capture of hand pose and shape from single RGB images. In: ICCV (2019)" + ], + "bbox": [ + 215, + 146, + 784, + 784 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "JK. Park et al.", + "bbox": [ + 271, + 114, + 372, + 126 + ], + "page_idx": 15 + } +] \ No newline at end of file diff --git a/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/c806d671-8f45-4954-8ba0-1ca55bf4fc0d_model.json b/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/c806d671-8f45-4954-8ba0-1ca55bf4fc0d_model.json new file mode 100644 index 0000000000000000000000000000000000000000..6e347e937fc977ec0d318df4cd1988064778c9ee --- /dev/null +++ b/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/c806d671-8f45-4954-8ba0-1ca55bf4fc0d_model.json @@ -0,0 +1,2740 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.283, + 0.142, + 0.723, + 0.187 + ], + "angle": 0, + "content": "3D Hand Sequence Recovery from Real Blurry Images and Event Stream" + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.212, + 0.761, + 0.245 + ], + "angle": 0, + "content": "Joonkyu Park1, Gyeongsik Moon3,4, Weipeng Xu4, Evan Kaseman4, Takaaki Shiratori4, and Kyoung Mu Lee1,2" + }, + { + "type": "text", + "bbox": [ + 0.291, + 0.254, + 0.713, + 0.281 + ], + "angle": 0, + "content": "\\(^{1}\\)Dept. 
of ECE&ASRI, \\(^{2}\\)IPAI, Seoul National University, Korea \\(^{3}\\)DGIST" + }, + { + "type": "text", + "bbox": [ + 0.412, + 0.283, + 0.593, + 0.297 + ], + "angle": 0, + "content": "\\(^{4}\\)Codec Avatars Lab, Meta" + }, + { + "type": "text", + "bbox": [ + 0.301, + 0.298, + 0.702, + 0.311 + ], + "angle": 0, + "content": "{jkpark0825, kyoungmu}@snu.ac.kr, mks0601@gmail.com," + }, + { + "type": "text", + "bbox": [ + 0.339, + 0.312, + 0.665, + 0.325 + ], + "angle": 0, + "content": "{xuweipeng, ekaseman, tshiratori}@meta.com" + }, + { + "type": "text", + "bbox": [ + 0.393, + 0.326, + 0.61, + 0.339 + ], + "angle": 0, + "content": "https://jkpark08.github.io/EBH" + }, + { + "type": "image", + "bbox": [ + 0.262, + 0.36, + 0.406, + 0.457 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.36, + 0.581, + 0.451 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.582, + 0.36, + 0.739, + 0.453 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.472, + 0.788, + 0.502 + ], + "angle": 0, + "content": "Fig. 1: Blurry hand to 3D hand sequences. We address 3D hand sequence recovery with real motion blur, showing proficiency in predicting 3D hands at novel time steps." + }, + { + "type": "text", + "bbox": [ + 0.261, + 0.522, + 0.744, + 0.816 + ], + "angle": 0, + "content": "Abstract. Although hands frequently exhibit motion blur due to their dynamic nature, existing approaches for 3D hand recovery often disregard the impact of motion blur in hand images. Blurry hand images contain hands from multiple time steps, lack precise hand location at a specific time step, and introduce temporal ambiguity, leading to multiple possible hand trajectories. To address this issue and in the absence of datasets with real blur, we introduce the EBH dataset, which provides 1) hand images with real motion blur and 2) event data for authentic representation of fast hand movements. In conjunction with our new dataset, we present EBHNet, a novel network capable of recovering 3D hands from diverse input combinations, including blurry hand images, events, or both. Here, the event stream enhances motion understanding in blurry hands, addressing temporal ambiguity. Recognizing that blurry hand images include not only single 3D hands at a time step but also multiple hands along their motion trajectories, we design EBHNet to generate 3D hand sequences in motion. Moreover, to enable our EBHNet to predict 3D hands at novel, unsupervised time steps using a single shared module, we employ a Transformer-based module, temporal splitter, into EBHNet. Our experiments show the superior performance of EBH and EBHNet, especially in handling blurry hand images, making them valuable in real-world applications." + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.826, + 0.737, + 0.84 + ], + "angle": 0, + "content": "Keywords: 3D hand sequence recovery \\(\\cdot\\) Blurry hands \\(\\cdot\\) Event stream" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.373, + 0.128 + ], + "angle": 0, + "content": "JK. Park et al." 
+ }, + { + "type": "image", + "bbox": [ + 0.218, + 0.144, + 0.313, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.222, + 0.218, + 0.308, + 0.228 + ], + "angle": 0, + "content": "(a) Blurry image" + }, + { + "type": "image", + "bbox": [ + 0.314, + 0.144, + 0.407, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.336, + 0.218, + 0.384, + 0.227 + ], + "angle": 0, + "content": "(b) Mesh" + }, + { + "type": "image", + "bbox": [ + 0.41, + 0.144, + 0.501, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.43, + 0.218, + 0.479, + 0.227 + ], + "angle": 0, + "content": "(c) Event" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.144, + 0.597, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.507, + 0.218, + 0.593, + 0.227 + ], + "angle": 0, + "content": "(d) Blurry image" + }, + { + "type": "image", + "bbox": [ + 0.599, + 0.144, + 0.692, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.622, + 0.218, + 0.668, + 0.227 + ], + "angle": 0, + "content": "(e) Mesh" + }, + { + "type": "image", + "bbox": [ + 0.695, + 0.144, + 0.787, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.716, + 0.218, + 0.764, + 0.227 + ], + "angle": 0, + "content": "(f) Event" + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.239, + 0.785, + 0.281 + ], + "angle": 0, + "content": "Fig. 2: Examples of the proposed EBH dataset. Our EBH consists of real motion-blurred hand images with the corresponding meshes derived from sharp images captured from three different viewpoints during motion. Also, we offer an event stream." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.313, + 0.375, + 0.329 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.352, + 0.785, + 0.473 + ], + "angle": 0, + "content": "The dynamic nature of hands leads to prevalent blurriness during rapid movements (e.g., sports, typing, and dancing), posing a challenge for accurately capturing hand information. Moreover, isolating a single hand during motion is impractical because blurry hands convey multiple hand information during the motion. To address this, there is a need for a robust framework that can effectively recover 3D hand sequence even in the presence of motion blur. However, existing methods [3, 5, 10, 12, 13, 22] have exhibited subpar performance when faced with motion blur present in hand images." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.476, + 0.787, + 0.656 + ], + "angle": 0, + "content": "The main issue arises from the lack of datasets containing both blurry hand images and accurate 3D ground truth. Although Oh et al. [27] introduced the BlurHand dataset, it has limitations. First, their motion blur is artificially generated by averaging sequential frames [25, 36], which doesn't accurately reflect real-world blur. Second, BlurHand [27] provides only images, which can lead to challenges in accurately capturing the trajectory due to temporal ambiguity. The images display the hand's position over time, lacking exact information about its location at particular time steps and overall motion. As a result, a single blurry image can exhibit multiple potential motion trajectories, leading to temporal ambiguity. 
While BlurHand includes corresponding sharp image pairs to assist in determining the motion trajectory, relying solely on these sharp images may not provide an accurate trajectory beyond the frame rates of the sharp images." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.658, + 0.788, + 0.809 + ], + "angle": 0, + "content": "To address these issues, we propose the EBH dataset, which captures real motion blur in hand images using cameras with different exposure times, where the examples are shown in Fig. 2. Specifically, we use an RGB camera with extended exposure times to generate blurry hand images with real motion blur. At the same time, multiple RGB cameras with shorter exposure times are employed to capture sharp images. The 3D annotations (i.e., meshes and keypoints) are derived from pairs of these multi-view sharp images. Also, we incorporate event data to compensate for the temporal ambiguity in the blurry hand images. This event data captures instantaneous changes in brightness, helping find the hand's position and track its movement in the blurry image." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.81, + 0.787, + 0.841 + ], + "angle": 0, + "content": "Building upon the EBH dataset, we present EBHNet, a flexible network, designed to predict a 3D hand sequence from diverse input combinations: 1)" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.551, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "EBH Dataset and Network" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.177 + ], + "angle": 0, + "content": "only a single blurry image, 2) only event data, and 3) both a blurry image and event data. In each case, we adapt feature extraction based on input types." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.178, + 0.787, + 0.328 + ], + "angle": 0, + "content": "With the feature extracted by considering various input types, our goal is to predict 3D hand sequences, which consist of 3D hands at different time steps. Here, using a dedicated 3D hand recovery module for each time step, such as BlurHandNet [27], can inflate model parameters, especially with more 3D hands at different time steps. To address this, we propose a Transformer-based module, temporal splitter, in our EBHNet. This enables the generation of multiple 3D hands at novel time steps using a single shared module, with the temporal splitter estimating based on the temporal embedding. Additionally, our temporal embeddings enable EBHNet to generate 3D hands at novel time steps without requiring supervision for those steps." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.329, + 0.787, + 0.404 + ], + "angle": 0, + "content": "Using our newly introduced dataset, EBH, and the baseline network, EBHNet, we tackle the task of recovering 3D hands from blurry hand images. Our experiments show the efficacy of our dataset when applied to real-world blurry hand scenarios and the robustness of EBHNet in handling such cases. We summarize our contributions as follows:" + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.418, + 0.784, + 0.448 + ], + "angle": 0, + "content": "- We introduce the EBH dataset for 3D hand recovery from motion-blurred images, providing real-world motion blur and event information." 
+ }, + { + "type": "text", + "bbox": [ + 0.228, + 0.449, + 0.784, + 0.479 + ], + "angle": 0, + "content": "- We introduce EBHNet, a novel network for 3D hand sequence recovery from blurry hands, capable of handling diverse input combinations." + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.48, + 0.784, + 0.51 + ], + "angle": 0, + "content": "- The temporal splitter in EBHNet allows our network to produce 3D hands at novel time steps using a single shared module." + }, + { + "type": "list", + "bbox": [ + 0.227, + 0.418, + 0.784, + 0.51 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.535, + 0.392, + 0.55 + ], + "angle": 0, + "content": "2 Related works" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.568, + 0.787, + 0.673 + ], + "angle": 0, + "content": "3D hand recovery. Since the introduction of RGB-based hand benchmark datasets [5, 24, 32, 43], several methods [1, 3, 22, 42] for hand estimation have emerged. Although several approaches [6,17,32] have focused on addressing hand occlusion, especially in cases where objects are being held, others have taken on the task of handling interacting hands [24, 29], which are frequently encountered in real-world scenarios. However, there is a notable research gap on the challenge of fast-moving hands despite its prevalence in real-world situations." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.675, + 0.787, + 0.809 + ], + "angle": 0, + "content": "Recently, there have been several attempts to make a 3D hand recovery system robust to motion blur. FrankMocap [34] synthetically generated motion blur for data augmentation. BlurHand [27] presented synthetic blurry hand data in conjunction with their baseline network, BlurHandNet, with the goal of recovering three temporal 3D hands. However, both rely on artificially generated motion blur, which has a domain gap from real-world blur [41]. In particular, the motion detail of the BlurHand dataset is limited by the frame rate of their RGB cameras. To address these shortcomings, we introduce the EBH dataset, which features real blur-hand images with an event stream." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Event stream. An event stream comprises a sequence of data points that chronicle changes within a scene over time. Specialized sensors capture these changes" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.373, + 0.127 + ], + "angle": 0, + "content": "JK. Park et al." + }, + { + "type": "image", + "bbox": [ + 0.232, + 0.145, + 0.49, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.162, + 0.766, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.291, + 0.788, + 0.362 + ], + "angle": 0, + "content": "Fig. 3: Overview of our EBH dataset. We capture blurry hand images with one camera using a longer exposure time and obtain sharp hand images with six additional cameras, three triggered simultaneously for the blurry image and three with delayed triggers for different time steps. Additionally, we enrich our dataset with event stream. The right figure depicts the cameras, color-circled for their respective groups." 
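As a quick sanity check on the capture design summarized in the Fig. 3 caption above, the sketch below (ours, not from the paper's release) maps the nominal timings quoted in Sec. 3 (an 80 ms blur exposure, two sharp-camera groups at 30 fps, and a 16 ms trigger offset for the second group) to normalized time steps t in [0, 1]. Up to timing jitter and rounding, these land near the six annotated steps t ≈ {0, 0.2, 0.4, 0.6, 0.8, 1.0} used in the experiments.

```python
# Hypothetical helper: derive normalized time steps of the six sharp frames
# within one blur exposure. Constants follow the nominal values in Sec. 3.
EXPOSURE_MS = 80.0             # blur-camera exposure time
SHARP_PERIOD_MS = 1000 / 30    # ~33.3 ms between frames of one 30 fps sharp group
GROUP_B_OFFSET_MS = 16.0       # trigger delay of the second sharp group

group_a = [i * SHARP_PERIOD_MS for i in range(3)]                      # 0.0, 33.3, 66.7 ms
group_b = [GROUP_B_OFFSET_MS + i * SHARP_PERIOD_MS for i in range(3)]  # 16.0, 49.3, 82.7 ms

# Normalize into the exposure window: t = 0 marks the motion's start, t = 1 its end.
t_values = sorted(round(ts / EXPOSURE_MS, 2) for ts in group_a + group_b)
print(t_values)  # [0.0, 0.2, 0.42, 0.62, 0.83, 1.03]
```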
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.391, + 0.789, + 0.573 + ], + "angle": 0, + "content": "per-pixel basis, including attributes such as location, timestamp, and polarity, which indicate brightness changes. These data prove valuable in mitigating image blurriness by facilitating precise motion estimation, making them applicable in various tasks, including object tracking [18,40], and visual odometry for accurate camera trajectory [28]. More recently, inspired by the advantages of using event data, several works [26,35] have used event information for 3D hand recovery. Specifically, EventHand [35] introduced synthetic event data and trained their model on these events. Furthermore, [26] introduced an event stream simulator designed to generate event information for hands, simulating data from real event cameras. Based on this, we incorporate event information into our EBH dataset, offering a comprehensive view of overall motion and providing more data than limited 3D annotations at specific time intervals." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.596, + 0.378, + 0.612 + ], + "angle": 0, + "content": "3 EBH dataset" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.629, + 0.789, + 0.842 + ], + "angle": 0, + "content": "Fig. 3 shows the process of constructing our EBH dataset. Instead of artificially generating blur [27], we employ RGB cameras (Microsoft Kinects) with different exposure times for real motion blur. First, we configure a single camera (blue in Fig. 3) with an 80 millisecond (ms) exposure time and a 5 fps frame rate to capture the blurry hand image. Next, we employ two groups of three cameras (six cameras) with a 2.5 ms exposure time and a 30 fps frame rate. These camera groups are triggered differently: one group (red in Fig. 3) shares the same trigger as the camera used for the blurry image, while the other (orange in Fig. 3) has a trigger signal shifted by 16 ms. As a result, we obtain a total of 18 sharp images (6-time steps \\(\\times\\) 3 cameras), as indicated by the red and orange boxed hand images in Fig. 3. Motivated by recent neural network-based annotators [8,11,21, 23], we train a MANO parameter [33] estimation network on our dataset and test it on the training set, where the output becomes the 3D ground truth. Given the additional depth information from our RGB cameras (Microsoft Kinects)," + } + ], + [ + { + "type": "header", + "bbox": [ + 0.551, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "EBH Dataset and Network" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.237 + ], + "angle": 0, + "content": "our model processes RGB images from three viewpoints and the corresponding depth maps to estimate MANO parameters. The network is self-supervised by minimizing loss functions between 1) 2D joints projected from the estimated hand mesh and those obtained by an off-the-shelf 2D joint detection model [39], 2) differentiable rendered silhouettes and masks from a matting model [15], and 3) rendered depth and their corresponding ground truth depth map." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.239, + 0.788, + 0.526 + ], + "angle": 0, + "content": "Furthermore, since obtaining 3D annotations from sharp images is done discretely due to the limited shutter speed of the RGB cameras, we introduce event stream (green in Fig. 
3) to supplement the limited information obtained from the discrete 3D annotations. To this end, we employ an event camera (Prophesee EVK4) with an exceptionally high frame rate of more than \\(10\\mathrm{K}\\) fps, which is calibrated to the RGB camera that captures the blurry hand image. We calibrate the event camera and RGB camera using a blinking checkerboard, with additional details that can be found in our supplementary material. Also, the event camera is synchronized with the RGB camera to capture the blurry hand image using the same external trigger. The event stream from the event camera provides continuous pixel-wise information, but it can be data-intensive. To address this, we accumulate event data at specific intervals, reducing the overall volume of the data. Specifically, to prevent information loss, we employ a sliding window approach to accumulate light intensity over predefined time intervals (e.g., 1ms). Then, we utilize color encoding to differentiate event occurrences at various time steps, assigning different colors to events happening at different times. This color-encoding process entails converting accumulated light intensity into a grayscale image, which is then mapped to an RGB representation using a light blue palette provided by the event camera's SDK." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.528, + 0.788, + 0.725 + ], + "angle": 0, + "content": "Finally, our EBH dataset includes hand images from 10 individuals displaying various commonly occurring gestures, totaling 40,213 annotated blurry hand images with corresponding event streams. The dataset is divided into training (32,482) and test sets (7,731). Please note that each blurry hand image in our dataset has six 3D annotations from the different time steps, represented as three in red and three in orange in Fig. 3. Consequently, our EBH dataset comprises a total of 241,278 \\((40,213 \\times 6)\\) 3D annotations. Furthermore, unlike the synthetic blurry hand dataset BlurHand [27], which mainly comprises hands with minimal or no blur, our EBH dataset includes more dynamic blur, as shown in Fig. 4. We measure the length of hand trajectory by summing the distances between 3D keypoints for each blurry hand image using ground-truth data, providing an indication of the hand blur level. For the details (e.g., camera ID, pose examples, and camera calibration), please refer to the supplementary material." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.755, + 0.338, + 0.77 + ], + "angle": 0, + "content": "4 EBHNet" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.787, + 0.842 + ], + "angle": 0, + "content": "Fig. 5 shows an overview of our EBHNet framework. Our goal is to recover sequences of 3D hands from a given input, changing the output number to match the time steps. To accommodate the diverse cases encountered in practical ap" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.374, + 0.127 + ], + "angle": 0, + "content": "JK. Park et al." 
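To make the sliding-window event accumulation described above concrete, here is a minimal sketch. The (x, y, timestamp, polarity) event layout, the sensor resolution, and the 1 ms bin width are illustrative assumptions, not the authors' released preprocessing.

```python
import numpy as np

def accumulate_events(events, h=720, w=1280, bin_ms=1.0, t0_us=0, t1_us=80_000):
    """Bin raw events into per-interval frames of signed intensity changes.

    events: (N, 4) array of (x, y, timestamp_us, polarity in {-1, +1}).
    Returns (num_bins, h, w); each frame can then be tone-mapped to the
    color-coded RGB representation mentioned in the text.
    """
    bin_us = int(bin_ms * 1000)
    num_bins = (t1_us - t0_us) // bin_us
    frames = np.zeros((num_bins, h, w), dtype=np.float32)
    x = events[:, 0].astype(int)
    y = events[:, 1].astype(int)
    t = events[:, 2]
    p = events[:, 3]
    keep = (t >= t0_us) & (t < t1_us)
    bins = ((t[keep] - t0_us) // bin_us).astype(int)
    np.add.at(frames, (bins, y[keep], x[keep]), p[keep])  # per-pixel signed counts
    return frames
```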
+ }, + { + "type": "image", + "bbox": [ + 0.266, + 0.155, + 0.493, + 0.322 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.334, + 0.323, + 0.425, + 0.335 + ], + "angle": 0, + "content": "(a) training set" + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.155, + 0.732, + 0.322 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.582, + 0.323, + 0.667, + 0.335 + ], + "angle": 0, + "content": "(b) testing set" + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.359, + 0.788, + 0.402 + ], + "angle": 0, + "content": "Fig. 4: Statistics on blur strength of EBH dataset compared to BlurHand (BH). \\( x \\)-axis denotes the length of hand trajectory, showing 3D joint movement in a single blurry hand image. \\( y \\)-axis shows the image proportion in the dataset." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.438, + 0.786, + 0.469 + ], + "angle": 0, + "content": "plications, we introduce various variants of EBHNet, each designed to utilize either blurry hand image alone, event data alone, or a combination of both." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.51, + 0.591, + 0.525 + ], + "angle": 0, + "content": "4.1 Feature Extraction from Various Inputs" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.554, + 0.788, + 0.841 + ], + "angle": 0, + "content": "To facilitate understanding, let us consider a scenario where both a blurry image \\(\\mathbf{I} \\in \\mathbb{R}^{H \\times W \\times 3}\\) and an event \\(\\mathbf{E}\\) are available. Here, \\(H = 256\\) and \\(W = 256\\) correspond to the height and width of the input image, respectively. Initially, we extract two types of features: the hand feature \\(\\mathbf{F}_{\\mathrm{I}}\\) from the blurry hand image \\(\\mathbf{I}\\) and the event feature \\(\\mathbf{F}_{\\mathrm{E}}\\) from the event data \\(\\mathbf{E}\\). We achieve this by utilizing a pre-trained Feature Pyramid Network (FPN) [16] trained on ImageNet [4] for the image feature and shallow convolutional networks for the event feature. Here, unlike a single image that can be used to recover multiple 3D hands at different time steps, each event feature corresponds to a single 3D hand. For example, with an exposure time of \\(80~\\mathrm{ms}\\) for blurry images and accumulating events within 1 ms intervals in our EBH dataset, a single blurry image can be aligned with up to 80 event frames. Therefore, we design our event feature extraction network to be shallow for efficiency. Also, note that both of these features, \\(\\mathbf{F}_{\\mathrm{I}}\\) and \\(\\mathbf{F}_{\\mathrm{E}}\\), share the same dimensions, which are \\(\\mathbb{R}^{h \\times w \\times c}\\), where \\(h = \\frac{H}{8}\\), \\(w = \\frac{W}{8}\\), and \\(c = 256\\) represent the height, width, and number of channels of the extracted features. Subsequently, we concatenate these two features and pass the combined feature through five residual blocks to obtain a fused feature \\(\\mathbf{F}\\). However, when only one of the image and the event is available, we do not perform the concatenation step. Instead, we feed either \\(\\mathbf{F}_{\\mathrm{I}}\\) or \\(\\mathbf{F}_{\\mathrm{E}}\\) into the residual blocks to obtain the feature \\(\\mathbf{F}\\)."
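A simplified sketch of the fusion stage just described (shapes follow the text: H = W = 256, h = w = 32, c = 256). The paper specifies an ImageNet-pretrained FPN for F_I, shallow convolutions for F_E, concatenation, and five residual blocks; the 1x1 channel-reduction layer and the residual-block internals below are our assumptions.

```python
import torch
import torch.nn as nn

class ResBlock(nn.Module):
    def __init__(self, c=256):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(c, c, 3, padding=1), nn.BatchNorm2d(c), nn.ReLU(inplace=True),
            nn.Conv2d(c, c, 3, padding=1), nn.BatchNorm2d(c),
        )

    def forward(self, x):
        return torch.relu(x + self.body(x))

class FeatureFusion(nn.Module):
    """Fuse F_I and F_E (each B x 256 x 32 x 32) into F; either input may be absent."""
    def __init__(self, c=256):
        super().__init__()
        self.reduce = nn.Conv2d(2 * c, c, 1)  # assumed: bring 2c channels back to c
        self.blocks = nn.Sequential(*[ResBlock(c) for _ in range(5)])

    def forward(self, f_img=None, f_evt=None):
        if f_img is not None and f_evt is not None:
            f = self.reduce(torch.cat([f_img, f_evt], dim=1))
        else:
            f = f_img if f_img is not None else f_evt  # image-only or event-only path
        return self.blocks(f)

fused = FeatureFusion()(torch.randn(1, 256, 32, 32), torch.randn(1, 256, 32, 32))
```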
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.551, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "EBH Dataset and Network" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.146, + 0.782, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.265, + 0.788, + 0.349 + ], + "angle": 0, + "content": "Fig. 5: The overall architecture of EBHNet. Our EBHNet first extracts feature \\(\\mathbf{F}\\) in three scenarios: image only, event only, and both. During training and evaluation, we employ a temporal splitter with different temporal embedding strategies. Specifically, during the training phase, we utilize temporal embedding value when corresponding ground truths are available. Conversely, during the testing phase, we adopt novel temporal embedding values to generate 3D hand meshes at novel time steps." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.367, + 0.41, + 0.383 + ], + "angle": 0, + "content": "4.2 Temporal Splitter" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.398, + 0.788, + 0.504 + ], + "angle": 0, + "content": "Unlike event data, which can determine a single 3D hand for each time step, a blurry image contains a hand's trajectory, making 3D hand recovery from it much more challenging. Hence, we introduce our temporal splitter, particularly useful for blurry image input. Our temporal splitter splits the 3D hand trajectory in the blurry image into a single 3D hand of a given query time step \\(\\mathbf{t}\\). When an event is included in the input, our temporal splitter is trained to produce a single 3D hand that corresponds to the time step of the input event data." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.506, + 0.788, + 0.701 + ], + "angle": 0, + "content": "Network architecture. The architectural design of our temporal splitter is depicted in Fig. 5. Starting with the feature \\(\\mathbf{F}\\) that contains information related to the hand across the entire temporal axis, we introduce our temporal splitter to refine this feature and obtain the feature of the hand at a specific time step \\(\\mathbf{t}\\), denoted as \\(\\mathbf{F}_{\\mathbf{t}}\\). To acquire the hand feature at a particular time step, we incorporate a temporal embedding \\(\\mathbf{t}\\). Here, \\(\\mathbf{t}\\) ranges from 0 to 1 and represents a normalized value within the exposure time, where \\(\\mathbf{t} = 0\\) indicates the motion's initiation, \\(\\mathbf{t} = 0.5\\) corresponds to the middle, and \\(\\mathbf{t} = 1\\) marks its conclusion. An important difference from the frequently used frequency encoding method [19] is our straightforward decision to append the temporal embedding \\(\\mathbf{t}\\) to the channel dimension of the feature \\(\\mathbf{F}\\), and then consolidate these features by applying a \\(1 \\times 1\\) convolution. The justification for our temporal embedding can be found in our experimental section." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.703, + 0.788, + 0.793 + ], + "angle": 0, + "content": "Then, the final output of the temporal splitter \\(\\mathbf{F}_{\\mathrm{t}}\\) is generated using a conventional self-attention Transformer [38]. To do this, we first derive the query \\(\\mathbf{q}_{\\mathrm{t}}\\) and the key-value pairs \\(\\mathbf{k}_{\\mathrm{t}} - \\mathbf{v}_{\\mathrm{t}}\\) from the feature maps after applying both positional and temporal embedding. 
This extraction is accomplished through the use of three separate \\(1\\times 1\\) convolutions. Subsequently, these query and key-value pairs are input into self-attention-based Transformer blocks:" + }, + { + "type": "equation", + "bbox": [ + 0.391, + 0.806, + 0.787, + 0.845 + ], + "angle": 0, + "content": "\\[\n\\mathbf {R} _ {\\mathrm {t}} = \\mathbf {q} _ {\\mathrm {t}} + \\operatorname {s o f t m a x} \\left(\\frac {\\mathbf {q} _ {\\mathrm {t}} \\mathbf {k} _ {\\mathrm {t}} ^ {T}}{\\sqrt {d _ {\\mathbf {k} _ {\\mathrm {t}}}}} \\right) \\mathbf {v} _ {\\mathrm {t}}, \\tag {1}\n\\]" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.374, + 0.128 + ], + "angle": 0, + "content": "JK. Park et al." + }, + { + "type": "equation", + "bbox": [ + 0.42, + 0.162, + 0.786, + 0.179 + ], + "angle": 0, + "content": "\\[\n\\mathbf {F} _ {\\mathrm {t}} = \\mathbf {R} _ {\\mathrm {t}} + \\operatorname {M L P} \\left(\\mathbf {R} _ {\\mathrm {t}}\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.185, + 0.785, + 0.26 + ], + "angle": 0, + "content": "where \\( d_{\\mathbf{k}_{\\mathrm{t}}} = 256 \\) is the feature dimension of the key \\( \\mathbf{k}_{\\mathrm{t}} \\), and \\( \\mathbf{R}_{\\mathrm{t}} \\) is the residual feature of Transformer. In addition, MLP refers to a multi-layer perceptron, which is responsible for increasing the dimension of the input feature by a factor of 2 and subsequently reducing it back to its original dimension using two separate \\( 1 \\times 1 \\) convolution layers." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.261, + 0.787, + 0.396 + ], + "angle": 0, + "content": "Novel time step prediction. To enable our temporal splitter to predict 3D hands at novel, unsupervised time steps, we adjust the temporal embedding \\(\\mathbf{t}\\) between the training and testing phases. During training, where we have access to ground truth 3D hands at specific time points, we restrict the values of our temporal embedding \\(\\mathbf{t}\\) to correspond to these ground truth time steps. For example, in the case of the BlurHand dataset [27], which provides 3D annotations at the motion's start, middle, and end, we set \\(\\mathbf{t} = \\{0,0.5,1\\}\\). This configuration guarantees the desired generation of 3D hands at specific time steps, ensuring alignment with the available ground-truth data during the training phase." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.397, + 0.788, + 0.503 + ], + "angle": 0, + "content": "Conversely, during testing, we use an unconstrained approach, allowing continuous values for temporal embedding \\(\\mathbf{t}\\) within the range of 0 to 1. This differs from the training phase, where we restrict the temporal embedding values to time steps with ground truth. This flexibility enables our model to generate 3D hands at novel time steps that were not explicitly provided in the training data. For example, we can produce 3D hands at time steps like 0.25 or 0.75, enhancing the versatility of our model's output when confronted with unseen time steps." 
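A condensed sketch of one temporal-splitter block as described above: the scalar time step t is appended along the channel axis of F and merged by a 1x1 convolution (the straightforward alternative to frequency encoding [19] noted earlier), after which self-attention and an MLP realize Eqs. (1) and (2). The positional embedding and the number of stacked Transformer blocks are omitted or assumed here.

```python
import torch
import torch.nn as nn

class TemporalSplitterBlock(nn.Module):
    def __init__(self, c=256):
        super().__init__()
        self.merge = nn.Conv2d(c + 1, c, 1)  # concat t as an extra channel, then 1x1 conv
        self.to_q = nn.Conv2d(c, c, 1)
        self.to_k = nn.Conv2d(c, c, 1)
        self.to_v = nn.Conv2d(c, c, 1)
        self.mlp = nn.Sequential(nn.Conv2d(c, 2 * c, 1), nn.ReLU(inplace=True),
                                 nn.Conv2d(2 * c, c, 1))  # expand by 2x, project back

    def forward(self, feat, t):
        b, c, h, w = feat.shape
        t_map = torch.full((b, 1, h, w), float(t), device=feat.device)
        x = self.merge(torch.cat([feat, t_map], dim=1))
        # Flatten spatial positions into tokens for self-attention.
        q = self.to_q(x).flatten(2).transpose(1, 2)  # (B, h*w, c)
        k = self.to_k(x).flatten(2).transpose(1, 2)
        v = self.to_v(x).flatten(2).transpose(1, 2)
        attn = torch.softmax(q @ k.transpose(1, 2) / (c ** 0.5), dim=-1)
        r = q + attn @ v                                             # Eq. (1)
        mlp_out = self.mlp(r.transpose(1, 2).reshape(b, c, h, w))
        f_t = r + mlp_out.flatten(2).transpose(1, 2)                 # Eq. (2)
        return f_t.transpose(1, 2).reshape(b, c, h, w)

f_t = TemporalSplitterBlock()(torch.randn(1, 256, 32, 32), t=0.5)
```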
+ }, + { + "type": "title", + "bbox": [ + 0.216, + 0.525, + 0.467, + 0.541 + ], + "angle": 0, + "content": "4.3 Obtaining Final Outputs" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.55, + 0.788, + 0.732 + ], + "angle": 0, + "content": "From the hand feature \\(\\mathbf{F}_{\\mathrm{t}}\\), we extract the joint features by projecting \\(\\mathbf{F}_{\\mathrm{t}}\\) into a \\(dJ\\)-dimensional feature through a \\(1\\times 1\\) convolution layer [20]. These features are then reshaped into 3D heatmaps \\(\\mathbf{H}_{\\mathrm{t}}\\in \\mathbb{R}^{J\\times h\\times w\\times d}\\), where \\(d = 32\\) represents predefined depth discretization, and \\(J = 21\\) is the number of hand joints. Subsequently, a soft-argmax operation [37] is applied to the heatmap to obtain 3D joint coordinates \\(\\mathbf{J}_{\\mathrm{t}}\\in \\mathbb{R}^{J\\times 3}\\). From \\(\\mathbf{F}_{\\mathrm{t}}\\) and \\(\\mathbf{J}_{\\mathrm{t}}\\), we derive the MANO pose \\(\\theta_{\\mathrm{t}}\\) and shape \\(\\beta_{\\mathrm{t}}\\) parameters. Specifically, the shape parameter \\(\\beta_{\\mathrm{t}}\\) is obtained through a fully connected layer applied to \\(\\mathbf{F}_{\\mathrm{t}}\\) after global average pooling [14]. For the pose parameter \\(\\theta_{\\mathrm{t}}\\), grid-sampling [7,20] is conducted on \\(\\mathbf{F}_{\\mathrm{t}}\\) with \\(\\mathbf{J}_{\\mathrm{t}}\\) to obtain joint features \\(\\mathbf{F}_{\\mathrm{J}_{\\mathrm{t}}}\\), and then the pose parameter \\(\\theta_{\\mathrm{t}}\\) is obtained by feeding \\(\\mathbf{F}_{\\mathrm{J}_{\\mathrm{t}}}\\) into a fully connected layer after flattening. Subsequently, the MANO parameters are passed to the MANO layer to produce 3D hand meshes \\(\\mathbf{V}_{\\mathrm{t}}\\)." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.755, + 0.429, + 0.77 + ], + "angle": 0, + "content": "4.4 Objective Functions" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.78, + 0.787, + 0.842 + ], + "angle": 0, + "content": "We minimize objective functions, defined as a weighted sum of L1 distances between estimated values \\((\\theta_{\\mathrm{t}},\\beta_{\\mathrm{t}},\\mathbf{J}_{\\mathrm{t}},\\) and \\(\\mathbf{V}_{\\mathrm{t}})\\) and their respective ground truth. Among our input combinations (e.g., only image, only event, and both), the event stream effectively reduces temporal ambiguity, while using only the image" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.551, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "EBH Dataset and Network" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "table_caption", + "bbox": [ + 0.217, + 0.145, + 0.786, + 0.2 + ], + "angle": 0, + "content": "Table 1: Accuracy of the GT. We measure metrics by comparing predicted keypoints, masks, and their depth maps with their ground truth counterparts, which are obtained from an off-the-shelf 2D keypoint estimator [39], an off-the-shelf matting model [15], and a Kinect camera." + }, + { + "type": "table", + "bbox": [ + 0.349, + 0.213, + 0.662, + 0.293 + ], + "angle": 0, + "content": "
<table><tr><th>Evaluation on</th><th>Metric</th><th>Value</th></tr>
<tr><td>2D keypoints</td><td>distance</td><td>0.18 (pixel)</td></tr>
<tr><td>Mask</td><td>IoU</td><td>84 (%)</td></tr>
<tr><td>Depth map</td><td>distance</td><td>8 (mm)</td></tr></table>
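Looking back at the output heads of Sec. 4.3, the soft-argmax read-out from the 3D heatmaps can be sketched as follows; the (J, d, h, w) axis order and the absence of a temperature term are our assumptions, with the paper following [37].

```python
import torch

def soft_argmax_3d(heatmaps):
    """heatmaps: (B, J, d, h, w) -> continuous 3D joint coordinates (B, J, 3)."""
    b, j, d, h, w = heatmaps.shape
    probs = torch.softmax(heatmaps.flatten(2), dim=2).view(b, j, d, h, w)
    zs = torch.arange(d, dtype=probs.dtype)
    ys = torch.arange(h, dtype=probs.dtype)
    xs = torch.arange(w, dtype=probs.dtype)
    z = (probs.sum(dim=(3, 4)) * zs).sum(-1)  # expectation over depth bins
    y = (probs.sum(dim=(2, 4)) * ys).sum(-1)  # expectation over rows
    x = (probs.sum(dim=(2, 3)) * xs).sum(-1)  # expectation over columns
    return torch.stack([x, y, z], dim=-1)

joints = soft_argmax_3d(torch.randn(1, 21, 32, 32, 32))  # J = 21, d = 32 as in the text
```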
" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.321, + 0.786, + 0.427 + ], + "angle": 0, + "content": "may struggle. Therefore, we employ different objective functions for each case. For the image-only input, we calculate losses for both the original and reversed ground truth, obtained by converting the ground truth in the reverse temporal order. The model is then updated by selecting the smaller loss between them. On the other hand, when the event stream is used as input (event-only or combined with images), we supervise EBHNet with a loss obtained from our prediction and the original ground truth without considering temporal ambiguity." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.451, + 0.375, + 0.468 + ], + "angle": 0, + "content": "5 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.482, + 0.531, + 0.497 + ], + "angle": 0, + "content": "5.1 Datasets and Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.508, + 0.786, + 0.553 + ], + "angle": 0, + "content": "InterHand2.6M. InterHand2.6M [24] is a large-scale dataset, which consists of sharp hand images and 3D annotations. We use the BlurHand [27]'s splits for the training and evaluation on InterHand2.6M." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.553, + 0.786, + 0.658 + ], + "angle": 0, + "content": "BlurHand. BlurHand [27] consists of blurry hand images created by synthetically averaging consecutive 5 frames from the InterHand2.6M dataset [24] (30fps). This process imitates motion blur typically seen with a 6fps (30/5) shutter speed. Note that BlurHand is derived from the InterHand2.6M dataset, which contains a substantial amount of static hand images; hence, many images in BlurHand do not have large blur, as shown in Fig. 4. We follow their protocol for the training and evaluation on BlurHand." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.659, + 0.786, + 0.81 + ], + "angle": 0, + "content": "EBH. Among the six ground truths for each blurry hand image (red and orange in Fig. 3), we use only a subset during training to assess our model's ability to generate 3D hands at both learned and novel time steps. During training, we use four of the ground truth samples. For example, in Fig. 3, out of the ground truth for six-time steps (depicted as red and orange steps), we use four for training (the first and last of red and the first two orange). During testing, we evaluate models on the remaining two ground truth samples (middle red and the last orange in Fig. 3) to check the generalizability to unseen time steps. We also evaluate on the trained time steps to assess the model's proficiency in recovering 3D hands at the seen time steps." + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.81, + 0.786, + 0.84 + ], + "angle": 0, + "content": "Evaluation metrics. To assess the accuracy of predicted hands, we employ two metrics: the Mean Per Joint Position Error (MPJPE) and the Mean Per Vertex" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.373, + 0.127 + ], + "angle": 0, + "content": "JK. Park et al." 
+ }, + { + "type": "image", + "bbox": [ + 0.218, + 0.144, + 0.293, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.225, + 0.203, + 0.284, + 0.213 + ], + "angle": 0, + "content": "(a) Input I" + }, + { + "type": "image", + "bbox": [ + 0.299, + 0.151, + 0.358, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.303, + 0.203, + 0.36, + 0.213 + ], + "angle": 0, + "content": "(b) From I" + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.144, + 0.444, + 0.202 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.371, + 0.203, + 0.441, + 0.214 + ], + "angle": 0, + "content": "(c) Deblur \\(\\mathbf{I}_{\\mathrm{D}}\\)" + }, + { + "type": "image", + "bbox": [ + 0.447, + 0.15, + 0.512, + 0.196 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.447, + 0.203, + 0.512, + 0.214 + ], + "angle": 0, + "content": "(d) From \\(\\mathbf{I}_{\\mathrm{D}}\\)" + }, + { + "type": "image", + "bbox": [ + 0.525, + 0.152, + 0.587, + 0.194 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.536, + 0.203, + 0.574, + 0.213 + ], + "angle": 0, + "content": "(e) GT" + }, + { + "type": "table", + "bbox": [ + 0.616, + 0.153, + 0.756, + 0.213 + ], + "angle": 0, + "content": "
<table><tr><th rowspan="2">Data</th><th rowspan="2">Deblur</th><th colspan="3">MPJPE↓</th></tr>
<tr><th>Init.</th><th>Mid.</th><th>Final</th></tr>
<tr><td>BH</td><td>-</td><td>16.79</td><td>17.23</td><td>16.45</td></tr>
<tr><td>EBH</td><td>-</td><td>16.14</td><td>16.10</td><td>15.23</td></tr></table>
" + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.231, + 0.788, + 0.288 + ], + "angle": 0, + "content": "Fig. 6 & Table 2: (Left) Visual comparison with deblurring. (b) and (d) show the estimated 3D hands using \\(\\mathbf{I}\\) and \\(\\mathbf{I}_{\\mathrm{D}}\\), respectively. (Right) Efficacy of EBHNet compared to deblurring baseline. Results are reported when only images are used as input. For the EBH dataset, we employ \\(\\mathbf{t} = 0.6\\) for evaluating the Mid." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.318, + 0.789, + 0.426 + ], + "angle": 0, + "content": "Position Error (MPVPE). These metrics gauge the Euclidean distance (mm) between the estimated coordinates and the ground truth coordinates. Here, we measure the metrics after aligning the translation of the root joint (i.e., wrist), following the prior researches [3, 27]. Also, to evaluate the temporal consistency of hand motion, we use the acceleration error proposed in HMMR [9]. Here, the acceleration error calculates the average difference between the predicted and ground truth accelerations of each joint of hands." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.45, + 0.402, + 0.463 + ], + "angle": 0, + "content": "5.2 Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.477, + 0.789, + 0.674 + ], + "angle": 0, + "content": "Validation of EBH Dataset Accuracy To validate the accuracy of our EBH dataset, Tab. 1 shows metrics for predicted 2D keypoints, masks, and depth maps against their ground truth counterparts. The predicted 2D keypoints, masks, and depth maps are either projected (2D keypoints) or rendered (masks and depth maps) from the predicted 3D meshes. Ground truth 2D keypoints, masks, and depth maps are obtained from a 2D keypoint detection model [39], a matting model [15], and the output of a Kinect camera. For 2D keypoints, we calculate the mean distance between 21 joints from the projected and detected keypoints. For masks, we compute the Intersection over Union (IoU) between the rendered mask and the mask from the matting model. For the depth map, we measure the pixel-wise L1 distance between the predicted and camera-derived depth maps. Here, all metrics are calculated after cropping and resizing the sharp hand images to \\(\\mathbb{R}^{256\\times 256\\times 3}\\). For additional details, refer to our supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.674, + 0.789, + 0.764 + ], + "angle": 0, + "content": "Comparison with deblurring. In assessing EBHNet's performance in producing 3D hands from blurry hand images, we compare it with deblurring methods from prior works [2, 30, 31]. To this end, we integrate a state-of-the-art deblurring network [2] before applying EBHNet. Tab. 2 reveals that ours significantly outperforms the one that employs deblurring before 3D hand recovery. The performance decline can be attributed to several factors." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.789, + 0.842 + ], + "angle": 0, + "content": "First, as shown in Fig. 6a, even advanced deblurring networks struggle to restore sharp hand images from blurry ones. Second, deblurring processes often eliminate valuable temporal information for 3D hand recovery. For example, Fig. 6c exhibits the absence of the middle finger, leading to inaccurate 3D hand mesh in Fig. 6d. 
Also, deblurring can restrict networks from producing single" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.551, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "EBH Dataset and Network" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.493, + 0.201 + ], + "angle": 0, + "content": "Table 3: Comparison of models with various temporal encoding. Results are reported using images as input. Freq. indicates frequency embedding [19]." + }, + { + "type": "table", + "bbox": [ + 0.242, + 0.213, + 0.477, + 0.302 + ], + "angle": 0, + "content": "
<table><tr><th rowspan="2">Data</th><th rowspan="2">Methods</th><th colspan="3">MPJPE↓</th></tr>
<tr><th>Init.</th><th>Mid.</th><th>Final</th></tr>
<tr><td rowspan="2">BH</td><td>Freq.</td><td>18.09</td><td>17.07</td><td>18.27</td></tr>
<tr><td>Ours</td><td>17.23</td><td>16.45</td><td>17.17</td></tr>
<tr><td rowspan="2">EBH</td><td>Freq.</td><td>16.71</td><td>16.21</td><td>17.98</td></tr>
<tr><td>Ours</td><td>16.10</td><td>15.23</td><td>17.15</td></tr></table>
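The two temporal-conditioning variants compared in Table 3 above can be sketched side by side: the frequency baseline expands sinusoids of t via a 1x1 convolution and adds them to F (per the description of [19, 27] in the text below), while ours simply concatenates t as an extra channel. The number of frequency bands is an illustrative assumption.

```python
import torch
import torch.nn as nn

class FreqEmbed(nn.Module):
    """Baseline: sinusoidal encoding of t, projected and added to the feature map."""
    def __init__(self, c=256, n_freq=8):
        super().__init__()
        self.proj = nn.Conv2d(2 * n_freq, c, 1)
        self.n_freq = n_freq

    def forward(self, f, t):
        b, _, h, w = f.shape
        freqs = (2.0 ** torch.arange(self.n_freq, device=f.device)) * torch.pi * t
        enc = torch.cat([freqs.sin(), freqs.cos()]).view(1, -1, 1, 1).expand(b, -1, h, w)
        return f + self.proj(enc)

class ConcatEmbed(nn.Module):
    """Ours: append t along the channel dimension and merge with a 1x1 convolution."""
    def __init__(self, c=256):
        super().__init__()
        self.merge = nn.Conv2d(c + 1, c, 1)

    def forward(self, f, t):
        b, _, h, w = f.shape
        t_map = torch.full((b, 1, h, w), float(t), device=f.device)
        return self.merge(torch.cat([f, t_map], dim=1))
```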
" + }, + { + "type": "table_caption", + "bbox": [ + 0.51, + 0.145, + 0.788, + 0.201 + ], + "angle": 0, + "content": "Table 4: Comparison of EBHNet with various input combinations. We use four GTs in training and evaluate the model on the corresponding time steps." + }, + { + "type": "table", + "bbox": [ + 0.517, + 0.213, + 0.787, + 0.302 + ], + "angle": 0, + "content": "
<table><tr><th rowspan="2">Input</th><th colspan="4">MPJPE↓</th></tr>
<tr><th>t=0</th><th>t=0.2</th><th>t=0.6</th><th>t=0.8</th></tr>
<tr><td>I</td><td>16.10</td><td>14.29</td><td>15.23</td><td>16.18</td></tr>
<tr><td>E</td><td>28.91</td><td>29.35</td><td>28.59</td><td>30.80</td></tr>
<tr><td>I & E</td><td>12.73</td><td>12.00</td><td>14.24</td><td>16.35</td></tr></table>
" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.315, + 0.304, + 0.381 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.224, + 0.382, + 0.295, + 0.393 + ], + "angle": 0, + "content": "(a) Image I" + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.315, + 0.401, + 0.381 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.319, + 0.382, + 0.393, + 0.393 + ], + "angle": 0, + "content": "(b) Event E" + }, + { + "type": "image", + "bbox": [ + 0.425, + 0.317, + 0.48, + 0.375 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.424, + 0.382, + 0.48, + 0.393 + ], + "angle": 0, + "content": "(c) On \\(\\mathbf{E}\\)" + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.321, + 0.578, + 0.376 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.522, + 0.382, + 0.576, + 0.393 + ], + "angle": 0, + "content": "(d) On \\(\\mathbf{I}\\)" + }, + { + "type": "image", + "bbox": [ + 0.618, + 0.319, + 0.674, + 0.377 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.61, + 0.382, + 0.681, + 0.393 + ], + "angle": 0, + "content": "(e) On both" + }, + { + "type": "image", + "bbox": [ + 0.716, + 0.318, + 0.771, + 0.378 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.721, + 0.382, + 0.763, + 0.393 + ], + "angle": 0, + "content": "(f) GT" + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.405, + 0.788, + 0.448 + ], + "angle": 0, + "content": "Fig.7: Comparison of different input combinations. Event information in severely blurry hand images can offer valuable complementary data, validating the effectiveness of incorporating event streams in our approach to address blurry hands." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.477, + 0.785, + 0.508 + ], + "angle": 0, + "content": "outputs instead of sequences. Conversely, EBHNet excels at using the image's temporal information, producing multiple 3D hands at different time steps." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.509, + 0.787, + 0.628 + ], + "angle": 0, + "content": "Comparison with frequency encoding. Tab. 3 compares our temporal embedding with conventional frequency encoding [19]. For frequency encoding, following BlurHandNet [27], we apply a sinusoidal operation on \\(\\mathbf{t}\\), expand the dimension to match feature \\(\\mathbf{F}\\) using \\(1 \\times 1\\) convolution layers, and then add the output to \\(\\mathbf{F}\\). The table shows that our more straightforward approach, which concatenates the temporal encoding \\(\\mathbf{t}\\) across the channel of feature \\(\\mathbf{F}\\), and processes them through \\(1 \\times 1\\) convolution layers, consistently outperforms the frequency encoding on both BlurHand [27] and our EBH datasets." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.629, + 0.788, + 0.81 + ], + "angle": 0, + "content": "Various input combinations for EBHNet. Tab. 4 shows the results for different input combinations, including images only, events only, and both. Among six available ground truths, note that we employ only four ground truths at \\( t = \\{0,0.2,0.6,0.8\\} \\) for supervision and evaluate the model on these four time steps. As shown, when using event stream as an input (second row in Tab. 4), metric shows significant degradation. This is because events lack information for static hands, leading to a failure in recovering 3D hands. 
On the other hand, when both image and event streams are utilized (third row in Tab. 4), events complement the image in capturing 3D hands across time steps, particularly in blurry regions, yielding the best performance. Furthermore, Fig. 7 demonstrates that combining event and image input (Fig. 7e) produces results closest to the ground truth (Fig. 7f), consistent with Tab. 4." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.81, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Novel time step reconstruction. Tab. 5 evaluates the metrics at novel time steps, not included in the training phase, to further assess EBHNet's capacity to" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.373, + 0.127 + ], + "angle": 0, + "content": "JK. Park et al." + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.145, + 0.296, + 0.206 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.229, + 0.207, + 0.286, + 0.216 + ], + "angle": 0, + "content": "(a) Input I" + }, + { + "type": "image", + "bbox": [ + 0.298, + 0.144, + 0.377, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.3, + 0.207, + 0.376, + 0.216 + ], + "angle": 0, + "content": "(b) Interpolate" + }, + { + "type": "image", + "bbox": [ + 0.378, + 0.144, + 0.458, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.392, + 0.207, + 0.44, + 0.216 + ], + "angle": 0, + "content": "(c) Ours" + }, + { + "type": "image", + "bbox": [ + 0.46, + 0.145, + 0.538, + 0.203 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.476, + 0.207, + 0.516, + 0.216 + ], + "angle": 0, + "content": "(d) GT" + }, + { + "type": "table", + "bbox": [ + 0.561, + 0.15, + 0.766, + 0.219 + ], + "angle": 0, + "content": "
<table><tr><th rowspan="2">Data</th><th rowspan="2">Methods</th><th colspan="2">MPJPE↓</th><th colspan="2">MPVPE↓</th></tr>
<tr><th>t=0.4</th><th>t=1.0</th><th>t=0.4</th><th>t=1.0</th></tr>
<tr><td rowspan="2">BH</td><td>Linear</td><td>-</td><td>18.12</td><td>-</td><td>16.81</td></tr>
<tr><td>Ours (I)</td><td>-</td><td>17.87</td><td>-</td><td>16.30</td></tr>
<tr><td rowspan="4">EBH</td><td>Linear</td><td>14.66</td><td>17.53</td><td>19.20</td><td>17.95</td></tr>
<tr><td>Ours (I)</td><td>14.38</td><td>17.15</td><td>18.73</td><td>17.58</td></tr>
<tr><td>Ours (E)</td><td>28.74</td><td>31.12</td><td>25.41</td><td>25.92</td></tr>
<tr><td>Ours (I & E)</td><td>14.03</td><td>16.76</td><td>16.88</td><td>17.09</td></tr></table>
" + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.235, + 0.787, + 0.334 + ], + "angle": 0, + "content": "Fig. 8 & Table 5: (Left) Visual comparison between linear interpolation and temporal embedding. We generate the middle hand from two predicted 3D hands (red and white) in two ways: (b) linear interpolation, and (c) applying a temporal embedding value between those of the neighboring 3D hands. Colored cross markers show the same joint (tip of the middle finger) at different time steps. (Right) Comparison of models at a novel time step. For BH, we show the metrics at time step \\(t = 1.0\\), while for EBH, we present the metrics at time steps \\(t = \\{0.4, 1.0\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.36, + 0.788, + 0.526 + ], + "angle": 0, + "content": "generate 3D hands at untrained time steps. For EBH, we evaluate 3D hand recovery at two novel time steps not included in the training phase. For BlurHand, our model is trained only on initial and middle hands (\\(t = \\{0,0.5\\}\\)), and we evaluate its performance in recovering hands located at the last time step (\\(t = \\{1\\}\\)). Tab. 5 compares the model's performance using three different input combinations with linear interpolation for reference. To clarify, linear interpolation outputs a hand at \\(t = 0.4\\) by linearly interpolating the neighboring hands, which are obtained by our EBHNet trained on \\(\\mathbf{I}\\) (e.g., \\(\\mathbf{V}_{0.4} = \\frac{1}{2}\\mathbf{V}_{0.2} + \\frac{1}{2}\\mathbf{V}_{0.6}\\)). The table shows the superior performance of our approach with both image and event inputs and with image-only input. This is attributed to our temporal splitter and event stream, guiding the network to predict 3D hands at specific time steps." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.527, + 0.788, + 0.603 + ], + "angle": 0, + "content": "Fig. 8 visually compares linear interpolation and our temporal embedding for generating novel hand sequences. In Fig. 8b, while linear interpolation restricts all articulation movements in a linear way (see the purple line in Fig. 8), our method produces more plausible results by using the corresponding temporal embedding values, exhibiting motion trajectories similar to the GT." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.622, + 0.548, + 0.637 + ], + "angle": 0, + "content": "5.3 Comparisons with Previous Works" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.644, + 0.789, + 0.841 + ], + "angle": 0, + "content": "Performance comparison. Tabs. 6 and 7 clearly show that our EBHNet surpasses previous 3D hand mesh estimation methods. In Tab. 6, most prior approaches [12, 20, 22] do not account for motion blur in hand images, resulting in inaccuracies. While BlurHandNet [27] performs admirably by considering three time steps, it may overlook crucial hand information between those time steps, as it extracts features based on supervision with 3D hands at those specific time steps. Moreover, BlurHandNet consists of modules dedicated to each time step; thus, it cannot generate hands at novel time steps that were not included in the training phase. In contrast, our approach does not constrain the extracted feature \\(\\mathbf{F}\\) to a 3D hand at particular time steps, outperforming prior methods by leveraging temporal information from the entire motion trajectory.
Also, our EBHNet can generate 3D hands at novel time steps without requiring training data for those specific time points, distinguishing it from BlurHandNet." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.551, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "EBH Dataset and Network" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.493, + 0.215 + ], + "angle": 0, + "content": "Table 6: Comparison with SoTA methods on BH [27]. For MPVPE, we evaluate metrics at the midpoint of the motion. As BH provides only images, our results are based on image inputs." + }, + { + "type": "table", + "bbox": [ + 0.241, + 0.227, + 0.472, + 0.291 + ], + "angle": 0, + "content": "
<table><tr><th rowspan="2">Methods</th><th colspan="3">MPJPE↓</th><th rowspan="2">MPVPE↓</th><th rowspan="2">Accel↓</th></tr>
<tr><th>Init.</th><th>Mid.</th><th>Final</th></tr>
<tr><td>I2L-MeshNet [22]</td><td>-</td><td>24.32</td><td>-</td><td>23.07</td><td>-</td></tr>
<tr><td>Pose2Pose [20]</td><td>-</td><td>18.80</td><td>-</td><td>17.42</td><td>-</td></tr>
<tr><td>METRO [12]</td><td>-</td><td>20.54</td><td>-</td><td>27.03</td><td>-</td></tr>
<tr><td>BlurHandNet [27]</td><td>18.08</td><td>16.80</td><td>18.21</td><td>15.30</td><td>3.94</td></tr>
<tr><td>EBHNet (Ours)</td><td>17.23</td><td>16.45</td><td>17.17</td><td>15.02</td><td>3.37</td></tr></table>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.51, + 0.145, + 0.787, + 0.201 + ], + "angle": 0, + "content": "Table 7: Comparison with SoTA methods on our proposed EBH. For MPVPE, we assess metrics for the hand at the midpoint of the motion (t=0.6)." + }, + { + "type": "table", + "bbox": [ + 0.514, + 0.213, + 0.787, + 0.291 + ], + "angle": 0, + "content": "
<table><tr><th rowspan="2">Input</th><th rowspan="2">Methods</th><th colspan="4">MPJPE↓</th><th rowspan="2">MPVPE↓</th><th rowspan="2">Accel↓</th></tr>
<tr><th>t=0</th><th>t=0.2</th><th>t=0.6</th><th>t=0.8</th></tr>
<tr><td rowspan="4">I</td><td>I2L-MeshNet [22]</td><td>-</td><td>-</td><td>28.12</td><td>-</td><td>30.86</td><td>-</td></tr>
<tr><td>Pose2Pose [20]</td><td>-</td><td>-</td><td>17.28</td><td>-</td><td>20.41</td><td>-</td></tr>
<tr><td>BlurHandNet [27]</td><td>17.08</td><td>15.53</td><td>16.13</td><td>17.54</td><td>17.99</td><td>5.78</td></tr>
<tr><td>EBHNet (Ours)</td><td>16.10</td><td>14.29</td><td>15.23</td><td>16.18</td><td>17.89</td><td>4.69</td></tr>
<tr><td rowspan="2">E</td><td>EventHands [35]</td><td>28.80</td><td>29.81</td><td>28.91</td><td>30.97</td><td>25.70</td><td>10.75</td></tr>
<tr><td>EBHNet (Ours)</td><td>28.91</td><td>29.35</td><td>28.59</td><td>30.80</td><td>25.30</td><td>9.25</td></tr>
<tr><td>I & E</td><td>EBHNet (Ours)</td><td>12.73</td><td>12.00</td><td>14.24</td><td>16.35</td><td>16.93</td><td>3.19</td></tr></table>
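For reference, the Accel column in Tables 6 and 7 above follows the acceleration error of HMMR [9] introduced in Sec. 5.1. A common implementation, sketched below under the assumption of uniform frame spacing, takes joint accelerations as second finite differences over time and averages the per-joint gap.

```python
import torch

def accel_error(pred, gt):
    """pred, gt: (T, J, 3) joint sequences (T >= 3); returns mean acceleration gap."""
    accel_pred = pred[2:] - 2 * pred[1:-1] + pred[:-2]  # second finite difference
    accel_gt = gt[2:] - 2 * gt[1:-1] + gt[:-2]
    return (accel_pred - accel_gt).norm(dim=-1).mean()

err = accel_error(torch.randn(6, 21, 3), torch.randn(6, 21, 3))
```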
" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.304, + 0.29, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.361, + 0.283, + 0.37 + ], + "angle": 0, + "content": "(a) Event E" + }, + { + "type": "image", + "bbox": [ + 0.298, + 0.305, + 0.351, + 0.357 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.301, + 0.36, + 0.345, + 0.37 + ], + "angle": 0, + "content": "(b) [35]" + }, + { + "type": "image", + "bbox": [ + 0.364, + 0.308, + 0.426, + 0.356 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.362, + 0.36, + 0.429, + 0.369 + ], + "angle": 0, + "content": "(c) EBHNet" + }, + { + "type": "image", + "bbox": [ + 0.436, + 0.309, + 0.497, + 0.357 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.448, + 0.36, + 0.486, + 0.369 + ], + "angle": 0, + "content": "(d) GT" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.304, + 0.574, + 0.358 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.36, + 0.568, + 0.37 + ], + "angle": 0, + "content": "(e) Event E" + }, + { + "type": "image", + "bbox": [ + 0.582, + 0.308, + 0.638, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.587, + 0.36, + 0.627, + 0.37 + ], + "angle": 0, + "content": "(f) [35]" + }, + { + "type": "image", + "bbox": [ + 0.648, + 0.308, + 0.71, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.647, + 0.36, + 0.713, + 0.369 + ], + "angle": 0, + "content": "(g) EBHNet" + }, + { + "type": "image", + "bbox": [ + 0.722, + 0.309, + 0.778, + 0.349 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.732, + 0.36, + 0.77, + 0.369 + ], + "angle": 0, + "content": "(h) GT" + }, + { + "type": "image_caption", + "bbox": [ + 0.216, + 0.388, + 0.785, + 0.417 + ], + "angle": 0, + "content": "Fig. 9: Visual comparison with ours and EventHand [35]. When only the event is provided, ours yields results that closely resemble the corresponding ground truth." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.447, + 0.785, + 0.583 + ], + "angle": 0, + "content": "Moreover, Tab. 7 compares image-only, event-only, and both inputs in the EBH dataset. Modifying BlurHandNet [27] to predict four outputs, our EBHNet consistently outperforms prior methods, excelling against an event-based 3D recovery method [35]. The best results of our EBHNet are obtained with both event and image inputs. Also, Tabs. 6 and 7 compare the acceleration error (Accel), showing that EBHNet produces more temporally consistent outcomes by successfully addressing the real motion. Here, acceleration error is computed only for hand sequence generation methods [27,35]. Fig. 9 shows a visual comparison between our EBHNet and EventHand [35] when only event inputs are used." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.583, + 0.785, + 0.658 + ], + "angle": 0, + "content": "Furthermore, Fig. 10 visually compares EBHNet with previous methods [20, 22,27] when using only image inputs. As shown, Pose2Pose [20] and I2L-MeshNet [22] even struggle to capture the hand pose, while BlurHandNet [27] fails to capture motion information, resulting in consistent outputs across time steps. In contrast, EBHNet successfully generates 3D hands at different time steps." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.659, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Efficiency comparison. Tab. 8 shows a comparison of the efficiency of our EBHNet with previous methods. Since all previous methods use images as input, we evaluate the efficiency of our EBHNet when using images only as input. We first compare our EBHNet with Pose2Pose [20] and I2L-MeshNet [22], which produce a single output, by considering EBHNet (1) generating only one output. Here, EBHNet (1) exhibits comparable computational efficiency while showing quantitatively superior results compared to Pose2Pose and I2L-MeshNet in Tables 6 and 7. Compared to BlurHandNet, we configure EBHNet to generate the same number of 3D hands for a fair comparison in EBHNet (3). In contrast to BlurHandNet [27], which requires additional layers for predicting multiple hands and results in a larger model size, EBHNet can generate 3D hands at different time steps using varied temporal embeddings, leading to a reduced model size." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.373, + 0.127 + ], + "angle": 0, + "content": "JK. Park et al." + }, + { + "type": "image", + "bbox": [ + 0.227, + 0.145, + 0.784, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.295, + 0.787, + 0.339 + ], + "angle": 0, + "content": "Fig. 10: Visual comparison with previous methods on BH [27] and our EBH. Unlike other methods that generate a fixed number of 3D hands, EBHNet can produce a variable number of 3D hands. The red circles show severely blurry regions." + }, + { + "type": "table_caption", + "bbox": [ + 0.215, + 0.352, + 0.788, + 0.396 + ], + "angle": 0, + "content": "Table 8: Efficiency comparison with previous methods. Our EBHNet doesn't require extra parameters for recovering 3D hands at additional time steps, maintaining the same number of parameters whether predicting a single hand or three hands." + }, + { + "type": "table", + "bbox": [ + 0.277, + 0.406, + 0.731, + 0.44 + ], + "angle": 0, + "content": "
| Metrics | Pose2Pose [20] | I2L-MeshNet [22] | BlurHandNet [27] | EBHNet (1) | EBHNet (3) |
| Num of params (MB)↓ | 77 | 141 | 202 | 146 | 146 |
| Latency (fps)↑ | 26.31 | 25.76 | 14.43 | 25.12 | 15.45 |
" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.467, + 0.364, + 0.483 + ], + "angle": 0, + "content": "6 Limitations" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.498, + 0.788, + 0.577 + ], + "angle": 0, + "content": "Our EBH dataset includes hands with various real motion blur and corresponding 3D annotations, marking a crucial step in addressing key community challenges. However, since our data were captured from 10 individuals, it may lack shape diversity. We will address this issue by designing additional modules for shape generalization or constructing a large-scale dataset from various people." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.597, + 0.36, + 0.613 + ], + "angle": 0, + "content": "7 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.629, + 0.788, + 0.78 + ], + "angle": 0, + "content": "This work tackles the challenging problem of recovering a 3D hand sequence from a blurry hand. To this end, we introduce the EBH, the first dataset that includes real blurry hand images, their corresponding 3D ground truths, and continuous temporal information from an event stream. In conjunction with the EBH dataset, we propose EBHNet, a method for generating 3D hand sequences from a single blurry hand input in diverse combinations. Our experiments show the efficacy of the EBH and EBHNet in enhancing 3D hand sequence recovery from blurry hands, with the ability to generate hand sequences at novel time steps. Our contributions offer future insights for addressing motion blur in 3D hand recovery." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.78, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Acknowledgments. This work was supported in part by the IITP grants [No. 2021-0-01343, Artificial Intelligence Graduate School Program (Seoul National University), No.2021-0-02068, and No.2023-0-00156], the NRF grant [No. 2021M3A9E4080782] funded by the Korean government (MSIT)." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.551, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "EBH Dataset and Network" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.145, + 0.323, + 0.16 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.177, + 0.785, + 0.204 + ], + "angle": 0, + "content": "1. Boukhayma, A., Bem, R.d., Torr, P.H.: 3D hand shape and pose from images in the wild. In: CVPR (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.205, + 0.785, + 0.232 + ], + "angle": 0, + "content": "2. Chen, L., Chu, X., Zhang, X., Sun, J.: Simple baselines for image restoration. In: ECCV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.233, + 0.785, + 0.26 + ], + "angle": 0, + "content": "3. Choi, H., Moon, G., Lee, K.M.: Pose2Mesh: Graph convolutional network for 3D human pose and mesh recovery from a 2D human pose. In: ECCV (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.261, + 0.785, + 0.287 + ], + "angle": 0, + "content": "4. Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: A large-scale hierarchical image database. In: CVPR (2009)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.288, + 0.785, + 0.315 + ], + "angle": 0, + "content": "5. Hampali, S., Rad, M., Oberweger, M., Lepetit, V.: Honnotate: A method for 3D annotation of hand and object poses. 
In: CVPR (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.316, + 0.785, + 0.357 + ], + "angle": 0, + "content": "6. Hampali, S., Sarkar, S.D., Rad, M., Lepetit, V.: Keypoint Transformer: Solving joint identification in challenging hands and object interactions for accurate 3D pose estimation. In: CVPR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.358, + 0.785, + 0.384 + ], + "angle": 0, + "content": "7. Jaderberg, M., Simonyan, K., Zisserman, A., et al.: Spatial transformer networks. In: NeurIPS (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.385, + 0.785, + 0.412 + ], + "angle": 0, + "content": "8. Joo, H., Neverova, N., Vedaldi, A.: Exemplar fine-tuning for 3D human model fitting towards in-the-wild 3D human pose estimation. In: 3DV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.413, + 0.785, + 0.439 + ], + "angle": 0, + "content": "9. Kanazawa, A., Zhang, J.Y., Felsen, P., Malik, J.: Learning 3d human dynamics from video. In: CVPR (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.44, + 0.785, + 0.467 + ], + "angle": 0, + "content": "0. Kulon, D., Guler, R.A., Kokkinos, I., Bronstein, M.M., Zafeiriou, S.: Weakly-supervised mesh-convolutional hand reconstruction in the wild. In: CVPR (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.468, + 0.785, + 0.495 + ], + "angle": 0, + "content": "1. Li, Z., Liu, J., Zhang, Z., Xu, S., Yan, Y.: CIUFF: Carrying location information in full frames into human pose and shape estimation. In: ECCV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.496, + 0.785, + 0.522 + ], + "angle": 0, + "content": "2. Lin, K., Wang, L., Liu, Z.: End-to-end human pose and mesh reconstruction with transformers. In: CVPR (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.523, + 0.661, + 0.537 + ], + "angle": 0, + "content": "3. Lin, K., Wang, L., Liu, Z.: Mesh graphormer. In: ICCV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.538, + 0.785, + 0.564 + ], + "angle": 0, + "content": "4. Lin, M., Chen, Q., Yan, S.: Network in network. arXiv preprint arXiv:1312.4400 (2013)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.565, + 0.785, + 0.592 + ], + "angle": 0, + "content": "5. Lin, S., Yang, L., Saleemi, I., Sengupta, S.: Robust high-resolution video matting with temporal guidance. In: WACV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.593, + 0.785, + 0.62 + ], + "angle": 0, + "content": "6. Lin, T.Y., Dolkar, P., Girshick, R., He, K., Hariharan, B., Belongie, S.: Feature pyramid networks for object detection. In: CVPR (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.62, + 0.785, + 0.646 + ], + "angle": 0, + "content": "7. Liu, S., Jiang, H., Xu, J., Liu, S., Wang, X.: Semi-supervised 3D hand-object poses estimation with interactions in time. In: CVPR (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.647, + 0.785, + 0.674 + ], + "angle": 0, + "content": "8. Messikommer, N., Fang, C., Gehrig, M., Scaramuzza, D.: Data-driven feature tracking for event cameras. In: CVPR (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.675, + 0.785, + 0.716 + ], + "angle": 0, + "content": "9. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. ACM (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.717, + 0.785, + 0.744 + ], + "angle": 0, + "content": "20. 
Moon, G., Choi, H., Lee, K.M.: Accurate 3D hand pose estimation for whole-body 3D human mesh estimation. In: CVPRW (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.745, + 0.785, + 0.771 + ], + "angle": 0, + "content": "21. Moon, G., Choi, H., Lee, K.M.: Neuralannot: Neural annotator for 3d human mesh training sets. In: CVPR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.772, + 0.785, + 0.799 + ], + "angle": 0, + "content": "22. Moon, G., Lee, K.M.: I2L-MeshNet: Image-to-lixel prediction network for accurate 3D human pose and mesh estimation from a single RGB image. In: ECCV (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.8, + 0.785, + 0.84 + ], + "angle": 0, + "content": "23. Moon, G., Saito, S., Xu, W., Joshi, R., Buffalini, J., Bellan, H., Rosen, N., Richardson, J., Mize, M., De Bree, P., et al.: A dataset of relighted 3D interacting hands. In: NeurIPS (2023)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.177, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.373, + 0.127 + ], + "angle": 0, + "content": "JK. Park et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.147, + 0.785, + 0.189 + ], + "angle": 0, + "content": "24. Moon, G., Yu, S.I., Wen, H., Shiratori, T., Lee, K.M.: Interhand2.6M: A dataset and baseline for 3D interacting hand pose estimation from a single RGB image. In: ECCV (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.19, + 0.785, + 0.217 + ], + "angle": 0, + "content": "25. Nah, S., Kim, T.H., Lee, K.M.: Deep multi-scale convolutional neural network for dynamic scene deblurring. In: CVPR (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.218, + 0.785, + 0.244 + ], + "angle": 0, + "content": "26. Nehvi, J., Golyanik, V., Mueller, F., Seidel, H.P., Elgharib, M., Theobalt, C.: Differentiable event stream simulator for non-rigid 3d tracking. In: CVPR (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.245, + 0.785, + 0.286 + ], + "angle": 0, + "content": "27. Oh, Y., Park, J., Kim, J., Moon, G., Lee, K.M.: Recovering 3d hand mesh sequence from a single blurry image: A new dataset and temporal unfolding. In: CVPR (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.287, + 0.785, + 0.314 + ], + "angle": 0, + "content": "28. Ozawa, T., Sekikawa, Y., Saito, H.: Accuracy and speed improvement of event camera motion estimation using a bird's-eye view transformation. Sensors (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.315, + 0.785, + 0.342 + ], + "angle": 0, + "content": "29. Park, J., Jung, D.S., Moon, G., Lee, K.M.: Extract-and-adaptation network for 3d interacting hand mesh recovery. In: ICCVW (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.343, + 0.785, + 0.383 + ], + "angle": 0, + "content": "30. Park, J., Nah, S., Lee, K.M.: Pay attention to hidden states for video deblurring: Ping-pong recurrent neural networks and selective non-local attention. arXiv preprint arXiv:2203.16063 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.384, + 0.785, + 0.411 + ], + "angle": 0, + "content": "31. Park, J., Nah, S., Lee, K.M.: Recurrence-in-recurrence networks for video deblurring. In: BMVC (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.411, + 0.785, + 0.438 + ], + "angle": 0, + "content": "32. 
Park, J., Oh, Y., Moon, G., Choi, H., Lee, K.M.: Handoccnet: Occlusion-robust 3D hand mesh estimation network. In: CVPR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.439, + 0.785, + 0.466 + ], + "angle": 0, + "content": "33. Romero, J., Tzionas, D., Black, M.J.: Embodied hands: Modeling and capturing hands and bodies together. SIGGRAPH Asia (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.467, + 0.785, + 0.494 + ], + "angle": 0, + "content": "34. Rong, Y., Shiratori, T., Joo, H.: FrankMocap: A monocular 3D whole-body pose estimation system via regression and integration. In: ICCVW (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.495, + 0.785, + 0.535 + ], + "angle": 0, + "content": "35. Rudnev, V., Golyanik, V., Wang, J., Seidel, H.P., Mueller, F., Elgharib, M., Theobalt, C.: Eventhands: Real-time neural 3d hand pose estimation from an event stream. In: ICCV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.536, + 0.785, + 0.563 + ], + "angle": 0, + "content": "36. Shen, Z., Wang, W., Shen, J., Ling, H., Xu, T., Shao, L.: Human-aware motion deblurring. In: ICCV (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.564, + 0.785, + 0.591 + ], + "angle": 0, + "content": "37. Sun, X., Xiao, B., Wei, F., Liang, S., Wei, Y.: Integral human pose regression. In: ECCV (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.592, + 0.785, + 0.619 + ], + "angle": 0, + "content": "38. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: NeurIPS (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.62, + 0.785, + 0.66 + ], + "angle": 0, + "content": "39. Zhang, F., Bazarevsky, V., Vakunov, A., Tkachenka, A., Sung, G., Chang, C.L., Grundmann, M.: Mediapipe hands: On-device real-time hand tracking. arXiv preprint arXiv:2006.10214 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.661, + 0.785, + 0.688 + ], + "angle": 0, + "content": "40. Zhang, S., Wang, W., Li, H., Zhang, S.: Evtracker: An event-driven spatiotemporal method for dynamic object tracking. Sensors (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.689, + 0.785, + 0.716 + ], + "angle": 0, + "content": "41. Zhong, Z., Gao, Y., Zheng, Y., Zheng, B.: Efficient spatio-temporal recurrent neural network for video deblurring. In: ECCV (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.717, + 0.785, + 0.743 + ], + "angle": 0, + "content": "42. Zimmermann, C., Brox, T.: Learning to estimate 3D hand pose from single RGB images. In: ICCV (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.744, + 0.785, + 0.785 + ], + "angle": 0, + "content": "43. Zimmermann, C., Ceylan, D., Yang, J., Russell, B., Argus, M., Brox, T.: Freihand: A dataset for markerless capture of hand pose and shape from single RGB images. 
In: ICCV (2019)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.785, + 0.785 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/c806d671-8f45-4954-8ba0-1ca55bf4fc0d_origin.pdf b/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/c806d671-8f45-4954-8ba0-1ca55bf4fc0d_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..44146af110468ef4c3dc44cd9f0feadf7ddaae59 --- /dev/null +++ b/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/c806d671-8f45-4954-8ba0-1ca55bf4fc0d_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:baa99a534d69e6c21166755ead463fabf7c5822da0c3631c2741c3bed96e0867 +size 6114175 diff --git a/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/full.md b/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/full.md new file mode 100644 index 0000000000000000000000000000000000000000..c08763908f09ccaaafa9af5ad128afe526e1144b --- /dev/null +++ b/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/full.md @@ -0,0 +1,345 @@ +# 3D Hand Sequence Recovery from Real Blurry Images and Event Stream + +Joonkyu Park1, Gyeongsik Moon3,4, Weipeng Xu4, Evan Kaseman4, Takaaki Shiratori4, and Kyoung Mu Lee1,2 + +$^{1}$ Dept. of ECE&ASRI, $^{2}$ IPAI, Seoul National University, Korea $^{3}$ DGIST + +$^{4}$ Codec Avatars Lab, Meta + +{jkpark0825, kyoungmu}@snu.ac.kr, mks0601@gmail.com, + +{xuweipeng, ekaseman, tshiratori}@meta.com + +https://jkpark08.github.io/EBH + +![](images/d2b17a2bdf9cc07f82963459c864da272144348600f5f02a417f0368f5d65d9c.jpg) +Fig. 1: Blurry hand to 3D hand sequences. We address 3D hand sequence recovery with real motion blur, showing proficiency in predicting 3D hands at novel time steps. + +![](images/d361cb97edfb193a40e6d119186db73008f45f94798a8dc42ddebd799b458427.jpg) + +![](images/4666be7000d8cfc471df6840bfb1d0ca6a737ea3e6d355969a7332998d609b1d.jpg) + +Abstract. Although hands frequently exhibit motion blur due to their dynamic nature, existing approaches for 3D hand recovery often disregard the impact of motion blur in hand images. Blurry hand images contain hands from multiple time steps, lack precise hand location at a specific time step, and introduce temporal ambiguity, leading to multiple possible hand trajectories. To address this issue and in the absence of datasets with real blur, we introduce the EBH dataset, which provides 1) hand images with real motion blur and 2) event data for authentic representation of fast hand movements. In conjunction with our new dataset, we present EBHNet, a novel network capable of recovering 3D hands from diverse input combinations, including blurry hand images, events, or both. Here, the event stream enhances motion understanding in blurry hands, addressing temporal ambiguity. Recognizing that blurry hand images include not only single 3D hands at a time step but also multiple hands along their motion trajectories, we design EBHNet to generate 3D hand sequences in motion. Moreover, to enable our EBHNet to predict 3D hands at novel, unsupervised time steps using a single shared module, we employ a Transformer-based module, temporal splitter, into EBHNet. Our experiments show the superior performance of EBH and EBHNet, especially in handling blurry hand images, making them valuable in real-world applications. 
+ +Keywords: 3D hand sequence recovery $\cdot$ Blurry hands $\cdot$ Event stream + +![](images/d78fdff3c51bac024426b61aa935ffd6c74c49c2c2d02ffdb4cb3846840fca43.jpg) +(a) Blurry image + +![](images/67c106cc55eb268194d38a21adc381bf55a3dfd164d3f494da935a6955d00aa2.jpg) +(b) Mesh + +![](images/2d8833f3e8127159f9a356b79309c4c54994b145aa8d41ee3ec625a676012f4c.jpg) +(c) Event +Fig. 2: Examples of the proposed EBH dataset. Our EBH consists of real motion-blurred hand images with the corresponding meshes derived from sharp images captured from three different viewpoints during motion. Also, we offer an event stream. + +![](images/d30f090a6fb9baa4ff7fc257c8296e4db75dbd1bd576c2eb236129cb70ec9aca.jpg) +(d) Blurry image + +![](images/e428866d5c4bac0b4cd783f378ea3b7d7ab79d984716e8aa13b37ebd9716ebfd.jpg) +(e) Mesh + +![](images/5e228bdb5e7338373c4dee15fbdf3ffe8598f8d0806375eb9e798e4b0efcd02a.jpg) +(f) Event + +# 1 Introduction + +The dynamic nature of hands leads to prevalent blurriness during rapid movements (e.g., sports, typing, and dancing), posing a challenge for accurately capturing hand information. Moreover, isolating a single hand during motion is impractical because blurry hands convey multiple hand information during the motion. To address this, there is a need for a robust framework that can effectively recover 3D hand sequence even in the presence of motion blur. However, existing methods [3, 5, 10, 12, 13, 22] have exhibited subpar performance when faced with motion blur present in hand images. + +The main issue arises from the lack of datasets containing both blurry hand images and accurate 3D ground truth. Although Oh et al. [27] introduced the BlurHand dataset, it has limitations. First, their motion blur is artificially generated by averaging sequential frames [25, 36], which doesn't accurately reflect real-world blur. Second, BlurHand [27] provides only images, which can lead to challenges in accurately capturing the trajectory due to temporal ambiguity. The images display the hand's position over time, lacking exact information about its location at particular time steps and overall motion. As a result, a single blurry image can exhibit multiple potential motion trajectories, leading to temporal ambiguity. While BlurHand includes corresponding sharp image pairs to assist in determining the motion trajectory, relying solely on these sharp images may not provide an accurate trajectory beyond the frame rates of the sharp images. + +To address these issues, we propose the EBH dataset, which captures real motion blur in hand images using cameras with different exposure times, where the examples are shown in Fig. 2. Specifically, we use an RGB camera with extended exposure times to generate blurry hand images with real motion blur. At the same time, multiple RGB cameras with shorter exposure times are employed to capture sharp images. The 3D annotations (i.e., meshes and keypoints) are derived from pairs of these multi-view sharp images. Also, we incorporate event data to compensate for the temporal ambiguity in the blurry hand images. This event data captures instantaneous changes in brightness, helping find the hand's position and track its movement in the blurry image. + +Building upon the EBH dataset, we present EBHNet, a flexible network, designed to predict a 3D hand sequence from diverse input combinations: 1) + +only a single blurry image, 2) only event data, and 3) both a blurry image and event data. In each case, we adapt feature extraction based on input types. 
+ +With the feature extracted by considering various input types, our goal is to predict 3D hand sequences, which consist of 3D hands at different time steps. Here, using a dedicated 3D hand recovery module for each time step, such as BlurHandNet [27], can inflate model parameters, especially with more 3D hands at different time steps. To address this, we propose a Transformer-based module, temporal splitter, in our EBHNet. This enables the generation of multiple 3D hands at novel time steps using a single shared module, with the temporal splitter estimating based on the temporal embedding. Additionally, our temporal embeddings enable EBHNet to generate 3D hands at novel time steps without requiring supervision for those steps. + +Using our newly introduced dataset, EBH, and the baseline network, EBHNet, we tackle the task of recovering 3D hands from blurry hand images. Our experiments show the efficacy of our dataset when applied to real-world blurry hand scenarios and the robustness of EBHNet in handling such cases. We summarize our contributions as follows: + +- We introduce the EBH dataset for 3D hand recovery from motion-blurred images, providing real-world motion blur and event information. +- We introduce EBHNet, a novel network for 3D hand sequence recovery from blurry hands, capable of handling diverse input combinations. +- The temporal splitter in EBHNet allows our network to produce 3D hands at novel time steps using a single shared module. + +# 2 Related works + +3D hand recovery. Since the introduction of RGB-based hand benchmark datasets [5, 24, 32, 43], several methods [1, 3, 22, 42] for hand estimation have emerged. Although several approaches [6,17,32] have focused on addressing hand occlusion, especially in cases where objects are being held, others have taken on the task of handling interacting hands [24, 29], which are frequently encountered in real-world scenarios. However, there is a notable research gap on the challenge of fast-moving hands despite its prevalence in real-world situations. + +Recently, there have been several attempts to make a 3D hand recovery system robust to motion blur. FrankMocap [34] synthetically generated motion blur for data augmentation. BlurHand [27] presented synthetic blurry hand data in conjunction with their baseline network, BlurHandNet, with the goal of recovering three temporal 3D hands. However, both rely on artificially generated motion blur, which has a domain gap from real-world blur [41]. In particular, the motion detail of the BlurHand dataset is limited by the frame rate of their RGB cameras. To address these shortcomings, we introduce the EBH dataset, which features real blur-hand images with an event stream. + +Event stream. An event stream comprises a sequence of data points that chronicle changes within a scene over time. Specialized sensors capture these changes + +![](images/585d10ab9586d5b6b17a9ac14d1627949587580061fee12aa23fe5359d6e3a6c.jpg) +Fig. 3: Overview of our EBH dataset. We capture blurry hand images with one camera using a longer exposure time and obtain sharp hand images with six additional cameras, three triggered simultaneously for the blurry image and three with delayed triggers for different time steps. Additionally, we enrich our dataset with event stream. The right figure depicts the cameras, color-circled for their respective groups. 

![](images/d825495986d77b5dc941fb475fa7a91a9fe9e36bb01c1d259aa00cf777b7be1a.jpg)

on a per-pixel basis, including attributes such as location, timestamp, and polarity, which indicates the sign of the brightness change. These data prove valuable in mitigating image blurriness by facilitating precise motion estimation, making them applicable to various tasks, including object tracking [18, 40] and visual odometry for accurate camera trajectories [28]. More recently, inspired by the advantages of event data, several works [26, 35] have used event information for 3D hand recovery. Specifically, EventHand [35] introduced synthetic event data and trained their model on these events. Furthermore, [26] introduced an event stream simulator designed to generate event information for hands, simulating data from real event cameras. Based on this, we incorporate event information into our EBH dataset, offering a comprehensive view of the overall motion and providing more data than the limited 3D annotations at specific time intervals.

# 3 EBH dataset

Fig. 3 shows the process of constructing our EBH dataset. Instead of artificially generating blur [27], we employ RGB cameras (Microsoft Kinects) with different exposure times for real motion blur. First, we configure a single camera (blue in Fig. 3) with an 80 millisecond (ms) exposure time and a 5 fps frame rate to capture the blurry hand image. Next, we employ two groups of three cameras (six cameras) with a 2.5 ms exposure time and a 30 fps frame rate. These camera groups are triggered differently: one group (red in Fig. 3) shares the same trigger as the camera used for the blurry image, while the other (orange in Fig. 3) has a trigger signal shifted by 16 ms. As a result, we obtain a total of 18 sharp images (6 time steps $\times$ 3 cameras), as indicated by the red and orange boxed hand images in Fig. 3. Motivated by recent neural network-based annotators [8, 11, 21, 23], we train a MANO parameter [33] estimation network on our dataset and test it on the training set, where the output becomes the 3D ground truth. Given the additional depth information from our RGB cameras (Microsoft Kinects), our model processes RGB images from three viewpoints and the corresponding depth maps to estimate MANO parameters. The network is self-supervised by minimizing loss functions between 1) 2D joints projected from the estimated hand mesh and those obtained by an off-the-shelf 2D joint detection model [39], 2) differentiably rendered silhouettes and masks from a matting model [15], and 3) rendered depth maps and the corresponding ground truth depth maps.

Furthermore, since obtaining 3D annotations from sharp images is done discretely due to the limited shutter speed of the RGB cameras, we introduce an event stream (green in Fig. 3) to supplement the limited information obtained from the discrete 3D annotations. To this end, we employ an event camera (Prophesee EVK4) with an exceptionally high frame rate of more than $10\mathrm{K}$ fps, which is calibrated to the RGB camera that captures the blurry hand image. We calibrate the event camera and RGB camera using a blinking checkerboard; additional details can be found in our supplementary material. Also, the event camera is synchronized with the RGB camera capturing the blurry hand image using the same external trigger. The event stream from the event camera provides continuous pixel-wise information, but it can be data-intensive.
To address this, we accumulate event data at specific intervals, reducing the overall volume of the data. Specifically, to prevent information loss, we employ a sliding window approach to accumulate light intensity over predefined time intervals (e.g., 1 ms). Then, we utilize color encoding to differentiate event occurrences at various time steps, assigning different colors to events happening at different times. This color-encoding process entails converting the accumulated light intensity into a grayscale image, which is then mapped to an RGB representation using a light blue palette provided by the event camera's SDK.

Finally, our EBH dataset includes hand images from 10 individuals displaying various commonly occurring gestures, totaling 40,213 annotated blurry hand images with corresponding event streams. The dataset is divided into training (32,482) and test (7,731) sets. Please note that each blurry hand image in our dataset has six 3D annotations from different time steps, represented as three in red and three in orange in Fig. 3. Consequently, our EBH dataset comprises a total of 241,278 $(40,213 \times 6)$ 3D annotations. Furthermore, unlike the synthetic blurry hand dataset BlurHand [27], which mainly comprises hands with minimal or no blur, our EBH dataset includes more dynamic blur, as shown in Fig. 4. We measure the length of the hand trajectory by summing the distances between 3D keypoints for each blurry hand image using ground-truth data, providing an indication of the hand blur level. For further details (e.g., camera IDs, pose examples, and camera calibration), please refer to the supplementary material.

# 4 EBHNet

Fig. 5 shows an overview of our EBHNet framework. Our goal is to recover sequences of 3D hands from a given input, changing the number of outputs to match the time steps. To accommodate the diverse cases encountered in practical applications, we introduce various variants of EBHNet, each designed to utilize either a blurry hand image alone, event data alone, or a combination of both.

![](images/b67bcc046a94ee0c69f1196b044181428fa7034085c2a7f68827582a7fa30a3f.jpg)
(a) training set
Fig. 4: Statistics on the blur strength of the EBH dataset compared to BlurHand (BH). The $x$-axis denotes the length of the hand trajectory, showing 3D joint movement in a single blurry hand image. The $y$-axis shows the image proportion in the dataset.

![](images/495468db9416b1e8512ca14af6512de2f8102c6dba65deb0c562d03b97588946.jpg)
(b) testing set

# 4.1 Feature Extraction from Various Inputs

To facilitate understanding, let us consider a scenario where both a blurry image $\mathbf{I} \in \mathbb{R}^{H \times W \times 3}$ and an event $\mathbf{E}$ are available. Here, $H = 256$ and $W = 256$ correspond to the height and width of the input image, respectively. Initially, we extract two types of features: the hand feature $\mathbf{F}_{\mathrm{I}}$ from the blurry hand image $\mathbf{I}$ and the event feature $\mathbf{F}_{\mathrm{E}}$ from the event data $\mathbf{E}$. We achieve this by utilizing a Feature Pyramid Network (FPN) [16] pre-trained on ImageNet [4] for the image feature and shallow convolutional networks for the event feature. Here, unlike a single image that can be used to recover multiple 3D hands at different time steps, each event feature corresponds to a single 3D hand.
For example, with an exposure time of $80~\mathrm{ms}$ for blurry images and accumulating events within 1 ms intervals in our EBH dataset, a single blurry image can be aligned with up to 80 event frames. Therefore, we design our event feature extraction network to be shallow for efficiency. Also, note that both of these features, $\mathbf{F}_{\mathrm{I}}$ and $\mathbf{F}_{\mathrm{E}}$, share the same dimensions, $\mathbb{R}^{h \times w \times c}$, where $h = \frac{H}{8}$, $w = \frac{W}{8}$, and $c = 256$ represent the height, width, and number of channels of the extracted features. Subsequently, we concatenate these two features and pass the combined feature through five residual blocks to obtain a fused feature $\mathbf{F}$. However, when only one of the image and the event is available, we do not perform the concatenation step. Instead, we feed either $\mathbf{F}_{\mathrm{I}}$ or $\mathbf{F}_{\mathrm{E}}$ into the residual blocks to obtain the feature $\mathbf{F}$.

![](images/58faa59bf870ca72203aef8bd4a1628711022aad6446d9a948c2e255bf153be5.jpg)
Fig. 5: The overall architecture of EBHNet. Our EBHNet first extracts the feature $\mathbf{F}$ in three scenarios: image only, event only, and both. During training and evaluation, we employ a temporal splitter with different temporal embedding strategies. Specifically, during the training phase, we utilize temporal embedding values for which corresponding ground truths are available. Conversely, during the testing phase, we adopt novel temporal embedding values to generate 3D hand meshes at novel time steps.

# 4.2 Temporal Splitter

Unlike event data, which can determine a single 3D hand for each time step, a blurry image contains a hand's trajectory, making 3D hand recovery from it much more challenging. Hence, we introduce our temporal splitter, which is particularly useful for blurry image input. Our temporal splitter splits the 3D hand trajectory in the blurry image into a single 3D hand at a given query time step $\mathbf{t}$. When an event is included in the input, our temporal splitter is trained to produce a single 3D hand that corresponds to the time step of the input event data.

Network architecture. The architectural design of our temporal splitter is depicted in Fig. 5. Starting with the feature $\mathbf{F}$ that contains information related to the hand across the entire temporal axis, we introduce our temporal splitter to refine this feature and obtain the feature of the hand at a specific time step $\mathbf{t}$, denoted as $\mathbf{F}_{\mathbf{t}}$. To acquire the hand feature at a particular time step, we incorporate a temporal embedding $\mathbf{t}$. Here, $\mathbf{t}$ ranges from 0 to 1 and represents a normalized value within the exposure time, where $\mathbf{t} = 0$ indicates the motion's initiation, $\mathbf{t} = 0.5$ corresponds to the middle, and $\mathbf{t} = 1$ marks its conclusion. An important difference from the frequently used frequency encoding method [19] is our straightforward decision to append the temporal embedding $\mathbf{t}$ to the channel dimension of the feature $\mathbf{F}$ and then consolidate these features by applying a $1 \times 1$ convolution. The justification for our temporal embedding can be found in the experimental section.

Then, the final output of the temporal splitter $\mathbf{F}_{\mathrm{t}}$ is generated using a conventional self-attention Transformer [38].
To do this, we first derive the query $\mathbf{q}_{\mathrm{t}}$ and the key-value pair $(\mathbf{k}_{\mathrm{t}}, \mathbf{v}_{\mathrm{t}})$ from the feature maps after applying both positional and temporal embeddings. This extraction is accomplished through three separate $1\times 1$ convolutions. Subsequently, the query and key-value pair are input into self-attention-based Transformer blocks:

$$
\mathbf{R}_{\mathrm{t}} = \mathbf{q}_{\mathrm{t}} + \operatorname{softmax}\left(\frac{\mathbf{q}_{\mathrm{t}} \mathbf{k}_{\mathrm{t}}^{T}}{\sqrt{d_{\mathbf{k}_{\mathrm{t}}}}}\right) \mathbf{v}_{\mathrm{t}}, \tag{1}
$$

$$
\mathbf{F}_{\mathrm{t}} = \mathbf{R}_{\mathrm{t}} + \operatorname{MLP}\left(\mathbf{R}_{\mathrm{t}}\right), \tag{2}
$$

where $d_{\mathbf{k}_{\mathrm{t}}} = 256$ is the feature dimension of the key $\mathbf{k}_{\mathrm{t}}$, and $\mathbf{R}_{\mathrm{t}}$ is the residual feature of the Transformer. In addition, MLP refers to a multi-layer perceptron, which increases the dimension of the input feature by a factor of 2 and subsequently reduces it back to its original dimension using two separate $1 \times 1$ convolution layers.

Novel time step prediction. To enable our temporal splitter to predict 3D hands at novel, unsupervised time steps, we adjust the temporal embedding $\mathbf{t}$ between the training and testing phases. During training, where we have access to ground truth 3D hands at specific time points, we restrict the values of our temporal embedding $\mathbf{t}$ to correspond to these ground truth time steps. For example, in the case of the BlurHand dataset [27], which provides 3D annotations at the motion's start, middle, and end, we set $\mathbf{t} = \{0, 0.5, 1\}$. This configuration guarantees the desired generation of 3D hands at specific time steps, ensuring alignment with the available ground-truth data during the training phase.

Conversely, during testing, we use an unconstrained approach, allowing continuous values for the temporal embedding $\mathbf{t}$ within the range of 0 to 1. This differs from the training phase, where we restrict the temporal embedding values to time steps with ground truth. This flexibility enables our model to generate 3D hands at novel time steps that were not explicitly provided in the training data. For example, we can produce 3D hands at time steps like 0.25 or 0.75, enhancing the versatility of our model's output when confronted with unseen time steps.

# 4.3 Obtaining Final Outputs

From the hand feature $\mathbf{F}_{\mathrm{t}}$, we extract the joint features by projecting $\mathbf{F}_{\mathrm{t}}$ into a $dJ$-dimensional feature through a $1\times 1$ convolution layer [20]. These features are then reshaped into 3D heatmaps $\mathbf{H}_{\mathrm{t}}\in \mathbb{R}^{J\times h\times w\times d}$, where $d = 32$ represents the predefined depth discretization, and $J = 21$ is the number of hand joints. Subsequently, a soft-argmax operation [37] is applied to the heatmaps to obtain 3D joint coordinates $\mathbf{J}_{\mathrm{t}}\in \mathbb{R}^{J\times 3}$. From $\mathbf{F}_{\mathrm{t}}$ and $\mathbf{J}_{\mathrm{t}}$, we derive the MANO pose $\theta_{\mathrm{t}}$ and shape $\beta_{\mathrm{t}}$ parameters. Specifically, the shape parameter $\beta_{\mathrm{t}}$ is obtained through a fully connected layer applied to $\mathbf{F}_{\mathrm{t}}$ after global average pooling [14].
For the pose parameter $\theta_{\mathrm{t}}$, grid-sampling [7, 20] is conducted on $\mathbf{F}_{\mathrm{t}}$ with $\mathbf{J}_{\mathrm{t}}$ to obtain joint features $\mathbf{F}_{\mathrm{J}_{\mathrm{t}}}$, which are flattened and fed into a fully connected layer to obtain $\theta_{\mathrm{t}}$. Subsequently, the MANO parameters are passed to the MANO layer to produce 3D hand meshes $\mathbf{V}_{\mathrm{t}}$.

# 4.4 Objective Functions

We minimize objective functions defined as a weighted sum of L1 distances between the estimated values ($\theta_{\mathrm{t}}$, $\beta_{\mathrm{t}}$, $\mathbf{J}_{\mathrm{t}}$, and $\mathbf{V}_{\mathrm{t}}$) and their respective ground truths. Among our input combinations (e.g., only image, only event, and both), the event stream effectively reduces temporal ambiguity, while the image-only case may struggle with it.

Table 1: Accuracy of the GT. We measure metrics by comparing predicted keypoints, masks, and their depth maps with their ground truth counterparts, which are obtained from an off-the-shelf 2D keypoint estimator [39], an off-the-shelf matting model [15], and a Kinect camera.

| Evaluation on | Metric | Value |
| --- | --- | --- |
| 2D keypoints | distance | 0.18 (pixel) |
| Mask | IoU | 84 (%) |
| Depth map | distance | 8 (mm) |
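
For reference, the three agreement checks reported in Tab. 1 are straightforward to compute. The following is a minimal NumPy sketch under our own assumptions; the function names, array shapes, and valid-pixel mask are illustrative, not the authors' code.

```python
import numpy as np

def keypoint_distance(proj_kp, det_kp):
    """Mean pixel distance between the 21 projected and detected 2D joints.
    proj_kp, det_kp: (21, 2) arrays in the 256x256 crop."""
    return np.linalg.norm(proj_kp - det_kp, axis=-1).mean()

def mask_iou(rendered_mask, matting_mask):
    """IoU between the rendered mesh silhouette and the matting mask."""
    inter = np.logical_and(rendered_mask, matting_mask).sum()
    union = np.logical_or(rendered_mask, matting_mask).sum()
    return inter / union

def depth_l1(rendered_depth, kinect_depth, valid):
    """Pixel-wise L1 distance (mm), restricted to valid Kinect depth pixels."""
    return np.abs(rendered_depth - kinect_depth)[valid].mean()
```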

Therefore, we employ different objective functions for each case. For the image-only input, we calculate losses against both the original and the reversed ground truth, obtained by putting the ground truth in reverse temporal order. The model is then updated by selecting the smaller of the two losses. On the other hand, when the event stream is used as input (event-only or combined with images), we supervise EBHNet with a loss between our prediction and the original ground truth, without considering temporal ambiguity.

# 5 Experiments

# 5.1 Datasets and Evaluation Metrics

InterHand2.6M. InterHand2.6M [24] is a large-scale dataset consisting of sharp hand images and 3D annotations. We use the BlurHand [27] splits for training and evaluation on InterHand2.6M.

BlurHand. BlurHand [27] consists of blurry hand images created by synthetically averaging 5 consecutive frames from the InterHand2.6M dataset [24] (30 fps). This process imitates motion blur typically seen with a 6 fps (30/5) shutter speed. Note that BlurHand is derived from the InterHand2.6M dataset, which contains a substantial amount of static hand images; hence, many images in BlurHand do not have large blur, as shown in Fig. 4. We follow their protocol for training and evaluation on BlurHand.

EBH. Among the six ground truths for each blurry hand image (red and orange in Fig. 3), we use only a subset during training to assess our model's ability to generate 3D hands at both learned and novel time steps. During training, we use four of the ground truth samples. For example, in Fig. 3, out of the ground truths for six time steps (depicted as red and orange steps), we use four for training (the first and last red and the first two orange). During testing, we evaluate models on the remaining two ground truth samples (the middle red and the last orange in Fig. 3) to check generalizability to unseen time steps. We also evaluate on the trained time steps to assess the model's proficiency in recovering 3D hands at the seen time steps.

Evaluation metrics. To assess the accuracy of predicted hands, we employ two metrics: the Mean Per Joint Position Error (MPJPE) and the Mean Per Vertex Position Error (MPVPE).

![](images/321bbab1bed0095e519d467609a5f32dd947fa6843098ad87a211d91958f1bdd.jpg)
(a) Input I

![](images/857c88f58b97558498095c23da5cd485492a5c7b7e9f8a54d742fb6f7cc3ad41.jpg)
(b) From I

![](images/efca711afac477bc32089ac3e911c75b7c4ac426e40eb33e999fad5257ba6ddb.jpg)
(c) Deblur $\mathbf{I}_{\mathrm{D}}$
(d) From $\mathbf{I}_{\mathrm{D}}$
Fig. 6 & Table 2: (Left) Visual comparison with deblurring. (b) and (d) show the estimated 3D hands using $\mathbf{I}$ and $\mathbf{I}_{\mathrm{D}}$, respectively. (Right) Efficacy of EBHNet compared to the deblurring baseline. Results are reported when only images are used as input. For the EBH dataset, we employ $\mathbf{t} = 0.6$ for evaluating the Mid.

![](images/78baca8b44dce9563952fe87e8d691e8437cce35e2ad560ad40848d8211ba4d9.jpg)

![](images/72bfe4e37fd8ad5c4a57a8c2fc51cd165087109fb4b221101d98fbb8b7cd90ed.jpg)
(e) GT

| Data | Deblur | MPJPE↓ Init. | MPJPE↓ Mid. | MPJPE↓ Final |
| --- | --- | --- | --- | --- |
| BH | ✓ | - | 16.79 | - |
| BH |  | 17.23 | 16.45 |  |
| EBH | ✓ | - | 16.14 | - |
| EBH |  | 16.10 | 15.23 |  |

These metrics gauge the Euclidean distance (mm) between the estimated coordinates and the ground truth coordinates. Here, we measure the metrics after aligning the translation of the root joint (i.e., wrist), following prior works [3, 27]. Also, to evaluate the temporal consistency of hand motion, we use the acceleration error proposed in HMMR [9]. Here, the acceleration error calculates the average difference between the predicted and ground truth accelerations of each hand joint.

# 5.2 Ablation Studies

Validation of EBH dataset accuracy. To validate the accuracy of our EBH dataset, Tab. 1 shows metrics for predicted 2D keypoints, masks, and depth maps against their ground truth counterparts. The predicted 2D keypoints, masks, and depth maps are either projected (2D keypoints) or rendered (masks and depth maps) from the predicted 3D meshes. Ground truth 2D keypoints, masks, and depth maps are obtained from a 2D keypoint detection model [39], a matting model [15], and the output of a Kinect camera. For 2D keypoints, we calculate the mean distance between the 21 joints from the projected and detected keypoints. For masks, we compute the Intersection over Union (IoU) between the rendered mask and the mask from the matting model. For the depth map, we measure the pixel-wise L1 distance between the predicted and camera-derived depth maps. Here, all metrics are calculated after cropping and resizing the sharp hand images to $\mathbb{R}^{256\times 256\times 3}$. For additional details, refer to our supplementary material.

Comparison with deblurring. In assessing EBHNet's performance in producing 3D hands from blurry hand images, we compare it with deblurring methods from prior works [2, 30, 31]. To this end, we integrate a state-of-the-art deblurring network [2] before applying EBHNet. Tab. 2 reveals that ours significantly outperforms the variant that employs deblurring before 3D hand recovery. The performance decline can be attributed to several factors.

First, as shown in Fig. 6a, even advanced deblurring networks struggle to restore sharp hand images from blurry ones. Second, deblurring processes often eliminate valuable temporal information for 3D hand recovery. For example, Fig. 6c exhibits the absence of the middle finger, leading to an inaccurate 3D hand mesh in Fig. 6d. Also, deblurring can restrict networks to producing single outputs instead of sequences.

Table 3: Comparison of models with various temporal encoding. Results are reported using images as input. Freq. indicates frequency embedding [19].

| Data | Methods | MPJPE↓ Init. | MPJPE↓ Mid. | MPJPE↓ Final |
| --- | --- | --- | --- | --- |
| BH | Freq. | 18.09 | 17.07 | 18.27 |
| BH | Ours | 17.23 | 16.45 | 17.17 |
| EBH | Freq. | 16.71 | 16.21 | 17.98 |
| EBH | Ours | 16.10 | 15.23 | 17.15 |
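
To make the encoding compared in Tab. 3 concrete, the temporal splitter of Sec. 4.2, which appends $\mathbf{t}$ to the channels of $\mathbf{F}$, fuses with a $1 \times 1$ convolution, and applies the attention block of Eqs. (1) and (2), can be sketched in PyTorch as below. This is an illustrative sketch under our own assumptions: class and variable names are ours, the $h \times w$ grid is flattened into tokens, positional embedding and multi-head details are omitted, and a linear MLP stands in for the paper's two $1 \times 1$ convolutions.

```python
import torch
import torch.nn as nn

class TemporalSplitter(nn.Module):
    """Sketch of Sec. 4.2: append a scalar time step t to the channels of F,
    fuse with a 1x1 convolution, then apply one self-attention block
    following Eqs. (1)-(2). Shapes and names are assumptions."""

    def __init__(self, c=256):
        super().__init__()
        self.fuse = nn.Conv2d(c + 1, c, kernel_size=1)
        self.to_q = nn.Conv2d(c, c, kernel_size=1)
        self.to_k = nn.Conv2d(c, c, kernel_size=1)
        self.to_v = nn.Conv2d(c, c, kernel_size=1)
        # MLP: expand by 2x, then project back (Sec. 4.2).
        self.mlp = nn.Sequential(nn.Linear(c, 2 * c), nn.ReLU(inplace=True),
                                 nn.Linear(2 * c, c))

    def forward(self, feat, t):
        # feat: (B, c, h, w); t: (B,) normalized time steps in [0, 1].
        b, c, h, w = feat.shape
        t_map = t.view(b, 1, 1, 1).expand(b, 1, h, w)
        x = self.fuse(torch.cat([feat, t_map], dim=1))
        # Flatten the spatial grid into h*w tokens of dimension c.
        q, k, v = (m(x).flatten(2).transpose(1, 2)
                   for m in (self.to_q, self.to_k, self.to_v))
        attn = torch.softmax(q @ k.transpose(1, 2) / c ** 0.5, dim=-1)
        r = q + attn @ v        # Eq. (1): residual attention feature R_t
        return r + self.mlp(r)  # Eq. (2): F_t as (B, h*w, c) tokens
```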
+ +Table 4: Comparison of EBHNet with various input combinations. We use four GTs in training and evaluate the model on the corresponding time steps. + +

| Input | MPJPE↓ t=0 | MPJPE↓ t=0.2 | MPJPE↓ t=0.6 | MPJPE↓ t=0.8 |
| --- | --- | --- | --- | --- |
| I | 16.10 | 14.29 | 15.23 | 16.18 |
| E | 28.91 | 29.35 | 28.59 | 30.80 |
| I & E | 12.73 | 12.00 | 14.24 | 16.35 |
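
The three input settings in Tab. 4 correspond to the branching of Sec. 4.1: concatenate $\mathbf{F}_{\mathrm{I}}$ and $\mathbf{F}_{\mathrm{E}}$ when both are available, otherwise feed the single available feature to the shared residual blocks. A hedged sketch follows; the $1 \times 1$ projection that restores 256 channels after concatenation and the residual block layout are our assumptions, since the paper only states that five residual blocks consume the combined feature.

```python
import torch
import torch.nn as nn

class ResBlock(nn.Module):
    """Plain residual block; the paper does not spell out its layout."""
    def __init__(self, c):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(c, c, 3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(c, c, 3, padding=1))

    def forward(self, x):
        return x + self.body(x)

class FeatureFusion(nn.Module):
    """Sec. 4.1 input handling: image only, event only, or both."""
    def __init__(self, c=256):
        super().__init__()
        self.proj = nn.Conv2d(2 * c, c, 1)  # assumption: restore c channels
        self.blocks = nn.Sequential(*[ResBlock(c) for _ in range(5)])

    def forward(self, f_img=None, f_evt=None):
        if f_img is not None and f_evt is not None:
            feat = self.proj(torch.cat([f_img, f_evt], dim=1))
        else:
            feat = f_img if f_img is not None else f_evt
        return self.blocks(feat)
```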

![](images/5f96b90fd9ea1cbed99e36a345d3d40257cb29f227f01aa5d34f453423041549.jpg)
(a) Image I

![](images/f7cb76269bb1a18991c68d91d552945c90f6ddd0e5d2972f50c707c573d8298a.jpg)
(b) Event E

![](images/e4aff721f6d1d4d7e6bb7130091f0f3991aa8962422bb712177689925ec01f5a.jpg)
(c) On $\mathbf{E}$

![](images/028ff2548d4bf0d37c1ddb1a72c18a5c0208ebf3f7f89ef7dc5f03ae98334c03.jpg)
(d) On $\mathbf{I}$

![](images/7902e7b36cef40a32316e7ab9a607fc098bad619636a39a074d9446bc5e31a23.jpg)
(e) On both

![](images/cfae70374d49965e73dfa987f57abb9ab62adb6f136e952603a9f733981c4431.jpg)
(f) GT
Fig. 7: Comparison of different input combinations. Event information in severely blurry hand images can offer valuable complementary data, validating the effectiveness of incorporating event streams in our approach to address blurry hands.

Conversely, EBHNet excels at using the image's temporal information, producing multiple 3D hands at different time steps.

Comparison with frequency encoding. Tab. 3 compares our temporal embedding with conventional frequency encoding [19]. For frequency encoding, following BlurHandNet [27], we apply a sinusoidal operation on $\mathbf{t}$, expand the dimension to match the feature $\mathbf{F}$ using $1 \times 1$ convolution layers, and then add the output to $\mathbf{F}$. The table shows that our more straightforward approach, which concatenates the temporal encoding $\mathbf{t}$ along the channel dimension of the feature $\mathbf{F}$ and processes them through $1 \times 1$ convolution layers, consistently outperforms frequency encoding on both BlurHand [27] and our EBH datasets.

Various input combinations for EBHNet. Tab. 4 shows the results for different input combinations, including images only, events only, and both. Among the six available ground truths, note that we employ only four ground truths at $t = \{0, 0.2, 0.6, 0.8\}$ for supervision and evaluate the model on these four time steps. As shown, when using the event stream alone as input (second row in Tab. 4), the metrics degrade significantly. This is because events lack information for static hands, leading to a failure in recovering 3D hands. On the other hand, when both image and event streams are utilized (third row in Tab. 4), events complement the image in capturing 3D hands across time steps, particularly in blurry regions, yielding the best performance. Furthermore, Fig. 7 demonstrates that combining event and image input (Fig. 7e) produces results closest to the ground truth (Fig. 7f), consistent with Tab. 4.

Novel time step reconstruction. Tab. 5 evaluates the metrics at novel time steps, not included in the training phase, to further assess EBHNet's capacity to generate 3D hands at untrained time steps.

![](images/c5a86776204449c5368751b3152a73bf026c11fa3045a933b301bbc371092f79.jpg)
(a) Input I
(b) Interpolate
Fig. 8 & Table 5: (Left) Visual comparison between linear interpolation and temporal embedding. We generate the middle hand from two predicted 3D hands (red and white) in two ways: (b) linear interpolation, and (c) applying the temporal embedding value lying between those used for the neighboring 3D hands. The $\times$ marks show the same joint (tip of the middle finger) at different time steps. (Right) Comparison of models at a novel time step. For BH, we show the metrics at time step $t = 1.0$, while for EBH, we present the metrics at time steps $t = \{0.4, 1.0\}$.
+ +![](images/15553539fee7d02e74a3b1d9b542b686e1f6b4a74d34e39235af8f4eb5d8f24e.jpg) + +![](images/ba113c5a7c8c0c6b9f2015cfeac3438ff74670dd6e363038813474876d97cb3e.jpg) +(c) Ours + +![](images/d9e088dff0d2c5e1f5ac9e2c9487a8a9bb7f5fa104eefdd61617e78da0e5bd2f.jpg) +(d) GT + +

| Data | Methods | MPJPE↓ t=0.4 | MPJPE↓ t=1.0 | MPVPE↓ t=0.4 | MPVPE↓ t=1.0 |
| --- | --- | --- | --- | --- | --- |
| BH | Linear | - | 18.12 | - | 16.81 |
| BH | Ours (I) | - | 17.87 | - | 16.30 |
| EBH | Linear | 14.66 | 17.53 | 19.20 | 17.95 |
| EBH | Ours (I) | 14.38 | 17.15 | 18.73 | 17.58 |
| EBH | Ours (E) | 28.74 | 31.12 | 25.41 | 25.92 |
| EBH | Ours (I & E) | 14.03 | 16.76 | 16.88 | 17.09 |
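
The Linear rows above can be reproduced by blending two neighboring EBHNet predictions, as in the $\mathbf{V}_{0.4} = \frac{1}{2}\mathbf{V}_{0.2} + \frac{1}{2}\mathbf{V}_{0.6}$ example discussed next; our method instead queries the shared temporal splitter with the novel embedding value. A small sketch, where the function name and the model call signature are hypothetical:

```python
def linear_interp_mesh(v_a, v_b, t, t_a=0.2, t_b=0.6):
    """'Linear' baseline in Tab. 5: blend two predicted vertex arrays at
    times t_a and t_b to approximate the mesh at an intermediate time t.
    For t = 0.4 this reduces to 0.5 * v_a + 0.5 * v_b."""
    w = (t - t_a) / (t_b - t_a)
    return (1.0 - w) * v_a + w * v_b

# Ours instead queries the shared model with the novel temporal embedding:
# v_t = ebhnet(image, event, t=0.4)  # hypothetical call signature
```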

For EBH, we evaluate 3D hand recovery at two novel time steps not included in the training phase. For BlurHand, our model is trained only on the initial and middle hands ($t = \{0, 0.5\}$), and we evaluate its performance in recovering hands located at the last time step ($t = 1$). Tab. 5 compares the model's performance using three different input combinations, with linear interpolation for reference. To clarify, linear interpolation outputs a hand at $t = 0.4$ by linearly interpolating the neighboring hands, which are obtained by our EBHNet trained on $\mathbf{I}$ (e.g., $\mathbf{V}_{0.4} = \frac{1}{2}\mathbf{V}_{0.2} + \frac{1}{2}\mathbf{V}_{0.6}$). The table shows the superior performance of our approach both with image and event inputs and with image-only input. This is attributed to our temporal splitter and event stream, guiding the network to predict 3D hands at specific time steps.

Fig. 8 visually compares linear interpolation and our temporal embedding for generating novel hand sequences. In Fig. 8b, while linear interpolation restricts all articulation movements to a linear path (see the purple line in Fig. 8), our method produces more plausible results by using the corresponding temporal embedding values, exhibiting motion trajectories similar to the GT.

# 5.3 Comparisons with Previous Works

Performance comparison. Tabs. 6 and 7 clearly show that our EBHNet surpasses previous 3D hand mesh estimation methods. In Tab. 6, most prior approaches [12, 20, 22] do not account for motion blur in hand images, resulting in inaccuracies. While BlurHandNet [27] performs admirably by considering three time steps, it may overlook crucial hand information between those time steps, as it extracts features based on supervision with 3D hands at those specific time steps. Moreover, BlurHandNet consists of modules dedicated to each time step; thus, it cannot generate hands at novel time steps that were not included in the training phase. In contrast, our approach does not constrain the extracted feature $\mathbf{F}$ to a 3D hand at particular time steps, outperforming prior methods by leveraging temporal information from the entire motion trajectory. Also, our EBHNet can generate 3D hands at novel time steps without requiring training data for those specific time points, distinguishing it from BlurHandNet.

Table 6: Comparison with SoTA methods on BH [27]. For MPVPE, we evaluate metrics at the midpoint of the motion. As BH provides only images, our results are based on image inputs.

| Methods | MPJPE↓ Init. | MPJPE↓ Mid. | MPJPE↓ Final | MPVPE↓ | Accel↓ |
| --- | --- | --- | --- | --- | --- |
| I2L-MeshNet [22] | - | 24.32 | - | 23.07 | - |
| Pose2Pose [20] | - | 18.80 | - | 17.42 | - |
| METRO [12] | - | 20.54 | - | 27.03 | - |
| BlurHandNet [27] | 18.08 | 16.80 | 18.21 | 15.30 | 3.94 |
| EBHNet (Ours) | 17.23 | 16.45 | 17.17 | 15.02 | 3.37 |
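
As defined in Sec. 5.1, MPJPE and MPVPE are Euclidean distances (mm) computed after aligning the root (wrist) translation. A minimal NumPy sketch, with names and shapes of our own choosing:

```python
import numpy as np

def mpjpe(pred, gt, root_idx=0):
    """Mean per-joint position error (mm) after root-translation alignment.
    pred, gt: (J, 3) joint coordinates; root_idx: wrist joint index."""
    pred_aligned = pred - pred[root_idx]
    gt_aligned = gt - gt[root_idx]
    return np.linalg.norm(pred_aligned - gt_aligned, axis=-1).mean()

# MPVPE is the same computation over the (V, 3) mesh vertices, with the
# alignment translation still taken from the root joint.
```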
+ +Table 7: Comparison with SoTA methods on our proposed EBH. For MPVPE, we assess metrics for the hand at the midpoint of the motion (t=0.6). + +

| Input | Methods | MPJPE↓ t=0 | MPJPE↓ t=0.2 | MPJPE↓ t=0.6 | MPJPE↓ t=0.8 | MPVPE↓ | Accel↓ |
| --- | --- | --- | --- | --- | --- | --- | --- |
| I | I2L-MeshNet [22] | - | - | 28.12 | - | 30.86 | - |
| I | Pose2Pose [20] | - | - | 17.28 | - | 20.41 | - |
| I | BlurHandNet [27] | 17.08 | 15.53 | 16.13 | 17.54 | 17.99 | 5.78 |
| I | EBHNet (Ours) | 16.10 | 14.29 | 15.23 | 16.18 | 17.89 | 4.69 |
| E | EventHands [35] | 28.80 | 29.81 | 28.91 | 30.97 | 25.70 | 10.75 |
| E | EBHNet (Ours) | 28.91 | 29.35 | 28.59 | 30.80 | 25.30 | 9.25 |
| I & E | EBHNet (Ours) | 12.73 | 12.00 | 14.24 | 16.35 | 16.93 | 3.19 |
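
The Accel column follows HMMR [9]: the average difference between predicted and ground-truth per-joint accelerations over the recovered sequence. A sketch using second-order finite differences; any frame-time scaling applied in [9] is omitted here:

```python
import numpy as np

def accel_error(pred_seq, gt_seq):
    """Mean per-joint acceleration difference between predicted and
    ground-truth joint sequences of shape (T, J, 3), with T >= 3."""
    accel_pred = pred_seq[2:] - 2 * pred_seq[1:-1] + pred_seq[:-2]
    accel_gt = gt_seq[2:] - 2 * gt_seq[1:-1] + gt_seq[:-2]
    return np.linalg.norm(accel_pred - accel_gt, axis=-1).mean()
```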

![](images/039b8b95277ed331e33037a1d4fa9ca3bc532af0a0ed625c5adefa712de48210.jpg)
(a) Event E

![](images/c72ed01e6b65d1d4b540f09f7b10102ff78d9d2a4387e363223933d99a363e5e.jpg)
(b) [35]

![](images/70ced0da09006ab91bf5e141dc1f3de08986df2c22c6b859cdc6ec8b6f3d6f83.jpg)
(c) EBHNet

![](images/25c137b8b3710a1fe2fe74a219a7be74023311904e303e426dbfd128875b414b.jpg)
(d) GT

![](images/c7f3f1f62de32c53a85ecf3ed167a3393275da4009c755251133067fdc6bdc19.jpg)
(e) Event E
Fig. 9: Visual comparison between ours and EventHand [35]. When only the event is provided, ours yields results that closely resemble the corresponding ground truth.

![](images/5482e82d7d5f52a4040ea21eb3a5011be13afade62e3caeaf388ae6f0aa56613.jpg)
(f) [35]

![](images/22802507b6651d0fd17d0f636c81c17a5f4dbcf7105ed10d7f758e97d58468ed.jpg)
(g) EBHNet

![](images/99d96a7b2e6ea9bff54a4df2f07eb210e44bf5f8e760bf84b6cbb0f35d33e08a.jpg)
(h) GT

Moreover, Tab. 7 compares image-only, event-only, and combined inputs on the EBH dataset. We modify BlurHandNet [27] to predict four outputs for this comparison; our EBHNet consistently outperforms prior methods, excelling in particular against the event-based 3D recovery method [35]. The best results of our EBHNet are obtained with both event and image inputs. Also, Tabs. 6 and 7 compare the acceleration error (Accel), showing that EBHNet produces more temporally consistent outcomes by successfully capturing the real motion. Here, the acceleration error is computed only for hand sequence generation methods [27, 35]. Fig. 9 shows a visual comparison between our EBHNet and EventHand [35] when only event inputs are used.

Furthermore, Fig. 10 visually compares EBHNet with previous methods [20, 22, 27] when using only image inputs. As shown, Pose2Pose [20] and I2L-MeshNet [22] struggle even to capture the hand pose, while BlurHandNet [27] fails to capture motion information, resulting in nearly identical outputs across time steps. In contrast, EBHNet successfully generates 3D hands at different time steps.

Efficiency comparison. Tab. 8 compares the efficiency of our EBHNet with previous methods. Since all previous methods use images as input, we evaluate the efficiency of our EBHNet when using images only as input. We first compare our EBHNet with Pose2Pose [20] and I2L-MeshNet [22], which produce a single output, by considering EBHNet (1), which generates only one output. Here, EBHNet (1) exhibits comparable computational efficiency while showing quantitatively superior results compared to Pose2Pose and I2L-MeshNet in Tabs. 6 and 7. Compared to BlurHandNet, we configure EBHNet to generate the same number of 3D hands in EBHNet (3) for a fair comparison. In contrast to BlurHandNet [27], which requires additional layers for predicting multiple hands and thus results in a larger model size, EBHNet can generate 3D hands at different time steps using varied temporal embeddings, leading to a reduced model size.

![](images/0805e304eed22aeb094d56656e007e964f22371ea2c5203329e787f51a585b89.jpg)
Fig. 10: Visual comparison with previous methods on BH [27] and our EBH. Unlike other methods that generate a fixed number of 3D hands, EBHNet can produce a variable number of 3D hands. The red circles show severely blurry regions.

Table 8: Efficiency comparison with previous methods. Our EBHNet doesn't require extra parameters for recovering 3D hands at additional time steps, maintaining the same number of parameters whether predicting a single hand or three hands.
Table 8: Efficiency comparison with previous methods. Our EBHNet doesn't require extra parameters for recovering 3D hands at additional time steps, maintaining the same number of parameters whether predicting a single hand or three hands.

| Metrics | Pose2Pose [20] | I2L-MeshNet [22] | BlurHandNet [27] | EBHNet (1) | EBHNet (3) |
| --- | --- | --- | --- | --- | --- |
| Num of params (MB) ↓ | 77 | 141 | 202 | 146 | 146 |
| Latency (fps) ↑ | 26.31 | 25.76 | 14.43 | 25.12 | 15.45 |
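The identical parameter counts of EBHNet (1) and EBHNet (3) follow directly from the shared temporal splitter (Sec. 4.2): the query time step enters the network only as a scalar temporal embedding appended to the feature channels, so every additional time step reuses the same weights. A minimal sketch of this weight-sharing idea (a hypothetical toy module with made-up dimensions, not the released implementation; the actual temporal splitter further processes the fused feature with Transformer blocks):

```python
import torch
import torch.nn as nn


class TemporalSplitterSketch(nn.Module):
    """One shared module queried with a scalar time embedding t in [0, 1]."""

    def __init__(self, channels: int = 256):
        super().__init__()
        # A 1x1 convolution fuses the appended time channel back into `channels`.
        self.fuse = nn.Conv2d(channels + 1, channels, kernel_size=1)

    def forward(self, feat: torch.Tensor, t: float) -> torch.Tensor:
        b, _, h, w = feat.shape
        # Broadcast the scalar t over the spatial grid as one extra channel.
        t_map = torch.full((b, 1, h, w), t, device=feat.device)
        return self.fuse(torch.cat([feat, t_map], dim=1))


splitter = TemporalSplitterSketch()
feat = torch.randn(1, 256, 32, 32)  # stand-in for the fused feature F
# One set of weights serves any number of time steps, including novel ones:
outputs = [splitter(feat, t) for t in (0.0, 0.25, 0.5, 1.0)]
```

Querying one time step or three leaves `sum(p.numel() for p in splitter.parameters())` unchanged, which is exactly the constant parameter count Tab. 8 reports for EBHNet.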
+
+# 6 Limitations
+
+Our EBH dataset includes hands with varied real motion blur and corresponding 3D annotations, marking a crucial step in addressing key community challenges. However, since our data were captured from only 10 individuals, the dataset may lack shape diversity. We plan to address this issue by designing additional modules for shape generalization or by constructing a large-scale dataset from a more diverse pool of people.
+
+# 7 Conclusion
+
+This work tackles the challenging problem of recovering a 3D hand sequence from a blurry hand image. To this end, we introduce EBH, the first dataset that includes real blurry hand images, their corresponding 3D ground truths, and continuous temporal information from an event stream. In conjunction with the EBH dataset, we propose EBHNet, a method for generating 3D hand sequences from diverse combinations of a single blurry hand image and an event stream. Our experiments show the efficacy of EBH and EBHNet in enhancing 3D hand sequence recovery from blurry hands, with the ability to generate hand sequences at novel time steps. Our contributions offer insights for future work on motion blur in 3D hand recovery.
+
+Acknowledgments. This work was supported in part by the IITP grants [No. 2021-0-01343, Artificial Intelligence Graduate School Program (Seoul National University), No. 2021-0-02068, and No. 2023-0-00156] and the NRF grant [No. 2021M3A9E4080782] funded by the Korean government (MSIT).
+
+# References
+
+1. Boukhayma, A., Bem, R.d., Torr, P.H.: 3D hand shape and pose from images in the wild. In: CVPR (2019)
+2. Chen, L., Chu, X., Zhang, X., Sun, J.: Simple baselines for image restoration. In: ECCV (2022)
+3. Choi, H., Moon, G., Lee, K.M.: Pose2Mesh: Graph convolutional network for 3D human pose and mesh recovery from a 2D human pose. In: ECCV (2020)
+4. Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: A large-scale hierarchical image database. In: CVPR (2009)
+5. Hampali, S., Rad, M., Oberweger, M., Lepetit, V.: HOnnotate: A method for 3D annotation of hand and object poses. In: CVPR (2020)
+6. Hampali, S., Sarkar, S.D., Rad, M., Lepetit, V.: Keypoint Transformer: Solving joint identification in challenging hands and object interactions for accurate 3D pose estimation. In: CVPR (2022)
+7. Jaderberg, M., Simonyan, K., Zisserman, A., et al.: Spatial transformer networks. In: NeurIPS (2015)
+8. Joo, H., Neverova, N., Vedaldi, A.: Exemplar fine-tuning for 3D human model fitting towards in-the-wild 3D human pose estimation. In: 3DV (2021)
+9. Kanazawa, A., Zhang, J.Y., Felsen, P., Malik, J.: Learning 3D human dynamics from video. In: CVPR (2019)
+10. Kulon, D., Guler, R.A., Kokkinos, I., Bronstein, M.M., Zafeiriou, S.: Weakly-supervised mesh-convolutional hand reconstruction in the wild. In: CVPR (2020)
+11. Li, Z., Liu, J., Zhang, Z., Xu, S., Yan, Y.: CLIFF: Carrying location information in full frames into human pose and shape estimation. In: ECCV (2022)
+12. Lin, K., Wang, L., Liu, Z.: End-to-end human pose and mesh reconstruction with transformers. In: CVPR (2021)
+13. Lin, K., Wang, L., Liu, Z.: Mesh Graphormer. In: ICCV (2021)
+14. Lin, M., Chen, Q., Yan, S.: Network in network. arXiv preprint arXiv:1312.4400 (2013)
+15. Lin, S., Yang, L., Saleemi, I., Sengupta, S.: Robust high-resolution video matting with temporal guidance. In: WACV (2022)
+16. Lin, T.Y., Dollár, P., Girshick, R., He, K., Hariharan, B., Belongie, S.: Feature pyramid networks for object detection. In: CVPR (2017)
+17. Liu, S., Jiang, H., Xu, J., Liu, S., Wang, X.: Semi-supervised 3D hand-object poses estimation with interactions in time. In: CVPR (2021)
+18. Messikommer, N., Fang, C., Gehrig, M., Scaramuzza, D.: Data-driven feature tracking for event cameras. In: CVPR (2023)
+19. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: Representing scenes as neural radiance fields for view synthesis. Commun. ACM (2021)
+20. Moon, G., Choi, H., Lee, K.M.: Accurate 3D hand pose estimation for whole-body 3D human mesh estimation. In: CVPRW (2022)
+21. Moon, G., Choi, H., Lee, K.M.: NeuralAnnot: Neural annotator for 3D human mesh training sets. In: CVPR (2022)
+22. Moon, G., Lee, K.M.: I2L-MeshNet: Image-to-lixel prediction network for accurate 3D human pose and mesh estimation from a single RGB image. In: ECCV (2020)
+23. Moon, G., Saito, S., Xu, W., Joshi, R., Buffalini, J., Bellan, H., Rosen, N., Richardson, J., Mize, M., De Bree, P., et al.: A dataset of relighted 3D interacting hands. In: NeurIPS (2023)
+
+24. Moon, G., Yu, S.I., Wen, H., Shiratori, T., Lee, K.M.: InterHand2.6M: A dataset and baseline for 3D interacting hand pose estimation from a single RGB image. In: ECCV (2020)
+25. Nah, S., Kim, T.H., Lee, K.M.: Deep multi-scale convolutional neural network for dynamic scene deblurring. In: CVPR (2017)
+26. Nehvi, J., Golyanik, V., Mueller, F., Seidel, H.P., Elgharib, M., Theobalt, C.: Differentiable event stream simulator for non-rigid 3D tracking. In: CVPR (2021)
+27. Oh, Y., Park, J., Kim, J., Moon, G., Lee, K.M.: Recovering 3D hand mesh sequence from a single blurry image: A new dataset and temporal unfolding. In: CVPR (2023)
+28. Ozawa, T., Sekikawa, Y., Saito, H.: Accuracy and speed improvement of event camera motion estimation using a bird's-eye view transformation. Sensors (2022)
+29. Park, J., Jung, D.S., Moon, G., Lee, K.M.: Extract-and-adaptation network for 3D interacting hand mesh recovery. In: ICCVW (2023)
+30. Park, J., Nah, S., Lee, K.M.: Pay attention to hidden states for video deblurring: Ping-pong recurrent neural networks and selective non-local attention. arXiv preprint arXiv:2203.16063 (2022)
+31. Park, J., Nah, S., Lee, K.M.: Recurrence-in-recurrence networks for video deblurring. In: BMVC (2022)
+32. Park, J., Oh, Y., Moon, G., Choi, H., Lee, K.M.: HandOccNet: Occlusion-robust 3D hand mesh estimation network. In: CVPR (2022)
+33. Romero, J., Tzionas, D., Black, M.J.: Embodied hands: Modeling and capturing hands and bodies together. SIGGRAPH Asia (2017)
+34. Rong, Y., Shiratori, T., Joo, H.: FrankMocap: A monocular 3D whole-body pose estimation system via regression and integration. In: ICCVW (2021)
+35. Rudnev, V., Golyanik, V., Wang, J., Seidel, H.P., Mueller, F., Elgharib, M., Theobalt, C.: EventHands: Real-time neural 3D hand pose estimation from an event stream. In: ICCV (2021)
+36. Shen, Z., Wang, W., Shen, J., Ling, H., Xu, T., Shao, L.: Human-aware motion deblurring. In: ICCV (2019)
+37. Sun, X., Xiao, B., Wei, F., Liang, S., Wei, Y.: Integral human pose regression. In: ECCV (2018)
+38. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: NeurIPS (2017)
+39. Zhang, F., Bazarevsky, V., Vakunov, A., Tkachenka, A., Sung, G., Chang, C.L., Grundmann, M.: MediaPipe Hands: On-device real-time hand tracking. arXiv preprint arXiv:2006.10214 (2020)
+40. Zhang, S., Wang, W., Li, H., Zhang, S.: EVtracker: An event-driven spatiotemporal method for dynamic object tracking.
Sensors (2022) +41. Zhong, Z., Gao, Y., Zheng, Y., Zheng, B.: Efficient spatio-temporal recurrent neural network for video deblurring. In: ECCV (2020) +42. Zimmermann, C., Brox, T.: Learning to estimate 3D hand pose from single RGB images. In: ICCV (2017) +43. Zimmermann, C., Ceylan, D., Yang, J., Russell, B., Argus, M., Brox, T.: Freihand: A dataset for markerless capture of hand pose and shape from single RGB images. In: ICCV (2019) \ No newline at end of file diff --git a/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/images.zip b/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..4385c104ef0b639e46931abb9912bd3a1dd7665d --- /dev/null +++ b/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eec86a0eae37ab9d92c355787f7a75599c358f5853b797aac8ede2a007f529ed +size 454596 diff --git a/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/layout.json b/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..b15efca912eefc199fd635aaa5667acc4d849bb0 --- /dev/null +++ b/2024/3D Hand Sequence Recovery from Real Blurry Images and Event Stream/layout.json @@ -0,0 +1,10844 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 173, + 112, + 442, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 112, + 442, + 148 + ], + "spans": [ + { + "bbox": [ + 173, + 112, + 442, + 148 + ], + "type": "text", + "content": "3D Hand Sequence Recovery from Real Blurry Images and Event Stream" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 149, + 167, + 465, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 167, + 465, + 194 + ], + "spans": [ + { + "bbox": [ + 149, + 167, + 465, + 194 + ], + "type": "text", + "content": "Joonkyu Park1, Gyeongsik Moon3,4, Weipeng Xu4, Evan Kaseman4, Takaaki Shiratori4, and Kyoung Mu Lee1,2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 178, + 201, + 436, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 201, + 436, + 222 + ], + "spans": [ + { + "bbox": [ + 178, + 201, + 436, + 222 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 178, + 201, + 436, + 222 + ], + "type": "text", + "content": "Dept. 
of ECE&ASRI, " + }, + { + "bbox": [ + 178, + 201, + 436, + 222 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 178, + 201, + 436, + 222 + ], + "type": "text", + "content": "IPAI, Seoul National University, Korea " + }, + { + "bbox": [ + 178, + 201, + 436, + 222 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 178, + 201, + 436, + 222 + ], + "type": "text", + "content": "DGIST" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 252, + 224, + 362, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 224, + 362, + 235 + ], + "spans": [ + { + "bbox": [ + 252, + 224, + 362, + 235 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 252, + 224, + 362, + 235 + ], + "type": "text", + "content": "Codec Avatars Lab, Meta" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 184, + 236, + 429, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 236, + 429, + 246 + ], + "spans": [ + { + "bbox": [ + 184, + 236, + 429, + 246 + ], + "type": "text", + "content": "{jkpark0825, kyoungmu}@snu.ac.kr, mks0601@gmail.com," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 207, + 247, + 406, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 247, + 406, + 257 + ], + "spans": [ + { + "bbox": [ + 207, + 247, + 406, + 257 + ], + "type": "text", + "content": "{xuweipeng, ekaseman, tshiratori}@meta.com" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 240, + 258, + 373, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 240, + 258, + 373, + 268 + ], + "spans": [ + { + "bbox": [ + 240, + 258, + 373, + 268 + ], + "type": "text", + "content": "https://jkpark08.github.io/EBH" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 160, + 285, + 248, + 361 + ], + "blocks": [ + { + "bbox": [ + 160, + 285, + 248, + 361 + ], + "lines": [ + { + "bbox": [ + 160, + 285, + 248, + 361 + ], + "spans": [ + { + "bbox": [ + 160, + 285, + 248, + 361 + ], + "type": "image", + "image_path": "d2b17a2bdf9cc07f82963459c864da272144348600f5f02a417f0368f5d65d9c.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 373, + 482, + 397 + ], + "lines": [ + { + "bbox": [ + 131, + 373, + 482, + 397 + ], + "spans": [ + { + "bbox": [ + 131, + 373, + 482, + 397 + ], + "type": "text", + "content": "Fig. 1: Blurry hand to 3D hand sequences. We address 3D hand sequence recovery with real motion blur, showing proficiency in predicting 3D hands at novel time steps." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 249, + 285, + 355, + 357 + ], + "blocks": [ + { + "bbox": [ + 249, + 285, + 355, + 357 + ], + "lines": [ + { + "bbox": [ + 249, + 285, + 355, + 357 + ], + "spans": [ + { + "bbox": [ + 249, + 285, + 355, + 357 + ], + "type": "image", + "image_path": "d361cb97edfb193a40e6d119186db73008f45f94798a8dc42ddebd799b458427.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 356, + 285, + 452, + 358 + ], + "blocks": [ + { + "bbox": [ + 356, + 285, + 452, + 358 + ], + "lines": [ + { + "bbox": [ + 356, + 285, + 452, + 358 + ], + "spans": [ + { + "bbox": [ + 356, + 285, + 452, + 358 + ], + "type": "image", + "image_path": "4666be7000d8cfc471df6840bfb1d0ca6a737ea3e6d355969a7332998d609b1d.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 159, + 413, + 455, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 413, + 455, + 646 + ], + "spans": [ + { + "bbox": [ + 159, + 413, + 455, + 646 + ], + "type": "text", + "content": "Abstract. Although hands frequently exhibit motion blur due to their dynamic nature, existing approaches for 3D hand recovery often disregard the impact of motion blur in hand images. Blurry hand images contain hands from multiple time steps, lack precise hand location at a specific time step, and introduce temporal ambiguity, leading to multiple possible hand trajectories. To address this issue and in the absence of datasets with real blur, we introduce the EBH dataset, which provides 1) hand images with real motion blur and 2) event data for authentic representation of fast hand movements. In conjunction with our new dataset, we present EBHNet, a novel network capable of recovering 3D hands from diverse input combinations, including blurry hand images, events, or both. Here, the event stream enhances motion understanding in blurry hands, addressing temporal ambiguity. Recognizing that blurry hand images include not only single 3D hands at a time step but also multiple hands along their motion trajectories, we design EBHNet to generate 3D hand sequences in motion. Moreover, to enable our EBHNet to predict 3D hands at novel, unsupervised time steps using a single shared module, we employ a Transformer-based module, temporal splitter, into EBHNet. Our experiments show the superior performance of EBH and EBHNet, especially in handling blurry hand images, making them valuable in real-world applications." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 160, + 654, + 451, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 654, + 451, + 665 + ], + "spans": [ + { + "bbox": [ + 160, + 654, + 451, + 665 + ], + "type": "text", + "content": "Keywords: 3D hand sequence recovery " + }, + { + "bbox": [ + 160, + 654, + 451, + 665 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 654, + 451, + 665 + ], + "type": "text", + "content": " Blurry hands " + }, + { + "bbox": [ + 160, + 654, + 451, + 665 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 654, + 451, + 665 + ], + "type": "text", + "content": " Event stream" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 114, + 191, + 171 + ], + "blocks": [ + { + "bbox": [ + 133, + 114, + 191, + 171 + ], + "lines": [ + { + "bbox": [ + 133, + 114, + 191, + 171 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 191, + 171 + ], + "type": "image", + "image_path": "d78fdff3c51bac024426b61aa935ffd6c74c49c2c2d02ffdb4cb3846840fca43.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 135, + 172, + 188, + 180 + ], + "lines": [ + { + "bbox": [ + 135, + 172, + 188, + 180 + ], + "spans": [ + { + "bbox": [ + 135, + 172, + 188, + 180 + ], + "type": "text", + "content": "(a) Blurry image" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 192, + 114, + 249, + 171 + ], + "blocks": [ + { + "bbox": [ + 192, + 114, + 249, + 171 + ], + "lines": [ + { + "bbox": [ + 192, + 114, + 249, + 171 + ], + "spans": [ + { + "bbox": [ + 192, + 114, + 249, + 171 + ], + "type": "image", + "image_path": "67c106cc55eb268194d38a21adc381bf55a3dfd164d3f494da935a6955d00aa2.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 205, + 172, + 235, + 179 + ], + "lines": [ + { + "bbox": [ + 205, + 172, + 235, + 179 + ], + "spans": [ + { + "bbox": [ + 205, + 172, + 235, + 179 + ], + "type": "text", + "content": "(b) Mesh" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 250, + 114, + 306, + 171 + ], + "blocks": [ + { + "bbox": [ + 250, + 114, + 306, + 171 + ], + "lines": [ + { + "bbox": [ + 250, + 114, + 306, + 171 + ], + "spans": [ + { + "bbox": [ + 250, + 114, + 306, + 171 + ], + "type": "image", + "image_path": "2d8833f3e8127159f9a356b79309c4c54994b145aa8d41ee3ec625a676012f4c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 263, + 172, + 293, + 179 + ], + "lines": [ + { + "bbox": [ + 263, + 172, + 293, + 179 + ], + "spans": [ + { + "bbox": [ + 263, + 172, + 293, + 179 + ], + "type": "text", + "content": "(c) Event" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 131, + 189, + 480, + 222 + ], + "lines": [ + { + "bbox": [ + 131, + 189, + 480, + 222 + ], + "spans": [ + { + "bbox": [ + 131, + 189, + 480, + 222 + ], + "type": "text", + "content": "Fig. 2: Examples of the proposed EBH dataset. Our EBH consists of real motion-blurred hand images with the corresponding meshes derived from sharp images captured from three different viewpoints during motion. Also, we offer an event stream." 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 307, + 114, + 365, + 171 + ], + "blocks": [ + { + "bbox": [ + 307, + 114, + 365, + 171 + ], + "lines": [ + { + "bbox": [ + 307, + 114, + 365, + 171 + ], + "spans": [ + { + "bbox": [ + 307, + 114, + 365, + 171 + ], + "type": "image", + "image_path": "d30f090a6fb9baa4ff7fc257c8296e4db75dbd1bd576c2eb236129cb70ec9aca.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 172, + 362, + 179 + ], + "lines": [ + { + "bbox": [ + 310, + 172, + 362, + 179 + ], + "spans": [ + { + "bbox": [ + 310, + 172, + 362, + 179 + ], + "type": "text", + "content": "(d) Blurry image" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 366, + 114, + 423, + 171 + ], + "blocks": [ + { + "bbox": [ + 366, + 114, + 423, + 171 + ], + "lines": [ + { + "bbox": [ + 366, + 114, + 423, + 171 + ], + "spans": [ + { + "bbox": [ + 366, + 114, + 423, + 171 + ], + "type": "image", + "image_path": "e428866d5c4bac0b4cd783f378ea3b7d7ab79d984716e8aa13b37ebd9716ebfd.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 380, + 172, + 408, + 179 + ], + "lines": [ + { + "bbox": [ + 380, + 172, + 408, + 179 + ], + "spans": [ + { + "bbox": [ + 380, + 172, + 408, + 179 + ], + "type": "text", + "content": "(e) Mesh" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 425, + 114, + 481, + 171 + ], + "blocks": [ + { + "bbox": [ + 425, + 114, + 481, + 171 + ], + "lines": [ + { + "bbox": [ + 425, + 114, + 481, + 171 + ], + "spans": [ + { + "bbox": [ + 425, + 114, + 481, + 171 + ], + "type": "image", + "image_path": "5e228bdb5e7338373c4dee15fbdf3ffe8598f8d0806375eb9e798e4b0efcd02a.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 438, + 172, + 467, + 179 + ], + "lines": [ + { + "bbox": [ + 438, + 172, + 467, + 179 + ], + "spans": [ + { + "bbox": [ + 438, + 172, + 467, + 179 + ], + "type": "text", + "content": "(f) Event" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 247, + 229, + 260 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 247, + 229, + 260 + ], + "spans": [ + { + "bbox": [ + 132, + 247, + 229, + 260 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 130, + 278, + 480, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 278, + 480, + 374 + ], + "spans": [ + { + "bbox": [ + 130, + 278, + 480, + 374 + ], + "type": "text", + "content": "The dynamic nature of hands leads to prevalent blurriness during rapid movements (e.g., sports, typing, and dancing), posing a challenge for accurately capturing hand information. Moreover, isolating a single hand during motion is impractical because blurry hands convey multiple hand information during the motion. To address this, there is a need for a robust framework that can effectively recover 3D hand sequence even in the presence of motion blur. However, existing methods [3, 5, 10, 12, 13, 22] have exhibited subpar performance when faced with motion blur present in hand images." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 130, + 376, + 481, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 376, + 481, + 519 + ], + "spans": [ + { + "bbox": [ + 130, + 376, + 481, + 519 + ], + "type": "text", + "content": "The main issue arises from the lack of datasets containing both blurry hand images and accurate 3D ground truth. Although Oh et al. [27] introduced the BlurHand dataset, it has limitations. First, their motion blur is artificially generated by averaging sequential frames [25, 36], which doesn't accurately reflect real-world blur. Second, BlurHand [27] provides only images, which can lead to challenges in accurately capturing the trajectory due to temporal ambiguity. The images display the hand's position over time, lacking exact information about its location at particular time steps and overall motion. As a result, a single blurry image can exhibit multiple potential motion trajectories, leading to temporal ambiguity. While BlurHand includes corresponding sharp image pairs to assist in determining the motion trajectory, relying solely on these sharp images may not provide an accurate trajectory beyond the frame rates of the sharp images." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 130, + 521, + 482, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 521, + 482, + 640 + ], + "spans": [ + { + "bbox": [ + 130, + 521, + 482, + 640 + ], + "type": "text", + "content": "To address these issues, we propose the EBH dataset, which captures real motion blur in hand images using cameras with different exposure times, where the examples are shown in Fig. 2. Specifically, we use an RGB camera with extended exposure times to generate blurry hand images with real motion blur. At the same time, multiple RGB cameras with shorter exposure times are employed to capture sharp images. The 3D annotations (i.e., meshes and keypoints) are derived from pairs of these multi-view sharp images. Also, we incorporate event data to compensate for the temporal ambiguity in the blurry hand images. This event data captures instantaneous changes in brightness, helping find the hand's position and track its movement in the blurry image." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 131, + 641, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 641, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 131, + 641, + 481, + 666 + ], + "type": "text", + "content": "Building upon the EBH dataset, we present EBHNet, a flexible network, designed to predict a 3D hand sequence from diverse input combinations: 1)" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 228, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 228, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 228, + 101 + ], + "type": "text", + "content": "JK. Park et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": "only a single blurry image, 2) only event data, and 3) both a blurry image and event data. In each case, we adapt feature extraction based on input types." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 140, + 481, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 140, + 481, + 259 + ], + "spans": [ + { + "bbox": [ + 130, + 140, + 481, + 259 + ], + "type": "text", + "content": "With the feature extracted by considering various input types, our goal is to predict 3D hand sequences, which consist of 3D hands at different time steps. Here, using a dedicated 3D hand recovery module for each time step, such as BlurHandNet [27], can inflate model parameters, especially with more 3D hands at different time steps. To address this, we propose a Transformer-based module, temporal splitter, in our EBHNet. This enables the generation of multiple 3D hands at novel time steps using a single shared module, with the temporal splitter estimating based on the temporal embedding. Additionally, our temporal embeddings enable EBHNet to generate 3D hands at novel time steps without requiring supervision for those steps." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 260, + 481, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 260, + 481, + 319 + ], + "spans": [ + { + "bbox": [ + 130, + 260, + 481, + 319 + ], + "type": "text", + "content": "Using our newly introduced dataset, EBH, and the baseline network, EBHNet, we tackle the task of recovering 3D hands from blurry hand images. Our experiments show the efficacy of our dataset when applied to real-world blurry hand scenarios and the robustness of EBHNet in handling such cases. We summarize our contributions as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 331, + 479, + 403 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 138, + 331, + 479, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 331, + 479, + 354 + ], + "spans": [ + { + "bbox": [ + 138, + 331, + 479, + 354 + ], + "type": "text", + "content": "- We introduce the EBH dataset for 3D hand recovery from motion-blurred images, providing real-world motion blur and event information." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 139, + 355, + 479, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 355, + 479, + 379 + ], + "spans": [ + { + "bbox": [ + 139, + 355, + 479, + 379 + ], + "type": "text", + "content": "- We introduce EBHNet, a novel network for 3D hand sequence recovery from blurry hands, capable of handling diverse input combinations." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 139, + 380, + 479, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 380, + 479, + 403 + ], + "spans": [ + { + "bbox": [ + 139, + 380, + 479, + 403 + ], + "type": "text", + "content": "- The temporal splitter in EBHNet allows our network to produce 3D hands at novel time steps using a single shared module." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 423, + 239, + 435 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 423, + 239, + 435 + ], + "spans": [ + { + "bbox": [ + 132, + 423, + 239, + 435 + ], + "type": "text", + "content": "2 Related works" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 449, + 481, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 449, + 481, + 533 + ], + "spans": [ + { + "bbox": [ + 130, + 449, + 481, + 533 + ], + "type": "text", + "content": "3D hand recovery. Since the introduction of RGB-based hand benchmark datasets [5, 24, 32, 43], several methods [1, 3, 22, 42] for hand estimation have emerged. Although several approaches [6,17,32] have focused on addressing hand occlusion, especially in cases where objects are being held, others have taken on the task of handling interacting hands [24, 29], which are frequently encountered in real-world scenarios. However, there is a notable research gap on the challenge of fast-moving hands despite its prevalence in real-world situations." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 534, + 481, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 534, + 481, + 640 + ], + "spans": [ + { + "bbox": [ + 130, + 534, + 481, + 640 + ], + "type": "text", + "content": "Recently, there have been several attempts to make a 3D hand recovery system robust to motion blur. FrankMocap [34] synthetically generated motion blur for data augmentation. BlurHand [27] presented synthetic blurry hand data in conjunction with their baseline network, BlurHandNet, with the goal of recovering three temporal 3D hands. However, both rely on artificially generated motion blur, which has a domain gap from real-world blur [41]. In particular, the motion detail of the BlurHand dataset is limited by the frame rate of their RGB cameras. To address these shortcomings, we introduce the EBH dataset, which features real blur-hand images with an event stream." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "content": "Event stream. An event stream comprises a sequence of data points that chronicle changes within a scene over time. 
Specialized sensors capture these changes" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 337, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 337, + 91, + 447, + 100 + ], + "type": "text", + "content": "EBH Dataset and Network" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 141, + 114, + 299, + 213 + ], + "blocks": [ + { + "bbox": [ + 141, + 114, + 299, + 213 + ], + "lines": [ + { + "bbox": [ + 141, + 114, + 299, + 213 + ], + "spans": [ + { + "bbox": [ + 141, + 114, + 299, + 213 + ], + "type": "image", + "image_path": "585d10ab9586d5b6b17a9ac14d1627949587580061fee12aa23fe5359d6e3a6c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 230, + 482, + 286 + ], + "lines": [ + { + "bbox": [ + 130, + 230, + 482, + 286 + ], + "spans": [ + { + "bbox": [ + 130, + 230, + 482, + 286 + ], + "type": "text", + "content": "Fig. 3: Overview of our EBH dataset. We capture blurry hand images with one camera using a longer exposure time and obtain sharp hand images with six additional cameras, three triggered simultaneously for the blurry image and three with delayed triggers for different time steps. Additionally, we enrich our dataset with event stream. The right figure depicts the cameras, color-circled for their respective groups." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 313, + 128, + 468, + 213 + ], + "blocks": [ + { + "bbox": [ + 313, + 128, + 468, + 213 + ], + "lines": [ + { + "bbox": [ + 313, + 128, + 468, + 213 + ], + "spans": [ + { + "bbox": [ + 313, + 128, + 468, + 213 + ], + "type": "image", + "image_path": "d825495986d77b5dc941fb475fa7a91a9fe9e36bb01c1d259aa00cf777b7be1a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 309, + 482, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 309, + 482, + 453 + ], + "spans": [ + { + "bbox": [ + 130, + 309, + 482, + 453 + ], + "type": "text", + "content": "per-pixel basis, including attributes such as location, timestamp, and polarity, which indicate brightness changes. These data prove valuable in mitigating image blurriness by facilitating precise motion estimation, making them applicable in various tasks, including object tracking [18,40], and visual odometry for accurate camera trajectory [28]. More recently, inspired by the advantages of using event data, several works [26,35] have used event information for 3D hand recovery. Specifically, EventHand [35] introduced synthetic event data and trained their model on these events. Furthermore, [26] introduced an event stream simulator designed to generate event information for hands, simulating data from real event cameras. Based on this, we incorporate event information into our EBH dataset, offering a comprehensive view of overall motion and providing more data than limited 3D annotations at specific time intervals." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 472, + 231, + 484 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 472, + 231, + 484 + ], + "spans": [ + { + "bbox": [ + 132, + 472, + 231, + 484 + ], + "type": "text", + "content": "3 EBH dataset" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 498, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 498, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 498, + 482, + 666 + ], + "type": "text", + "content": "Fig. 3 shows the process of constructing our EBH dataset. Instead of artificially generating blur [27], we employ RGB cameras (Microsoft Kinects) with different exposure times for real motion blur. First, we configure a single camera (blue in Fig. 3) with an 80 millisecond (ms) exposure time and a 5 fps frame rate to capture the blurry hand image. Next, we employ two groups of three cameras (six cameras) with a 2.5 ms exposure time and a 30 fps frame rate. These camera groups are triggered differently: one group (red in Fig. 3) shares the same trigger as the camera used for the blurry image, while the other (orange in Fig. 3) has a trigger signal shifted by 16 ms. As a result, we obtain a total of 18 sharp images (6-time steps " + }, + { + "bbox": [ + 130, + 498, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\times" + }, + { + "bbox": [ + 130, + 498, + 482, + 666 + ], + "type": "text", + "content": " 3 cameras), as indicated by the red and orange boxed hand images in Fig. 3. Motivated by recent neural network-based annotators [8,11,21, 23], we train a MANO parameter [33] estimation network on our dataset and test it on the training set, where the output becomes the 3D ground truth. Given the additional depth information from our RGB cameras (Microsoft Kinects)," + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "type": "text", + "content": "JK. Park et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 187 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 187 + ], + "type": "text", + "content": "our model processes RGB images from three viewpoints and the corresponding depth maps to estimate MANO parameters. The network is self-supervised by minimizing loss functions between 1) 2D joints projected from the estimated hand mesh and those obtained by an off-the-shelf 2D joint detection model [39], 2) differentiable rendered silhouettes and masks from a matting model [15], and 3) rendered depth and their corresponding ground truth depth map." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 189, + 482, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 189, + 482, + 416 + ], + "spans": [ + { + "bbox": [ + 130, + 189, + 482, + 416 + ], + "type": "text", + "content": "Furthermore, since obtaining 3D annotations from sharp images is done discretely due to the limited shutter speed of the RGB cameras, we introduce event stream (green in Fig. 3) to supplement the limited information obtained from the discrete 3D annotations. To this end, we employ an event camera (Prophesee EVK4) with an exceptionally high frame rate of more than " + }, + { + "bbox": [ + 130, + 189, + 482, + 416 + ], + "type": "inline_equation", + "content": "10\\mathrm{K}" + }, + { + "bbox": [ + 130, + 189, + 482, + 416 + ], + "type": "text", + "content": " fps, which is calibrated to the RGB camera that captures the blurry hand image. We calibrate the event camera and RGB camera using a blinking checkerboard, with additional details that can be found in our supplementary material. Also, the event camera is synchronized with the RGB camera to capture the blurry hand image using the same external trigger. The event stream from the event camera provides continuous pixel-wise information, but it can be data-intensive. To address this, we accumulate event data at specific intervals, reducing the overall volume of the data. Specifically, to prevent information loss, we employ a sliding window approach to accumulate light intensity over predefined time intervals (e.g., 1ms). Then, we utilize color encoding to differentiate event occurrences at various time steps, assigning different colors to events happening at different times. This color-encoding process entails converting accumulated light intensity into a grayscale image, which is then mapped to an RGB representation using a light blue palette provided by the event camera's SDK." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 418, + 482, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 418, + 482, + 574 + ], + "spans": [ + { + "bbox": [ + 130, + 418, + 482, + 574 + ], + "type": "text", + "content": "Finally, our EBH dataset includes hand images from 10 individuals displaying various commonly occurring gestures, totaling 40,213 annotated blurry hand images with corresponding event streams. The dataset is divided into training (32,482) and test sets (7,731). Please note that each blurry hand image in our dataset has six 3D annotations from the different time steps, represented as three in red and three in orange in Fig. 3. Consequently, our EBH dataset comprises a total of 241,278 " + }, + { + "bbox": [ + 130, + 418, + 482, + 574 + ], + "type": "inline_equation", + "content": "(40,213 \\times 6)" + }, + { + "bbox": [ + 130, + 418, + 482, + 574 + ], + "type": "text", + "content": " 3D annotations. Furthermore, unlike the synthetic blurry hand dataset BlurHand [27], which mainly comprises hands with minimal or no blur, our EBH dataset includes more dynamic blur, as shown in Fig. 4. We measure the length of hand trajectory by summing the distances between 3D keypoints for each blurry hand image using ground-truth data, providing an indication of the hand blur level. For the details (e.g., camera ID, pose examples, and camera calibration), please refer to the supplementary material." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 597, + 206, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 597, + 206, + 609 + ], + "spans": [ + { + "bbox": [ + 132, + 597, + 206, + 609 + ], + "type": "text", + "content": "4 EBHNet" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 629, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 481, + 666 + ], + "type": "text", + "content": "Fig. 5 shows an overview of our EBHNet framework. Our goal is to recover sequences of 3D hands from a given input, changing the output number to match the time steps. To accommodate the diverse cases encountered in practical ap" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 337, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 337, + 91, + 447, + 101 + ], + "type": "text", + "content": "EBH Dataset and Network" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 162, + 122, + 301, + 255 + ], + "blocks": [ + { + "bbox": [ + 162, + 122, + 301, + 255 + ], + "lines": [ + { + "bbox": [ + 162, + 122, + 301, + 255 + ], + "spans": [ + { + "bbox": [ + 162, + 122, + 301, + 255 + ], + "type": "image", + "image_path": "b67bcc046a94ee0c69f1196b044181428fa7034085c2a7f68827582a7fa30a3f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 204, + 255, + 260, + 265 + ], + "lines": [ + { + "bbox": [ + 204, + 255, + 260, + 265 + ], + "spans": [ + { + "bbox": [ + 204, + 255, + 260, + 265 + ], + "type": "text", + "content": "(a) training set" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 130, + 284, + 482, + 318 + ], + "lines": [ + { + "bbox": [ + 130, + 284, + 482, + 318 + ], + "spans": [ + { + "bbox": [ + 130, + 284, + 482, + 318 + ], + "type": "text", + "content": "Fig.4: Statistics on blur strength of EBH dataset compared to Blur-Hand (BH). " + }, + { + "bbox": [ + 130, + 284, + 482, + 318 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 130, + 284, + 482, + 318 + ], + "type": "text", + "content": "-axis denotes the length of hand trajectory, showing 3D joint movement in a single blurry hand image. " + }, + { + "bbox": [ + 130, + 284, + 482, + 318 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 130, + 284, + 482, + 318 + ], + "type": "text", + "content": "-axis shows the image proportion in the dataset." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 312, + 122, + 447, + 255 + ], + "blocks": [ + { + "bbox": [ + 312, + 122, + 447, + 255 + ], + "lines": [ + { + "bbox": [ + 312, + 122, + 447, + 255 + ], + "spans": [ + { + "bbox": [ + 312, + 122, + 447, + 255 + ], + "type": "image", + "image_path": "495468db9416b1e8512ca14af6512de2f8102c6dba65deb0c562d03b97588946.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 356, + 255, + 408, + 265 + ], + "lines": [ + { + "bbox": [ + 356, + 255, + 408, + 265 + ], + "spans": [ + { + "bbox": [ + 356, + 255, + 408, + 265 + ], + "type": "text", + "content": "(b) testing set" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 346, + 481, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 346, + 481, + 371 + ], + "spans": [ + { + "bbox": [ + 130, + 346, + 481, + 371 + ], + "type": "text", + "content": "plications, we introduce various variants of EBHNet, each designed to utilize either blurry hand image alone, event data alone, or a combination of both." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 131, + 403, + 361, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 403, + 361, + 415 + ], + "spans": [ + { + "bbox": [ + 131, + 403, + 361, + 415 + ], + "type": "text", + "content": "4.1 Feature Extraction from Various Inputs" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": "To facilitate understanding, let us consider a scenario where both a blurry image " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{I} \\in \\mathbb{R}^{H \\times W \\times 3}" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": " and an event " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{E}" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": " are available. Here, " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "H = 256" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "W = 256" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": " correspond to the height and width of the input image, respectively. 
Initially, we extract two types of features: the hand feature " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{\\mathrm{I}}" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": " from the blurry hand image " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{I}" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": " and the event feature " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{\\mathrm{E}}" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": " from the event data " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{E}" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": ". We achieve this by utilizing a pre-trained Feature Pyramid Network (FPN) [16] trained on ImageNet [4] for the image feature and shallow convolutional networks for the event feature. Here, unlike a single image that can be used to recover multiple 3D hands at different time steps, each event feature corresponds to a single 3D hand. For example, with an exposure time of " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "80~\\mathrm{ms}" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": " for blurry images and accumulating events within 1 ms intervals in our EBH dataset, a single blurry image can be aligned with up to 80 event frames. Therefore, we design our event feature extraction network to be shallow for efficiency. Also, note that both of these features, " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{\\mathrm{I}}" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{\\mathrm{E}}" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": ", share the same dimensions, which are " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{h \\times w \\times c}" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "h = \\frac{H}{8}" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "w = \\frac{W}{8}" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "c = 256" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": " represent the height, width, and number of channels of the extracted features. Subsequently, we concatenate these two features and pass the combined feature through five residual blocks to obtain a fused feature " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": ". 
However, when only one of an image and an event is available, we do not perform the concatenation step. Instead, we feed either " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{\\mathrm{I}}" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{\\mathrm{E}}" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": " into the residual blocks to obtain the feature " + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 130, + 438, + 482, + 666 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "type": "text", + "content": "JK. Park et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 136, + 115, + 478, + 203 + ], + "blocks": [ + { + "bbox": [ + 136, + 115, + 478, + 203 + ], + "lines": [ + { + "bbox": [ + 136, + 115, + 478, + 203 + ], + "spans": [ + { + "bbox": [ + 136, + 115, + 478, + 203 + ], + "type": "image", + "image_path": "58faa59bf870ca72203aef8bd4a1628711022aad6446d9a948c2e255bf153be5.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 209, + 482, + 276 + ], + "lines": [ + { + "bbox": [ + 130, + 209, + 482, + 276 + ], + "spans": [ + { + "bbox": [ + 130, + 209, + 482, + 276 + ], + "type": "text", + "content": "Fig. 5: The overall architecture of EBHNet. Our EBHNet first extracts feature " + }, + { + "bbox": [ + 130, + 209, + 482, + 276 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 130, + 209, + 482, + 276 + ], + "type": "text", + "content": " in three scenarios: image only, event only, and both. During training and evaluation, we employ a temporal splitter with different temporal embedding strategies. Specifically, during the training phase, we utilize temporal embedding value when corresponding ground truths are available. Conversely, during the testing phase, we adopt novel temporal embedding values to generate 3D hand meshes at novel time steps." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 290, + 250, + 303 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 290, + 250, + 303 + ], + "spans": [ + { + "bbox": [ + 132, + 290, + 250, + 303 + ], + "type": "text", + "content": "4.2 Temporal Splitter" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 315, + 482, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 315, + 482, + 399 + ], + "spans": [ + { + "bbox": [ + 130, + 315, + 482, + 399 + ], + "type": "text", + "content": "Unlike event data, which can determine a single 3D hand for each time step, a blurry image contains a hand's trajectory, making 3D hand recovery from it much more challenging. Hence, we introduce our temporal splitter, particularly useful for blurry image input. Our temporal splitter splits the 3D hand trajectory in the blurry image into a single 3D hand of a given query time step " + }, + { + "bbox": [ + 130, + 315, + 482, + 399 + ], + "type": "inline_equation", + "content": "\\mathbf{t}" + }, + { + "bbox": [ + 130, + 315, + 482, + 399 + ], + "type": "text", + "content": ". When an event is included in the input, our temporal splitter is trained to produce a single 3D hand that corresponds to the time step of the input event data." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "spans": [ + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "text", + "content": "Network architecture. The architectural design of our temporal splitter is depicted in Fig. 5. Starting with the feature " + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "text", + "content": " that contains information related to the hand across the entire temporal axis, we introduce our temporal splitter to refine this feature and obtain the feature of the hand at a specific time step " + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{t}" + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "text", + "content": ", denoted as " + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{\\mathbf{t}}" + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "text", + "content": ". To acquire the hand feature at a particular time step, we incorporate a temporal embedding " + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{t}" + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "text", + "content": ". 
Here, " + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{t}" + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "text", + "content": " ranges from 0 to 1 and represents a normalized value within the exposure time, where " + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{t} = 0" + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "text", + "content": " indicates the motion's initiation, " + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{t} = 0.5" + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "text", + "content": " corresponds to the middle, and " + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{t} = 1" + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "text", + "content": " marks its conclusion. An important difference from the frequently used frequency encoding method [19] is our straightforward decision to append the temporal embedding " + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{t}" + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "text", + "content": " to the channel dimension of the feature " + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "text", + "content": ", and then consolidate these features by applying a " + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 130, + 400, + 482, + 555 + ], + "type": "text", + "content": " convolution. The justification for our temporal embedding can be found in our experimental section." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 556, + 482, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 556, + 482, + 628 + ], + "spans": [ + { + "bbox": [ + 130, + 556, + 482, + 628 + ], + "type": "text", + "content": "Then, the final output of the temporal splitter " + }, + { + "bbox": [ + 130, + 556, + 482, + 628 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 556, + 482, + 628 + ], + "type": "text", + "content": " is generated using a conventional self-attention Transformer [38]. To do this, we first derive the query " + }, + { + "bbox": [ + 130, + 556, + 482, + 628 + ], + "type": "inline_equation", + "content": "\\mathbf{q}_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 556, + 482, + 628 + ], + "type": "text", + "content": " and the key-value pairs " + }, + { + "bbox": [ + 130, + 556, + 482, + 628 + ], + "type": "inline_equation", + "content": "\\mathbf{k}_{\\mathrm{t}} - \\mathbf{v}_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 556, + 482, + 628 + ], + "type": "text", + "content": " from the feature maps after applying both positional and temporal embedding. This extraction is accomplished through the use of three separate " + }, + { + "bbox": [ + 130, + 556, + 482, + 628 + ], + "type": "inline_equation", + "content": "1\\times 1" + }, + { + "bbox": [ + 130, + 556, + 482, + 628 + ], + "type": "text", + "content": " convolutions. 
Subsequently, these query and key-value pairs are input into self-attention-based Transformer blocks:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 239, + 638, + 481, + 669 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 638, + 481, + 669 + ], + "spans": [ + { + "bbox": [ + 239, + 638, + 481, + 669 + ], + "type": "interline_equation", + "content": "\\mathbf {R} _ {\\mathrm {t}} = \\mathbf {q} _ {\\mathrm {t}} + \\operatorname {s o f t m a x} \\left(\\frac {\\mathbf {q} _ {\\mathrm {t}} \\mathbf {k} _ {\\mathrm {t}} ^ {T}}{\\sqrt {d _ {\\mathbf {k} _ {\\mathrm {t}}}}} \\right) \\mathbf {v} _ {\\mathrm {t}}, \\tag {1}", + "image_path": "b62039a7859c509deb37fa92ea4e647442d7da26dd1ab7be6a83d0fd765db068.jpg" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 337, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 337, + 91, + 447, + 100 + ], + "type": "text", + "content": "EBH Dataset and Network" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 257, + 128, + 481, + 141 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 128, + 481, + 141 + ], + "spans": [ + { + "bbox": [ + 257, + 128, + 481, + 141 + ], + "type": "interline_equation", + "content": "\\mathbf {F} _ {\\mathrm {t}} = \\mathbf {R} _ {\\mathrm {t}} + \\operatorname {M L P} \\left(\\mathbf {R} _ {\\mathrm {t}}\\right), \\tag {2}", + "image_path": "48a0434ed25b69870a82acecc92528fd0023c215a29ec32b2b84e39f41e329df.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 146, + 480, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 146, + 480, + 205 + ], + "spans": [ + { + "bbox": [ + 130, + 146, + 480, + 205 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 146, + 480, + 205 + ], + "type": "inline_equation", + "content": "d_{\\mathbf{k}_{\\mathrm{t}}} = 256" + }, + { + "bbox": [ + 130, + 146, + 480, + 205 + ], + "type": "text", + "content": " is the feature dimension of the key " + }, + { + "bbox": [ + 130, + 146, + 480, + 205 + ], + "type": "inline_equation", + "content": "\\mathbf{k}_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 146, + 480, + 205 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 146, + 480, + 205 + ], + "type": "inline_equation", + "content": "\\mathbf{R}_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 146, + 480, + 205 + ], + "type": "text", + "content": " is the residual feature of Transformer. In addition, MLP refers to a multi-layer perceptron, which is responsible for increasing the dimension of the input feature by a factor of 2 and subsequently reducing it back to its original dimension using two separate " + }, + { + "bbox": [ + 130, + 146, + 480, + 205 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 130, + 146, + 480, + 205 + ], + "type": "text", + "content": " convolution layers." 
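The temporal-splitter recipe just described maps onto a few lines of PyTorch: the scalar query time t is appended to the channel dimension of F and consolidated with a 1×1 convolution, three separate 1×1 convolutions derive the query and key-value pairs, and Eqs. (1)-(2) follow. The sketch below is our illustrative reconstruction, not the authors' code: module and variable names are ours, the positional embedding is omitted for brevity, and d_k = 256 follows the text.

```python
import torch
import torch.nn as nn

class TemporalSplitter(nn.Module):
    """Hypothetical sketch of the temporal splitter (Sec. 4.2, Eqs. (1)-(2))."""

    def __init__(self, channels: int = 256):  # d_k = 256 as stated in the text
        super().__init__()
        # Append the temporal embedding t to the channel dimension of F,
        # then consolidate with a 1x1 convolution.
        self.fuse = nn.Conv2d(channels + 1, channels, kernel_size=1)
        # Three separate 1x1 convolutions derive the query and key-value pairs.
        self.to_q = nn.Conv2d(channels, channels, kernel_size=1)
        self.to_k = nn.Conv2d(channels, channels, kernel_size=1)
        self.to_v = nn.Conv2d(channels, channels, kernel_size=1)
        # Eq. (2): MLP as two 1x1 convolutions that double, then restore, the dimension.
        self.mlp = nn.Sequential(
            nn.Conv2d(channels, 2 * channels, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(2 * channels, channels, kernel_size=1),
        )

    def forward(self, feat: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
        # feat: (B, C, H, W) trajectory feature F; t: (B,) query time in [0, 1].
        b, c, h, w = feat.shape
        t_map = t.view(b, 1, 1, 1).expand(b, 1, h, w)   # broadcast t spatially
        x = self.fuse(torch.cat([feat, t_map], dim=1))  # positional embedding omitted here
        q = self.to_q(x).flatten(2).transpose(1, 2)     # (B, HW, C)
        k = self.to_k(x).flatten(2).transpose(1, 2)
        v = self.to_v(x).flatten(2).transpose(1, 2)
        # Eq. (1): R_t = q_t + softmax(q_t k_t^T / sqrt(d_k)) v_t
        attn = torch.softmax(q @ k.transpose(1, 2) / c ** 0.5, dim=-1)
        r = (q + attn @ v).transpose(1, 2).view(b, c, h, w)
        # Eq. (2): F_t = R_t + MLP(R_t)
        return r + self.mlp(r)
```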
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 206, + 481, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 206, + 481, + 313 + ], + "spans": [ + { + "bbox": [ + 130, + 206, + 481, + 313 + ], + "type": "text", + "content": "Novel time step prediction. To enable our temporal splitter to predict 3D hands at novel, unsupervised time steps, we adjust the temporal embedding " + }, + { + "bbox": [ + 130, + 206, + 481, + 313 + ], + "type": "inline_equation", + "content": "\\mathbf{t}" + }, + { + "bbox": [ + 130, + 206, + 481, + 313 + ], + "type": "text", + "content": " between the training and testing phases. During training, where we have access to ground truth 3D hands at specific time points, we restrict the values of our temporal embedding " + }, + { + "bbox": [ + 130, + 206, + 481, + 313 + ], + "type": "inline_equation", + "content": "\\mathbf{t}" + }, + { + "bbox": [ + 130, + 206, + 481, + 313 + ], + "type": "text", + "content": " to correspond to these ground truth time steps. For example, in the case of the BlurHand dataset [27], which provides 3D annotations at the motion's start, middle, and end, we set " + }, + { + "bbox": [ + 130, + 206, + 481, + 313 + ], + "type": "inline_equation", + "content": "\\mathbf{t} = \\{0,0.5,1\\}" + }, + { + "bbox": [ + 130, + 206, + 481, + 313 + ], + "type": "text", + "content": ". This configuration guarantees the desired generation of 3D hands at specific time steps, ensuring alignment with the available ground-truth data during the training phase." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 314, + 482, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 314, + 482, + 398 + ], + "spans": [ + { + "bbox": [ + 130, + 314, + 482, + 398 + ], + "type": "text", + "content": "Conversely, during testing, we use an unconstrained approach, allowing continuous values for temporal embedding " + }, + { + "bbox": [ + 130, + 314, + 482, + 398 + ], + "type": "inline_equation", + "content": "\\mathbf{t}" + }, + { + "bbox": [ + 130, + 314, + 482, + 398 + ], + "type": "text", + "content": " within the range of 0 to 1. This differs from the training phase, where we restrict the temporal embedding values to time steps with ground truth. This flexibility enables our model to generate 3D hands at novel time steps that were not explicitly provided in the training data. For example, we can produce 3D hands at time steps like 0.25 or 0.75, enhancing the versatility of our model's output when confronted with unseen time steps." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 415, + 285, + 428 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 415, + 285, + 428 + ], + "spans": [ + { + "bbox": [ + 132, + 415, + 285, + 428 + ], + "type": "text", + "content": "4.3 Obtaining Final Outputs" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "spans": [ + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": "From the hand feature " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": ", we extract the joint features by projecting " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": " into a " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "dJ" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": "-dimensional feature through a " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "1\\times 1" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": " convolution layer [20]. These features are then reshaped into 3D heatmaps " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathbf{H}_{\\mathrm{t}}\\in \\mathbb{R}^{J\\times h\\times w\\times d}" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "d = 32" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": " represents predefined depth discretization, and " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "J = 21" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": " is the number of hand joints. Subsequently, a soft-argmax operation [37] is applied to the heatmap to obtain 3D joint coordinates " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathbf{J}_{\\mathrm{t}}\\in \\mathbb{R}^{J\\times 3}" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": ". From " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathbf{J}_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": ", we derive the MANO pose " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": " and shape " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\beta_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": " parameters. 
Specifically, the shape parameter " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\beta_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": " is obtained through a fully connected layer applied to " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": " after global average pooling [14]. For the pose parameter " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": ", grid-sampling [7,20] is conducted on " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathbf{J}_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": " to obtain joint features " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{\\mathrm{J}_{\\mathrm{t}}}" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": ", and then the pose parameter " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\theta_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": " is obtained by feeding " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_{\\mathrm{J}_{\\mathrm{t}}}" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": " into a fully connected layer after flattening. Subsequently, the MANO parameters are passed to the MANO layer to produce 3D hand meshes " + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{\\mathrm{t}}" + }, + { + "bbox": [ + 130, + 435, + 482, + 579 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 597, + 262, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 597, + 262, + 609 + ], + "spans": [ + { + "bbox": [ + 132, + 597, + 262, + 609 + ], + "type": "text", + "content": "4.4 Objective Functions" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 617, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 617, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 617, + 481, + 666 + ], + "type": "text", + "content": "We minimize objective functions, defined as a weighted sum of L1 distances between estimated values " + }, + { + "bbox": [ + 130, + 617, + 481, + 666 + ], + "type": "inline_equation", + "content": "(\\theta_{\\mathrm{t}},\\beta_{\\mathrm{t}},\\mathbf{J}_{\\mathrm{t}}," + }, + { + "bbox": [ + 130, + 617, + 481, + 666 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 617, + 481, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{\\mathrm{t}})" + }, + { + "bbox": [ + 130, + 617, + 481, + 666 + ], + "type": "text", + "content": " and their respective ground truth. 
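The Sec. 4.3 output pipeline can be sketched as follows. J = 21 joints and d = 32 depth bins are taken from the text; the head names, the MANO parameter sizes (48-dim pose, 10-dim shape), and other minor details are our assumptions, and the final MANO layer is only indicated in a comment.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

J, D = 21, 32  # number of hand joints and depth-discretization bins (from the text)

class OutputHeads(nn.Module):
    """Hypothetical sketch of the output heads described in Sec. 4.3."""

    def __init__(self, channels: int = 256, pose_dim: int = 48, shape_dim: int = 10):
        super().__init__()
        self.to_heatmap = nn.Conv2d(channels, D * J, kernel_size=1)  # dJ-dim projection
        self.shape_fc = nn.Linear(channels, shape_dim)               # beta_t after GAP
        self.pose_fc = nn.Linear(channels * J, pose_dim)             # theta_t from joint feats

    def forward(self, feat_t: torch.Tensor):
        b, c, h, w = feat_t.shape
        # 3D heatmaps H_t (stored as (B, J, D, h, w)), normalized per joint volume.
        heat = self.to_heatmap(feat_t).view(b, J, D, h, w)
        prob = torch.softmax(heat.flatten(2), dim=-1).view(b, J, D, h, w)
        # Soft-argmax: expected (x, y, z) position under the normalized heatmap.
        xs = torch.arange(w, dtype=feat_t.dtype, device=feat_t.device)
        ys = torch.arange(h, dtype=feat_t.dtype, device=feat_t.device)
        zs = torch.arange(D, dtype=feat_t.dtype, device=feat_t.device)
        x = (prob.sum(dim=(2, 3)) * xs).sum(-1)  # (B, J)
        y = (prob.sum(dim=(2, 4)) * ys).sum(-1)
        z = (prob.sum(dim=(3, 4)) * zs).sum(-1)
        joints = torch.stack([x, y, z], dim=-1)  # J_t in R^{J x 3}
        # Shape beta_t: fully connected layer after global average pooling.
        beta = self.shape_fc(feat_t.mean(dim=(2, 3)))
        # Pose theta_t: grid-sample F_t at the 2D joint locations, flatten, then FC.
        grid = torch.stack([x / (w - 1), y / (h - 1)], dim=-1) * 2 - 1  # to [-1, 1]
        joint_feat = F.grid_sample(feat_t, grid.unsqueeze(2), align_corners=True)
        theta = self.pose_fc(joint_feat.flatten(1))
        # theta_t and beta_t would then pass through a MANO layer to yield meshes V_t.
        return joints, theta, beta
```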
Among our input combinations (e.g., only image, only event, and both), the event stream effectively reduces temporal ambiguity, while the image-only input" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 228, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 228, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 228, + 101 + ], + "type": "text", + "content": "JK. Park et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 213, + 168, + 405, + 232 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 481, + 158 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 481, + 158 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 481, + 158 + ], + "type": "text", + "content": "Table 1: Accuracy of the GT. We measure metrics by comparing predicted keypoints, masks, and their depth maps with their ground truth counterparts, which are obtained from an off-the-shelf 2D keypoint estimator [39], an off-the-shelf matting model [15], and a Kinect camera." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 213, + 168, + 405, + 232 + ], + "lines": [ + { + "bbox": [ + 213, + 168, + 405, + 232 + ], + "spans": [ + { + "bbox": [ + 213, + 168, + 405, + 232 + ], + "type": "table", + "html": "
<tr><td>Evaluation on</td><td>Metric</td><td>Value</td></tr>
<tr><td>2D keypoints</td><td>distance</td><td>0.18 (pixel)</td></tr>
<tr><td>Mask</td><td>IoU</td><td>84 (%)</td></tr>
<tr><td>Depth map</td><td>distance</td><td>8 (mm)</td></tr>
", + "image_path": "1e80e5b78cf72f2ed339c60e5bfa69dad07d93fb1f43bdfd243a874b1d95eddf.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 254, + 481, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 254, + 481, + 338 + ], + "spans": [ + { + "bbox": [ + 132, + 254, + 481, + 338 + ], + "type": "text", + "content": "may struggle. Therefore, we employ different objective functions for each case. For the image-only input, we calculate losses for both the original and reversed ground truth, obtained by converting the ground truth in the reverse temporal order. The model is then updated by selecting the smaller loss between them. On the other hand, when the event stream is used as input (event-only or combined with images), we supervise EBHNet with a loss obtained from our prediction and the original ground truth without considering temporal ambiguity." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 133, + 357, + 229, + 370 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 357, + 229, + 370 + ], + "spans": [ + { + "bbox": [ + 133, + 357, + 229, + 370 + ], + "type": "text", + "content": "5 Experiments" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 133, + 381, + 324, + 393 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 381, + 324, + 393 + ], + "spans": [ + { + "bbox": [ + 133, + 381, + 324, + 393 + ], + "type": "text", + "content": "5.1 Datasets and Evaluation Metrics" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 402, + 481, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 402, + 481, + 437 + ], + "spans": [ + { + "bbox": [ + 132, + 402, + 481, + 437 + ], + "type": "text", + "content": "InterHand2.6M. InterHand2.6M [24] is a large-scale dataset, which consists of sharp hand images and 3D annotations. We use the BlurHand [27]'s splits for the training and evaluation on InterHand2.6M." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 437, + 481, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 437, + 481, + 521 + ], + "spans": [ + { + "bbox": [ + 132, + 437, + 481, + 521 + ], + "type": "text", + "content": "BlurHand. BlurHand [27] consists of blurry hand images created by synthetically averaging consecutive 5 frames from the InterHand2.6M dataset [24] (30fps). This process imitates motion blur typically seen with a 6fps (30/5) shutter speed. Note that BlurHand is derived from the InterHand2.6M dataset, which contains a substantial amount of static hand images; hence, many images in BlurHand do not have large blur, as shown in Fig. 4. We follow their protocol for the training and evaluation on BlurHand." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 521, + 481, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 521, + 481, + 641 + ], + "spans": [ + { + "bbox": [ + 132, + 521, + 481, + 641 + ], + "type": "text", + "content": "EBH. Among the six ground truths for each blurry hand image (red and orange in Fig. 3), we use only a subset during training to assess our model's ability to generate 3D hands at both learned and novel time steps. During training, we use four of the ground truth samples. For example, in Fig. 3, out of the ground truth for six-time steps (depicted as red and orange steps), we use four for training (the first and last of red and the first two orange). 
During testing, we evaluate models on the remaining two ground truth samples (middle red and the last orange in Fig. 3) to check the generalizability to unseen time steps. We also evaluate on the trained time steps to assess the model's proficiency in recovering 3D hands at the seen time steps." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 133, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 133, + 641, + 481, + 665 + ], + "type": "text", + "content": "Evaluation metrics. To assess the accuracy of predicted hands, we employ two metrics: the Mean Per Joint Position Error (MPJPE) and the Mean Per Vertex" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 337, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 337, + 91, + 447, + 101 + ], + "type": "text", + "content": "EBH Dataset and Network" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 114, + 179, + 159 + ], + "blocks": [ + { + "bbox": [ + 133, + 114, + 179, + 159 + ], + "lines": [ + { + "bbox": [ + 133, + 114, + 179, + 159 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 179, + 159 + ], + "type": "image", + "image_path": "321bbab1bed0095e519d467609a5f32dd947fa6843098ad87a211d91958f1bdd.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 137, + 160, + 173, + 168 + ], + "lines": [ + { + "bbox": [ + 137, + 160, + 173, + 168 + ], + "spans": [ + { + "bbox": [ + 137, + 160, + 173, + 168 + ], + "type": "text", + "content": "(a) Input I" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 182, + 119, + 219, + 159 + ], + "blocks": [ + { + "bbox": [ + 182, + 119, + 219, + 159 + ], + "lines": [ + { + "bbox": [ + 182, + 119, + 219, + 159 + ], + "spans": [ + { + "bbox": [ + 182, + 119, + 219, + 159 + ], + "type": "image", + "image_path": "857c88f58b97558498095c23da5cd485492a5c7b7e9f8a54d742fb6f7cc3ad41.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 185, + 160, + 220, + 168 + ], + "lines": [ + { + "bbox": [ + 185, + 160, + 220, + 168 + ], + "spans": [ + { + "bbox": [ + 185, + 160, + 220, + 168 + ], + "type": "text", + "content": "(b) From I" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 225, + 114, + 271, + 159 + ], + "blocks": [ + { + "bbox": [ + 225, + 114, + 271, + 159 + ], + "lines": [ + { + "bbox": [ + 225, + 114, + 271, + 159 + ], + "spans": [ + { + "bbox": [ + 225, + 114, + 271, + 159 + ], + "type": "image", + "image_path": "efca711afac477bc32089ac3e911c75b7c4ac426e40eb33e999fad5257ba6ddb.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 227, + 160, + 269, + 169 + ], + "lines": [ + { + "bbox": [ + 227, + 160, + 269, + 169 + ], + "spans": [ + { + "bbox": [ + 227, + 160, + 269, + 169 + ], + "type": "text", + "content": 
"(c) Deblur " + }, + { + "bbox": [ + 227, + 160, + 269, + 169 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{\\mathrm{D}}" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 273, + 160, + 313, + 169 + ], + "lines": [ + { + "bbox": [ + 273, + 160, + 313, + 169 + ], + "spans": [ + { + "bbox": [ + 273, + 160, + 313, + 169 + ], + "type": "text", + "content": "(d) From " + }, + { + "bbox": [ + 273, + 160, + 313, + 169 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{\\mathrm{D}}" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 130, + 182, + 482, + 228 + ], + "lines": [ + { + "bbox": [ + 130, + 182, + 482, + 228 + ], + "spans": [ + { + "bbox": [ + 130, + 182, + 482, + 228 + ], + "type": "text", + "content": "Fig. 6 & Table 2: (Left) Visual comparison with deblurring. (b) and (d) show the estimated 3D hands using " + }, + { + "bbox": [ + 130, + 182, + 482, + 228 + ], + "type": "inline_equation", + "content": "\\mathbf{I}" + }, + { + "bbox": [ + 130, + 182, + 482, + 228 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 182, + 482, + 228 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_{\\mathrm{D}}" + }, + { + "bbox": [ + 130, + 182, + 482, + 228 + ], + "type": "text", + "content": ", respectively. (Right) Efficacy of EBHNet compared to deblurring baseline. Results are reported when only images are used as input. For the EBH dataset, we employ " + }, + { + "bbox": [ + 130, + 182, + 482, + 228 + ], + "type": "inline_equation", + "content": "\\mathbf{t} = 0.6" + }, + { + "bbox": [ + 130, + 182, + 482, + 228 + ], + "type": "text", + "content": " for evaluating the Mid." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 273, + 118, + 313, + 155 + ], + "blocks": [ + { + "bbox": [ + 273, + 118, + 313, + 155 + ], + "lines": [ + { + "bbox": [ + 273, + 118, + 313, + 155 + ], + "spans": [ + { + "bbox": [ + 273, + 118, + 313, + 155 + ], + "type": "image", + "image_path": "78baca8b44dce9563952fe87e8d691e8437cce35e2ad560ad40848d8211ba4d9.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 321, + 120, + 359, + 153 + ], + "blocks": [ + { + "bbox": [ + 321, + 120, + 359, + 153 + ], + "lines": [ + { + "bbox": [ + 321, + 120, + 359, + 153 + ], + "spans": [ + { + "bbox": [ + 321, + 120, + 359, + 153 + ], + "type": "image", + "image_path": "72bfe4e37fd8ad5c4a57a8c2fc51cd165087109fb4b221101d98fbb8b7cd90ed.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 328, + 160, + 351, + 168 + ], + "lines": [ + { + "bbox": [ + 328, + 160, + 351, + 168 + ], + "spans": [ + { + "bbox": [ + 328, + 160, + 351, + 168 + ], + "type": "text", + "content": "(e) GT" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 376, + 121, + 462, + 168 + ], + "blocks": [ + { + "bbox": [ + 376, + 121, + 462, + 168 + ], + "lines": [ + { + "bbox": [ + 376, + 121, + 462, + 168 + ], + "spans": [ + { + "bbox": [ + 376, + 121, + 462, + 168 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Data</td><td rowspan="2">Deblur</td><td colspan="3">MPJPE↓</td></tr>
<tr><td>Init.</td><td>Mid.</td><td>Final</td></tr>
<tr><td>BH</td><td>-</td><td>16.79</td></tr>
<tr><td>17.23</td><td>16.45</td></tr>
<tr><td>EBH</td><td>-</td><td>16.14</td></tr>
<tr><td>16.10</td><td>15.23</td></tr>
", + "image_path": "dc74ea62e5f637bbee8d8d2c0b976559942d972a7bb0b13b619774cd498833e0.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 251, + 482, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 251, + 482, + 337 + ], + "spans": [ + { + "bbox": [ + 130, + 251, + 482, + 337 + ], + "type": "text", + "content": "Position Error (MPVPE). These metrics gauge the Euclidean distance (mm) between the estimated coordinates and the ground truth coordinates. Here, we measure the metrics after aligning the translation of the root joint (i.e., wrist), following the prior researches [3, 27]. Also, to evaluate the temporal consistency of hand motion, we use the acceleration error proposed in HMMR [9]. Here, the acceleration error calculates the average difference between the predicted and ground truth accelerations of each joint of hands." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 356, + 246, + 366 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 356, + 246, + 366 + ], + "spans": [ + { + "bbox": [ + 132, + 356, + 246, + 366 + ], + "type": "text", + "content": "5.2 Ablation Studies" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 130, + 377, + 482, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 377, + 482, + 533 + ], + "spans": [ + { + "bbox": [ + 130, + 377, + 482, + 533 + ], + "type": "text", + "content": "Validation of EBH Dataset Accuracy To validate the accuracy of our EBH dataset, Tab. 1 shows metrics for predicted 2D keypoints, masks, and depth maps against their ground truth counterparts. The predicted 2D keypoints, masks, and depth maps are either projected (2D keypoints) or rendered (masks and depth maps) from the predicted 3D meshes. Ground truth 2D keypoints, masks, and depth maps are obtained from a 2D keypoint detection model [39], a matting model [15], and the output of a Kinect camera. For 2D keypoints, we calculate the mean distance between 21 joints from the projected and detected keypoints. For masks, we compute the Intersection over Union (IoU) between the rendered mask and the mask from the matting model. For the depth map, we measure the pixel-wise L1 distance between the predicted and camera-derived depth maps. Here, all metrics are calculated after cropping and resizing the sharp hand images to " + }, + { + "bbox": [ + 130, + 377, + 482, + 533 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{256\\times 256\\times 3}" + }, + { + "bbox": [ + 130, + 377, + 482, + 533 + ], + "type": "text", + "content": ". For additional details, refer to our supplementary material." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 130, + 533, + 482, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 605 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 605 + ], + "type": "text", + "content": "Comparison with deblurring. In assessing EBHNet's performance in producing 3D hands from blurry hand images, we compare it with deblurring methods from prior works [2, 30, 31]. To this end, we integrate a state-of-the-art deblurring network [2] before applying EBHNet. Tab. 2 reveals that ours significantly outperforms the one that employs deblurring before 3D hand recovery. The performance decline can be attributed to several factors." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "content": "First, as shown in Fig. 6a, even advanced deblurring networks struggle to restore sharp hand images from blurry ones. Second, deblurring processes often eliminate valuable temporal information for 3D hand recovery. For example, Fig. 6c exhibits the absence of the middle finger, leading to inaccurate 3D hand mesh in Fig. 6d. Also, deblurring can restrict networks from producing single" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "type": "text", + "content": "JK. Park et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 148, + 168, + 291, + 239 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 301, + 159 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 301, + 159 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 301, + 159 + ], + "type": "text", + "content": "Table 3: Comparison of models with various temporal encoding. Results are reported using images as input. Freq. indicates frequency embedding [19]." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 148, + 168, + 291, + 239 + ], + "lines": [ + { + "bbox": [ + 148, + 168, + 291, + 239 + ], + "spans": [ + { + "bbox": [ + 148, + 168, + 291, + 239 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Data</td><td rowspan="2">Methods</td><td colspan="3">MPJPE↓</td></tr>
<tr><td>Init.</td><td>Mid.</td><td>Final</td></tr>
<tr><td rowspan="2">BH</td><td>Freq.</td><td>18.09</td><td>17.07</td><td>18.27</td></tr>
<tr><td>Ours</td><td>17.23</td><td>16.45</td><td>17.17</td></tr>
<tr><td rowspan="2">EBH</td><td>Freq.</td><td>16.71</td><td>16.21</td><td>17.98</td></tr>
<tr><td>Ours</td><td>16.10</td><td>15.23</td><td>17.15</td></tr>
", + "image_path": "cf8abb563101a359ca692799f5cb286ab89a6f0b6e70b178e077e9c98470f8be.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 316, + 168, + 481, + 239 + ], + "blocks": [ + { + "bbox": [ + 312, + 114, + 482, + 159 + ], + "lines": [ + { + "bbox": [ + 312, + 114, + 482, + 159 + ], + "spans": [ + { + "bbox": [ + 312, + 114, + 482, + 159 + ], + "type": "text", + "content": "Table 4: Comparison of EBHNet with various input combinations. We use four GTs in training and evaluate the model on the corresponding time steps." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 316, + 168, + 481, + 239 + ], + "lines": [ + { + "bbox": [ + 316, + 168, + 481, + 239 + ], + "spans": [ + { + "bbox": [ + 316, + 168, + 481, + 239 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Input</td><td colspan="4">MPJPE↓</td></tr>
<tr><td>t=0</td><td>t=0.2</td><td>t=0.6</td><td>t=0.8</td></tr>
<tr><td>I</td><td>16.10</td><td>14.29</td><td>15.23</td><td>16.18</td></tr>
<tr><td>E</td><td>28.91</td><td>29.35</td><td>28.59</td><td>30.80</td></tr>
<tr><td>I & E</td><td>12.73</td><td>12.00</td><td>14.24</td><td>16.35</td></tr>
", + "image_path": "a2b54b3d13ba2107a575e5d9ce27b27cd7a5ee86427cbeff8c43f1157819b117.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 133, + 249, + 186, + 301 + ], + "blocks": [ + { + "bbox": [ + 133, + 249, + 186, + 301 + ], + "lines": [ + { + "bbox": [ + 133, + 249, + 186, + 301 + ], + "spans": [ + { + "bbox": [ + 133, + 249, + 186, + 301 + ], + "type": "image", + "image_path": "5f96b90fd9ea1cbed99e36a345d3d40257cb29f227f01aa5d34f453423041549.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 137, + 302, + 180, + 311 + ], + "lines": [ + { + "bbox": [ + 137, + 302, + 180, + 311 + ], + "spans": [ + { + "bbox": [ + 137, + 302, + 180, + 311 + ], + "type": "text", + "content": "(a) Image I" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 192, + 249, + 245, + 301 + ], + "blocks": [ + { + "bbox": [ + 192, + 249, + 245, + 301 + ], + "lines": [ + { + "bbox": [ + 192, + 249, + 245, + 301 + ], + "spans": [ + { + "bbox": [ + 192, + 249, + 245, + 301 + ], + "type": "image", + "image_path": "f7cb76269bb1a18991c68d91d552945c90f6ddd0e5d2972f50c707c573d8298a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 195, + 302, + 240, + 311 + ], + "lines": [ + { + "bbox": [ + 195, + 302, + 240, + 311 + ], + "spans": [ + { + "bbox": [ + 195, + 302, + 240, + 311 + ], + "type": "text", + "content": "(b) Event E" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 260, + 251, + 293, + 297 + ], + "blocks": [ + { + "bbox": [ + 260, + 251, + 293, + 297 + ], + "lines": [ + { + "bbox": [ + 260, + 251, + 293, + 297 + ], + "spans": [ + { + "bbox": [ + 260, + 251, + 293, + 297 + ], + "type": "image", + "image_path": "e4aff721f6d1d4d7e6bb7130091f0f3991aa8962422bb712177689925ec01f5a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 259, + 302, + 293, + 311 + ], + "lines": [ + { + "bbox": [ + 259, + 302, + 293, + 311 + ], + "spans": [ + { + "bbox": [ + 259, + 302, + 293, + 311 + ], + "type": "text", + "content": "(c) On " + }, + { + "bbox": [ + 259, + 302, + 293, + 311 + ], + "type": "inline_equation", + "content": "\\mathbf{E}" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 320, + 254, + 353, + 297 + ], + "blocks": [ + { + "bbox": [ + 320, + 254, + 353, + 297 + ], + "lines": [ + { + "bbox": [ + 320, + 254, + 353, + 297 + ], + "spans": [ + { + "bbox": [ + 320, + 254, + 353, + 297 + ], + "type": "image", + "image_path": "028ff2548d4bf0d37c1ddb1a72c18a5c0208ebf3f7f89ef7dc5f03ae98334c03.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 319, + 302, + 352, + 311 + ], + "lines": [ + { + "bbox": [ + 319, + 302, + 352, + 311 + ], + "spans": [ + { + "bbox": [ + 319, + 302, + 352, + 311 + ], + "type": "text", + "content": "(d) On " + }, + { + "bbox": [ + 319, + 302, + 352, + 311 + ], + "type": "inline_equation", + "content": "\\mathbf{I}" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 378, + 252, + 412, + 298 + ], + "blocks": [ + { + "bbox": [ + 378, + 252, + 412, + 298 + ], + "lines": [ + { + "bbox": [ + 378, + 252, + 412, + 
298 + ], + "spans": [ + { + "bbox": [ + 378, + 252, + 412, + 298 + ], + "type": "image", + "image_path": "7902e7b36cef40a32316e7ab9a607fc098bad619636a39a074d9446bc5e31a23.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 373, + 302, + 416, + 311 + ], + "lines": [ + { + "bbox": [ + 373, + 302, + 416, + 311 + ], + "spans": [ + { + "bbox": [ + 373, + 302, + 416, + 311 + ], + "type": "text", + "content": "(e) On both" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 438, + 251, + 471, + 299 + ], + "blocks": [ + { + "bbox": [ + 438, + 251, + 471, + 299 + ], + "lines": [ + { + "bbox": [ + 438, + 251, + 471, + 299 + ], + "spans": [ + { + "bbox": [ + 438, + 251, + 471, + 299 + ], + "type": "image", + "image_path": "cfae70374d49965e73dfa987f57abb9ab62adb6f136e952603a9f733981c4431.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 441, + 302, + 466, + 311 + ], + "lines": [ + { + "bbox": [ + 441, + 302, + 466, + 311 + ], + "spans": [ + { + "bbox": [ + 441, + 302, + 466, + 311 + ], + "type": "text", + "content": "(f) GT" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 131, + 320, + 482, + 354 + ], + "lines": [ + { + "bbox": [ + 131, + 320, + 482, + 354 + ], + "spans": [ + { + "bbox": [ + 131, + 320, + 482, + 354 + ], + "type": "text", + "content": "Fig.7: Comparison of different input combinations. Event information in severely blurry hand images can offer valuable complementary data, validating the effectiveness of incorporating event streams in our approach to address blurry hands." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 130, + 377, + 480, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 377, + 480, + 402 + ], + "spans": [ + { + "bbox": [ + 130, + 377, + 480, + 402 + ], + "type": "text", + "content": "outputs instead of sequences. Conversely, EBHNet excels at using the image's temporal information, producing multiple 3D hands at different time steps." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 130, + 403, + 481, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 403, + 481, + 497 + ], + "spans": [ + { + "bbox": [ + 130, + 403, + 481, + 497 + ], + "type": "text", + "content": "Comparison with frequency encoding. Tab. 3 compares our temporal embedding with conventional frequency encoding [19]. 
For frequency encoding, following BlurHandNet [27], we apply a sinusoidal operation on " + }, + { + "bbox": [ + 130, + 403, + 481, + 497 + ], + "type": "inline_equation", + "content": "\mathbf{t}" + }, + { + "bbox": [ + 130, + 403, + 481, + 497 + ], + "type": "text", + "content": ", expand the dimension to match feature " + }, + { + "bbox": [ + 130, + 403, + 481, + 497 + ], + "type": "inline_equation", + "content": "\mathbf{F}" + }, + { + "bbox": [ + 130, + 403, + 481, + 497 + ], + "type": "text", + "content": " using " + }, + { + "bbox": [ + 130, + 403, + 481, + 497 + ], + "type": "inline_equation", + "content": "1 \times 1" + }, + { + "bbox": [ + 130, + 403, + 481, + 497 + ], + "type": "text", + "content": " convolution layers, and then add the output to " + }, + { + "bbox": [ + 130, + 403, + 481, + 497 + ], + "type": "inline_equation", + "content": "\mathbf{F}" + }, + { + "bbox": [ + 130, + 403, + 481, + 497 + ], + "type": "text", + "content": ". The table shows that our more straightforward approach, which concatenates the temporal encoding " + }, + { + "bbox": [ + 130, + 403, + 481, + 497 + ], + "type": "inline_equation", + "content": "\mathbf{t}" + }, + { + "bbox": [ + 130, + 403, + 481, + 497 + ], + "type": "text", + "content": " along the channel dimension of feature " + }, + { + "bbox": [ + 130, + 403, + 481, + 497 + ], + "type": "inline_equation", + "content": "\mathbf{F}" + }, + { + "bbox": [ + 130, + 403, + 481, + 497 + ], + "type": "text", + "content": ", and processes them through " + }, + { + "bbox": [ + 130, + 403, + 481, + 497 + ], + "type": "inline_equation", + "content": "1 \times 1" + }, + { + "bbox": [ + 130, + 403, + 481, + 497 + ], + "type": "text", + "content": " convolution layers, consistently outperforms the frequency encoding on both BlurHand [27] and our EBH datasets." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "spans": [ + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "text", + "content": "Various input combinations for EBHNet. Tab. 4 shows the results for different input combinations, including images only, events only, and both. Note that, among the six available ground truths, we employ only four, at " + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "inline_equation", + "content": "t = \{0,0.2,0.6,0.8\}" + }, + { + "bbox": [ + 130, + 498, + 482, + 641 + ], + "type": "text", + "content": ", for supervision and evaluate the model on these four time steps. As shown, when using the event stream as the only input (second row in Tab. 4), the metrics show significant degradation. This is because events lack information for static hands, leading to a failure in recovering 3D hands. On the other hand, when both image and event streams are utilized (third row in Tab. 4), events complement the image in capturing 3D hands across time steps, particularly in blurry regions, yielding the best performance. Furthermore, Fig. 7 demonstrates that combining event and image input (Fig. 7e) produces results closest to the ground truth (Fig. 7f), consistent with Tab. 4." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "type": "text", + "content": "Novel time step reconstruction. Tab. 
5 evaluates the metrics at novel time steps, not included in the training phase, to further assess EBHNet's capacity to" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 337, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 337, + 91, + 447, + 100 + ], + "type": "text", + "content": "EBH Dataset and Network" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 114, + 181, + 163 + ], + "blocks": [ + { + "bbox": [ + 133, + 114, + 181, + 163 + ], + "lines": [ + { + "bbox": [ + 133, + 114, + 181, + 163 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 181, + 163 + ], + "type": "image", + "image_path": "c5a86776204449c5368751b3152a73bf026c11fa3045a933b301bbc371092f79.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 140, + 163, + 175, + 171 + ], + "lines": [ + { + "bbox": [ + 140, + 163, + 175, + 171 + ], + "spans": [ + { + "bbox": [ + 140, + 163, + 175, + 171 + ], + "type": "text", + "content": "(a) Input I" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 183, + 163, + 230, + 171 + ], + "lines": [ + { + "bbox": [ + 183, + 163, + 230, + 171 + ], + "spans": [ + { + "bbox": [ + 183, + 163, + 230, + 171 + ], + "type": "text", + "content": "(b) Interpolate" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 130, + 186, + 481, + 264 + ], + "lines": [ + { + "bbox": [ + 130, + 186, + 481, + 264 + ], + "spans": [ + { + "bbox": [ + 130, + 186, + 481, + 264 + ], + "type": "text", + "content": "Fig. 8 & Table 5: (Left) Visual comparison between linear interpolation and temporal embedding. We generate middle hand from two predicted 3D hands (red and white) in two ways: (b) linear interpolation, and (c) applying the temporal embedding value between temporal embeddings to obtain neighboring 3D hands. " + }, + { + "bbox": [ + 130, + 186, + 481, + 264 + ], + "type": "inline_equation", + "content": "\\times" + }, + { + "bbox": [ + 130, + 186, + 481, + 264 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 186, + 481, + 264 + ], + "type": "inline_equation", + "content": "\\times" + }, + { + "bbox": [ + 130, + 186, + 481, + 264 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 186, + 481, + 264 + ], + "type": "inline_equation", + "content": "\\times" + }, + { + "bbox": [ + 130, + 186, + 481, + 264 + ], + "type": "text", + "content": " show the same joint (tip of middle finger) at different time steps. (Right) Comparison of models at a novel time step. 
For BH, we show the metrics at time steps " + }, + { + "bbox": [ + 130, + 186, + 481, + 264 + ], + "type": "inline_equation", + "content": "t = 1.0" + }, + { + "bbox": [ + 130, + 186, + 481, + 264 + ], + "type": "text", + "content": ", while for EBH, we present the metrics at time steps " + }, + { + "bbox": [ + 130, + 186, + 481, + 264 + ], + "type": "inline_equation", + "content": "t = \\{0.4, 1.0\\}" + }, + { + "bbox": [ + 130, + 186, + 481, + 264 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 182, + 114, + 230, + 160 + ], + "blocks": [ + { + "bbox": [ + 182, + 114, + 230, + 160 + ], + "lines": [ + { + "bbox": [ + 182, + 114, + 230, + 160 + ], + "spans": [ + { + "bbox": [ + 182, + 114, + 230, + 160 + ], + "type": "image", + "image_path": "15553539fee7d02e74a3b1d9b542b686e1f6b4a74d34e39235af8f4eb5d8f24e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 231, + 114, + 280, + 160 + ], + "blocks": [ + { + "bbox": [ + 231, + 114, + 280, + 160 + ], + "lines": [ + { + "bbox": [ + 231, + 114, + 280, + 160 + ], + "spans": [ + { + "bbox": [ + 231, + 114, + 280, + 160 + ], + "type": "image", + "image_path": "ba113c5a7c8c0c6b9f2015cfeac3438ff74670dd6e363038813474876d97cb3e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 239, + 163, + 269, + 171 + ], + "lines": [ + { + "bbox": [ + 239, + 163, + 269, + 171 + ], + "spans": [ + { + "bbox": [ + 239, + 163, + 269, + 171 + ], + "type": "text", + "content": "(c) Ours" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 281, + 114, + 329, + 160 + ], + "blocks": [ + { + "bbox": [ + 281, + 114, + 329, + 160 + ], + "lines": [ + { + "bbox": [ + 281, + 114, + 329, + 160 + ], + "spans": [ + { + "bbox": [ + 281, + 114, + 329, + 160 + ], + "type": "image", + "image_path": "d9e088dff0d2c5e1f5ac9e2c9487a8a9bb7f5fa104eefdd61617e78da0e5bd2f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 291, + 163, + 315, + 171 + ], + "lines": [ + { + "bbox": [ + 291, + 163, + 315, + 171 + ], + "spans": [ + { + "bbox": [ + 291, + 163, + 315, + 171 + ], + "type": "text", + "content": "(d) GT" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 343, + 118, + 468, + 173 + ], + "blocks": [ + { + "bbox": [ + 343, + 118, + 468, + 173 + ], + "lines": [ + { + "bbox": [ + 343, + 118, + 468, + 173 + ], + "spans": [ + { + "bbox": [ + 343, + 118, + 468, + 173 + ], + "type": "table", + "html": "
DataMethods\\( MPJPE^{\\downarrow} \\) t = 0.4 t = 1.0\\( MPVPE^{\\downarrow} \\) t = 0.4 t = 1.0
BHLinear-18.12-16.81
Ours (I)-17.87-16.30
EBHLinear14.6617.5319.2017.95
Ours (I)14.3817.1518.7317.58
Ours (E)28.7431.1225.4125.92
Ours (I & E)14.0316.7616.8817.09
", + "image_path": "90e2e48a441d93a9b235979c9016e6c95e7aeeacdc64538e8fbfde25e2154ca1.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 285, + 482, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 285, + 482, + 416 + ], + "spans": [ + { + "bbox": [ + 130, + 285, + 482, + 416 + ], + "type": "text", + "content": "generate 3D hands at untrained time steps. For EBH, we evaluate 3D hand recovery at two novel time steps not included in the training phase. For BlurHand, our model is trained only on initial and middle hands (" + }, + { + "bbox": [ + 130, + 285, + 482, + 416 + ], + "type": "inline_equation", + "content": "t = \\{0,0.5\\}" + }, + { + "bbox": [ + 130, + 285, + 482, + 416 + ], + "type": "text", + "content": "), and we evaluate its performance in recovering hands located at the last time step (" + }, + { + "bbox": [ + 130, + 285, + 482, + 416 + ], + "type": "inline_equation", + "content": "t = \\{1\\}" + }, + { + "bbox": [ + 130, + 285, + 482, + 416 + ], + "type": "text", + "content": "). Tab. 5 compares the model's performance using three different input combinations with linear interpolation for reference. To clarify, linear interpolation outputs a hand at " + }, + { + "bbox": [ + 130, + 285, + 482, + 416 + ], + "type": "inline_equation", + "content": "t = 0.4" + }, + { + "bbox": [ + 130, + 285, + 482, + 416 + ], + "type": "text", + "content": " by linearly interpolating the neighboring hands, which are obtained by our EBHNet trained on " + }, + { + "bbox": [ + 130, + 285, + 482, + 416 + ], + "type": "inline_equation", + "content": "\\mathbf{I}" + }, + { + "bbox": [ + 130, + 285, + 482, + 416 + ], + "type": "text", + "content": " (e.g., " + }, + { + "bbox": [ + 130, + 285, + 482, + 416 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{0.4} = \\frac{1}{2}\\mathbf{V}_{0.2} + \\frac{1}{2}\\mathbf{V}_{0.6}" + }, + { + "bbox": [ + 130, + 285, + 482, + 416 + ], + "type": "text", + "content": "). The table shows the superior performance of our approach with both image and event and with only image. This is attributed to our temporal splitter and event stream, guiding the network to predict 3D hands at specific time steps." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 417, + 482, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 417, + 482, + 477 + ], + "spans": [ + { + "bbox": [ + 130, + 417, + 482, + 477 + ], + "type": "text", + "content": "Fig. 8 visually compares linear interpolation and our temporal embedding for generating novel hand sequences. In Fig. 8b, while linear interpolation restricts all articulation movements in a linear way (see purple line in Fig. 8), our method produces more plausible results by using the corresponding temporal embedding values, exhibiting motion trajectories similar to GT." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 131, + 492, + 335, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 492, + 335, + 504 + ], + "spans": [ + { + "bbox": [ + 131, + 492, + 335, + 504 + ], + "type": "text", + "content": "5.3 Comparisons with Previous Works" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": "Performance comparison. Tabs. 
6 and 7 clearly show that our EBHNet surpasses previous 3D hand mesh estimation methods. In Tab. 6, most prior approaches [12, 20, 22] do not account for motion blur in hand images, resulting in inaccuracies. While BlurHandNet [27] performs admirably by considering three time steps of the hand, it may overlook crucial hand information between those time steps, as it extracts features based on supervision with 3D hands at those specific time steps. Moreover, BlurHandNet consists of modules dedicated to each time step; thus, it cannot generate hands at novel time steps that were not included in the training phase. In contrast, our approach does not constrain the extracted feature " + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "inline_equation", + "content": "\mathbf{F}" + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": " to a 3D hand at particular time steps, outperforming prior methods by leveraging temporal information from the entire motion trajectory. Also, our EBHNet can generate 3D hands at novel time steps without requiring training data for those specific time points, distinguishing it from BlurHandNet." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "type": "text", + "content": "JK. Park et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 147, + 179, + 288, + 230 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 301, + 170 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 301, + 170 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 301, + 170 + ], + "type": "text", + "content": "Table 6: Comparison with SoTA methods on BH [27]. For MPVPE, we evaluate metrics at the midpoint of the motion. As BH provides only images, our results are based on image inputs." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 147, + 179, + 288, + 230 + ], + "lines": [ + { + "bbox": [ + 147, + 179, + 288, + 230 + ], + "spans": [ + { + "bbox": [ + 147, + 179, + 288, + 230 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Methods</td><td colspan="3">MPJPE↓</td><td rowspan="2">MPVPE↓</td><td rowspan="2">Accel↓</td></tr>
<tr><td>Init.</td><td>Mid.</td><td>Final</td></tr>
<tr><td>I2L-MeshNet [22]</td><td>-</td><td>24.32</td><td>-</td><td>23.07</td><td>-</td></tr>
<tr><td>Pose2Pose [20]</td><td>-</td><td>18.80</td><td>-</td><td>17.42</td><td>-</td></tr>
<tr><td>METRO [12]</td><td>-</td><td>20.54</td><td>-</td><td>27.03</td><td>-</td></tr>
<tr><td>BlurHandNet [27]</td><td>18.08</td><td>16.80</td><td>18.21</td><td>15.30</td><td>3.94</td></tr>
<tr><td>EBHNet (Ours)</td><td>17.23</td><td>16.45</td><td>17.17</td><td>15.02</td><td>3.37</td></tr>
", + "image_path": "739d296e568e24d90ffa87bced605e82949ea7356a8abda51136580b3aa3e62a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 314, + 168, + 481, + 230 + ], + "blocks": [ + { + "bbox": [ + 312, + 114, + 481, + 159 + ], + "lines": [ + { + "bbox": [ + 312, + 114, + 481, + 159 + ], + "spans": [ + { + "bbox": [ + 312, + 114, + 481, + 159 + ], + "type": "text", + "content": "Table 7: Comparison with SoTA methods on our proposed EBH. For MPVPE, we assess metrics for the hand at the midpoint of the motion (t=0.6)." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 314, + 168, + 481, + 230 + ], + "lines": [ + { + "bbox": [ + 314, + 168, + 481, + 230 + ], + "spans": [ + { + "bbox": [ + 314, + 168, + 481, + 230 + ], + "type": "table", + "html": "
InputMethods\\( MPJPE^{\\ddagger} \\)\\( MPVPE^{\\ddagger} \\)\\( Accel^{\\ddagger} \\)
t=0t=0.2t=0.6t=0.8
II2L-MeshNet [22]--28.12-30.86-
Pose2Pose [20]--17.28-20.41-
BlurHandNet [27]17.0815.5316.1317.5417.995.78
EBHNet (Ours)16.1014.2915.2316.1817.894.69
EEventHands [35]28.8029.8128.9130.9725.7010.75
EBHNet (Ours)28.9129.3528.5930.8025.309.25
I & EEBHNet (Ours)12.7312.0014.2416.3516.933.19
", + "image_path": "60b429a3ba48094c78ca7e277648874387fd444e5c41ed80faf255282d67b0f0.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 133, + 240, + 177, + 284 + ], + "blocks": [ + { + "bbox": [ + 133, + 240, + 177, + 284 + ], + "lines": [ + { + "bbox": [ + 133, + 240, + 177, + 284 + ], + "spans": [ + { + "bbox": [ + 133, + 240, + 177, + 284 + ], + "type": "image", + "image_path": "039b8b95277ed331e33037a1d4fa9ca3bc532af0a0ed625c5adefa712de48210.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 135, + 285, + 173, + 293 + ], + "lines": [ + { + "bbox": [ + 135, + 285, + 173, + 293 + ], + "spans": [ + { + "bbox": [ + 135, + 285, + 173, + 293 + ], + "type": "text", + "content": "(a) Event E" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 182, + 241, + 214, + 282 + ], + "blocks": [ + { + "bbox": [ + 182, + 241, + 214, + 282 + ], + "lines": [ + { + "bbox": [ + 182, + 241, + 214, + 282 + ], + "spans": [ + { + "bbox": [ + 182, + 241, + 214, + 282 + ], + "type": "image", + "image_path": "c72ed01e6b65d1d4b540f09f7b10102ff78d9d2a4387e363223933d99a363e5e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 184, + 285, + 211, + 293 + ], + "lines": [ + { + "bbox": [ + 184, + 285, + 211, + 293 + ], + "spans": [ + { + "bbox": [ + 184, + 285, + 211, + 293 + ], + "type": "text", + "content": "(b) [35]" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 222, + 243, + 260, + 281 + ], + "blocks": [ + { + "bbox": [ + 222, + 243, + 260, + 281 + ], + "lines": [ + { + "bbox": [ + 222, + 243, + 260, + 281 + ], + "spans": [ + { + "bbox": [ + 222, + 243, + 260, + 281 + ], + "type": "image", + "image_path": "70ced0da09006ab91bf5e141dc1f3de08986df2c22c6b859cdc6ec8b6f3d6f83.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 221, + 285, + 262, + 292 + ], + "lines": [ + { + "bbox": [ + 221, + 285, + 262, + 292 + ], + "spans": [ + { + "bbox": [ + 221, + 285, + 262, + 292 + ], + "type": "text", + "content": "(c) EBHNet" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 266, + 244, + 304, + 282 + ], + "blocks": [ + { + "bbox": [ + 266, + 244, + 304, + 282 + ], + "lines": [ + { + "bbox": [ + 266, + 244, + 304, + 282 + ], + "spans": [ + { + "bbox": [ + 266, + 244, + 304, + 282 + ], + "type": "image", + "image_path": "25c137b8b3710a1fe2fe74a219a7be74023311904e303e426dbfd128875b414b.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 274, + 285, + 297, + 292 + ], + "lines": [ + { + "bbox": [ + 274, + 285, + 297, + 292 + ], + "spans": [ + { + "bbox": [ + 274, + 285, + 297, + 292 + ], + "type": "text", + "content": "(d) GT" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 307, + 240, + 351, + 283 + ], + "blocks": [ + { + "bbox": [ + 307, + 240, + 351, + 283 + ], + "lines": [ + { + "bbox": [ + 307, + 240, + 351, + 283 + ], + "spans": [ + { + "bbox": [ + 307, + 240, + 351, + 283 + ], + "type": "image", + "image_path": "c7f3f1f62de32c53a85ecf3ed167a3393275da4009c755251133067fdc6bdc19.jpg" + } + ] + } + ], + "index": 14, + "angle": 
0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 285, + 347, + 293 + ], + "lines": [ + { + "bbox": [ + 309, + 285, + 347, + 293 + ], + "spans": [ + { + "bbox": [ + 309, + 285, + 347, + 293 + ], + "type": "text", + "content": "(e) Event E" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 132, + 307, + 480, + 330 + ], + "lines": [ + { + "bbox": [ + 132, + 307, + 480, + 330 + ], + "spans": [ + { + "bbox": [ + 132, + 307, + 480, + 330 + ], + "type": "text", + "content": "Fig. 9: Visual comparison with ours and EventHand [35]. When only the event is provided, ours yields results that closely resemble the corresponding ground truth." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 356, + 243, + 390, + 277 + ], + "blocks": [ + { + "bbox": [ + 356, + 243, + 390, + 277 + ], + "lines": [ + { + "bbox": [ + 356, + 243, + 390, + 277 + ], + "spans": [ + { + "bbox": [ + 356, + 243, + 390, + 277 + ], + "type": "image", + "image_path": "5482e82d7d5f52a4040ea21eb3a5011be13afade62e3caeaf388ae6f0aa56613.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 359, + 285, + 383, + 293 + ], + "lines": [ + { + "bbox": [ + 359, + 285, + 383, + 293 + ], + "spans": [ + { + "bbox": [ + 359, + 285, + 383, + 293 + ], + "type": "text", + "content": "(f) [35]" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 396, + 243, + 434, + 277 + ], + "blocks": [ + { + "bbox": [ + 396, + 243, + 434, + 277 + ], + "lines": [ + { + "bbox": [ + 396, + 243, + 434, + 277 + ], + "spans": [ + { + "bbox": [ + 396, + 243, + 434, + 277 + ], + "type": "image", + "image_path": "22802507b6651d0fd17d0f636c81c17a5f4dbcf7105ed10d7f758e97d58468ed.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 395, + 285, + 436, + 292 + ], + "lines": [ + { + "bbox": [ + 395, + 285, + 436, + 292 + ], + "spans": [ + { + "bbox": [ + 395, + 285, + 436, + 292 + ], + "type": "text", + "content": "(g) EBHNet" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 441, + 244, + 476, + 276 + ], + "blocks": [ + { + "bbox": [ + 441, + 244, + 476, + 276 + ], + "lines": [ + { + "bbox": [ + 441, + 244, + 476, + 276 + ], + "spans": [ + { + "bbox": [ + 441, + 244, + 476, + 276 + ], + "type": "image", + "image_path": "99d96a7b2e6ea9bff54a4df2f07eb210e44bf5f8e760bf84b6cbb0f35d33e08a.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 447, + 285, + 471, + 292 + ], + "lines": [ + { + "bbox": [ + 447, + 285, + 471, + 292 + ], + "spans": [ + { + "bbox": [ + 447, + 285, + 471, + 292 + ], + "type": "text", + "content": "(h) GT" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "bbox": [ + 130, + 354, + 480, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 354, + 480, + 461 + ], + "spans": [ + { + "bbox": [ + 130, + 354, + 480, + 461 + ], + "type": "text", + "content": "Moreover, Tab. 7 compares image-only, event-only, and both inputs in the EBH dataset. Modifying BlurHandNet [27] to predict four outputs, our EBHNet consistently outperforms prior methods, excelling against an event-based 3D recovery method [35]. 
The best results of our EBHNet are obtained with both event and image inputs. Also, Tabs. 6 and 7 compare the acceleration error (Accel), showing that EBHNet produces more temporally consistent outcomes by successfully addressing the real motion. Here, acceleration error is computed only for hand sequence generation methods [27,35]. Fig. 9 shows a visual comparison between our EBHNet and EventHand [35] when only event inputs are used." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 130, + 461, + 480, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 461, + 480, + 521 + ], + "spans": [ + { + "bbox": [ + 130, + 461, + 480, + 521 + ], + "type": "text", + "content": "Furthermore, Fig. 10 visually compares EBHNet with previous methods [20, 22,27] when using only image inputs. As shown, Pose2Pose [20] and I2L-MeshNet [22] even struggle to capture the hand pose, while BlurHandNet [27] fails to capture motion information, resulting in consistent outputs across time steps. In contrast, EBHNet successfully generates 3D hands at different time steps." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 130, + 521, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 521, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 521, + 481, + 665 + ], + "type": "text", + "content": "Efficiency comparison. Tab. 8 shows a comparison of the efficiency of our EBHNet with previous methods. Since all previous methods use images as input, we evaluate the efficiency of our EBHNet when using images only as input. We first compare our EBHNet with Pose2Pose [20] and I2L-MeshNet [22], which produce a single output, by considering EBHNet (1) generating only one output. Here, EBHNet (1) exhibits comparable computational efficiency while showing quantitatively superior results compared to Pose2Pose and I2L-MeshNet in Tables 6 and 7. Compared to BlurHandNet, we configure EBHNet to generate the same number of 3D hands for a fair comparison in EBHNet (3). In contrast to BlurHandNet [27], which requires additional layers for predicting multiple hands and results in a larger model size, EBHNet can generate 3D hands at different time steps using varied temporal embeddings, leading to a reduced model size." 
+ } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 337, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 337, + 91, + 447, + 100 + ], + "type": "text", + "content": "EBH Dataset and Network" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 138, + 114, + 479, + 215 + ], + "blocks": [ + { + "bbox": [ + 138, + 114, + 479, + 215 + ], + "lines": [ + { + "bbox": [ + 138, + 114, + 479, + 215 + ], + "spans": [ + { + "bbox": [ + 138, + 114, + 479, + 215 + ], + "type": "image", + "image_path": "0805e304eed22aeb094d56656e007e964f22371ea2c5203329e787f51a585b89.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 233, + 481, + 268 + ], + "lines": [ + { + "bbox": [ + 131, + 233, + 481, + 268 + ], + "spans": [ + { + "bbox": [ + 131, + 233, + 481, + 268 + ], + "type": "text", + "content": "Fig. 10: Visual comparison with previous methods on BH [27] and our EBH. Unlike other methods that generate a fixed number of 3D hands, EBHNet can produce a variable number of 3D hands. The red circles show severely blurry regions." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 169, + 321, + 447, + 348 + ], + "blocks": [ + { + "bbox": [ + 131, + 278, + 482, + 313 + ], + "lines": [ + { + "bbox": [ + 131, + 278, + 482, + 313 + ], + "spans": [ + { + "bbox": [ + 131, + 278, + 482, + 313 + ], + "type": "text", + "content": "Table 8: Efficiency comparison with previous methods. Our EBHNet doesn't require extra parameters for recovering 3D hands at additional time steps, maintaining the same number of parameters whether predicting a single hand or three hands." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 169, + 321, + 447, + 348 + ], + "lines": [ + { + "bbox": [ + 169, + 321, + 447, + 348 + ], + "spans": [ + { + "bbox": [ + 169, + 321, + 447, + 348 + ], + "type": "table", + "html": "
<table><tr><td>Metrics</td><td>Pose2Pose [20]</td><td>I2L-MeshNet [22]</td><td>BlurHandNet [27]</td><td>EBHNet (1)</td><td>EBHNet (3)</td></tr>
<tr><td>Num of params (MB)↓</td><td>77</td><td>141</td><td>202</td><td>146</td><td>146</td></tr>
<tr><td>Latency (fps)↑</td><td>26.31</td><td>25.76</td><td>14.43</td><td>25.12</td><td>15.45</td></tr></table>
", + "image_path": "194148debb20b7632bb01b567738edc69b4fd3034c14526ac184d578761b7c13.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 369, + 222, + 382 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 369, + 222, + 382 + ], + "spans": [ + { + "bbox": [ + 132, + 369, + 222, + 382 + ], + "type": "text", + "content": "6 Limitations" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 394, + 482, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 394, + 482, + 456 + ], + "spans": [ + { + "bbox": [ + 130, + 394, + 482, + 456 + ], + "type": "text", + "content": "Our EBH dataset includes hands with various real motion blur and corresponding 3D annotations, marking a crucial step in addressing key community challenges. However, since our data were captured from 10 individuals, it may lack shape diversity. We will address this issue by designing additional modules for shape generalization or constructing a large-scale dataset from various people." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 472, + 220, + 485 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 472, + 220, + 485 + ], + "spans": [ + { + "bbox": [ + 132, + 472, + 220, + 485 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 498, + 482, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 498, + 482, + 617 + ], + "spans": [ + { + "bbox": [ + 130, + 498, + 482, + 617 + ], + "type": "text", + "content": "This work tackles the challenging problem of recovering a 3D hand sequence from a blurry hand. To this end, we introduce the EBH, the first dataset that includes real blurry hand images, their corresponding 3D ground truths, and continuous temporal information from an event stream. In conjunction with the EBH dataset, we propose EBHNet, a method for generating 3D hand sequences from a single blurry hand input in diverse combinations. Our experiments show the efficacy of the EBH and EBHNet in enhancing 3D hand sequence recovery from blurry hands, with the ability to generate hand sequences at novel time steps. Our contributions offer future insights for addressing motion blur in 3D hand recovery." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 617, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 617, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 131, + 617, + 482, + 666 + ], + "type": "text", + "content": "Acknowledgments. This work was supported in part by the IITP grants [No. 2021-0-01343, Artificial Intelligence Graduate School Program (Seoul National University), No.2021-0-02068, and No.2023-0-00156], the NRF grant [No. 2021M3A9E4080782] funded by the Korean government (MSIT)." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "type": "text", + "content": "JK. Park et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 114, + 197, + 126 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 114, + 197, + 126 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 197, + 126 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 140, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 138, + 140, + 480, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 140, + 480, + 161 + ], + "spans": [ + { + "bbox": [ + 138, + 140, + 480, + 161 + ], + "type": "text", + "content": "1. Boukhayma, A., Bem, R.d., Torr, P.H.: 3D hand shape and pose from images in the wild. In: CVPR (2019)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 162, + 480, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 162, + 480, + 183 + ], + "spans": [ + { + "bbox": [ + 138, + 162, + 480, + 183 + ], + "type": "text", + "content": "2. Chen, L., Chu, X., Zhang, X., Sun, J.: Simple baselines for image restoration. In: ECCV (2022)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 184, + 480, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 184, + 480, + 205 + ], + "spans": [ + { + "bbox": [ + 138, + 184, + 480, + 205 + ], + "type": "text", + "content": "3. Choi, H., Moon, G., Lee, K.M.: Pose2Mesh: Graph convolutional network for 3D human pose and mesh recovery from a 2D human pose. In: ECCV (2020)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 206, + 480, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 206, + 480, + 227 + ], + "spans": [ + { + "bbox": [ + 138, + 206, + 480, + 227 + ], + "type": "text", + "content": "4. Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: A large-scale hierarchical image database. In: CVPR (2009)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 228, + 480, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 228, + 480, + 249 + ], + "spans": [ + { + "bbox": [ + 138, + 228, + 480, + 249 + ], + "type": "text", + "content": "5. Hampali, S., Rad, M., Oberweger, M., Lepetit, V.: Honnotate: A method for 3D annotation of hand and object poses. In: CVPR (2020)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 250, + 480, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 250, + 480, + 282 + ], + "spans": [ + { + "bbox": [ + 138, + 250, + 480, + 282 + ], + "type": "text", + "content": "6. Hampali, S., Sarkar, S.D., Rad, M., Lepetit, V.: Keypoint Transformer: Solving joint identification in challenging hands and object interactions for accurate 3D pose estimation. In: CVPR (2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 283, + 480, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 283, + 480, + 304 + ], + "spans": [ + { + "bbox": [ + 138, + 283, + 480, + 304 + ], + "type": "text", + "content": "7. Jaderberg, M., Simonyan, K., Zisserman, A., et al.: Spatial transformer networks. 
In: NeurIPS (2015)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 304, + 480, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 304, + 480, + 326 + ], + "spans": [ + { + "bbox": [ + 138, + 304, + 480, + 326 + ], + "type": "text", + "content": "8. Joo, H., Neverova, N., Vedaldi, A.: Exemplar fine-tuning for 3D human model fitting towards in-the-wild 3D human pose estimation. In: 3DV (2021)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 327, + 480, + 347 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 327, + 480, + 347 + ], + "spans": [ + { + "bbox": [ + 138, + 327, + 480, + 347 + ], + "type": "text", + "content": "9. Kanazawa, A., Zhang, J.Y., Felsen, P., Malik, J.: Learning 3d human dynamics from video. In: CVPR (2019)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 348, + 480, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 348, + 480, + 369 + ], + "spans": [ + { + "bbox": [ + 138, + 348, + 480, + 369 + ], + "type": "text", + "content": "10. Kulon, D., Guler, R.A., Kokkinos, I., Bronstein, M.M., Zafeiriou, S.: Weakly-supervised mesh-convolutional hand reconstruction in the wild. In: CVPR (2020)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 370, + 480, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 370, + 480, + 392 + ], + "spans": [ + { + "bbox": [ + 138, + 370, + 480, + 392 + ], + "type": "text", + "content": "11. Li, Z., Liu, J., Zhang, Z., Xu, S., Yan, Y.: CLIFF: Carrying location information in full frames into human pose and shape estimation. In: ECCV (2022)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 392, + 480, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 392, + 480, + 413 + ], + "spans": [ + { + "bbox": [ + 138, + 392, + 480, + 413 + ], + "type": "text", + "content": "12. Lin, K., Wang, L., Liu, Z.: End-to-end human pose and mesh reconstruction with transformers. In: CVPR (2021)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 414, + 404, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 414, + 404, + 425 + ], + "spans": [ + { + "bbox": [ + 138, + 414, + 404, + 425 + ], + "type": "text", + "content": "13. Lin, K., Wang, L., Liu, Z.: Mesh graphormer. In: ICCV (2021)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 426, + 480, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 426, + 480, + 446 + ], + "spans": [ + { + "bbox": [ + 138, + 426, + 480, + 446 + ], + "type": "text", + "content": "14. Lin, M., Chen, Q., Yan, S.: Network in network. arXiv preprint arXiv:1312.4400 (2013)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 447, + 480, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 447, + 480, + 468 + ], + "spans": [ + { + "bbox": [ + 138, + 447, + 480, + 468 + ], + "type": "text", + "content": "15. Lin, S., Yang, L., Saleemi, I., Sengupta, S.: Robust high-resolution video matting with temporal guidance. In: WACV (2022)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 138, + 469, + 480, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 469, + 480, + 491 + ], + "spans": [ + { + "bbox": [ + 138, + 469, + 480, + 491 + ], + "type": "text", + "content": "16. 
Lin, T.Y., Dollár, P., Girshick, R., He, K., Hariharan, B., Belongie, S.: Feature pyramid networks for object detection. In: CVPR (2017)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 138, + 491, + 480, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 491, + 480, + 511 + ], + "spans": [ + { + "bbox": [ + 138, + 491, + 480, + 511 + ], + "type": "text", + "content": "17. Liu, S., Jiang, H., Xu, J., Liu, S., Wang, X.: Semi-supervised 3D hand-object poses estimation with interactions in time. In: CVPR (2021)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 138, + 512, + 480, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 512, + 480, + 533 + ], + "spans": [ + { + "bbox": [ + 138, + 512, + 480, + 533 + ], + "type": "text", + "content": "18. Messikommer, N., Fang, C., Gehrig, M., Scaramuzza, D.: Data-driven feature tracking for event cameras. In: CVPR (2023)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 138, + 534, + 480, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 534, + 480, + 567 + ], + "spans": [ + { + "bbox": [ + 138, + 534, + 480, + 567 + ], + "type": "text", + "content": "19. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. ACM (2021)" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 138, + 567, + 480, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 567, + 480, + 589 + ], + "spans": [ + { + "bbox": [ + 138, + 567, + 480, + 589 + ], + "type": "text", + "content": "20. Moon, G., Choi, H., Lee, K.M.: Accurate 3D hand pose estimation for whole-body 3D human mesh estimation. In: CVPRW (2022)" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 138, + 590, + 480, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 590, + 480, + 610 + ], + "spans": [ + { + "bbox": [ + 138, + 590, + 480, + 610 + ], + "type": "text", + "content": "21. Moon, G., Choi, H., Lee, K.M.: Neuralannot: Neural annotator for 3d human mesh training sets. In: CVPR (2022)" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 138, + 611, + 480, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 611, + 480, + 632 + ], + "spans": [ + { + "bbox": [ + 138, + 611, + 480, + 632 + ], + "type": "text", + "content": "22. Moon, G., Lee, K.M.: I2L-MeshNet: Image-to-lixel prediction network for accurate 3D human pose and mesh estimation from a single RGB image. In: ECCV (2020)" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 138, + 633, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 633, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 633, + 480, + 665 + ], + "type": "text", + "content": "23. Moon, G., Saito, S., Xu, W., Joshi, R., Buffalini, J., Bellan, H., Rosen, N., Richardson, J., Mize, M., De Bree, P., et al.: A dataset of relighted 3D interacting hands. 
In: NeurIPS (2023)" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 337, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 337, + 91, + 447, + 100 + ], + "type": "text", + "content": "EBH Dataset and Network" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 480, + 621 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 132, + 116, + 480, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 480, + 149 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 480, + 149 + ], + "type": "text", + "content": "24. Moon, G., Yu, S.I., Wen, H., Shiratori, T., Lee, K.M.: Interhand2.6M: A dataset and baseline for 3D interacting hand pose estimation from a single RGB image. In: ECCV (2020)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 150, + 480, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 150, + 480, + 171 + ], + "spans": [ + { + "bbox": [ + 132, + 150, + 480, + 171 + ], + "type": "text", + "content": "25. Nah, S., Kim, T.H., Lee, K.M.: Deep multi-scale convolutional neural network for dynamic scene deblurring. In: CVPR (2017)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 172, + 480, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 172, + 480, + 193 + ], + "spans": [ + { + "bbox": [ + 132, + 172, + 480, + 193 + ], + "type": "text", + "content": "26. Nehvi, J., Golyanik, V., Mueller, F., Seidel, H.P., Elgharib, M., Theobalt, C.: Differentiable event stream simulator for non-rigid 3d tracking. In: CVPR (2021)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 194, + 480, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 194, + 480, + 226 + ], + "spans": [ + { + "bbox": [ + 132, + 194, + 480, + 226 + ], + "type": "text", + "content": "27. Oh, Y., Park, J., Kim, J., Moon, G., Lee, K.M.: Recovering 3d hand mesh sequence from a single blurry image: A new dataset and temporal unfolding. In: CVPR (2023)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 227, + 480, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 227, + 480, + 248 + ], + "spans": [ + { + "bbox": [ + 132, + 227, + 480, + 248 + ], + "type": "text", + "content": "28. Ozawa, T., Sekikawa, Y., Saito, H.: Accuracy and speed improvement of event camera motion estimation using a bird's-eye view transformation. Sensors (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 249, + 480, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 249, + 480, + 270 + ], + "spans": [ + { + "bbox": [ + 132, + 249, + 480, + 270 + ], + "type": "text", + "content": "29. Park, J., Jung, D.S., Moon, G., Lee, K.M.: Extract-and-adaptation network for 3d interacting hand mesh recovery. 
In: ICCVW (2023)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 271, + 480, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 271, + 480, + 303 + ], + "spans": [ + { + "bbox": [ + 132, + 271, + 480, + 303 + ], + "type": "text", + "content": "30. Park, J., Nah, S., Lee, K.M.: Pay attention to hidden states for video deblurring: Ping-pong recurrent neural networks and selective non-local attention. arXiv preprint arXiv:2203.16063 (2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 304, + 480, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 304, + 480, + 325 + ], + "spans": [ + { + "bbox": [ + 132, + 304, + 480, + 325 + ], + "type": "text", + "content": "31. Park, J., Nah, S., Lee, K.M.: Recurrence-in-recurrence networks for video deblurring. In: BMVC (2022)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 325, + 480, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 325, + 480, + 346 + ], + "spans": [ + { + "bbox": [ + 132, + 325, + 480, + 346 + ], + "type": "text", + "content": "32. Park, J., Oh, Y., Moon, G., Choi, H., Lee, K.M.: Handoccnet: Occlusion-robust 3D hand mesh estimation network. In: CVPR (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 347, + 480, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 347, + 480, + 369 + ], + "spans": [ + { + "bbox": [ + 132, + 347, + 480, + 369 + ], + "type": "text", + "content": "33. Romero, J., Tzionas, D., Black, M.J.: Embodied hands: Modeling and capturing hands and bodies together. SIGGRAPH Asia (2017)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 369, + 480, + 391 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 369, + 480, + 391 + ], + "spans": [ + { + "bbox": [ + 132, + 369, + 480, + 391 + ], + "type": "text", + "content": "34. Rong, Y., Shiratori, T., Joo, H.: FrankMocap: A monocular 3D whole-body pose estimation system via regression and integration. In: ICCVW (2021)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 392, + 480, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 392, + 480, + 423 + ], + "spans": [ + { + "bbox": [ + 132, + 392, + 480, + 423 + ], + "type": "text", + "content": "35. Rudnev, V., Golyanik, V., Wang, J., Seidel, H.P., Mueller, F., Elgharib, M., Theobalt, C.: Eventhands: Real-time neural 3d hand pose estimation from an event stream. In: ICCV (2021)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 424, + 480, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 424, + 480, + 445 + ], + "spans": [ + { + "bbox": [ + 132, + 424, + 480, + 445 + ], + "type": "text", + "content": "36. Shen, Z., Wang, W., Shen, J., Ling, H., Xu, T., Shao, L.: Human-aware motion deblurring. In: ICCV (2019)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 446, + 480, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 446, + 480, + 468 + ], + "spans": [ + { + "bbox": [ + 132, + 446, + 480, + 468 + ], + "type": "text", + "content": "37. Sun, X., Xiao, B., Wei, F., Liang, S., Wei, Y.: Integral human pose regression. 
In: ECCV (2018)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 468, + 480, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 468, + 480, + 490 + ], + "spans": [ + { + "bbox": [ + 132, + 468, + 480, + 490 + ], + "type": "text", + "content": "38. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: NeurIPS (2017)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 491, + 480, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 491, + 480, + 522 + ], + "spans": [ + { + "bbox": [ + 132, + 491, + 480, + 522 + ], + "type": "text", + "content": "39. Zhang, F., Bazarevsky, V., Vakunov, A., Tkachenka, A., Sung, G., Chang, C.L., Grundmann, M.: Mediapipe hands: On-device real-time hand tracking. arXiv preprint arXiv:2006.10214 (2020)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 523, + 480, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 523, + 480, + 544 + ], + "spans": [ + { + "bbox": [ + 132, + 523, + 480, + 544 + ], + "type": "text", + "content": "40. Zhang, S., Wang, W., Li, H., Zhang, S.: Evtracker: An event-driven spatiotemporal method for dynamic object tracking. Sensors (2022)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 132, + 545, + 480, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 545, + 480, + 567 + ], + "spans": [ + { + "bbox": [ + 132, + 545, + 480, + 567 + ], + "type": "text", + "content": "41. Zhong, Z., Gao, Y., Zheng, Y., Zheng, B.: Efficient spatio-temporal recurrent neural network for video deblurring. In: ECCV (2020)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 132, + 567, + 480, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 567, + 480, + 588 + ], + "spans": [ + { + "bbox": [ + 132, + 567, + 480, + 588 + ], + "type": "text", + "content": "42. Zimmermann, C., Brox, T.: Learning to estimate 3D hand pose from single RGB images. In: ICCV (2017)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 132, + 589, + 480, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 589, + 480, + 621 + ], + "spans": [ + { + "bbox": [ + 132, + 589, + 480, + 621 + ], + "type": "text", + "content": "43. Zimmermann, C., Ceylan, D., Yang, J., Russell, B., Argus, M., Brox, T.: Freihand: A dataset for markerless capture of hand pose and shape from single RGB images. In: ICCV (2019)" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 228, + 100 + ], + "type": "text", + "content": "JK. Park et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/38d38aa6-38d5-486e-8643-4f15fed7d372_content_list.json b/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/38d38aa6-38d5-486e-8643-4f15fed7d372_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..4027818b130682e48b96655d0c95a695054a7815 --- /dev/null +++ b/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/38d38aa6-38d5-486e-8643-4f15fed7d372_content_list.json @@ -0,0 +1,1582 @@ +[ + { + "type": "text", + "text": "3D Human Pose Estimation via Non-Causal Retentive Networks", + "text_level": 1, + "bbox": [ + 250, + 140, + 754, + 184 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kaili Zheng $^{1}$ , Feixiang Lu $^{2}$ , Yihao Lv $^{2}$ , Liangjun Zhang $^{2}$ , Chenyi Guo $^{1\\boxtimes}$ , and Ji Wu $^{1,3,4\\boxtimes}$", + "bbox": [ + 243, + 210, + 759, + 244 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Department of Electronic Engineering, Tsinghua University $^{2}$ Baidu Research", + "bbox": [ + 295, + 253, + 705, + 280 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3 College of AI, Tsinghua University \n4 Beijing National Research Center for Information Science and Technology \nzk122@mails.tsinghua.edu.cn", + "bbox": [ + 246, + 281, + 753, + 324 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. Temporal dependencies are essential in 3D human pose estimation to mitigate depth ambiguity. Previous methods typically use a fixed-length sliding window to capture these dependencies. However, they treat past and future frames equally, ignoring the fact that relying on too many future frames increases the inference latency. In this paper, we present a 3D human pose estimation model based on Retentive Networks (RetNet) that incorporates temporal information by utilizing a large number of past frames and a few future frames. The Non-Causal RetNet (NC-RetNet) is designed to allow the originally causal RetNet to be aware of future information. Additionally, we propose a knowledge transfer strategy, i.e., training the model with a larger chunk size and using a smaller chunk size during inference, to reduce latency while maintaining comparable accuracy. Extensive experiments have been conducted on the Human3.6M and MPI-INF-3DHP datasets, and the results demonstrate that our method achieves state-of-the-art performance. Code and models are available at https://github.com/Kelly510/PoseRetNet.", + "bbox": [ + 261, + 376, + 743, + 598 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: 3D Human Pose Estimation $\\cdot$ Temporal Dependency $\\cdot$ Retentive Networks", + "bbox": [ + 261, + 609, + 740, + 638 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 665, + 375, + 681 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Monocular 3D Human Pose Estimation (HPE) aims to reconstruct the 3D positions of human body joints based on monocular observations. This popular computer vision task has a wide range of applications, including action recognition [44], human-robot interaction [37] and motion analysis [11]. 
Most of the previous works [2,22,39-41,45,48] adopt the 2D-to-3D lifting pipeline which predicts 3D human pose based on 2D keypoint detection results. It is challenging due to the depth ambiguity issue, namely, one 2D detection result may correspond to multiple 3D human skeletons.", + "bbox": [ + 212, + 695, + 787, + 816 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$\\boxtimes$ denotes corresponding author.", + "bbox": [ + 215, + 824, + 433, + 839 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d43a6bc9dcd8f883044c0b3e11f2c13a09273476a056116aaf2462a2aebd94b8.jpg", + "image_caption": [ + "Fig. 1: (Left) The framework of our method, which utilizes long-term historical information from the cross-chunk state and relies on only a few future frames within the chunk. The past, current, and future frames are denoted by blue, green, and red borders, respectively. (Right) Comparison of Mean Per-Joint Position Error (MPJPE) on the Human3.6M dataset under different test chunk sizes. Our method outperforms previous state-of-the-art remarkably, especially under small chunk sizes." + ], + "image_footnote": [], + "bbox": [ + 217, + 146, + 787, + 311 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To mitigate the depth ambiguity, monocular 3D human pose estimation models usually take multiple frames as the input and exploit additional temporal dependencies of human pose to reduce the ambiguity [1, 27, 29, 43, 46, 49, 50]. Specifically, a sliding-window of fixed length is usually adopted to capture the temporal dependencies, where the length of window is referred to as the number of frames or chunk size. A larger chunk size typically results in better accuracy performance as it allows for the perception of more long-range temporal information. However, previous methods treat past and future frames equally, and a larger chunk size also means that the model relies on the arrival of more future frames before inference, which significantly increases the inference latency. For instance, consider the seq2frame framework, which aims to predict the 3D pose of the center frame among the input frames. If the chunk size is 243 and the input frame rate is $10\\mathrm{Hz}$ , the inference latency will be $(243 - 1)\\div 2\\div 10 = 12.1$ seconds. For seq2seq framework in the same case, the inference latency for the first frame within the chunk is $(243 - 1)\\div 10 = 24.2$ seconds and that for the last frame is zero. The average latency is 12.1 seconds as well. This is considerably longer than the forward time of the model itself.", + "bbox": [ + 212, + 445, + 787, + 700 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address this problem, we propose a 3D human pose estimation model based on Retentive Networks (RetNet) [35]. Fig. 1(left) illustrates the framework of our method. Different from previous methods that use similar amounts of past and future frames to incorporate temporal information, our method mainly extracts temporal information from past frames (blue) and uses only a few future frames (red) within the current chunk for refinement. The RetNet can easily capture long-term historical information by using the cross-chunk state, and the Non-Causal RetNet (NC-RetNet) is further designed to make the originally causal RetNet be aware of the future frames. 
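To make the latency arithmetic above easy to check, the following is a minimal sketch in plain Python; it is purely illustrative, and the function names are ours rather than the paper's:

# Minimal sketch of the inference-latency arithmetic discussed above.
# Assumption: a seq2frame model predicts the center frame of its window,
# so it must wait for (chunk_size - 1) / 2 future frames to arrive.

def seq2frame_latency_s(chunk_size: int, fps: float) -> float:
    """Seconds spent waiting for future frames before predicting the center frame."""
    return (chunk_size - 1) / 2 / fps

def seq2seq_mean_latency_s(chunk_size: int, fps: float) -> float:
    """Mean wait over a chunk: the first frame waits (T - 1) / fps, the last waits 0."""
    return (chunk_size - 1) / 2 / fps

print(seq2frame_latency_s(243, 10.0))     # 12.1 seconds, matching the example above
print(seq2seq_mean_latency_s(243, 10.0))  # also 12.1 seconds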
Moreover, we develop a knowl", + "bbox": [ + 212, + 704, + 787, + 839 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "K. Zheng et al.", + "bbox": [ + 271, + 114, + 372, + 128 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "edge transfer strategy of training the model with a large chunk size and using a small chunk size during inference. Thanks to the long-term historical information brought by the cross-chunk state, decreasing the test chunk size does not significantly affect performance, as shown in Fig. 1(right), but greatly reduces the inference latency.", + "bbox": [ + 212, + 146, + 782, + 219 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Extensive experiments have been conducted on two datasets, Human3.6M [14] and MPI-INF-3DHP [23], both quantitatively and qualitatively. The results demonstrate that our method outperforms state-of-the-art with a clear margin in terms of accuracy and continuity, especially when the model infers with a small chunk size. Our method even surpasses state-of-the-art with a smaller chunk size during inference. The ablation study also validates the efficacy of the components in our method. Our main contributions can be summarized as follows.", + "bbox": [ + 212, + 220, + 782, + 325 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. This is the first study to investigate the potential of RetNet in 3D human pose estimation. And we introduce NC-RetNet to extract temporal information, which leverages past frames through the cross-chunk state and a limited number of future frames within the chunk.", + "2. The NC-RetNet can be trained using a large chunk size and infer using a small chunk size without significant performance deterioration, but with a notable decrease in inference latency.", + "3. Extensive experiments have been conducted and the results demonstrate that our method is the state-of-the-art in terms of accuracy and continuity, especially when the test chunk size is small." + ], + "bbox": [ + 220, + 339, + 784, + 488 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 215, + 511, + 387, + 527 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 3D Human Pose Estimation", + "text_level": 1, + "bbox": [ + 215, + 542, + 491, + 556 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Monocular 3D human pose estimation is a fundamental computer vision task with a broad range of applications. Direct estimation of the 3D positions of human joints from raw image pixels [26, 34] is difficult not only because of the complexity of extracting image features, but also due to the lack of image-3D data pairs. For these reasons, Martinez et al. [22] propose to estimate 3D human pose in a two-stage manner: detect 2D keypoints from images first and then lift 2D to 3D. Since this approach can utilize existing 2D pose estimation systems [3, 19, 25, 33, 42] and a large amount of 3D motion capture data, it has received a lot of attention. In this paper, we also focus on the 2D-to-3D lifting task. Although there are methods such as [48] propose to leverage visual cues only to mitigate depth ambiguity, these methods are unable to produce reconstructions with good continuity. 
Therefore, temporal dependencies are very crucial for monocular human pose estimation models.", + "bbox": [ + 212, + 566, + 784, + 763 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 Exploitation of Temporal Dependencies", + "text_level": 1, + "bbox": [ + 215, + 785, + 586, + 800 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Previous methods mostly adopt four architectures to exploit temporal dependencies: CNN, RNN, GCN [15] and transformer [21]. For example, to model", + "bbox": [ + 212, + 809, + 782, + 839 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "3D HPE via Non-Causal Retentive Networks", + "bbox": [ + 431, + 114, + 730, + 127 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the temporal dependencies of human motion, Pavllo et al. [27] propose a temporal convolution model that utilizes dilated temporal convolutions to capture long-term information and model the temporal dependencies of human motion. The temporal receptive field depends on the dilation ratio and the number of layers. Similarly, Choi et al. [5] utilizes GRU [7] to extract features from the past frames and future frames within a fixed-length window respectively before integration. Cai et al. [1] exploit graph convolutions [15] to model the graph structure of different human joints. Along the time axis, this method treats the joints at different time steps as the graph nodes where any two consecutive joints are adjacent in the graph. Poseformer [50] proposed by Zheng et al. is the first work to introduce transformers to 3D human pose estimation task. This model incorporates the Spatial Transformer Module to encode the geometric structure of the human pose in a single frame into a token, and the Temporal Transformer Encoder to model temporal dependencies between frames. Since then, a lot of works [8,10,16-18,51] have emerged to explore the potential of transformers in 3D human pose estimation.", + "bbox": [ + 212, + 146, + 787, + 387 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Although these methods leverage different architectures to extract spatial-temporal information from 2D sequences, they share a common framework that employs a fixed number of frames to predict the result. Moreover, the chunk size has a significant impact on the accuracy, and a larger chunk size is usually beneficial for performance. However, previous works have not taken into account that a larger chunk size also significantly increases the inference delay. This motivates us to develop a method that balances the accuracy and inference latency better.", + "bbox": [ + 212, + 388, + 787, + 508 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.3 Real-time Human Pose Estimation", + "text_level": 1, + "bbox": [ + 214, + 530, + 549, + 544 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In addition to accuracy, low inference latency is also desired for human pose estimation models in many scenarios, and significant efforts have been devoted to reducing the inference latency. On one hand, since human pose estimation models typically use a backbone model to extract image features, general-purpose lightweight backbones [9,13,30,47], can be used directly to replace the backbone in HPE models [6]. On the other hand, simplifying the pipeline can improve the model's efficiency. 
For example, Vnect [24] is proposed to combine the bounding box detection, 2D keypoint detection, and 2D-to-3D lifting into one model. However, existing methods only focus on decreasing the forward time of the HPE models, but do not consider the inference latency caused by large chunk sizes, as our method does.", + "bbox": [ + 212, + 553, + 787, + 720 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.4 Length Extrapolation", + "text_level": 1, + "bbox": [ + 214, + 739, + 441, + 757 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Models in natural language processing are expected to be generalizable across sequences of varying lengths, particularly to sequences longer than the training samples. This desired property is called length extrapolation. To achieve this, the use of relative position embedding, such as RoPE [32] and xPos [36], is necessary because it does not require the input sequences to be of fixed length.", + "bbox": [ + 212, + 763, + 787, + 840 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "K. Zheng et al.", + "bbox": [ + 271, + 114, + 372, + 128 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Additionally, there are methods [4, 28] to improve the format of the attention module to achieve length extrapolation. For example, ALIBI [28] proposes to subtract the absolute temporal distance of two tokens from the attention score, which enhances the performance on extremely long sequences. Our knowledge transfer strategy is similar to length extrapolation, except that we concentrate on the model's transition from large chunks to smaller ones.", + "bbox": [ + 212, + 146, + 782, + 237 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 215, + 260, + 330, + 276 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 Preliminary", + "text_level": 1, + "bbox": [ + 215, + 292, + 359, + 306 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "RetNet [35] is a sequence modeling network that produces a contextualized feature sequence of length $L$ given an input sequence $X \\in \\mathbb{R}^{L \\times d}$ . The basic module of RetNet is retention, which has three mathematically equivalent representations: parallel, recurrent, and chunkwise recurrent. We present a detailed explanation of these representations below.", + "bbox": [ + 212, + 316, + 782, + 393 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Parallel Given the input sequence $X$ , the query $Q$ and key $K$ are derived by applying the linear projection and RoPE. $P_{q}, P_{k}$ are the rotary position embedding for the query and the key respectively. The value $V$ is obtained by the linear projection only. $D \\in \\mathbb{R}^{L \\times L}$ is the combination of causal masking and exponential decay with respect to the relative distance. $\\odot$ denotes the element-wise product. 
Since the value in the mask is non-zero only when the reference token $(m^{th})$ is earlier than the target token $(n^{th})$ , RetNet is a fully causal model.", + "bbox": [ + 212, + 415, + 784, + 521 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nQ = P _ {q} \\left(X W _ {Q}\\right), K = P _ {k} \\left(X W _ {K}\\right), V = X W _ {V}\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 534, + 655, + 549 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nD _ {n m} = \\left\\{ \\begin{array}{l l} \\gamma^ {n - m}, & n \\geq m \\\\ 0, & n < m \\end{array} \\right. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 553, + 784, + 592 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {R e t e n t i o n} (X) = \\left(Q K ^ {T} \\odot D\\right) V\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 595, + 565, + 611 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Recurrent $S_{n} \\in \\mathbb{R}^{d \\times d}$ represents the state of time step $n$ , and $Q_{n}, K_{n}, V_{n}$ is the value of the same $Q, K, V$ in Eq. (1) at time step $n$ . This representation also shows that RetNet is entirely causal, as the output for the $n^{th}$ frame depends solely on the previous state $S_{n-1}$ and the $n^{th}$ input.", + "bbox": [ + 212, + 633, + 782, + 696 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nS _ {n} = \\gamma S _ {n - 1} + K _ {n} ^ {T} V _ {n} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 374, + 707, + 784, + 731 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {R e t e n t i o n} \\left(X _ {n}\\right) = Q _ {n} S _ {n}, n = 1, \\dots , L\n$$\n", + "text_format": "latex", + "bbox": [ + 374, + 729, + 629, + 742 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Chunkwise Recurrent This representation is the hybrid form of the above two representations. Suppose the input sequence is segmented into chunks of length $T$ . Denote $X_{iT:(i+1)T}$ as $X_{[i]}$ , where $[i]$ indicates the $i$ -th chunk. Within the chunk, the model follows the parallel representation and the cross-chunk information is passed following the recurrent representation. The $D$ here is similar", + "bbox": [ + 212, + 763, + 784, + 840 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "3D HPE via Non-Causal Retentive Networks", + "bbox": [ + 431, + 114, + 732, + 127 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/f727394a1ee857546ce8c1acd6a4eaf4314f7f883b2ccf713669ca8611a62375.jpg", + "image_caption": [ + "Fig. 2: (Left) The causal masks in the parallel and chunkwise recurrent representations of the original RetNet. The model can only perceive historical frames although there are several future frames in the current chunk. (Right) We propose Non-Causal RetNet (NC-RetNet), which utilizes all the frames within current chunk using the full mask and can be trained in parallel with the staircase-shaped mask." + ], + "image_footnote": [], + "bbox": [ + 222, + 146, + 782, + 289 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "to that in Eq. (1), but its shape changes from $L \\times L$ to $T \\times T$ . The $D$ 's in Eq. (1) and Eq. (3) are illustrated in Fig. 2(left). 
$\\zeta$ and $\\xi$ are both $T \\times d$ matrices and the $r$ -th row of them is $\\gamma^{T - r - 1}$ and $\\gamma^{r + 1}$ respectively.", + "bbox": [ + 212, + 398, + 787, + 446 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nS _ {i} = K _ {[ i ]} ^ {T} (V _ {[ i ]} \\odot \\zeta) + \\gamma^ {T} S _ {i - 1}\n$$\n", + "text_format": "latex", + "bbox": [ + 312, + 454, + 517, + 477 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {R e t e n t i o n} \\left(X _ {[ i ]}\\right) = \\underbrace {\\left(Q _ {[ i ]} K _ {[ i ]} ^ {T} \\odot D\\right) V _ {[ i ]}} _ {\\text {I n n e r - C h u n k}} + \\underbrace {\\left(Q _ {[ i ]} S _ {i - 1}\\right) \\odot \\xi} _ {\\text {C r o s s - C h u n k}} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 312, + 477, + 785, + 516 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Since low inference latency is required in real-time scenarios, the parallel representation is not suitable. Moreover, the recurrent representation is the special case of the chunkwise recurrent representation when $T = 1$ . Therefore, we focus on the chunkwise recurrent representation of RetNet to design our human pose estimation model.", + "bbox": [ + 212, + 521, + 787, + 595 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 Non-Causal RetNet", + "text_level": 1, + "bbox": [ + 214, + 618, + 426, + 632 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Although the chunkwise recurrent representation of RetNet processes the input sequence chunk by chunk, it does not utilize all the information in the current chunk. As is shown in Fig. 2(left), the masking in the chunkwise recurrent representation is a lower triangular matrix. This means that the estimation of the current frame only uses the frames before it, regardless of the future frames within the chunk. However, leveraging certain future information can be very helpful for the accuracy of human pose estimation models.", + "bbox": [ + 212, + 643, + 787, + 748 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To solve this problem, we modify the causal masking in RetNet to exploit all the information within the current chunk and propose Non-Causal RetNet (NC-RetNet). Formally, the new $D$ in the chunkwise recurrent representation is given by Eq. (4). The new masking is a full matrix instead of a lower triangular matrix. When predicting the 3D pose of the $n^{th}$ frame, we can calculate the exponential decay of both past frames $(m < n)$ and future frames $(m > n)$", + "bbox": [ + 212, + 750, + 787, + 842 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "K. Zheng et al.", + "bbox": [ + 271, + 114, + 374, + 128 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "within the chunk by using the absolute distance $|n - m|$ between the two frames. The mathematical expression of the new $D$ matrix in the parallel representation can be found in the Supp. Mat. The chunk size, denoted by $T$ , can be adjusted to balance the accuracy and inference latency. 
The larger $T$ is, the more future information can be perceived by the model, but the longer the inference latency will be.", + "bbox": [ + 212, + 146, + 787, + 234 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nD = \\left\\{D _ {n m} \\right\\} = \\left\\{\\gamma^ {| n - m |} \\right\\}, n, m \\in \\{1, \\dots , T \\} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 349, + 237, + 785, + 256 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Fig. 2(right) illustrates the masks in the parallel and chunkwise recurrent representations of this non-causal retention. Note that the model can also be trained in parallel by using the staircase-shaped mask, but it does not have the recurrent representation unless $T = 1$.", + "bbox": [ + 212, + 263, + 784, + 321 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "By using the non-causal masking, NC-RetNet exploits temporal dependencies from the cross-chunk state, which provides long-term historical information, and from only a few future frames, which provide some future information. Therefore, the temporal receptive field of our method is not limited by the chunk size. In fact, the chunk size in our method only affects the amount of future information, while historical information is always adequate due to the cross-chunk state.", + "bbox": [ + 212, + 324, + 784, + 414 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3 Transfer Knowledge from Large Chunks", + "text_level": 1, + "bbox": [ + 214, + 439, + 591, + 455 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We further develop a strategy for our NC-RetNet to improve its performance under small test chunk sizes, which is to train the model with a large chunk size and infer with a small chunk size. Since the model uses xPos, a relative position embedding, it is able to handle 2D sequences of different lengths by construction. In addition, the cross-chunk state $S_{i}$ in the chunkwise recurrent representation is updated every chunk, containing a lot of information from previous chunks. With this long-term historical information, the model becomes insensitive to the length of future frames. Therefore, using a smaller chunk size during inference does not significantly decrease accuracy, but greatly reduces inference latency. 
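As a concrete illustration of Eq. (4) and of the staircase-shaped training mask in Fig. 2 (right), here is a minimal PyTorch sketch. It is our own reading of the paper rather than the authors' code, and the helper names noncausal_decay_mask and staircase_mask are ours:

import torch

def noncausal_decay_mask(T: int, gamma: float) -> torch.Tensor:
    # Eq. (4): D[n, m] = gamma ** |n - m| for every pair of frames in the
    # chunk, so each frame sees both its past and its future neighbours.
    idx = torch.arange(T)
    return gamma ** (idx[:, None] - idx[None, :]).abs().float()

def staircase_mask(L: int, T: int, gamma: float) -> torch.Tensor:
    # Staircase-shaped mask for parallel training (Fig. 2, right): frame n
    # sees every frame of its own chunk and of earlier chunks, never later ones.
    idx = torch.arange(L)
    decay = gamma ** (idx[:, None] - idx[None, :]).abs().float()
    visible = (idx[None, :] // T) <= (idx[:, None] // T)
    return torch.where(visible, decay, torch.zeros_like(decay))

Setting T = L reduces the staircase mask to the fully non-causal mask of a single chunk, while T = 1 recovers the purely causal case.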
+ { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 Pseudo-code for training" + ], + "code_body": "Input: Training dataloader, initialized model, training chunk size $T_{l}$ \nOutput: model after training \nfor input_2d, target in dataloader do \n    L = input_2d.size(1) # Total length of the input sequence \n    D_parallel = get_D_parallel(L, $T_{l}$) # Get the staircase-shaped mask for parallel training given the total length and chunk size \n    pred = model.forward_parallel(input_2d, D_parallel) \n    loss = loss_func(pred, target) \n    optimizer.zero_grad() \n    loss.backward() \n    optimizer.step() \nend for", + "bbox": [ + 215, + 680, + 787, + 834 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "3D HPE via Non-Causal Retentive Networks", + "bbox": [ + 431, + 114, + 730, + 127 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 6 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 2 Pseudo-code for inference" + ], + "code_body": "Input: 2D stream, trained model, test chunk size $T_{s}$ \nOutput: 3D stream $\\{\\mathbf{y}_{n}\\}$ \nD_chunkwise = get_D_chunkwise($T_{s}$) # Get the full mask for chunkwise inference given the test chunk size \ns_n, x_n = None, [] \nfor x_i in stream do \n    x_n.append(x_i) \n    if len(x_n) == T_s then \n        y_n, s_n = model.forward_chunkwise(x_n, D_chunkwise, s_n, n) \n        x_n = [] \n        output(y_n) # Output y_n every chunk for downstream tasks \n    end if \nend for", + "bbox": [ + 215, + 164, + 785, + 345 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The details of the training and testing are elaborated below. During training, we utilize RetNet's parallel representation to achieve training parallelism. We set the training chunk size to a large value $T_{l}$ to capture long-term patterns of human motion. The training pseudo-code is shown in Algorithm 1. The parallel representation used during training implicitly incorporates the cross-chunk state. This means that the model can theoretically observe historical information over a long period of time as well as many future frames. During inference, the test chunk size $T_{s}$ is set smaller than the training chunk size $T_{l}$ and the chunkwise-recurrent representation is used. The pseudo-code for inference is shown in Algorithm 2. Given a stream of 2D keypoints, the model processes the stream in the chunkwise-recurrent representation every $T_{s}$ frames based on the current chunk x_n as well as an explicit cross-chunk state s_n. This cross-chunk state contains information about previous chunks and makes the model insensitive to the number of future frames. Therefore, although the test chunk is smaller than the training chunks, the model can still extract stable temporal features.", + "bbox": [ + 212, + 375, + 787, + 603 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.4 Implementation Details", + "text_level": 1, + "bbox": [ + 215, + 623, + 455, + 638 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We implement our idea based on the state-of-the-art seq2seq method, MixSTE [46], by replacing its temporal encoder with RetNet. Since the movement of distal joints is more erratic than that of torso joints, the estimation of these distal joints should rely on more local temporal information. Therefore, we assign different decay coefficients to different human joints, which is referred to as joint-related decay coefficients. Its chunkwise recurrent representation is formulated in Eq. (5), where $p$ is the index of human joints.", + "bbox": [ + 212, + 647, + 787, + 753 + ], + "page_idx": 7 + },
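+ { + "type": "text", + "text": "Editor's note: Eq. (5) follows below; as a companion, here is a sketch of the same per-joint computation vectorised over the joint index p. The shapes and the function name joint_nc_retention are assumptions for illustration, not the authors' implementation." + }, + { + "type": "code", + "sub_type": "example", + "code_caption": [ + "Sketch of chunkwise retention with joint-related decay coefficients (illustrative only)" + ], + "code_body": "import torch

def joint_nc_retention(q, k, v, s_prev, gammas):
    # q, k, v: [J, T, d] per-joint chunks; s_prev: [J, d, d]; gammas: [J], one decay per joint.
    J, T, d = q.shape
    n = torch.arange(T, dtype=q.dtype)
    g = gammas.view(J, 1, 1)
    D = g ** (n[:, None] - n[None, :]).abs()   # per-joint non-causal decay D_p
    xi = g ** (n + 1).view(1, T, 1)            # cross-chunk read decay xi_p
    zeta = g ** (T - 1 - n).view(1, T, 1)      # state-update decay zeta_p
    inner = (q @ k.transpose(-1, -2) * D) @ v  # Inner_{i,p}
    cross = (q @ s_prev) * xi                  # Cross_{i,p}
    s_new = k.transpose(-1, -2) @ (v * zeta) + (g ** T) * s_prev
    return inner + cross, s_new               # joint dim 0 plays the role of Concat" + },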
+ { + "type": "equation", + "text": "\n$$\nS_{i,p} = K_{[i],p}^{T}\\left(V_{[i],p} \\odot \\zeta_{p}\\right) + \\gamma_{p}^{T} S_{i-1,p}\n$$\n", + "text_format": "latex", + "bbox": [ + 330, + 762, + 584, + 782 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname{Inner}_{i,p} = \\left(Q_{[i],p} K_{[i],p}^{T} \\odot D_{p}\\right) V_{[i],p} \\tag{5}\n$$\n", + "text_format": "latex", + "bbox": [ + 330, + 784, + 785, + 810 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname{Cross}_{i,p} = \\left(Q_{[i],p} S_{i-1,p}\\right) \\odot \\xi_{p}\n$$\n", + "text_format": "latex", + "bbox": [ + 330, + 806, + 545, + 824 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname{Retention}\\left(X_{[i]}\\right) = \\operatorname{Concat}\\left(\\operatorname{Inner}_{i,p} + \\operatorname{Cross}_{i,p}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 330, + 825, + 669, + 843 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "K. Zheng et al.", + "bbox": [ + 271, + 114, + 374, + 128 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The loss function and training strategies are the same as in MixSTE. The training chunk size is 243 on the Human3.6M dataset and 81 on the MPI-INF-3DHP dataset, and we then test the model with different chunk sizes. The overall architecture of our model is given in the Supp. Mat. We also implement our idea on MotionBERT [51] and the results can be found in the Supp. Mat.", + "bbox": [ + 212, + 146, + 782, + 234 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 214, + 258, + 375, + 275 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.1 Datasets and Evaluation Protocols", + "text_level": 1, + "bbox": [ + 214, + 289, + 545, + 304 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Experiments are conducted on two human pose estimation datasets: Human3.6M [14] and MPI-INF-3DHP [23]. Human3.6M is the most widely used indoor dataset for single-person 3D human pose estimation, containing about 3.6 million images collected from 11 professional actors. Following the common practice [2,27,46,50], we use the samples of subjects S1, S5, S6, S7 and S8 for training and evaluate on subjects S9 and S11. Mean Per-Joint Position Error (MPJPE) and Procrustes-Aligned MPJPE (PA-MPJPE) are evaluated on this dataset. We also report the Mean Per-Joint Velocity Error (MPJVE) results, which reflect the continuity of the predicted results. MPI-INF-3DHP is a more challenging 3D human pose estimation dataset because it includes both indoor and outdoor scenes. The samples are collected from 8 subjects, each performing 8 actions. The test set consists of 6 subjects in different scenes. We follow the setup in [2,39,46,50]. For the MPI-INF-3DHP dataset, we report MPJPE, the Percentage of Correct Keypoints (PCK) within the $150\\mathrm{mm}$ range, and the Area Under Curve (AUC), following [18,38,50].", + "bbox": [ + 212, + 311, + 787, + 541 + ], + "page_idx": 8 + },
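+ { + "type": "text", + "text": "Editor's note: a minimal sketch (assumption, not the official evaluation code) of two of the metrics just described, for prediction and ground-truth arrays of shape [frames, joints, 3] in millimetres; PA-MPJPE additionally applies a Procrustes alignment before computing MPJPE, omitted here for brevity." + }, + { + "type": "code", + "sub_type": "example", + "code_caption": [ + "Sketch of the MPJPE and MPJVE metrics (illustrative only)" + ], + "code_body": "import numpy as np

def mpjpe(pred, gt):
    # Mean Per-Joint Position Error: mean Euclidean distance over joints and frames.
    return np.linalg.norm(pred - gt, axis=-1).mean()

def mpjve(pred, gt):
    # Mean Per-Joint Velocity Error: MPJPE of the first temporal difference,
    # which reflects the continuity of the predicted sequence.
    return mpjpe(np.diff(pred, axis=0), np.diff(gt, axis=0))" + },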
+ { + "type": "text", + "text": "4.2 Quantitative Comparison", + "text_level": 1, + "bbox": [ + 214, + 560, + 470, + 575 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Results on Human3.6M We first use the 2D keypoints detected by CPN [3] as the input, and the results are shown in Tab. 1. It can be seen that our method achieves performance comparable to the state-of-the-art when the chunk size for inference is large ( $T = 243$ ). Moreover, our method outperforms previous methods by a clear margin when the chunk size is small ( $T = 27$ , 81), as the accuracy of our method only decreases slightly when the chunk size is reduced. Furthermore, the MPJPE at $T = 27$ is comparable to that of the previous state-of-the-art method at $T = 81$ (42.1 mm vs. 42.0 mm). This indicates that our method can provide similarly accurate predictions with much lower inference latency. Additionally, our method exhibits significantly better continuity than previous methods, improving the MPJVE metric by 0.2 mm per frame.", + "bbox": [ + 212, + 583, + 787, + 763 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We further use the ground truth 2D keypoints of the Human3.6M dataset as input to test the upper bound of our method, as shown in Tab. 2. The results indicate that using the model trained with 243 frames to infer with a chunk size of 81 improves the MPJPE metric by $2.3\\mathrm{mm}$ compared to the previous state-of-the-art $(22.4\\mathrm{mm}$ vs. $25.7\\mathrm{mm})$ . Moreover, the MPJPE of our method at $T = 27$", + "bbox": [ + 212, + 765, + 787, + 839 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "3D HPE via Non-Causal Retentive Networks", + "bbox": [ + 431, + 114, + 730, + 127 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "is remarkably lower than that of previous methods at $T = 81$ , demonstrating our method's efficient utilization of transferred knowledge to achieve higher accuracy with lower inference latency.", + "bbox": [ + 212, + 146, + 782, + 191 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/917480d22f541b00740c288666abde74a1ab509f3792d0033f57d0fa2d4f470f.jpg", + "table_caption": [ + "Table 1: Comparison of MPJPE, PA-MPJPE and MPJVE on the Human3.6M dataset using 2D keypoints detected by CPN [3] as input. $T$ is the chunk size when testing." + ], + "table_footnote": [], + "table_body": "
MPJPE | T | Dir. | Disc. | Eat. | Greet | Phone | Photo | Pose | Purch. | Sit | SitD. | Smoke | Wait | WalkD. | Walk | WalkT. | Average
MixSTE [46] | 27 | - | - | - | - | - | - | - | - | - | - | - | - | - | - | - | 45.1
STCFormer [38] | 27 | 40.7 | 44.6 | 41.2 | 41.9 | 45.8 | 53.7 | 41.5 | 40.9 | 55.9 | 63.8 | 44.6 | 41.5 | 44.7 | 29.5 | 30.8 | 44.1
Ours | 27 | 38.0 | 41.5 | 40.0 | 40.0 | 44.1 | 51.3 | 39.8 | 41.7 | 53.1 | 58.3 | 43.5 | 39.8 | 42.0 | 28.4 | 29.6 | 42.1
Anatomy [2] | 81 | 42.1 | 43.8 | 41.0 | 43.8 | 46.1 | 53.5 | 42.4 | 43.1 | 53.9 | 60.5 | 45.7 | 42.1 | 46.2 | 32.2 | 33.8 | 44.6
PoseFormer [50] | 81 | 41.5 | 44.8 | 39.8 | 42.5 | 46.5 | 51.6 | 42.1 | 42.0 | 53.3 | 60.7 | 45.5 | 43.3 | 46.1 | 31.8 | 32.2 | 44.3
Xue et al. [43] | 81 | 42.1 | 45.3 | 40.9 | 42.9 | 45.4 | 52.7 | 42.6 | 42.5 | 55.3 | 61.8 | 44.9 | 41.7 | 44.9 | 29.9 | 30.8 | 44.2
P-STMO [31] | 81 | 41.7 | 44.5 | 41.0 | 42.9 | 46.0 | 51.3 | 42.8 | 41.3 | 54.9 | 61.8 | 45.1 | 42.8 | 43.8 | 30.8 | 30.7 | 44.1
MixSTE [46] | 81 | 39.8 | 43.0 | 38.6 | 40.1 | 43.4 | 50.6 | 40.6 | 41.4 | 52.2 | 56.7 | 43.8 | 40.8 | 43.9 | 29.4 | 30.3 | 42.4
STCFormer [38] | 81 | 40.6 | 43.0 | 38.3 | 40.2 | 43.5 | 52.6 | 40.3 | 40.1 | 51.8 | 57.7 | 42.8 | 39.8 | 42.3 | 28.0 | 29.5 | 42.0
Ours | 81 | 36.9 | 40.5 | 39.0 | 38.6 | 43.3 | 49.6 | 38.8 | 40.2 | 52.6 | 56.5 | 42.6 | 38.8 | 40.5 | 26.8 | 28.4 | 40.9
VideoPose3D [27] | 243 | 45.2 | 46.7 | 43.3 | 45.6 | 48.1 | 55.1 | 44.6 | 44.3 | 57.3 | 65.8 | 47.1 | 44.0 | 49.0 | 32.8 | 33.9 | 46.8
Anatomy [2] | 243 | 41.4 | 43.5 | 40.1 | 42.9 | 46.6 | 51.9 | 41.7 | 42.3 | 53.9 | 60.2 | 45.4 | 41.7 | 46.0 | 31.5 | 32.7 | 44.1
Xue et al. [43] | 243 | 39.9 | 42.7 | 40.3 | 42.3 | 45.0 | 52.8 | 40.4 | 39.3 | 56.9 | 61.2 | 44.1 | 41.3 | 42.8 | 28.4 | 29.3 | 43.1
MHFormer [18] | 351 | 39.2 | 43.1 | 40.1 | 40.9 | 44.9 | 51.2 | 40.6 | 41.3 | 53.5 | 60.3 | 43.7 | 41.1 | 43.8 | 29.8 | 30.6 | 43.0
P-STMO [31] | 243 | 38.9 | 42.7 | 40.4 | 41.1 | 45.6 | 49.7 | 40.9 | 39.9 | 55.5 | 59.4 | 44.9 | 42.2 | 42.7 | 29.4 | 29.4 | 42.8
MixSTE [46] | 243 | 37.6 | 40.9 | 37.3 | 39.7 | 42.3 | 49.9 | 40.1 | 39.8 | 51.7 | 55.0 | 42.1 | 39.8 | 41.0 | 27.9 | 27.9 | 40.9
STCFormer [38] | 243 | 38.4 | 41.2 | 36.8 | 38.0 | 42.7 | 50.5 | 38.7 | 38.2 | 52.5 | 56.8 | 41.8 | 38.4 | 40.2 | 26.2 | 27.7 | 40.5
Ours | 243 | 36.9 | 40.1 | 38.7 | 38.3 | 42.9 | 48.6 | 38.2 | 40.0 | 52.5 | 55.4 | 42.3 | 38.7 | 39.7 | 26.2 | 27.8 | 40.4
", + "bbox": [ + 218, + 258, + 785, + 444 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/1091134b2efadcdd1b22b2846411347d506e3d0049143c03eeca5f2c60a79a6a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
PA-MPJPE | T | Dir. | Disc. | Eat. | Greet | Phone | Photo | Pose | Purch. | Sit | SitD. | Smoke | Wait | WalkD. | Walk | WalkT. | Average
STCFormer [38] | 27 | 31.9 | 35.1 | 32.7 | 34.1 | 34.9 | 41.3 | 32.1 | 31.6 | 45.0 | 50.6 | 36.0 | 31.7 | 35.5 | 23.6 | 25.1 | 34.8
Ours | 27 | 31.7 | 33.9 | 32.3 | 33.3 | 35.2 | 39.1 | 31.0 | 31.9 | 44.0 | 48.7 | 36.0 | 31.0 | 34.6 | 23.0 | 24.8 | 34.0
Anatomy [2] | 81 | 33.1 | 35.3 | 33.4 | 35.9 | 36.1 | 41.7 | 32.8 | 33.3 | 42.6 | 49.4 | 37.0 | 32.7 | 36.5 | 25.5 | 27.9 | 35.6
PoseFormer [50] | 81 | 32.5 | 34.8 | 32.6 | 34.6 | 35.3 | 39.5 | 32.1 | 32.0 | 42.8 | 48.5 | 34.8 | 32.4 | 35.3 | 24.5 | 26.0 | 34.6
Xue et al. [43] | 81 | 31.6 | 35.5 | 32.3 | 34.2 | 35.1 | 40.3 | 32.3 | 32.3 | 44.5 | 49.6 | 35.8 | 31.6 | 35.0 | 23.7 | 24.7 | 34.6
MixSTE [46] | 81 | 32.0 | 34.2 | 31.7 | 33.7 | 34.4 | 39.2 | 32.0 | 31.8 | 42.9 | 46.9 | 35.5 | 32.0 | 34.4 | 23.6 | 25.2 | 33.9
STCFormer [38] | 81 | 30.4 | 33.8 | 31.1 | 31.7 | 33.5 | 39.5 | 30.8 | 30.0 | 41.8 | 45.8 | 34.3 | 30.1 | 32.8 | 21.9 | 23.4 | 32.7
Ours | 81 | 30.5 | 33.1 | 31.4 | 31.6 | 33.0 | 38.4 | 29.8 | 30.6 | 43.6 | 45.4 | 34.4 | 30.3 | 32.4 | 21.5 | 22.2 | 32.6
VideoPose3D [27] | 243 | 34.1 | 36.1 | 34.4 | 37.2 | 36.4 | 42.2 | 34.4 | 33.6 | 45.0 | 52.5 | 37.4 | 33.8 | 37.8 | 25.6 | 27.3 | 36.5
Anatomy [2] | 243 | 32.6 | 35.1 | 32.8 | 35.4 | 36.3 | 40.4 | 32.4 | 32.3 | 42.7 | 49.0 | 36.8 | 32.4 | 36.0 | 24.9 | 26.5 | 35.0
Xue et al. [43] | 243 | 31.2 | 34.1 | 31.9 | 33.8 | 33.9 | 39.5 | 31.6 | 30.0 | 45.4 | 48.1 | 35.0 | 31.1 | 33.5 | 22.4 | 23.6 | 33.7
P-STMO [31] | 243 | 31.3 | 35.2 | 32.9 | 33.9 | 35.4 | 39.3 | 32.5 | 31.5 | 44.6 | 48.2 | 36.3 | 32.9 | 34.4 | 23.8 | 23.9 | 34.4
MixSTE [46] | 243 | 30.8 | 33.1 | 30.3 | 31.8 | 33.1 | 39.1 | 31.1 | 30.5 | 42.5 | 44.5 | 34.0 | 30.8 | 32.7 | 22.1 | 22.9 | 32.6
STCFormer [38] | 243 | 29.3 | 33.0 | 30.7 | 30.6 | 32.7 | 38.2 | 29.7 | 28.8 | 42.2 | 45.0 | 33.3 | 29.4 | 31.5 | 20.9 | 22.3 | 31.8
Ours | 243 | 30.8 | 33.1 | 31.3 | 31.8 | 33.4 | 37.7 | 30.1 | 30.5 | 43.4 | 45.5 | 34.3 | 30.3 | 31.5 | 21.4 | 22.7 | 32.5
", + "bbox": [ + 218, + 450, + 785, + 606 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/bbc80ed98c3768cf51eab5477c82c524f09a91df87951cab66fe8ca6b6cca1df.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MPJVE | T | Dir. | Disc. | Eat. | Greet | Phone | Photo | Pose | Purch. | Sit | SitD. | Smoke | Wait | WalkD. | Walk | WalkT. | Average
VideoPose3D [27] | 243 | 3.0 | 3.1 | 2.2 | 3.4 | 2.3 | 2.7 | 2.7 | 3.1 | 2.1 | 2.9 | 2.3 | 2.4 | 3.7 | 3.1 | 2.8 | 2.8
Anatomy [2] | 243 | 2.7 | 2.8 | 2.0 | 3.1 | 2.0 | 2.4 | 2.4 | 2.8 | 1.8 | 2.4 | 2.0 | 2.1 | 3.4 | 2.7 | 2.4 | 2.5
PoseFormer [50] | 81 | 3.2 | 3.4 | 2.6 | 3.6 | 2.6 | 3.0 | 2.9 | 3.2 | 2.6 | 3.3 | 2.7 | 2.7 | 3.8 | 3.2 | 2.9 | 3.1
StridedFormer [17] | 351 | 2.4 | 2.5 | 1.8 | 2.8 | 1.8 | 2.2 | 2.2 | 2.5 | 1.5 | 2.0 | 1.8 | 1.9 | 3.2 | 2.5 | 2.1 | 2.2
MixSTE [46] | 243 | 2.5 | 2.7 | 1.9 | 2.8 | 1.9 | 2.2 | 2.3 | 2.6 | 1.6 | 2.2 | 1.9 | 2.0 | 3.1 | 2.6 | 2.2 | 2.3
Ours | 81 | 2.3 | 2.4 | 1.8 | 2.6 | 1.7 | 2.1 | 2.1 | 2.5 | 1.5 | 2.1 | 1.8 | 1.9 | 3.0 | 2.4 | 2.0 | 2.2
Ours | 243 | 2.3 | 2.4 | 1.8 | 2.6 | 1.7 | 2.1 | 2.1 | 2.5 | 1.5 | 2.1 | 1.8 | 1.9 | 3.0 | 2.4 | 2.0 | 2.0
", + "bbox": [ + 218, + 612, + 785, + 691 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Results on MPI-INF-3DHP The results on the MPI-INF-3DHP datasets are shown in Tab. 3. An improvement of $0.9\\mathrm{mm}$ on the MPJPE metric is achieved at $T = 81$ , and the improvement becomes more remarkable as the chunk size decreases. In particular, our method outperforms previous methods very significantly with an improvement of $4.1\\mathrm{mm}$ on the MPJPE metric when $T$ is 9. Similar to the phenomenon on the Human3.6M dataset, our method is", + "bbox": [ + 212, + 748, + 787, + 840 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "K. Zheng et al.", + "bbox": [ + 271, + 114, + 372, + 128 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/5cfd6856267fbab9aaeaa33c9a663ac0cfb4409b5f810b5795b0c3c22c1b7183.jpg", + "table_caption": [ + "Table 2: Comparison of MPJPE on the Human3.6M dataset using 2D ground truth keypoints as input." + ], + "table_footnote": [], + "table_body": "
MPJPE | T | Dir. | Disc. | Eat. | Greet | Phone | Photo | Pose | Purch. | Sit | SitD. | Smoke | Wait | WalkD. | Walk | WalkT. | Average
Ours | 27 | 23.7 | 24.8 | 23.5 | 24.4 | 23.6 | 28.1 | 27.2 | 25.3 | 26.7 | 27.9 | 25.0 | 23.5 | 23.6 | 17.2 | 18.7 | 24.2
PoseFormer [50] | 81 | 30.0 | 33.6 | 29.9 | 31.0 | 30.2 | 33.3 | 34.8 | 31.4 | 37.8 | 38.6 | 31.7 | 31.5 | 29.0 | 23.3 | 23.1 | 31.3
Xue et al. [43] | 81 | 27.6 | 28.8 | 24.9 | 25.7 | 26.7 | 30.6 | 30.8 | 26.4 | 35.8 | 32.7 | 27.1 | 26.2 | 25.6 | 19.2 | 20.6 | 27.2
MixSTE [46] | 81 | 25.6 | 27.8 | 24.5 | 25.7 | 24.9 | 29.9 | 28.6 | 27.4 | 29.9 | 29.0 | 26.1 | 25.0 | 25.2 | 18.7 | 19.9 | 25.9
STCFormer [38] | 81 | 26.2 | 26.5 | 23.4 | 24.6 | 25.0 | 28.6 | 28.3 | 24.6 | 30.9 | 33.7 | 25.7 | 25.3 | 24.6 | 18.6 | 19.7 | 25.7
Ours | 81 | 20.9 | 22.5 | 21.8 | 21.5 | 22.0 | 25.6 | 23.4 | 23.7 | 28.1 | 28.8 | 23.9 | 20.9 | 21.1 | 14.9 | 16.3 | 22.4
Xue et al. [43] | 243 | 25.8 | 25.2 | 23.3 | 23.5 | 24.0 | 27.4 | 27.9 | 24.4 | 29.3 | 30.1 | 24.9 | 24.1 | 23.3 | 18.6 | 19.7 | 24.7
MHFormer [18] | 351 | 27.7 | 32.1 | 29.1 | 28.9 | 30.0 | 33.9 | 33.0 | 31.2 | 37.0 | 39.3 | 30.0 | 31.0 | 29.4 | 22.2 | 23.0 | 30.5
P-STMO [31] | 243 | 28.5 | 30.1 | 28.6 | 27.9 | 29.8 | 33.2 | 31.3 | 27.8 | 36.0 | 37.4 | 29.7 | 29.5 | 28.1 | 21.0 | 21.0 | 29.3
MixSTE [46] | 243 | 21.6 | 22.0 | 20.4 | 21.0 | 20.8 | 24.3 | 24.7 | 21.9 | 26.9 | 24.9 | 21.2 | 21.5 | 20.8 | 14.7 | 15.7 | 21.6
STCFormer [38] | 243 | 21.4 | 22.6 | 21.0 | 21.3 | 23.8 | 26.0 | 24.2 | 20.0 | 28.9 | 28.0 | 22.3 | 21.4 | 20.1 | 14.2 | 15.0 | 22.0
Ours | 243 | 20.0 | 21.1 | 20.9 | 20.8 | 20.1 | 24.9 | 23.5 | 22.5 | 26.5 | 39.6 | 21.7 | 20.9 | 20.4 | 14.5 | 15.7 | 21.5
", + "bbox": [ + 218, + 184, + 785, + 314 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "comparable to or better than previous methods with an even smaller chunk size. For example, the MPJPE of our method at $T = 27$ is better than STCFformer at $T = 81$ (22.7 mm vs. 23.1 mm). And the MPJPE of our method at $T = 9$ is similar to STCFformer at $T = 27$ (24.1 mm vs. 24.2 mm). These results show that our method generalizes well on different datasets.", + "bbox": [ + 212, + 344, + 784, + 417 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/37b15a4eb1ca9f3d84460b62dd7d67465d5c8fe74652c1efdce46bee2741738e.jpg", + "table_caption": [ + "Table 3: Comparison of quantitative results on the MPI-INF-3DHP dataset. $\\uparrow$ : higher is better. $\\downarrow$ : lower is better." + ], + "table_footnote": [], + "table_body": "
Method | T | PCK↑ | AUC↑ | MPJPE↓
PoseFormer [50] | 9 | 88.6 | 56.4 | 77.1
CrossFormer [10] | 9 | 89.1 | 57.5 | 76.3
MHFormer [18] | 9 | 93.8 | 63.3 | 58.0
STCFormer [38] | 9 | 98.2 | 81.5 | 28.2
Ours | 9 | 98.9 | 83.3 | 24.1
Lin et al. [20] | 25 | 83.6 | 51.4 | 79.8
MixSTE [46] | 27 | 94.4 | 66.5 | 54.9
STCFormer [38] | 27 | 98.4 | 83.4 | 24.2
Ours | 27 | 99.1 | 84.1 | 22.7
UGCN [39] | 96 | 86.9 | 62.1 | 68.1
Anatomy [2] | 81 | 87.8 | 53.8 | 79.1
Hu et al. [12] | 96 | 97.9 | 69.5 | 42.5
Einfalt et al. [8] | 81 | 95.4 | 67.6 | 46.9
P-STMO [31] | 81 | 97.9 | 75.8 | 32.2
STCFormer [38] | 81 | 98.7 | 83.9 | 23.1
Ours | 81 | 99.1 | 84.4 | 22.2
", + "bbox": [ + 349, + 484, + 643, + 726 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.3 Qualitative Results", + "text_level": 1, + "bbox": [ + 215, + 768, + 419, + 784 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Visualization on Continuity We compute the MPJVE of the results predicted by MixSTE, STCFormer and our method at different timesteps, and visualize the curves in Fig. 3. It can be seen that the MPJVE of our method is lower than", + "bbox": [ + 212, + 794, + 784, + 839 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "3D HPE via Non-Causal Retentive Networks", + "bbox": [ + 431, + 114, + 730, + 127 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 114, + 782, + 126 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "that of previous methods. Our method captures temporal information using non-overlapping shift windows, similar to MixSTE. However, our method produces more continuous results at the edge between two chunks compared to MixSTE. MixSTE independently estimates two consecutive chunks, which results in a lack of continuity at the edge. In contrast, our method incorporates temporal information from previous chunks through the cross-chunk state, which improves the continuity. Compared to STCFoer, our method generally produces more continuous results. This is because our method generates multiple frames each time, allowing for the continuity constraints to the output.", + "bbox": [ + 212, + 146, + 787, + 282 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/d4a2a04bd5c9b75b20b917823782835ace91f8d0da2d1cac95d9ceab8033f48d.jpg", + "image_caption": [ + "Fig. 3: Comparison of the MPJVE curves over time between MixSTE, STCFormer and our method." + ], + "image_footnote": [], + "bbox": [ + 302, + 310, + 699, + 434 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/9b633f75a3a335b0c645f7cd5dd5a44eaafd41d3708e049ff26326234e45eab3.jpg", + "image_caption": [ + "Fig. 4: Comparison of some visualization results predicted by MixSTE [46], STC-Former [38] and our method. The black skeletons are the ground truth, and the red skeletons are the predicted results. The comparison with MixSTE is shown in green circles, while the comparison with STC-Former is shown in blue circles." + ], + "image_footnote": [], + "bbox": [ + 222, + 535, + 781, + 767 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "K. Zheng et al.", + "bbox": [ + 271, + 114, + 372, + 128 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Visualization of Results We present some visualization examples in Fig. 4, where the results are predicted by MixSTE [46], STCFoer [38] and our method, respectively. It can be seen that our method predicts more accurate results, and the improvement is visually obvious. More visualization results can be found in the Supp. Mat.", + "bbox": [ + 212, + 146, + 787, + 220 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.4 Ablation Study", + "text_level": 1, + "bbox": [ + 215, + 246, + 388, + 262 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Ablations on Knowledge Transfer The knowledge transferred from large training chunks to smaller test chunks plays an important role in our method. 
+ { + "type": "table", + "img_path": "images/5356be9cd0c8ff29e349005839e1938e7ee2099b25bee0f3ddbc3b70096411ab.jpg", + "table_caption": [ + "Table 4: Comparison of different methods in terms of knowledge transfer." + ], + "table_footnote": [], + "table_body": "
Method | Train T | Test T = 27 | Test T = 81 | Test T = 243
Previous SOTA | Same as test T | 44.1 | 42.0 | 40.5
Ours | 27 | 43.7 | - | -
Ours | 81 | 43.0 | 41.9 | -
Ours | 243 | 42.1 | 40.9 | 40.4
MixSTE w/ xPos | 27 | 45.3 | - | -
MixSTE w/ xPos | 81 | 47.0 | 42.6 | -
MixSTE w/ xPos | 243 | 48.8 | 44.1 | 41.1
Ours w/o state | 27 | 46.3 | - | -
Ours w/o state | 81 | 49.1 | 44.0 | -
Ours w/o state | 243 | 54.2 | 49.8 | 42.5
", + "bbox": [ + 250, + 462, + 748, + 621 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Effect of Cross-Chunk State We compare our methods with two baselines that do not use the cross-chunk state: the MixSTE model with xPos as the position embedding, and the model based on RetNet but without the cross-chunk state. These two baselines are able to handle sequences of different lengths, but can only use within-chunk information. The results are shown in Tab. 4 (bottom six rows). It can be seen that the two baselines without long-term historical information deteriorate rapidly as the gap between the training and test chunk sizes increases. This means that they cannot efficiently transfer knowledge from large chunks to small chunks. Therefore, the cross-chunk state is essential for knowledge transfer in our method, and our NC-RetNet is the first method to have this knowledge transfer property.", + "bbox": [ + 212, + 672, + 787, + 840 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "3D HPE via Non-Causal Retentive Networks", + "bbox": [ + 431, + 114, + 730, + 127 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Comparison of Computational Cost The comparison of the model parameters and computational cost of our method and previous methods is as shown in Tab. 5. For seq2seq methods, the FLOPs are averaged over the number of frames, since the prediction of a single inference yields results over multiple frames. It can be seen that our modification of MixSTE does not bring any increase in the model parameters or FLOPs. And compared to STCFormer [38], which has comparable performance to our method at $T = 243$ , the computational cost of our method is much lower (430 M vs. 78107 M).", + "bbox": [ + 212, + 146, + 787, + 268 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/518dfffa3d6f9d4c06c124f1b18a40f4a3e50f0f357388c475b92e4faeef0a2c.jpg", + "table_caption": [ + "Table 5: Comparison of model parameters, computational cost. FLOPs for seq2seq and our methods is averaged over the number of output frames, as is done in [46]." + ], + "table_footnote": [], + "table_body": "
Method | Params (M) | FLOPs (M) | MPJPE (T=243)
StridedFormer [17] | 4.2 | 1372 | 44.0
P-STMO [31] | 6.7 | 1737 | 42.8
MHFormer [18] | 24.7 | 4812 | 43.2
MixSTE [46] | 33.6 | 572 | 40.9
STCFormer [38] | 18.9 | 78107 | 40.5
Ours | 25.2 | 430 | 40.4
", + "bbox": [ + 289, + 337, + 709, + 441 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 214, + 500, + 359, + 515 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this paper, we propose the first 3D human pose estimation model based on Retentive Networks, NC-RetNet. By using the non-causal masking, it effectively leverages a large number of past frames and a limited number of future frames to incorporate temporal information. Furthermore, we introduce a knowledge transfer strategy that involves training the model with a larger chunk size and using a smaller chunk size during inference, resulting in reduced inference latency without too much loss in accuracy. Through extensive experiments on the Human3.6M and MPI-INF-3DHP datasets, our approach has demonstrated state-of-the-art performance even with a smaller test chunk size. In conclusion, our method achieves a good balance between high accuracy and low inference latency, making it suitable for real-time scenarios.", + "bbox": [ + 212, + 537, + 787, + 705 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Limitations Admittedly, there are two limitations in our work. Firstly, the fundamental theory behind our method's ability to transfer knowledge is unclear, despite our study of the effect of the cross-chunk state. Secondly, we have only tested our method in the 2D-to-3D lifting task. However, the idea of transferring knowledge from large chunks to smaller chunks is universal to many sequential data in computer vision. Further work is required to explain the theory and explore more applications.", + "bbox": [ + 212, + 734, + 787, + 840 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "K. Zheng et al.", + "bbox": [ + 271, + 114, + 372, + 128 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 215, + 143, + 321, + 159 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Cai, Y., Ge, L., Liu, J., Cai, J., Cham, T.J., Yuan, J., Thalmann, N.M.: Exploiting spatial-temporal relationships for 3d pose estimation via graph convolutional networks. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 2272-2281 (2019) 2, 4", + "2. Chen, T., Fang, C., Shen, X., Zhu, Y., Chen, Z., Luo, J.: Anatomy-aware 3d human pose estimation with bone-based pose decomposition. IEEE Transactions on Circuits and Systems for Video Technology 32(1), 198-209 (2021) 1, 9, 10, 11", + "3. Chen, Y., Wang, Z., Peng, Y., Zhang, Z., Yu, G., Sun, J.: Cascaded pyramid network for multi-person pose estimation. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 7103-7112 (2018) 3, 9, 10", + "4. Chi, T.C., Fan, T.H., Ramadge, P.J., Rudnicky, A.: Kerple: Kernelized relative positional embedding for length extrapolation. Advances in Neural Information Processing Systems 35, 8386-8399 (2022) 5", + "5. Choi, H., Moon, G., Chang, J.Y., Lee, K.M.: Beyond static features for temporally consistent 3d human pose and shape from a video. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 1964-1973 (2021) 4", + "6. Choi, S., Choi, S., Kim, C.: Mobilehumanpose: Toward real-time 3d human pose estimation in mobile devices. 
In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 2328-2338 (2021) 4", + "7. Chung, J., Gulcehre, C., Cho, K., Bengio, Y.: Empirical evaluation of gated recurrent neural networks on sequence modeling. arXiv preprint arXiv:1412.3555 (2014) 4", + "8. Einfalt, M., Ludwig, K., Lienhart, R.: Uplift and upsample: Efficient 3d human pose estimation with uplifting transformers. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 2903-2913 (2023) 4, 11", + "9. Han, K., Wang, Y., Tian, Q., Guo, J., Xu, C., Xu, C.: Ghostnet: More features from cheap operations. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 1580-1589 (2020) 4", + "10. Hassanin, M., Khamiss, A., Bennamoun, M., Boussaid, F., Radwan, I.: Crossformer: Cross spatio-temporal transformer for 3d human pose estimation. arXiv preprint arXiv:2203.13387 (2022) 4, 11", + "11. Hesse, N., Schröder, A.S., Müller-Felber, W., Bodensteiner, C., Arens, M., Hofmann, U.G.: Body pose estimation in depth images for infant motion analysis. In: 2017 39th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC). pp. 1909-1912. IEEE (2017) 1", + "12. Hu, W., Zhang, C., Zhan, F., Zhang, L., Wong, T.T.: Conditional directed graph convolution for 3d human pose estimation. In: Proceedings of the 29th ACM International Conference on Multimedia. pp. 602-611 (2021) 11", + "13. Iandola, F.N., Han, S., Moskewicz, M.W., Ashraf, K., Dally, W.J., Keutzer, K.: SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and $< 0.5$ MB model size. arXiv preprint arXiv:1602.07360 (2016) 4", + "14. Ionescu, C., Papava, D., Olaru, V., Sminchisescu, C.: Human3.6M: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE transactions on pattern analysis and machine intelligence 36(7), 1325-1339 (2013) 3, 9" + ], + "bbox": [ + 225, + 179, + 785, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "3D HPE via Non-Causal Retentive Networks", + "bbox": [ + 431, + 114, + 730, + 127 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "15. Kipf, T.N., Welling, M.: Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907 (2016) 3, 4", + "16. Li, H., Shi, B., Dai, W., Zheng, H., Wang, B., Sun, Y., Guo, M., Li, C., Zou, J., Xiong, H.: Pose-oriented transformer with uncertainty-guided refinement for 2d-to-3d human pose estimation. In: Proceedings of the AAAI Conference on Artificial Intelligence. vol. 37, pp. 1296-1304 (2023) 4", + "17. Li, W., Liu, H., Ding, R., Liu, M., Wang, P., Yang, W.: Exploiting temporal contexts with strided transformer for 3d human pose estimation. IEEE Transactions on Multimedia 25, 1282-1293 (2022) 4, 10, 14", + "18. Li, W., Liu, H., Tang, H., Wang, P., Van Gool, L.: Mhformer: Multi-hypothesis transformer for 3d human pose estimation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13147-13156 (2022) 4, 9, 10, 11, 14", + "19. Li, Y., Zhang, S., Wang, Z., Yang, S., Yang, W., Xia, S.T., Zhou, E.: Tokenpose: Learning keypoint tokens for human pose estimation. In: Proceedings of the IEEE/CVF International conference on computer vision. pp. 11313-11322 (2021) 3", + "20. 
Lin, J., Lee, G.H.: Trajectory space factorization for deep video-based 3d human pose estimation. arXiv preprint arXiv:1908.08289 (2019) 11", + "21. Liu, R., Shen, J., Wang, H., Chen, C., Cheung, S.c., Asari, V.: Attention mechanism exploits temporal contexts: Real-time 3d human pose reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5064-5073 (2020) 3", + "22. Martinez, J., Hossain, R., Romero, J., Little, J.J.: A simple yet effective baseline for 3d human pose estimation. In: Proceedings of the IEEE international conference on computer vision. pp. 2640-2649 (2017) 1, 3", + "23. Mehta, D., Rhodin, H., Casas, D., Fua, P., Sotnychenko, O., Xu, W., Theobalt, C.: Monocular 3d human pose estimation in the wild using improved cnn supervision. In: 2017 international conference on 3D vision (3DV). pp. 506-516. IEEE (2017) 3, 9", + "24. Mehta, D., Sridhar, S., Sotnychenko, O., Rhodin, H., Shafiei, M., Seidel, H.P., Xu, W., Casas, D., Theobalt, C.: Vnect: Real-time 3d human pose estimation with a single rgb camera. Acm transactions on graphics (tog) 36(4), 1-14 (2017) 4", + "25. Newell, A., Yang, K., Deng, J.: Stacked hourglass networks for human pose estimation. In: Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part VIII 14. pp. 483-499. Springer (2016) 3", + "26. Pavlakos, G., Zhou, X., Derpanis, K.G., Daniilidis, K.: Coarse-to-fine volumetric prediction for single-image 3d human pose. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 7025-7034 (2017) 3", + "27. Pavllo, D., Feichtenhofer, C., Grangier, D., Auli, M.: 3d human pose estimation in video with temporal convolutions and semi-supervised training. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 7753-7762 (2019) 2, 4, 9, 10", + "28. Press, O., Smith, N.A., Lewis, M.: Train short, test long: Attention with linear biases enables input length extrapolation. arXiv preprint arXiv:2108.12409 (2021) 5", + "29. Rayat Imtiaz Hossain, M., Little, J.J.: Exploiting temporal information for 3d pose estimation. arXiv e-prints pp. arXiv-1711 (2017) 2" + ], + "bbox": [ + 215, + 146, + 785, + 839 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "K. Zheng et al.", + "bbox": [ + 271, + 114, + 372, + 127 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "30. Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., Chen, L.C.: Mobilenetv2: Inverted residuals and linear bottlenecks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 4510-4520 (2018) 4", + "31. Shan, W., Liu, Z., Zhang, X., Wang, S., Ma, S., Gao, W.: P-stmo: Pre-trained spatial temporal many-to-one model for 3d human pose estimation. In: European Conference on Computer Vision. pp. 461-478. Springer (2022) 10, 11, 14", + "32. Su, J., Lu, Y., Pan, S., Murtadha, A., Wen, B., Liu, Y.: Roformer: Enhanced transformer with rotary position embedding. arXiv preprint arXiv:2104.09864 (2021) 4", + "33. Sun, K., Xiao, B., Liu, D., Wang, J.: Deep high-resolution representation learning for human pose estimation. In: CVPR (2019) 3", + "34. Sun, X., Shang, J., Liang, S., Wei, Y.: Compositional human pose regression. In: Proceedings of the IEEE international conference on computer vision. pp. 
2602-2611 (2017) 3", + "35. Sun, Y., Dong, L., Huang, S., Ma, S., Xia, Y., Xue, J., Wang, J., Wei, F.: Retentive network: A successor to transformer for large language models. arXiv preprint arXiv:2307.08621 (2023) 2, 5", + "36. Sun, Y., Dong, L., Patra, B., Ma, S., Huang, S., Benhaim, A., Chaudhary, V., Song, X., Wei, F.: A length-extrapolatable transformer. arXiv preprint arXiv:2212.10554 (2022) 4", + "37. Svenstrup, M., Tranberg, S., Andersen, H.J., Bak, T.: Pose estimation and adaptive robot behaviour for human-robot interaction. In: 2009 IEEE International Conference on Robotics and Automation. pp. 3571-3576. IEEE (2009) 1", + "38. Tang, Z., Qiu, Z., Hao, Y., Hong, R., Yao, T.: 3d human pose estimation with spatio-temporal criss-cross attention. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4790-4799 (2023) 9, 10, 11, 12, 13, 14", + "39. Wang, J., Yan, S., Xiong, Y., Lin, D.: Motion guided 3d pose estimation from videos. In: European Conference on Computer Vision. pp. 764-780. Springer (2020) 1, 9, 11", + "40. Wehrbein, T., Rudolph, M., Rosenhahn, B., Wandt, B.: Probabilistic monocular 3d human pose estimation with normalizing flows. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11199-11208 (2021) 1", + "41. Xu, T., Takano, W.: Graph stacked hourglass networks for 3d human pose estimation. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 16105-16114 (2021) 1", + "42. Xu, Y., Zhang, J., Zhang, Q., Tao, D.: Vitpose: Simple vision transformer baselines for human pose estimation. Advances in Neural Information Processing Systems 35, 38571-38584 (2022) 3", + "43. Xue, Y., Chen, J., Gu, X., Ma, H., Ma, H.: Boosting monocular 3d human pose estimation with part aware attention. IEEE Transactions on Image Processing 31, 4278-4291 (2022) 2, 10, 11", + "44. Yan, S., Xiong, Y., Lin, D.: Spatial temporal graph convolutional networks for skeleton-based action recognition. In: Proceedings of the AAAI conference on artificial intelligence. vol. 32 (2018) 1", + "45. Zeng, A., Sun, X., Yang, L., Zhao, N., Liu, M., Xu, Q.: Learning skeletal graph neural networks for hard 3d pose estimation. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11436-11445 (2021) 1", + "46. Zhang, J., Tu, Z., Yang, J., Chen, Y., Yuan, J.: Mixste: Seq2seq mixed spatiotemporal encoder for 3d human pose estimation in video. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 13232-13242 (2022) 2, 8, 9, 10, 11, 12, 13, 14" + ], + "bbox": [ + 212, + 146, + 787, + 839 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "3D HPE via Non-Causal Retentive Networks", + "bbox": [ + 431, + 114, + 730, + 126 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "47. Zhang, X., Zhou, X., Lin, M., Sun, J.: Shufflenet: An extremely efficient convolutional neural network for mobile devices. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 6848-6856 (2018) 4", + "48. Zhao, Q., Zheng, C., Liu, M., Chen, C.: A single 2d pose with context is worth hundreds for 3d human pose estimation. Advances in Neural Information Processing Systems 36 (2024) 1, 3", + "49. 
Zhao, Q., Zheng, C., Liu, M., Wang, P., Chen, C.: Poseformerv2: Exploring frequency domain for efficient and robust 3d human pose estimation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8877-8886 (2023) 2", + "50. Zheng, C., Zhu, S., Mendieta, M., Yang, T., Chen, C., Ding, Z.: 3d human pose estimation with spatial and temporal transformers. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 11656-11665 (2021) 2, 4, 9, 10, 11", + "51. Zhu, W., Ma, X., Liu, Z., Liu, L., Wu, W., Wang, Y.: Motionbert: A unified perspective on learning human motion representations. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 15085-15099 (2023) 4, 9" + ], + "bbox": [ + 215, + 146, + 785, + 383 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "K. Zheng et al.", + "bbox": [ + 271, + 114, + 374, + 128 + ], + "page_idx": 17 + } +] \ No newline at end of file diff --git a/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/38d38aa6-38d5-486e-8643-4f15fed7d372_model.json b/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/38d38aa6-38d5-486e-8643-4f15fed7d372_model.json new file mode 100644 index 0000000000000000000000000000000000000000..dbdc0fd6f18af73fd69b516a7d05a6f0af47b77b --- /dev/null +++ b/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/38d38aa6-38d5-486e-8643-4f15fed7d372_model.json @@ -0,0 +1,2183 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.25, + 0.141, + 0.756, + 0.185 + ], + "angle": 0, + "content": "3D Human Pose Estimation via Non-Causal Retentive Networks" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.212, + 0.761, + 0.245 + ], + "angle": 0, + "content": "Kaili Zheng\\(^{1}\\), Feixiang Lu\\(^{2}\\), Yihao Lv\\(^{2}\\), Liangjun Zhang\\(^{2}\\), Chenyi Guo\\(^{1\\boxtimes}\\), and Ji Wu\\(^{1,3,4\\boxtimes}\\)" + }, + { + "type": "text", + "bbox": [ + 0.297, + 0.255, + 0.707, + 0.281 + ], + "angle": 0, + "content": "\\(^{1}\\) Department of Electronic Engineering, Tsinghua University \\(^{2}\\) Baidu Research" + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.282, + 0.754, + 0.325 + ], + "angle": 0, + "content": "3 College of AI, Tsinghua University \n4 Beijing National Research Center for Information Science and Technology \nzk122@mails.tsinghua.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.377, + 0.744, + 0.599 + ], + "angle": 0, + "content": "Abstract. Temporal dependencies are essential in 3D human pose estimation to mitigate depth ambiguity. Previous methods typically use a fixed-length sliding window to capture these dependencies. However, they treat past and future frames equally, ignoring the fact that relying on too many future frames increases the inference latency. In this paper, we present a 3D human pose estimation model based on Retentive Networks (RetNet) that incorporates temporal information by utilizing a large number of past frames and a few future frames. The Non-Causal RetNet (NC-RetNet) is designed to allow the originally causal RetNet to be aware of future information. Additionally, we propose a knowledge transfer strategy, i.e., training the model with a larger chunk size and using a smaller chunk size during inference, to reduce latency while maintaining comparable accuracy. 
Extensive experiments have been conducted on the Human3.6M and MPI-INF-3DHP datasets, and the results demonstrate that our method achieves state-of-the-art performance. Code and models are available at https://github.com/Kelly510/PoseRetNet." + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.611, + 0.741, + 0.639 + ], + "angle": 0, + "content": "Keywords: 3D Human Pose Estimation \\(\\cdot\\) Temporal Dependency \\(\\cdot\\) Retentive Networks" + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.666, + 0.377, + 0.683 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.696, + 0.788, + 0.817 + ], + "angle": 0, + "content": "Monocular 3D Human Pose Estimation (HPE) aims to reconstruct the 3D positions of human body joints based on monocular observations. This popular computer vision task has a wide range of applications, including action recognition [44], human-robot interaction [37] and motion analysis [11]. Most of the previous works [2,22,39-41,45,48] adopt the 2D-to-3D lifting pipeline which predicts 3D human pose based on 2D keypoint detection results. It is challenging due to the depth ambiguity issue, namely, one 2D detection result may correspond to multiple 3D human skeletons." + }, + { + "type": "page_footnote", + "bbox": [ + 0.216, + 0.825, + 0.434, + 0.84 + ], + "angle": 0, + "content": "\\(\\boxtimes\\) denotes corresponding author." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.374, + 0.129 + ], + "angle": 0, + "content": "K. Zheng et al." + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.147, + 0.788, + 0.313 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.328, + 0.788, + 0.412 + ], + "angle": 0, + "content": "Fig. 1: (Left) The framework of our method, which utilizes long-term historical information from the cross-chunk state and relies on only a few future frames within the chunk. The past, current, and future frames are denoted by blue, green, and red borders, respectively. (Right) Comparison of Mean Per-Joint Position Error (MPJPE) on the Human3.6M dataset under different test chunk sizes. Our method outperforms previous state-of-the-art remarkably, especially under small chunk sizes." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.446, + 0.789, + 0.702 + ], + "angle": 0, + "content": "To mitigate the depth ambiguity, monocular 3D human pose estimation models usually take multiple frames as the input and exploit additional temporal dependencies of human pose to reduce the ambiguity [1, 27, 29, 43, 46, 49, 50]. Specifically, a sliding-window of fixed length is usually adopted to capture the temporal dependencies, where the length of window is referred to as the number of frames or chunk size. A larger chunk size typically results in better accuracy performance as it allows for the perception of more long-range temporal information. However, previous methods treat past and future frames equally, and a larger chunk size also means that the model relies on the arrival of more future frames before inference, which significantly increases the inference latency. For instance, consider the seq2frame framework, which aims to predict the 3D pose of the center frame among the input frames. If the chunk size is 243 and the input frame rate is \\(10\\mathrm{Hz}\\), the inference latency will be \\((243 - 1)\\div 2\\div 10 = 12.1\\) seconds. 
For seq2seq framework in the same case, the inference latency for the first frame within the chunk is \\((243 - 1)\\div 10 = 24.2\\) seconds and that for the last frame is zero. The average latency is 12.1 seconds as well. This is considerably longer than the forward time of the model itself." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.705, + 0.789, + 0.84 + ], + "angle": 0, + "content": "To address this problem, we propose a 3D human pose estimation model based on Retentive Networks (RetNet) [35]. Fig. 1(left) illustrates the framework of our method. Different from previous methods that use similar amounts of past and future frames to incorporate temporal information, our method mainly extracts temporal information from past frames (blue) and uses only a few future frames (red) within the current chunk for refinement. The RetNet can easily capture long-term historical information by using the cross-chunk state, and the Non-Causal RetNet (NC-RetNet) is further designed to make the originally causal RetNet be aware of the future frames. Moreover, we develop a knowl" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.432, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "3D HPE via Non-Causal Retentive Networks" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.783, + 0.22 + ], + "angle": 0, + "content": "edge transfer strategy of training the model with a large chunk size and using a small chunk size during inference. Thanks to the long-term historical information brought by the cross-chunk state, decreasing the test chunk size does not significantly affect performance, as shown in Fig. 1(right), but greatly reduces the inference latency." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.222, + 0.784, + 0.327 + ], + "angle": 0, + "content": "Extensive experiments have been conducted on two datasets, Human3.6M [14] and MPI-INF-3DHP [23], both quantitatively and qualitatively. The results demonstrate that our method outperforms state-of-the-art with a clear margin in terms of accuracy and continuity, especially when the model infers with a small chunk size. Our method even surpasses state-of-the-art with a smaller chunk size during inference. The ablation study also validates the efficacy of the components in our method. Our main contributions can be summarized as follows." + }, + { + "type": "text", + "bbox": [ + 0.223, + 0.34, + 0.784, + 0.398 + ], + "angle": 0, + "content": "1. This is the first study to investigate the potential of RetNet in 3D human pose estimation. And we introduce NC-RetNet to extract temporal information, which leverages past frames through the cross-chunk state and a limited number of future frames within the chunk." + }, + { + "type": "text", + "bbox": [ + 0.222, + 0.4, + 0.785, + 0.443 + ], + "angle": 0, + "content": "2. The NC-RetNet can be trained using a large chunk size and infer using a small chunk size without significant performance deterioration, but with a notable decrease in inference latency." + }, + { + "type": "text", + "bbox": [ + 0.222, + 0.445, + 0.784, + 0.489 + ], + "angle": 0, + "content": "3. Extensive experiments have been conducted and the results demonstrate that our method is the state-of-the-art in terms of accuracy and continuity, especially when the test chunk size is small." 
+ }, + { + "type": "list", + "bbox": [ + 0.222, + 0.34, + 0.785, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.512, + 0.388, + 0.528 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.543, + 0.493, + 0.558 + ], + "angle": 0, + "content": "2.1 3D Human Pose Estimation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.568, + 0.785, + 0.764 + ], + "angle": 0, + "content": "Monocular 3D human pose estimation is a fundamental computer vision task with a broad range of applications. Direct estimation of the 3D positions of human joints from raw image pixels [26, 34] is difficult not only because of the complexity of extracting image features, but also due to the lack of image-3D data pairs. For these reasons, Martinez et al. [22] propose to estimate 3D human pose in a two-stage manner: detect 2D keypoints from images first and then lift 2D to 3D. Since this approach can utilize existing 2D pose estimation systems [3, 19, 25, 33, 42] and a large amount of 3D motion capture data, it has received a lot of attention. In this paper, we also focus on the 2D-to-3D lifting task. Although there are methods such as [48] propose to leverage visual cues only to mitigate depth ambiguity, these methods are unable to produce reconstructions with good continuity. Therefore, temporal dependencies are very crucial for monocular human pose estimation models." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.786, + 0.588, + 0.801 + ], + "angle": 0, + "content": "2.2 Exploitation of Temporal Dependencies" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.784, + 0.84 + ], + "angle": 0, + "content": "Previous methods mostly adopt four architectures to exploit temporal dependencies: CNN, RNN, GCN [15] and transformer [21]. For example, to model" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.374, + 0.129 + ], + "angle": 0, + "content": "K. Zheng et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.388 + ], + "angle": 0, + "content": "the temporal dependencies of human motion, Pavllo et al. [27] propose a temporal convolution model that utilizes dilated temporal convolutions to capture long-term information and model the temporal dependencies of human motion. The temporal receptive field depends on the dilation ratio and the number of layers. Similarly, Choi et al. [5] utilizes GRU [7] to extract features from the past frames and future frames within a fixed-length window respectively before integration. Cai et al. [1] exploit graph convolutions [15] to model the graph structure of different human joints. Along the time axis, this method treats the joints at different time steps as the graph nodes where any two consecutive joints are adjacent in the graph. Poseformer [50] proposed by Zheng et al. is the first work to introduce transformers to 3D human pose estimation task. This model incorporates the Spatial Transformer Module to encode the geometric structure of the human pose in a single frame into a token, and the Temporal Transformer Encoder to model temporal dependencies between frames. Since then, a lot of works [8,10,16-18,51] have emerged to explore the potential of transformers in 3D human pose estimation." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.389, + 0.788, + 0.51 + ], + "angle": 0, + "content": "Although these methods leverage different architectures to extract spatial-temporal information from 2D sequences, they share a common framework that employs a fixed number of frames to predict the result. Moreover, the chunk size has a significant impact on the accuracy, and a larger chunk size is usually beneficial for performance. However, previous works have not taken into account that a larger chunk size also significantly increases the inference delay. This motivates us to develop a method that balances the accuracy and inference latency better." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.531, + 0.55, + 0.545 + ], + "angle": 0, + "content": "2.3 Real-time Human Pose Estimation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.554, + 0.788, + 0.721 + ], + "angle": 0, + "content": "In addition to accuracy, low inference latency is also desired for human pose estimation models in many scenarios, and significant efforts have been devoted to reducing the inference latency. On one hand, since human pose estimation models typically use a backbone model to extract image features, general-purpose lightweight backbones [9,13,30,47], can be used directly to replace the backbone in HPE models [6]. On the other hand, simplifying the pipeline can improve the model's efficiency. For example, Vnect [24] is proposed to combine the bounding box detection, 2D keypoint detection, and 2D-to-3D lifting into one model. However, existing methods only focus on decreasing the forward time of the HPE models, but do not consider the inference latency caused by large chunk sizes, as our method does." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.741, + 0.442, + 0.758 + ], + "angle": 0, + "content": "2.4 Length Extrapolation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Models in natural language processing are expected to be generalizable across sequences of varying lengths, particularly to sequences longer than the training samples. This desired property is called length extrapolation. To achieve this, the use of relative position embedding, such as RoPE [32] and xPos [36], is necessary because it does not require the input sequences to be of fixed length." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.432, + 0.115, + 0.733, + 0.128 + ], + "angle": 0, + "content": "3D HPE via Non-Causal Retentive Networks" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.238 + ], + "angle": 0, + "content": "Additionally, there are methods [4, 28] to improve the format of the attention module to achieve length extrapolation. For example, ALIBI [28] proposes to subtract the absolute temporal distance of two tokens from the attention score, which enhances the performance on extremely long sequences. Our knowledge transfer strategy is similar to length extrapolation, except that we concentrate on the model's transition from large chunks to smaller ones." 
+ }, + { + "type": "title", + "bbox": [ + 0.216, + 0.261, + 0.331, + 0.277 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.293, + 0.361, + 0.308 + ], + "angle": 0, + "content": "3.1 Preliminary" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.318, + 0.784, + 0.395 + ], + "angle": 0, + "content": "RetNet [35] is a sequence modeling network that produces a contextualized feature sequence of length \\( L \\) given an input sequence \\( X \\in \\mathbb{R}^{L \\times d} \\). The basic module of RetNet is retention, which has three mathematically equivalent representations: parallel, recurrent, and chunkwise recurrent. We present a detailed explanation of these representations below." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.416, + 0.785, + 0.522 + ], + "angle": 0, + "content": "Parallel Given the input sequence \\( X \\), the query \\( Q \\) and key \\( K \\) are derived by applying the linear projection and RoPE. \\( P_{q}, P_{k} \\) are the rotary position embeddings for the query and the key respectively. The value \\( V \\) is obtained by the linear projection only. \\( D \\in \\mathbb{R}^{L \\times L} \\) is the combination of causal masking and exponential decay with respect to the relative distance. \\( \\odot \\) denotes the element-wise product. Since the value in the mask is non-zero only when the reference token \\( (m^{th}) \\) is earlier than the target token \\( (n^{th}) \\), RetNet is a fully causal model." + }, + { + "type": "equation", + "bbox": [ + 0.348, + 0.535, + 0.656, + 0.55 + ], + "angle": 0, + "content": "\\[\nQ = P_{q}\\left(X W_{Q}\\right), K = P_{k}\\left(X W_{K}\\right), V = X W_{V}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.348, + 0.554, + 0.785, + 0.593 + ], + "angle": 0, + "content": "\\[\nD_{nm} = \\left\\{ \\begin{array}{ll} \\gamma^{n-m}, & n \\geq m \\\\ 0, & n < m \\end{array} \\right. \\tag{1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.348, + 0.596, + 0.566, + 0.612 + ], + "angle": 0, + "content": "\\[\n\\operatorname{Retention}(X) = \\left(Q K^{T} \\odot D\\right) V\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.635, + 0.784, + 0.698 + ], + "angle": 0, + "content": "Recurrent \\( S_{n} \\in \\mathbb{R}^{d \\times d} \\) represents the state of time step \\( n \\), and \\( Q_{n}, K_{n}, V_{n} \\) are the values of the same \\( Q, K, V \\) in Eq. (1) at time step \\( n \\). This representation also shows that RetNet is entirely causal, as the output for the \\( n^{th} \\) frame depends solely on the previous state \\( S_{n-1} \\) and the \\( n^{th} \\) input." + }, + { + "type": "equation", + "bbox": [ + 0.375, + 0.708, + 0.785, + 0.732 + ], + "angle": 0, + "content": "\\[\nS_{n} = \\gamma S_{n-1} + K_{n}^{T} V_{n} \\tag{2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.375, + 0.73, + 0.63, + 0.743 + ], + "angle": 0, + "content": "\\[\n\\operatorname{Retention}\\left(X_{n}\\right) = Q_{n} S_{n}, n = 1, \\dots, L\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.785, + 0.841 + ], + "angle": 0, + "content": "Chunkwise Recurrent This representation is a hybrid of the two representations above. Suppose the input sequence is segmented into chunks of length \\( T \\). Denote \\( X_{iT:(i+1)T} \\) as \\( X_{[i]} \\), where \\( [i] \\) indicates the \\( i \\)-th chunk. Within the chunk, the model follows the parallel representation and the cross-chunk information is passed following the recurrent representation. The \\( D \\) here is similar" + },
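+ { + "type": "text", + "angle": 0, + "content": "Editor's note: a quick numerical sanity check (an illustrative sketch, not from the paper) that the recurrent form in Eq. (2) reproduces the parallel form in Eq. (1) when the position embedding is omitted; all dimensions and the value of gamma are arbitrary." + }, + { + "type": "code", + "angle": 0, + "content": "import torch

torch.manual_seed(0)
L, d, gamma = 8, 4, 0.9
Q, K, V = torch.randn(L, d), torch.randn(L, d), torch.randn(L, d)

n = torch.arange(L, dtype=torch.float32)
D = (gamma ** (n[:, None] - n[None, :])) * (n[:, None] >= n[None, :])
parallel = (Q @ K.T * D) @ V  # Eq. (1): causal, decayed attention

S, outs = torch.zeros(d, d), []
for t in range(L):  # Eq. (2): one frame at a time
    S = gamma * S + K[t:t + 1].T @ V[t:t + 1]
    outs.append(Q[t:t + 1] @ S)
assert torch.allclose(parallel, torch.cat(outs), atol=1e-5)" + }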
Within the chunk, the model follows the parallel representation, and the cross-chunk information is passed following the recurrent representation. The \\( D \\) here is similar" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.375, + 0.129 + ], + "angle": 0, + "content": "K. Zheng et al." + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.147, + 0.784, + 0.29 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.3, + 0.788, + 0.371 + ], + "angle": 0, + "content": "Fig. 2: (Left) The causal masks in the parallel and chunkwise recurrent representations of the original RetNet. The model can only perceive historical frames although there are several future frames in the current chunk. (Right) We propose Non-Causal RetNet (NC-RetNet), which utilizes all the frames within the current chunk using the full mask and can be trained in parallel with the staircase-shaped mask." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.399, + 0.789, + 0.448 + ], + "angle": 0, + "content": "to that in Eq. (1), but its shape changes from \\( L \\times L \\) to \\( T \\times T \\). The \\( D \\)'s in Eq. (1) and Eq. (3) are illustrated in Fig. 2(left). \\( \\zeta \\) and \\( \\xi \\) are both \\( T \\times d \\) matrices, whose \\( r \\)-th rows are \\( \\gamma^{T - r - 1} \\) and \\( \\gamma^{r + 1} \\) respectively." + }, + { + "type": "equation", + "bbox": [ + 0.313, + 0.455, + 0.518, + 0.478 + ], + "angle": 0, + "content": "\\[\nS _ {i} = K _ {[ i ]} ^ {T} (V _ {[ i ]} \\odot \\zeta) + \\gamma^ {T} S _ {i - 1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.313, + 0.478, + 0.787, + 0.517 + ], + "angle": 0, + "content": "\\[\n\\operatorname {Retention} \\left(X _ {[ i ]}\\right) = \\underbrace {\\left(Q _ {[ i ]} K _ {[ i ]} ^ {T} \\odot D\\right) V _ {[ i ]}} _ {\\text {Inner-Chunk}} + \\underbrace {\\left(Q _ {[ i ]} S _ {i - 1}\\right) \\odot \\xi} _ {\\text {Cross-Chunk}} \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.522, + 0.788, + 0.597 + ], + "angle": 0, + "content": "Since low inference latency is required in real-time scenarios, the parallel representation is not suitable. Moreover, the recurrent representation is the special case of the chunkwise recurrent representation when \\( T = 1 \\). Therefore, we focus on the chunkwise recurrent representation of RetNet to design our human pose estimation model." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.619, + 0.427, + 0.633 + ], + "angle": 0, + "content": "3.2 Non-Causal RetNet" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.644, + 0.788, + 0.749 + ], + "angle": 0, + "content": "Although the chunkwise recurrent representation of RetNet processes the input sequence chunk by chunk, it does not utilize all the information in the current chunk. As shown in Fig. 2(left), the masking in the chunkwise recurrent representation is a lower triangular matrix. This means that the estimation of the current frame only uses the frames before it, disregarding the future frames within the chunk. However, leveraging certain future information can be very helpful for the accuracy of human pose estimation models."
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.788, + 0.843 + ], + "angle": 0, + "content": "To solve this problem, we modify the causal masking in RetNet to exploit all the information within the current chunk and propose Non-Causal RetNet (NC-RetNet). Formally, the new \\( D \\) in the chunkwise recurrent representation is given by Eq. (4). The new masking is a full matrix instead of a lower triangular matrix. When predicting the 3D pose of the \\( n^{th} \\) frame, we can calculate the exponential decay of both past frames \\( (m < n) \\) and future frames \\( (m > n) \\)" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.432, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "3D HPE via Non-Causal Retentive Networks" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.235 + ], + "angle": 0, + "content": "within the chunk by using the absolute distance \\( |n - m| \\) between the two frames. The mathematical expression of the new \\( D \\) matrix in the parallel representation can be found in the Supp. Mat. The chunk size, denoted by \\( T \\), can be adjusted to balance the accuracy and inference latency. The larger \\( T \\) is, the more future information can be perceived by the model, but the longer the inference latency will be." + }, + { + "type": "equation", + "bbox": [ + 0.351, + 0.238, + 0.786, + 0.257 + ], + "angle": 0, + "content": "\\[\nD = \\left\\{D _ {n m} \\right\\} = \\left\\{\\gamma^ {| n - m |} \\right\\}, n, m \\in \\{1, \\dots , T \\} \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.264, + 0.785, + 0.323 + ], + "angle": 0, + "content": "Fig. 2(right) illustrates the masks in the parallel and chunkwise recurrent representations of this non-causal retention. Note that the model can also be trained in parallel by using the staircase-shaped mask, but it does not have the recurrent representation unless \\( T = 1 \\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.325, + 0.785, + 0.415 + ], + "angle": 0, + "content": "By using the non-causal masking, NC-RetNet exploits temporal dependencies from the cross-chunk state, which provides long-term historical information, and from a few future frames, which provide some future information. Therefore, the temporal receptive field of our method is not limited by the chunk size. In fact, the chunk size in our method only affects the amount of future information, while historical information is always adequate due to the cross-chunk state." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.44, + 0.593, + 0.456 + ], + "angle": 0, + "content": "3.3 Transfer Knowledge from Large Chunks" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.468, + 0.789, + 0.634 + ], + "angle": 0, + "content": "We further develop a strategy for our NC-RetNet to improve its performance under small test chunk sizes, which is to train the model with a large chunk size and infer with a small chunk size. Since the model uses xPos, a relative position embedding, it is able to handle 2D sequences of different lengths. In addition, the cross-chunk state \\( S_{i} \\) in the chunkwise recurrent representation is updated every chunk, containing a lot of information from previous chunks. With this long-term historical information, the model becomes insensitive to the length of future frames.
Therefore, using a smaller chunk size during inference does not significantly decrease accuracy, but greatly reduces inference latency. This indicates that some knowledge is transferred from the large chunks used in training to the small chunks used in inference." + }, + { + "type": "code_caption", + "bbox": [ + 0.218, + 0.663, + 0.503, + 0.679 + ], + "angle": 0, + "content": "Algorithm 1 Pseudo-code for training" + }, + { + "type": "algorithm", + "bbox": [ + 0.217, + 0.681, + 0.789, + 0.835 + ], + "angle": 0, + "content": "Input: Training dataloader, initialized model, training chunk size \\(T_{l}\\) \nOutput: model after training \nfor input_2d, target in dataloader do \n L = input_2d.size(1) # Total length of the input sequence \n D_parallel = get_D_parallel(L, \\(T_{l}\\)) # Get the staircase-shaped mask for parallel training given the total length and chunk size \n pred = model.forward_parallel(input_2d, D_parallel) \n loss = loss_func(pred, target) \n loss.backward() \n optimizer.step() \nend for" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.375, + 0.129 + ], + "angle": 0, + "content": "K. Zheng et al." + }, + { + "type": "code_caption", + "bbox": [ + 0.218, + 0.146, + 0.51, + 0.162 + ], + "angle": 0, + "content": "Algorithm 2 Pseudo-code for inference" + }, + { + "type": "algorithm", + "bbox": [ + 0.217, + 0.165, + 0.787, + 0.347 + ], + "angle": 0, + "content": "Input: 2D stream, trained model, test chunk size \\(T_{s}\\) \nOutput: 3D stream \\(\\{y_n\\}\\) \nD_chunkwise = get_D_chunkwise(\\(T_{s}\\)) # Get the full mask for chunkwise inference given the test chunk size \ns_n, x_n = None, [] \nfor x_i in stream do \n x_n.append(x_i) \n if len(x_n) == T_s then \n y_n, s_n = model.forward_chunkwise(x_n, D_chunkwise, s_n, n) \n x_n = [] \n output(y_n) # Output y_n every chunk for downstream task \n end if \nend for" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.376, + 0.789, + 0.604 + ], + "angle": 0, + "content": "The details of the training and testing are elaborated below. During training, we utilize RetNet's parallel representation to achieve training parallelism. We set the training chunk size to a large number \\( T_{l} \\) to capture long-term patterns of human motion. The training pseudo-code is shown in Algorithm 1. The parallel representation used during training implicitly incorporates the cross-chunk state. This means that the model can theoretically observe historical information over a long period of time as well as many future frames. During inference, the test chunk size \\( T_{s} \\) is set smaller than the training chunk size \\( T_{l} \\), and the chunkwise recurrent representation is used. The pseudo-code for inference is shown in Algorithm 2. Given a stream of 2D keypoints, the model processes the stream in the chunkwise recurrent representation every \\( T_{s} \\) frames, based on the current chunk x_n as well as an explicit cross-chunk state s_n. This cross-chunk state contains information about previous chunks and makes the model insensitive to the number of future frames. Therefore, although the chunk is smaller than the training chunks, the model can still extract stable temporal features.
+ }, + { + "type": "title", + "bbox": [ + 0.216, + 0.624, + 0.457, + 0.64 + ], + "angle": 0, + "content": "3.4 Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.648, + 0.788, + 0.754 + ], + "angle": 0, + "content": "We implement our idea based on the state-of-the-art seq2seq method, MixSTE [46], by replacing its temporal encoder with RetNet. Since the movement of distal joints is more erratic than that of torso joints, the estimation of these distal joints should rely on more local temporal information. Therefore, we assign different decay coefficients to different human joints, which is referred to as joint-related decay coefficients. The chunkwise recurrent representation of it can be formulated in Eq. (5), where \\( p \\) is the index of human joints." + }, + { + "type": "equation", + "bbox": [ + 0.331, + 0.763, + 0.586, + 0.784 + ], + "angle": 0, + "content": "\\[\nS _ {i, p} = K _ {[ i ], p} ^ {T} \\left(V _ {[ i ], p} \\odot \\zeta_ {p}\\right) + \\gamma_ {p} ^ {T} S _ {i - 1, p}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.331, + 0.785, + 0.786, + 0.811 + ], + "angle": 0, + "content": "\\[\nI n n e r _ {i, p} = \\left(Q _ {[ i ], p} K _ {[ i ], p} ^ {T} \\odot D _ {p}\\right) V _ {[ i ], p} \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.331, + 0.808, + 0.546, + 0.825 + ], + "angle": 0, + "content": "\\[\nC r o s s _ {i, p} = \\left(Q _ {[ i ], p} S _ {i - 1, p}\\right) \\odot \\xi_ {p}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.331, + 0.826, + 0.67, + 0.844 + ], + "angle": 0, + "content": "\\[\n\\operatorname {R e t e n t i o n} \\left(X _ {[ i ]}\\right) = \\operatorname {C o n c a t} \\left(\\operatorname {I n n e r} _ {i, p} + \\operatorname {C r o s s} _ {i, p}\\right)\n\\]" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.432, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "3D HPE via Non-Causal Retentive Networks" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.236 + ], + "angle": 0, + "content": "The loss function and training strategies are the same as in MixSTE. The training chunk size is 243 on the Human3.6M dataset and 81 on the MPI-INF-3DHP dataset, and then we test the model with different chunk sizes to get the results. The overall architecture of our model is given in the Supp. Mat. We also implement our idea on MotionBERT [51] and the results can be found in the Supp. Mat." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.26, + 0.376, + 0.276 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.29, + 0.547, + 0.305 + ], + "angle": 0, + "content": "4.1 Datasets and Evaluation Protocols" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.313, + 0.788, + 0.542 + ], + "angle": 0, + "content": "Experiments are conducted on two human pose estimation datasets: Human3.6M [14] and MPI-INF-3DHP [23]. Human3.6M is the most widely used indoor dataset for single-person 3D human pose estimation, containing about 3.6 million images collected from 11 professional actors. Following the common practice [2,27,46,50], we use the samples of S1, S5, S6, S7, S8 for training and evaluate on S9 and S11 subjects. Mean Per-Joint Position Error (MPJPE) and Procrustes-Aligned MPJPE (PA-MPJPE) are evaluated on this dataset. 
We also report the Mean Per-Joint Velocity Error (MPJVE) results, which reflect the continuity of the predicted results. MPI-INF-3DHP is a more challenging 3D human pose estimation dataset because it includes both indoor and outdoor scenes. The samples are collected from 8 subjects, each performing 8 actions. The test set consists of 6 subjects in different scenes. We follow the setup in [2,39,46,50]. For the MPI-INF-3DHP dataset, we report the results of MPJPE, Percentage of Correct Keypoints (PCK) within the \\(150\\mathrm{mm}\\) range, and Area Under Curve (AUC), following [18,38,50]." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.561, + 0.472, + 0.576 + ], + "angle": 0, + "content": "4.2 Quantitative Comparison" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.584, + 0.788, + 0.764 + ], + "angle": 0, + "content": "Results on Human3.6M We first use the 2D keypoints detected by CPN [3] as the input, and the results are shown in Tab. 1. It can be seen that our method achieves comparable performance with the state-of-the-art when the chunk size for inference is large (\\(T = 243\\)). Moreover, our method outperforms previous methods by a clear margin when the chunk size is small (\\(T = 27\\), 81), as the accuracy of our method only decreases slightly when the chunk size is reduced. Furthermore, the MPJPE at \\(T = 27\\) is comparable to the previous state-of-the-art method at \\(T = 81\\) (42.1 mm vs. 42.0 mm). This indicates that our method can provide similarly accurate predictions with much lower inference latency. Additionally, our method exhibits significant improvement in continuity compared with previous methods, with a 0.2 mm per frame improvement on the MPJVE metric." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.766, + 0.788, + 0.84 + ], + "angle": 0, + "content": "We further use the ground truth 2D keypoints of the Human3.6M dataset as input to test the upper bound of our method, as shown in Tab. 2. The results indicate that using the model trained with 243 frames to infer with a chunk size of 81 improves the MPJPE metric by \\(2.3\\mathrm{mm}\\) compared to the previous state-of-the-art \\((22.4\\mathrm{mm}\\) vs. \\(25.7\\mathrm{mm})\\). Moreover, the MPJPE of our method at \\(T = 27\\)" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.374, + 0.129 + ], + "angle": 0, + "content": "K. Zheng et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.783, + 0.193 + ], + "angle": 0, + "content": "is remarkably lower than that of previous methods at \\( T = 81 \\), demonstrating our method's efficient utilization of transferred knowledge to achieve higher accuracy with lower inference latency." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.22, + 0.787, + 0.25 + ], + "angle": 0, + "content": "Table 1: Comparison of MPJPE, PA-MPJPE and MPJVE on the Human3.6M dataset using 2D keypoints detected by CPN [3] as input. \\(T\\) is the chunk size when testing." + }, + { + "type": "table", + "bbox": [ + 0.219, + 0.26, + 0.787, + 0.445 + ], + "angle": 0, + "content": "
MPJPETDir.Disc.Eat.GreetPhonePhotoPosePurch.SitSitD.SmokeWaitWalkD.WalkWalkT.Average
MixSTE [46]27---------------45.1
STCFormer [38]2740.744.641.241.945.853.741.540.955.963.844.641.544.729.530.844.1
Ours2738.041.540.040.044.151.339.841.753.158.343.539.842.028.429.642.1
Anatomy [2]8142.143.841.043.846.153.542.443.153.960.545.742.146.232.233.844.6
PoseFormer [50]8141.544.839.842.546.551.642.142.053.360.745.543.346.131.832.244.3
Xue et al. [43]8142.145.340.942.945.452.742.642.555.361.844.941.744.929.930.844.2
P-STMO [31]8141.744.541.042.946.051.342.841.354.961.845.142.843.830.830.744.1
MixSTE [46]8139.843.038.640.143.450.640.641.452.256.743.840.843.929.430.342.4
STCFormer [38]8140.643.038.340.243.552.640.340.151.857.742.839.842.328.029.542.0
Ours8136.940.539.038.643.349.638.840.252.656.542.638.840.526.828.440.9
VideoPose3D [27]24345.246.743.345.648.155.144.644.357.365.847.144.049.032.833.946.8
Anatomy [2]24341.443.540.142.946.651.941.742.353.960.245.441.746.031.532.744.1
Xue et al. [43]24339.942.740.342.345.052.840.439.356.961.244.141.342.828.429.343.1
MHFormer [18]35139.243.140.140.944.951.240.641.353.560.343.741.143.829.830.643.0
P-STMO [31]24338.942.740.441.145.649.740.939.955.559.444.942.242.729.429.442.8
MixSTE [46]24337.640.937.339.742.349.940.139.851.755.042.139.841.027.927.940.9
STCFormer [38]24338.441.236.838.042.750.538.738.252.556.841.838.440.226.227.740.5
Ours24336.940.138.738.342.948.638.240.052.555.442.338.739.726.227.840.4
" + }, + { + "type": "table", + "bbox": [ + 0.22, + 0.451, + 0.787, + 0.607 + ], + "angle": 0, + "content": "
PA-MPJPETDir.Disc.Eat.GreetPhonePhotoPosePurch.SitSitD.SmokeWaitWalkD.WalkWalkT.Average
STCFormer [38]2731.935.132.734.134.941.332.131.645.050.636.031.735.523.625.134.8
Ours2731.733.932.333.335.239.131.031.944.048.736.031.034.623.024.834.0
Anatomy [2]8133.135.333.435.936.141.732.833.342.649.437.032.736.525.527.935.6
PoseFormer [50]8132.534.832.634.635.339.532.132.042.848.534.832.435.324.526.034.6
Xue et al. [43]8131.635.532.334.235.140.332.332.344.549.635.831.635.023.724.734.6
MixSTE [46]8132.034.231.733.734.439.232.031.842.946.935.532.034.423.625.233.9
STCFormer [38]8130.433.831.131.733.539.530.830.041.845.834.330.132.821.923.432.7
Ours8130.533.131.431.633.038.429.830.643.645.434.430.332.421.522.232.6
VideoPose3D [27]24334.136.134.437.236.442.234.433.645.052.537.433.837.825.627.336.5
Anatomy [2]24332.635.132.835.436.340.432.432.342.749.036.832.436.024.926.535.0
Xue et al. [43]24331.234.131.933.833.939.531.630.045.448.135.031.133.522.423.633.7
P-STMO [31]24331.335.232.933.935.439.332.531.544.648.236.332.934.423.823.934.4
MixSTE [46]24330.833.130.331.833.139.131.130.542.544.534.030.832.722.122.932.6
STCFormer [38]24329.333.030.730.632.738.229.728.842.245.033.329.431.520.922.331.8
Ours24330.833.131.331.833.437.730.130.543.445.534.330.331.521.422.732.5
" + }, + { + "type": "table", + "bbox": [ + 0.22, + 0.613, + 0.787, + 0.692 + ], + "angle": 0, + "content": "
MPJVETDir.Disc.Eat.GreetPhonePhotoPosePurch.SitSitD.SmokeWaitWalkD.WalkWalkT.Average
VideoPose3D [27]2433.03.12.23.42.32.72.73.12.12.92.32.43.73.12.82.8
Anatomy [2]2432.72.82.03.12.02.42.42.81.82.42.02.13.42.72.42.5
PoseFormer [50]813.23.42.63.62.63.02.93.22.63.32.72.73.83.22.93.1
StridedFormer [17]3512.42.51.82.81.82.22.22.51.52.01.81.93.22.52.12.2
MixSTE [46]2432.52.71.92.81.92.22.32.61.62.21.92.03.12.62.22.3
Ours812.32.41.82.61.72.12.12.51.52.11.81.93.02.42.02.2
Ours2432.32.41.82.61.72.12.12.51.52.11.81.93.02.42.02.0
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.749, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Results on MPI-INF-3DHP The results on the MPI-INF-3DHP datasets are shown in Tab. 3. An improvement of \\(0.9\\mathrm{mm}\\) on the MPJPE metric is achieved at \\(T = 81\\), and the improvement becomes more remarkable as the chunk size decreases. In particular, our method outperforms previous methods very significantly with an improvement of \\(4.1\\mathrm{mm}\\) on the MPJPE metric when \\(T\\) is 9. Similar to the phenomenon on the Human3.6M dataset, our method is" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.432, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "3D HPE via Non-Causal Retentive Networks" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.785, + 0.173 + ], + "angle": 0, + "content": "Table 2: Comparison of MPJPE on the Human3.6M dataset using 2D ground truth keypoints as input." + }, + { + "type": "table", + "bbox": [ + 0.22, + 0.185, + 0.787, + 0.315 + ], + "angle": 0, + "content": "
MPJPETDir.Disc.Eat.GreetPhonePhotoPosePurch.SitSitD.SmokeWaitWalkD.WalkWalkT.Average
Ours2723.724.823.524.423.628.127.225.326.727.925.023.523.617.218.724.2
PoseFormer [50]8130.033.629.931.030.233.334.831.437.838.631.731.529.023.323.131.3
Xue et al. [43]8127.628.824.925.726.730.630.826.435.832.727.126.225.619.220.627.2
MixSTE [46]8125.627.824.525.724.929.928.627.429.929.026.125.025.218.719.925.9
STCFormer [38]8126.226.523.424.625.028.628.324.630.933.725.725.324.618.619.725.7
Ours8120.922.521.821.522.025.623.423.728.128.823.920.921.114.916.322.4
Xue et al. [43]24325.825.223.323.524.027.427.924.429.330.124.924.123.318.619.724.7
MHFormer [18]35127.732.129.128.930.033.933.031.237.039.330.031.029.422.223.030.5
P-STMO [31]24328.530.128.627.929.833.231.327.836.037.429.729.528.121.021.029.3
MixSTE [46]24321.622.020.421.020.824.324.721.926.924.921.221.520.814.715.721.6
STCFormer [38]24321.422.621.021.323.826.024.220.028.928.022.321.420.114.215.022.0
Ours24320.021.120.920.820.124.923.522.526.529.621.720.920.414.515.721.5
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.345, + 0.785, + 0.419 + ], + "angle": 0, + "content": "comparable to or better than previous methods with an even smaller chunk size. For example, the MPJPE of our method at \\( T = 27 \\) is better than STCFformer at \\( T = 81 \\) (22.7 mm vs. 23.1 mm). And the MPJPE of our method at \\( T = 9 \\) is similar to STCFformer at \\( T = 27 \\) (24.1 mm vs. 24.2 mm). These results show that our method generalizes well on different datasets." + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.446, + 0.785, + 0.473 + ], + "angle": 0, + "content": "Table 3: Comparison of quantitative results on the MPI-INF-3DHP dataset. \\(\\uparrow\\): higher is better. \\(\\downarrow\\): lower is better." + }, + { + "type": "table", + "bbox": [ + 0.35, + 0.486, + 0.645, + 0.727 + ], + "angle": 0, + "content": "
MethodTPCK↑AUC↑MPJPE↓
PoseFormer [50]988.656.477.1
CrossFormer [10]989.157.576.3
MHFormer [18]993.863.358.0
STCFormer [38]998.281.528.2
Ours998.983.324.1
Lin et al. [20]2583.651.479.8
MixSTE [46]2794.466.554.9
STCFormer [38]2798.483.424.2
Ours2799.184.122.7
UGCN [39]9686.962.168.1
Anatomy [2]8187.853.879.1
Hu et al. [12]9697.969.542.5
Einfalt et al. [8]8195.467.646.9
P-STMO [31]8197.975.832.2
STCFormer [38]8198.783.923.1
Ours8199.184.422.2
" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.77, + 0.421, + 0.785 + ], + "angle": 0, + "content": "4.3 Qualitative Results" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.785, + 0.84 + ], + "angle": 0, + "content": "Visualization on Continuity We compute the MPJVE of the results predicted by MixSTE, STCFormer and our method at different timesteps, and visualize the curves in Fig. 3. It can be seen that the MPJVE of our method is lower than" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.374, + 0.129 + ], + "angle": 0, + "content": "K. Zheng et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.284 + ], + "angle": 0, + "content": "that of previous methods. Our method captures temporal information using non-overlapping shift windows, similar to MixSTE. However, our method produces more continuous results at the edge between two chunks compared to MixSTE. MixSTE independently estimates two consecutive chunks, which results in a lack of continuity at the edge. In contrast, our method incorporates temporal information from previous chunks through the cross-chunk state, which improves the continuity. Compared to STCFoer, our method generally produces more continuous results. This is because our method generates multiple frames each time, allowing for the continuity constraints to the output." + }, + { + "type": "image", + "bbox": [ + 0.303, + 0.311, + 0.7, + 0.435 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.447, + 0.788, + 0.474 + ], + "angle": 0, + "content": "Fig. 3: Comparison of the MPJVE curves over time between MixSTE, STCFormer and our method." + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.536, + 0.782, + 0.768 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.78, + 0.788, + 0.837 + ], + "angle": 0, + "content": "Fig. 4: Comparison of some visualization results predicted by MixSTE [46], STC-Former [38] and our method. The black skeletons are the ground truth, and the red skeletons are the predicted results. The comparison with MixSTE is shown in green circles, while the comparison with STC-Former is shown in blue circles." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.432, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "3D HPE via Non-Causal Retentive Networks" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.222 + ], + "angle": 0, + "content": "Visualization of Results We present some visualization examples in Fig. 4, where the results are predicted by MixSTE [46], STCFoer [38] and our method, respectively. It can be seen that our method predicts more accurate results, and the improvement is visually obvious. More visualization results can be found in the Supp. Mat." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.247, + 0.39, + 0.263 + ], + "angle": 0, + "content": "4.4 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.273, + 0.789, + 0.41 + ], + "angle": 0, + "content": "Ablations on Knowledge Transfer The knowledge transferred from large training chunks to smaller test chunks plays an important role in our method. 
To demonstrate this, we train the model with chunk sizes of 27 and 81, respectively, and compare the performance of these models with that of the model trained with \\( T = 243 \\). The results are shown in Tab. 4 (\\( 2^{nd} \\) to \\( 4^{th} \\) rows). It can be seen that, compared with the models trained with \\( T = 27 \\) and 81, using the model trained with a larger chunk size (\\( T = 243 \\)) for inference is significantly better. This indicates that the knowledge learned with large chunks is useful for reasoning about small chunks." + }, + { + "type": "table_caption", + "bbox": [ + 0.252, + 0.436, + 0.747, + 0.451 + ], + "angle": 0, + "content": "Table 4: Comparison of different methods in terms of knowledge transfer." + }, + { + "type": "table", + "bbox": [ + 0.25, + 0.463, + 0.749, + 0.622 + ], + "angle": 0, + "content": "
MethodTrain TTest T = 27Test T = 81Test T = 243
Previous SOTASame as test T44.142.040.5
Ours2743.7--
Ours8143.041.9-
Ours24342.140.940.4
MixSTE w/ xPos2745.3--
MixSTE w/ xPos8147.042.6-
MixSTE w/ xPos24348.844.141.1
Ours w/o state2746.3--
Ours w/o state8149.144.0-
Ours w/o state24354.249.842.5
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.674, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Effect of Cross-Chunk State We compare our methods with two baselines that do not use the cross-chunk state: the MixSTE model with xPos as the position embedding, and the model based on RetNet but without the cross-chunk state. These two baselines are able to handle sequences of different lengths, but can only use within-chunk information. The results are shown in Tab. 4 (bottom six rows). It can be seen that the two baselines without long-term historical information deteriorate rapidly as the gap between the training and test chunk sizes increases. This means that they cannot efficiently transfer knowledge from large chunks to small chunks. Therefore, the cross-chunk state is essential for knowledge transfer in our method, and our NC-RetNet is the first method to have this knowledge transfer property." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.374, + 0.129 + ], + "angle": 0, + "content": "K. Zheng et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.269 + ], + "angle": 0, + "content": "Comparison of Computational Cost The comparison of the model parameters and computational cost of our method and previous methods is as shown in Tab. 5. For seq2seq methods, the FLOPs are averaged over the number of frames, since the prediction of a single inference yields results over multiple frames. It can be seen that our modification of MixSTE does not bring any increase in the model parameters or FLOPs. And compared to STCFormer [38], which has comparable performance to our method at \\( T = 243 \\), the computational cost of our method is much lower (430 M vs. 78107 M)." + }, + { + "type": "table_caption", + "bbox": [ + 0.215, + 0.297, + 0.789, + 0.328 + ], + "angle": 0, + "content": "Table 5: Comparison of model parameters, computational cost. FLOPs for seq2seq and our methods is averaged over the number of output frames, as is done in [46]." + }, + { + "type": "table", + "bbox": [ + 0.29, + 0.338, + 0.71, + 0.442 + ], + "angle": 0, + "content": "
MethodParams (M)FLOPs (M)MPJPE (T=243)
StridedFormer [17]4.2137244.0
P-STMO [31]6.7173742.8
MHFormer [18]24.7481243.2
MixSTE [46]33.657240.9
STCFormer [38]18.97810740.5
Ours25.243040.4
" + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.5, + 0.36, + 0.516 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.539, + 0.788, + 0.706 + ], + "angle": 0, + "content": "In this paper, we propose the first 3D human pose estimation model based on Retentive Networks, NC-RetNet. By using the non-causal masking, it effectively leverages a large number of past frames and a limited number of future frames to incorporate temporal information. Furthermore, we introduce a knowledge transfer strategy that involves training the model with a larger chunk size and using a smaller chunk size during inference, resulting in reduced inference latency without too much loss in accuracy. Through extensive experiments on the Human3.6M and MPI-INF-3DHP datasets, our approach has demonstrated state-of-the-art performance even with a smaller test chunk size. In conclusion, our method achieves a good balance between high accuracy and low inference latency, making it suitable for real-time scenarios." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.735, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Limitations Admittedly, there are two limitations in our work. Firstly, the fundamental theory behind our method's ability to transfer knowledge is unclear, despite our study of the effect of the cross-chunk state. Secondly, we have only tested our method in the 2D-to-3D lifting task. However, the idea of transferring knowledge from large chunks to smaller chunks is universal to many sequential data in computer vision. Further work is required to explain the theory and explore more applications." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.432, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "3D HPE via Non-Causal Retentive Networks" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.145, + 0.323, + 0.16 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.18, + 0.786, + 0.235 + ], + "angle": 0, + "content": "1. Cai, Y., Ge, L., Liu, J., Cai, J., Cham, T.J., Yuan, J., Thalmann, N.M.: Exploiting spatial-temporal relationships for 3d pose estimation via graph convolutional networks. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 2272-2281 (2019) 2, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.236, + 0.786, + 0.278 + ], + "angle": 0, + "content": "2. Chen, T., Fang, C., Shen, X., Zhu, Y., Chen, Z., Luo, J.: Anatomy-aware 3d human pose estimation with bone-based pose decomposition. IEEE Transactions on Circuits and Systems for Video Technology 32(1), 198-209 (2021) 1, 9, 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.279, + 0.786, + 0.321 + ], + "angle": 0, + "content": "3. Chen, Y., Wang, Z., Peng, Y., Zhang, Z., Yu, G., Sun, J.: Cascaded pyramid network for multi-person pose estimation. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 7103-7112 (2018) 3, 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.321, + 0.786, + 0.363 + ], + "angle": 0, + "content": "4. Chi, T.C., Fan, T.H., Ramadge, P.J., Rudnicky, A.: Kerple: Kernelized relative positional embedding for length extrapolation. 
Advances in Neural Information Processing Systems 35, 8386-8399 (2022) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.364, + 0.786, + 0.418 + ], + "angle": 0, + "content": "5. Choi, H., Moon, G., Chang, J.Y., Lee, K.M.: Beyond static features for temporally consistent 3d human pose and shape from a video. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 1964-1973 (2021) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.42, + 0.786, + 0.461 + ], + "angle": 0, + "content": "6. Choi, S., Choi, S., Kim, C.: Mobilehumanpose: Toward real-time 3d human pose estimation in mobile devices. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 2328-2338 (2021) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.462, + 0.786, + 0.502 + ], + "angle": 0, + "content": "7. Chung, J., Gulcehre, C., Cho, K., Bengio, Y.: Empirical evaluation of gated recurrent neural networks on sequence modeling. arXiv preprint arXiv:1412.3555 (2014) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.504, + 0.786, + 0.557 + ], + "angle": 0, + "content": "8. Einfalt, M., Ludwig, K., Lienhart, R.: Uplift and upsample: Efficient 3d human pose estimation with uplifting transformers. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 2903-2913 (2023) 4, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.56, + 0.786, + 0.602 + ], + "angle": 0, + "content": "9. Han, K., Wang, Y., Tian, Q., Guo, J., Xu, C., Xu, C.: Ghostnet: More features from cheap operations. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 1580-1589 (2020) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.603, + 0.786, + 0.644 + ], + "angle": 0, + "content": "10. Hassanin, M., Khamiss, A., Bennamoun, M., Boussaid, F., Radwan, I.: Crossformer: Cross spatio-temporal transformer for 3d human pose estimation. arXiv preprint arXiv:2203.13387 (2022) 4, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.645, + 0.786, + 0.699 + ], + "angle": 0, + "content": "11. Hesse, N., Schröder, A.S., Müller-Felber, W., Bodensteiner, C., Arens, M., Hofmann, U.G.: Body pose estimation in depth images for infant motion analysis. In: 2017 39th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC). pp. 1909-1912. IEEE (2017) 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.701, + 0.786, + 0.742 + ], + "angle": 0, + "content": "12. Hu, W., Zhang, C., Zhan, F., Zhang, L., Wong, T.T.: Conditional directed graph convolution for 3d human pose estimation. In: Proceedings of the 29th ACM International Conference on Multimedia. pp. 602-611 (2021) 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.743, + 0.786, + 0.784 + ], + "angle": 0, + "content": "13. Iandola, F.N., Han, S., Moskewicz, M.W., Ashraf, K., Dally, W.J., Keutzer, K.: SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and \\(< 0.5\\) mb model size. arXiv preprint arXiv:1602.07360 (2016) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.785, + 0.786, + 0.84 + ], + "angle": 0, + "content": "14. Ionescu, C., Papava, D., Olaru, V., Sminchisescu, C.: Human3.6M: Large scale datasets and predictive methods for 3d human sensing in natural environments.
IEEE transactions on pattern analysis and machine intelligence 36(7), 1325-1339 (2013) 3, 9" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.18, + 0.786, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.374, + 0.128 + ], + "angle": 0, + "content": "K. Zheng et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.147, + 0.785, + 0.175 + ], + "angle": 0, + "content": "15. Kipf, T.N., Welling, M.: Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907 (2016) 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.177, + 0.787, + 0.232 + ], + "angle": 0, + "content": "16. Li, H., Shi, B., Dai, W., Zheng, H., Wang, B., Sun, Y., Guo, M., Li, C., Zou, J., Xiong, H.: Pose-oriented transformer with uncertainty-guided refinement for 2d-to-3d human pose estimation. In: Proceedings of the AAAI Conference on Artificial Intelligence. vol. 37, pp. 1296-1304 (2023) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.233, + 0.785, + 0.274 + ], + "angle": 0, + "content": "17. Li, W., Liu, H., Ding, R., Liu, M., Wang, P., Yang, W.: Exploiting temporal contexts with strided transformer for 3d human pose estimation. IEEE Transactions on Multimedia 25, 1282-1293 (2022) 4, 10, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.276, + 0.785, + 0.33 + ], + "angle": 0, + "content": "18. Li, W., Liu, H., Tang, H., Wang, P., Van Gool, L.: Mhformer: Multi-hypothesis transformer for 3d human pose estimation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13147-13156 (2022) 4, 9, 10, 11, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.332, + 0.785, + 0.386 + ], + "angle": 0, + "content": "19. Li, Y., Zhang, S., Wang, Z., Yang, S., Yang, W., Xia, S.T., Zhou, E.: Tokenpose: Learning keypoint tokens for human pose estimation. In: Proceedings of the IEEE/CVF International conference on computer vision. pp. 11313-11322 (2021) 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.388, + 0.785, + 0.416 + ], + "angle": 0, + "content": "20. Lin, J., Lee, G.H.: Trajectory space factorization for deep video-based 3d human pose estimation. arXiv preprint arXiv:1908.08289 (2019) 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.418, + 0.785, + 0.472 + ], + "angle": 0, + "content": "21. Liu, R., Shen, J., Wang, H., Chen, C., Cheung, S.c., Asari, V.: Attention mechanism exploits temporal contexts: Real-time 3d human pose reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5064-5073 (2020) 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.474, + 0.785, + 0.515 + ], + "angle": 0, + "content": "22. Martinez, J., Hossain, R., Romero, J., Little, J.J.: A simple yet effective baseline for 3d human pose estimation. In: Proceedings of the IEEE international conference on computer vision. pp. 2640-2649 (2017) 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.517, + 0.785, + 0.57 + ], + "angle": 0, + "content": "23. Mehta, D., Rhodin, H., Casas, D., Fua, P., Sotnychenko, O., Xu, W., Theobalt, C.: Monocular 3d human pose estimation in the wild using improved cnn supervision. In: 2017 international conference on 3D vision (3DV). pp. 506-516. 
IEEE (2017) 3, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.572, + 0.785, + 0.613 + ], + "angle": 0, + "content": "24. Mehta, D., Sridhar, S., Sotnychenko, O., Rhodin, H., Shafiei, M., Seidel, H.P., Xu, W., Casas, D., Theobalt, C.: Vnect: Real-time 3d human pose estimation with a single rgb camera. Acm transactions on graphics (tog) 36(4), 1-14 (2017) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.615, + 0.785, + 0.669 + ], + "angle": 0, + "content": "25. Newell, A., Yang, K., Deng, J.: Stacked hourglass networks for human pose estimation. In: Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part VIII 14. pp. 483-499. Springer (2016) 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.671, + 0.785, + 0.713 + ], + "angle": 0, + "content": "26. Pavlakos, G., Zhou, X., Derpanis, K.G., Daniilidis, K.: Coarse-to-fine volumetric prediction for single-image 3d human pose. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 7025-7034 (2017) 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.714, + 0.785, + 0.768 + ], + "angle": 0, + "content": "27. Pavllo, D., Feichtenhofer, C., Grangier, D., Auli, M.: 3d human pose estimation in video with temporal convolutions and semi-supervised training. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 7753-7762 (2019) 2, 4, 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.77, + 0.785, + 0.809 + ], + "angle": 0, + "content": "28. Press, O., Smith, N.A., Lewis, M.: Train short, test long: Attention with linear biases enables input length extrapolation. arXiv preprint arXiv:2108.12409 (2021) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.812, + 0.785, + 0.84 + ], + "angle": 0, + "content": "29. Rayat Imtiaz Hossain, M., Little, J.J.: Exploiting temporal information for 3d pose estimation. arXiv e-prints pp. arXiv-1711 (2017) 2" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.432, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3D HPE via Non-Causal Retentive Networks" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.189 + ], + "angle": 0, + "content": "30. Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., Chen, L.C.: Mobilenetv2: Inverted residuals and linear bottlenecks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 4510-4520 (2018) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.189, + 0.788, + 0.23 + ], + "angle": 0, + "content": "31. Shan, W., Liu, Z., Zhang, X., Wang, S., Ma, S., Gao, W.: P-stmo: Pre-trained spatial temporal many-to-one model for 3d human pose estimation. In: European Conference on Computer Vision. pp. 461-478. Springer (2022) 10, 11, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.23, + 0.787, + 0.257 + ], + "angle": 0, + "content": "32. Su, J., Lu, Y., Pan, S., Murtadha, A., Wen, B., Liu, Y.: Roformer: Enhanced transformer with rotary position embedding. arXiv preprint arXiv:2104.09864 (2021) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.257, + 0.787, + 0.284 + ], + "angle": 0, + "content": "33. 
Sun, K., Xiao, B., Liu, D., Wang, J.: Deep high-resolution representation learning for human pose estimation. In: CVPR (2019) 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.284, + 0.787, + 0.324 + ], + "angle": 0, + "content": "34. Sun, X., Shang, J., Liang, S., Wei, Y.: Compositional human pose regression. In: Proceedings of the IEEE international conference on computer vision. pp. 2602-2611 (2017) 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.324, + 0.787, + 0.365 + ], + "angle": 0, + "content": "35. Sun, Y., Dong, L., Huang, S., Ma, S., Xia, Y., Xue, J., Wang, J., Wei, F.: Retentive network: A successor to transformer for large language models. arXiv preprint arXiv:2307.08621 (2023) 2, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.365, + 0.787, + 0.405 + ], + "angle": 0, + "content": "36. Sun, Y., Dong, L., Patra, B., Ma, S., Huang, S., Benhaim, A., Chaudhary, V., Song, X., Wei, F.: A length-extrapolatable transformer. arXiv preprint arXiv:2212.10554 (2022) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.405, + 0.787, + 0.446 + ], + "angle": 0, + "content": "37. Svenstrup, M., Tranberg, S., Andersen, H.J., Bak, T.: Pose estimation and adaptive robot behaviour for human-robot interaction. In: 2009 IEEE International Conference on Robotics and Automation. pp. 3571-3576. IEEE (2009) 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.446, + 0.787, + 0.5 + ], + "angle": 0, + "content": "38. Tang, Z., Qiu, Z., Hao, Y., Hong, R., Yao, T.: 3d human pose estimation with spatio-temporal criss-cross attention. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4790-4799 (2023) 9, 10, 11, 12, 13, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.5, + 0.787, + 0.542 + ], + "angle": 0, + "content": "39. Wang, J., Yan, S., Xiong, Y., Lin, D.: Motion guided 3d pose estimation from videos. In: European Conference on Computer Vision. pp. 764-780. Springer (2020) 1, 9, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.541, + 0.787, + 0.582 + ], + "angle": 0, + "content": "40. Wehrbein, T., Rudolph, M., Rosenhahn, B., Wandt, B.: Probabilistic monocular 3d human pose estimation with normalizing flows. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11199-11208 (2021) 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.582, + 0.787, + 0.623 + ], + "angle": 0, + "content": "41. Xu, T., Takano, W.: Graph stacked hourglass networks for 3d human pose estimation. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 16105-16114 (2021) 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.622, + 0.787, + 0.663 + ], + "angle": 0, + "content": "42. Xu, Y., Zhang, J., Zhang, Q., Tao, D.: Vitpose: Simple vision transformer baselines for human pose estimation. Advances in Neural Information Processing Systems 35, 38571-38584 (2022) 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.663, + 0.787, + 0.704 + ], + "angle": 0, + "content": "43. Xue, Y., Chen, J., Gu, X., Ma, H., Ma, H.: Boosting monocular 3d human pose estimation with part aware attention. IEEE Transactions on Image Processing 31, 4278-4291 (2022) 2, 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.704, + 0.787, + 0.745 + ], + "angle": 0, + "content": "44. Yan, S., Xiong, Y., Lin, D.: Spatial temporal graph convolutional networks for skeleton-based action recognition. In: Proceedings of the AAAI conference on artificial intelligence. vol. 
32 (2018) 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.745, + 0.787, + 0.786 + ], + "angle": 0, + "content": "45. Zeng, A., Sun, X., Yang, L., Zhao, N., Liu, M., Xu, Q.: Learning skeletal graph neural networks for hard 3d pose estimation. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11436-11445 (2021) 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.786, + 0.787, + 0.84 + ], + "angle": 0, + "content": "46. Zhang, J., Tu, Z., Yang, J., Chen, Y., Yuan, J.: Mixste: Seq2seq mixed spatiotemporal encoder for 3d human pose estimation in video. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 13232-13242 (2022) 2, 8, 9, 10, 11, 12, 13, 14" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "18" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.375, + 0.129 + ], + "angle": 0, + "content": "K. Zheng et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.147, + 0.787, + 0.189 + ], + "angle": 0, + "content": "47. Zhang, X., Zhou, X., Lin, M., Sun, J.: Shufflenet: An extremely efficient convolutional neural network for mobile devices. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 6848-6856 (2018) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.19, + 0.787, + 0.231 + ], + "angle": 0, + "content": "48. Zhao, Q., Zheng, C., Liu, M., Chen, C.: A single 2d pose with context is worth hundreds for 3d human pose estimation. Advances in Neural Information Processing Systems 36 (2024) 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.232, + 0.787, + 0.285 + ], + "angle": 0, + "content": "49. Zhao, Q., Zheng, C., Liu, M., Wang, P., Chen, C.: Poseformerv2: Exploring frequency domain for efficient and robust 3d human pose estimation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8877-8886 (2023) 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.287, + 0.787, + 0.34 + ], + "angle": 0, + "content": "50. Zheng, C., Zhu, S., Mendieta, M., Yang, T., Chen, C., Ding, Z.: 3d human pose estimation with spatial and temporal transformers. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 11656-11665 (2021) 2, 4, 9, 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.342, + 0.787, + 0.384 + ], + "angle": 0, + "content": "51. Zhu, W., Ma, X., Liu, Z., Liu, L., Wu, W., Wang, Y.: Motionbert: A unified perspective on learning human motion representations. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 
15085-15099 (2023) 4, 9" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.384 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/38d38aa6-38d5-486e-8643-4f15fed7d372_origin.pdf b/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/38d38aa6-38d5-486e-8643-4f15fed7d372_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b591d95b5242055968da83f24f9325869ea86402 --- /dev/null +++ b/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/38d38aa6-38d5-486e-8643-4f15fed7d372_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:622b9ea710124caacffcf98b55d68f79aa32f087f7ca1bb7bb86a88af5356d30 +size 1230463 diff --git a/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/full.md b/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/full.md new file mode 100644 index 0000000000000000000000000000000000000000..b1e5872c0f40bbc18c47b1794b9a02a24602232d --- /dev/null +++ b/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/full.md @@ -0,0 +1,286 @@ +# 3D Human Pose Estimation via Non-Causal Retentive Networks + +Kaili Zheng $^{1}$ , Feixiang Lu $^{2}$ , Yihao Lv $^{2}$ , Liangjun Zhang $^{2}$ , Chenyi Guo $^{1\boxtimes}$ , and Ji Wu $^{1,3,4\boxtimes}$ + +$^{1}$ Department of Electronic Engineering, Tsinghua University $^{2}$ Baidu Research + +3 College of AI, Tsinghua University +4 Beijing National Research Center for Information Science and Technology +zk122@mails.tsinghua.edu.cn + +Abstract. Temporal dependencies are essential in 3D human pose estimation to mitigate depth ambiguity. Previous methods typically use a fixed-length sliding window to capture these dependencies. However, they treat past and future frames equally, ignoring the fact that relying on too many future frames increases the inference latency. In this paper, we present a 3D human pose estimation model based on Retentive Networks (RetNet) that incorporates temporal information by utilizing a large number of past frames and a few future frames. The Non-Causal RetNet (NC-RetNet) is designed to allow the originally causal RetNet to be aware of future information. Additionally, we propose a knowledge transfer strategy, i.e., training the model with a larger chunk size and using a smaller chunk size during inference, to reduce latency while maintaining comparable accuracy. Extensive experiments have been conducted on the Human3.6M and MPI-INF-3DHP datasets, and the results demonstrate that our method achieves state-of-the-art performance. Code and models are available at https://github.com/Kelly510/PoseRetNet. + +Keywords: 3D Human Pose Estimation $\cdot$ Temporal Dependency $\cdot$ Retentive Networks + +# 1 Introduction + +Monocular 3D Human Pose Estimation (HPE) aims to reconstruct the 3D positions of human body joints based on monocular observations. This popular computer vision task has a wide range of applications, including action recognition [44], human-robot interaction [37] and motion analysis [11]. Most of the previous works [2,22,39-41,45,48] adopt the 2D-to-3D lifting pipeline which predicts 3D human pose based on 2D keypoint detection results. It is challenging due to the depth ambiguity issue, namely, one 2D detection result may correspond to multiple 3D human skeletons. + +![](images/d43a6bc9dcd8f883044c0b3e11f2c13a09273476a056116aaf2462a2aebd94b8.jpg) +Fig. 
1: (Left) The framework of our method, which utilizes long-term historical information from the cross-chunk state and relies on only a few future frames within the chunk. The past, current, and future frames are denoted by blue, green, and red borders, respectively. (Right) Comparison of Mean Per-Joint Position Error (MPJPE) on the Human3.6M dataset under different test chunk sizes. Our method outperforms the previous state of the art remarkably, especially under small chunk sizes.

To mitigate the depth ambiguity, monocular 3D human pose estimation models usually take multiple frames as the input and exploit additional temporal dependencies of human pose to reduce the ambiguity [1, 27, 29, 43, 46, 49, 50]. Specifically, a sliding window of fixed length is usually adopted to capture the temporal dependencies, where the window length is referred to as the number of frames or chunk size. A larger chunk size typically results in better accuracy as it allows for the perception of more long-range temporal information. However, previous methods treat past and future frames equally, and a larger chunk size also means that the model relies on the arrival of more future frames before inference, which significantly increases the inference latency. For instance, consider the seq2frame framework, which aims to predict the 3D pose of the center frame among the input frames. If the chunk size is 243 and the input frame rate is $10\mathrm{Hz}$, the inference latency will be $(243 - 1)\div 2\div 10 = 12.1$ seconds. For the seq2seq framework in the same case, the inference latency for the first frame within the chunk is $(243 - 1)\div 10 = 24.2$ seconds and that for the last frame is zero. The average latency is 12.1 seconds as well. This is considerably longer than the forward time of the model itself.

To address this problem, we propose a 3D human pose estimation model based on Retentive Networks (RetNet) [35]. Fig. 1(left) illustrates the framework of our method. Different from previous methods that use similar amounts of past and future frames to incorporate temporal information, our method mainly extracts temporal information from past frames (blue) and uses only a few future frames (red) within the current chunk for refinement. The RetNet can easily capture long-term historical information by using the cross-chunk state, and the Non-Causal RetNet (NC-RetNet) is further designed to make the originally causal RetNet aware of the future frames. Moreover, we develop a knowledge transfer strategy of training the model with a large chunk size and using a small chunk size during inference. Thanks to the long-term historical information brought by the cross-chunk state, decreasing the test chunk size does not significantly affect performance, as shown in Fig. 1(right), but greatly reduces the inference latency.

Extensive experiments have been conducted on two datasets, Human3.6M [14] and MPI-INF-3DHP [23], both quantitatively and qualitatively. The results demonstrate that our method outperforms the state of the art by a clear margin in terms of accuracy and continuity, especially when the model infers with a small chunk size. Our method even surpasses the state of the art with a smaller chunk size during inference. The ablation study also validates the efficacy of the components in our method. Our main contributions can be summarized as follows.

1. This is the first study to investigate the potential of RetNet in 3D human pose estimation.
We introduce NC-RetNet to extract temporal information, leveraging past frames through the cross-chunk state and a limited number of future frames within the chunk.
2. NC-RetNet can be trained with a large chunk size and run inference with a small chunk size, without significant performance deterioration but with a notable decrease in inference latency.
3. Extensive experiments demonstrate that our method achieves state-of-the-art accuracy and continuity, especially when the test chunk size is small.

# 2 Related Work

# 2.1 3D Human Pose Estimation

Monocular 3D human pose estimation is a fundamental computer vision task with a broad range of applications. Direct estimation of the 3D positions of human joints from raw image pixels [26,34] is difficult not only because of the complexity of extracting image features, but also due to the lack of image-3D data pairs. For these reasons, Martinez et al. [22] propose to estimate 3D human pose in a two-stage manner: first detect 2D keypoints from images and then lift 2D to 3D. Since this approach can utilize existing 2D pose estimation systems [3,19,25,33,42] and a large amount of 3D motion capture data, it has received a lot of attention. In this paper, we also focus on the 2D-to-3D lifting task. Although some methods, such as [48], propose to mitigate depth ambiguity using visual cues alone, they are unable to produce reconstructions with good continuity. Therefore, temporal dependencies are crucial for monocular human pose estimation models.

# 2.2 Exploitation of Temporal Dependencies

Previous methods mostly adopt four architectures to exploit temporal dependencies: CNNs, RNNs, GCNs [15] and transformers [21]. For example, Pavllo et al. [27] propose a temporal convolution model that uses dilated temporal convolutions to capture long-term information and model the temporal dependencies of human motion; its temporal receptive field depends on the dilation ratio and the number of layers. Similarly, Choi et al. [5] utilize a GRU [7] to extract features from the past frames and the future frames within a fixed-length window, respectively, before integration. Cai et al. [1] exploit graph convolutions [15] to model the graph structure of different human joints; along the time axis, this method treats the joints at different time steps as graph nodes, where any two consecutive joints are adjacent in the graph. PoseFormer [50], proposed by Zheng et al., is the first work to introduce transformers to the 3D human pose estimation task; it incorporates a Spatial Transformer Module to encode the geometric structure of the human pose in a single frame into a token, and a Temporal Transformer Encoder to model temporal dependencies between frames. Since then, many works [8,10,16-18,51] have explored the potential of transformers in 3D human pose estimation.

Although these methods leverage different architectures to extract spatial-temporal information from 2D sequences, they share a common framework that employs a fixed number of frames to predict the result. Moreover, the chunk size has a significant impact on accuracy, and a larger chunk size is usually beneficial. However, previous works have not taken into account that a larger chunk size also significantly increases the inference delay. This motivates us to develop a method that better balances accuracy and inference latency.
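
To make the latency cost concrete, the sliding-window delay described in Sec. 1 can be expressed as a small helper. This is a sketch of the arithmetic only; the function name and signature are ours, not part of the paper's code release:

```python
def inference_latency_s(chunk_size: int, fps: float, framework: str = "seq2frame") -> float:
    """Seconds spent waiting for future frames before a pose can be emitted,
    ignoring the model's own forward time."""
    if framework == "seq2frame":
        # The center frame waits for (chunk_size - 1) / 2 future frames.
        return (chunk_size - 1) / 2 / fps
    if framework == "seq2seq":
        # The first frame of a chunk waits for chunk_size - 1 future frames,
        # the last for none; averaged over the chunk this is the same value.
        return (chunk_size - 1) / 2 / fps
    raise ValueError(f"unknown framework: {framework}")

print(inference_latency_s(243, 10.0))  # 12.1, as computed in the introduction
```
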
# 2.3 Real-time Human Pose Estimation

In addition to accuracy, low inference latency is also desired for human pose estimation models in many scenarios, and significant efforts have been devoted to reducing it. On the one hand, since human pose estimation models typically use a backbone model to extract image features, general-purpose lightweight backbones [9,13,30,47] can be used directly to replace the backbone in HPE models [6]. On the other hand, simplifying the pipeline can improve the model's efficiency; for example, VNect [24] combines bounding box detection, 2D keypoint detection, and 2D-to-3D lifting into one model. However, existing methods focus only on decreasing the forward time of the HPE models and do not consider the inference latency caused by large chunk sizes, which is the focus of our method.

# 2.4 Length Extrapolation

Models in natural language processing are expected to generalize across sequences of varying lengths, particularly to sequences longer than the training samples. This desired property is called length extrapolation. To achieve it, relative position embeddings such as RoPE [32] and xPos [36] are needed, since they do not require the input sequences to have a fixed length. Additionally, there are methods [4,28] that modify the attention module to achieve length extrapolation. For example, ALiBi [28] proposes to subtract the absolute temporal distance of two tokens from the attention score, which enhances the performance on extremely long sequences. Our knowledge transfer strategy is similar to length extrapolation, except that we concentrate on the model's transition from large chunks to smaller ones.

# 3 Method

# 3.1 Preliminary

RetNet [35] is a sequence modeling network that produces a contextualized feature sequence of length $L$ given an input sequence $X \in \mathbb{R}^{L \times d}$. The basic module of RetNet is retention, which has three mathematically equivalent representations: parallel, recurrent, and chunkwise recurrent. We explain these representations below.

Parallel Given the input sequence $X$, the query $Q$ and key $K$ are derived by applying a linear projection and RoPE; $P_{q}$ and $P_{k}$ are the rotary position embeddings for the query and the key, respectively. The value $V$ is obtained by a linear projection only. $D \in \mathbb{R}^{L \times L}$ combines causal masking with exponential decay with respect to the relative distance, and $\odot$ denotes the element-wise product. Since the value in the mask is non-zero only when the reference token (the $m^{th}$) is no later than the target token (the $n^{th}$), RetNet is a fully causal model.

$$
Q = P_{q}\left(X W_{Q}\right), \quad K = P_{k}\left(X W_{K}\right), \quad V = X W_{V}
$$

$$
D_{nm} = \begin{cases} \gamma^{n-m}, & n \geq m \\ 0, & n < m \end{cases} \tag{1}
$$

$$
\operatorname{Retention}(X) = \left(Q K^{T} \odot D\right) V
$$

Recurrent $S_{n} \in \mathbb{R}^{d \times d}$ denotes the state at time step $n$, and $Q_{n}, K_{n}, V_{n}$ are the rows of the same $Q, K, V$ from Eq. (1) at time step $n$. This representation again shows that RetNet is entirely causal: the output for the $n^{th}$ frame depends solely on the previous state $S_{n-1}$ and the $n^{th}$ input.

$$
S_{n} = \gamma S_{n-1} + K_{n}^{T} V_{n} \tag{2}
$$

$$
\operatorname{Retention}\left(X_{n}\right) = Q_{n} S_{n}, \quad n = 1, \dots, L
$$
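
Because these two forms are mathematically equivalent, the recurrence can be checked numerically. The following self-contained NumPy sketch is our own illustration, not the paper's code: RoPE and the learned projections are omitted, and $Q, K, V$ are random matrices.

```python
# Check that the parallel form (Eq. 1) and recurrent form (Eq. 2) agree.
import numpy as np

L, d, gamma = 8, 4, 0.9
rng = np.random.default_rng(0)
Q, K, V = (rng.standard_normal((L, d)) for _ in range(3))

# Parallel: D[n, m] = gamma^(n-m) for n >= m, 0 otherwise (causal decay mask).
n, m = np.arange(L)[:, None], np.arange(L)[None, :]
D = np.where(n >= m, gamma ** (n - m).astype(float), 0.0)
out_parallel = (Q @ K.T * D) @ V

# Recurrent: S_n = gamma * S_{n-1} + K_n^T V_n, output_n = Q_n S_n.
S = np.zeros((d, d))
out_recurrent = np.empty_like(out_parallel)
for t in range(L):
    S = gamma * S + np.outer(K[t], V[t])  # rank-1 update of the d x d state
    out_recurrent[t] = Q[t] @ S

assert np.allclose(out_parallel, out_recurrent)  # the two forms agree
```

The recurrent form carries only a $d \times d$ state per step, which is what makes streaming inference cheap.
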
Chunkwise Recurrent This representation is the hybrid form of the above two. Suppose the input sequence is segmented into chunks of length $T$, and denote $X_{iT:(i+1)T}$ as $X_{[i]}$, where $[i]$ indicates the $i$-th chunk. Within a chunk, the model follows the parallel representation, and cross-chunk information is passed following the recurrent representation. The $D$ here is similar to that in Eq. (1), but its shape changes from $L \times L$ to $T \times T$; the $D$'s of Eq. (1) and Eq. (3) are illustrated in Fig. 2(left). $\zeta$ and $\xi$ are both $T \times d$ matrices whose $r$-th rows are $\gamma^{T-r-1}$ and $\gamma^{r+1}$, respectively.

![](images/f727394a1ee857546ce8c1acd6a4eaf4314f7f883b2ccf713669ca8611a62375.jpg)
Fig. 2: (Left) The causal masks in the parallel and chunkwise recurrent representations of the original RetNet. The model can only perceive historical frames although there are several future frames in the current chunk. (Right) We propose Non-Causal RetNet (NC-RetNet), which utilizes all the frames within the current chunk using the full mask and can be trained in parallel with the staircase-shaped mask.

$$
S_{i} = K_{[i]}^{T}\left(V_{[i]} \odot \zeta\right) + \gamma^{T} S_{i-1}
$$

$$
\operatorname{Retention}\left(X_{[i]}\right) = \underbrace{\left(Q_{[i]} K_{[i]}^{T} \odot D\right) V_{[i]}}_{\text{Inner-Chunk}} + \underbrace{\left(Q_{[i]} S_{i-1}\right) \odot \xi}_{\text{Cross-Chunk}} \tag{3}
$$

Since low inference latency is required in real-time scenarios, the parallel representation is not suitable. Moreover, the recurrent representation is the special case of the chunkwise recurrent representation with $T = 1$. Therefore, we focus on the chunkwise recurrent representation of RetNet to design our human pose estimation model.

# 3.2 Non-Causal RetNet

Although the chunkwise recurrent representation of RetNet processes the input sequence chunk by chunk, it does not utilize all the information in the current chunk. As shown in Fig. 2(left), the mask in the chunkwise recurrent representation is a lower triangular matrix. This means that the estimation of the current frame uses only the frames before it, regardless of the future frames within the chunk. However, leveraging some future information can be very helpful for the accuracy of human pose estimation models.

To solve this problem, we modify the causal masking in RetNet to exploit all the information within the current chunk, and propose Non-Causal RetNet (NC-RetNet). Formally, the new $D$ in the chunkwise recurrent representation is given by Eq. (4). The new mask is a full matrix instead of a lower triangular one: when predicting the 3D pose of the $n^{th}$ frame, we compute the exponential decay for both past frames $(m < n)$ and future frames $(m > n)$ within the chunk using the absolute distance $|n - m|$ between the two frames. The mathematical expression of the new $D$ matrix in the parallel representation can be found in the Supp. Mat. The chunk size $T$ can be adjusted to balance accuracy against inference latency: the larger $T$ is, the more future information the model can perceive, but the longer the inference latency will be.

$$
D = \left\{D_{nm}\right\} = \left\{\gamma^{|n-m|}\right\}, \quad n, m \in \{1, \dots, T\} \tag{4}
$$
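
The decay masks are straightforward to construct. Below is a minimal NumPy sketch; the function names mirror `get_D_parallel` and `get_D_chunkwise` from the pseudo-code in Sec. 3.3, but the bodies are our reconstruction from Eq. (4) and the description of Fig. 2(right), not the released implementation:

```python
import numpy as np

def get_D_chunkwise(T: int, gamma: float = 0.9) -> np.ndarray:
    """Non-causal decay mask of Eq. (4): full T x T matrix, decay by |n - m|."""
    n = np.arange(T)
    return gamma ** np.abs(n[:, None] - n[None, :])

def get_D_parallel(L: int, T: int, gamma: float = 0.9) -> np.ndarray:
    """Staircase-shaped mask for parallel training over a length-L sequence:
    frame n may attend to every frame up to the end of its own chunk (all past
    frames plus the within-chunk future), with the same |n - m| decay."""
    n = np.arange(L)
    chunk_end = (n // T + 1) * T                      # first index after n's chunk
    visible = n[None, :] < chunk_end[:, None]         # staircase visibility
    decay = gamma ** np.abs(n[:, None] - n[None, :])
    return np.where(visible, decay, 0.0)

print(get_D_chunkwise(3))         # full 3 x 3 decay matrix
print(get_D_parallel(6, 3) != 0)  # staircase pattern over two chunks
```
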
Fig. 2(right) illustrates the masks in the parallel and chunkwise recurrent representations of this non-causal retention. Note that the model can still be trained in parallel by using the staircase-shaped mask, but it no longer has a recurrent representation unless $T = 1$.

By using the non-causal masking, NC-RetNet exploits temporal dependencies from the cross-chunk state, which provides long-term historical information, and from a few future frames, which provide a limited look-ahead. Therefore, the temporal receptive field of our method is not limited by the chunk size. In fact, the chunk size in our method only affects the amount of future information, while historical information is always adequate thanks to the cross-chunk state.

# 3.3 Transfer Knowledge from Large Chunks

We further develop a strategy for our NC-RetNet to improve its performance under small test chunk sizes: train the model with a large chunk size and infer with a small chunk size. Since the model uses xPos, a relative position embedding, it can handle 2D sequences of different lengths. In addition, the cross-chunk state $S_{i}$ in the chunkwise recurrent representation is updated every chunk and accumulates information from previous chunks. With this long-term historical information, the model becomes insensitive to the number of future frames. Therefore, using a smaller chunk size during inference does not significantly decrease accuracy, but greatly reduces inference latency. This indicates that knowledge is transferred from the large chunks seen during training to the small chunks used during inference.

The details of training and testing are elaborated below. During training, we use RetNet's parallel representation to achieve training parallelism and set the training chunk size to a large value $T_{l}$ to capture long-term patterns of human motion; the pseudo-code is shown in Algorithm 1. The parallel representation used during training implicitly incorporates the cross-chunk state, meaning that the model can in principle observe historical information over a long period of time as well as many future frames. During inference, the test chunk size $T_{s}$ is set smaller than the training chunk size $T_{l}$ and the chunkwise recurrent representation is used; the pseudo-code is given in Algorithm 2.

Algorithm 1: Pseudo-code for training. Input: training dataloader, initialized model, training chunk size $T_{l}$. Output: trained model.

    for input_2d, target in dataloader:
        L = input_2d.size(1)                 # total length of the input sequence
        D_parallel = get_D_parallel(L, T_l)  # staircase-shaped mask for parallel training
        pred = model.forward_parallel(input_2d, D_parallel)
        loss = loss_func(pred, target)
        loss.backward()
        optimizer.step()

Algorithm 2: Pseudo-code for inference. Input: 2D stream, trained model, test chunk size $T_{s}$. Output: 3D stream $\{\mathbf{y}_{n}\}$.

    D_chunkwise = get_D_chunkwise(T_s)  # full non-causal mask for chunkwise inference
    s_n, x_n, n = None, [], 0
    for x_i in stream:
        x_n.append(x_i)
        if len(x_n) == T_s:
            y_n, s_n = model.forward_chunkwise(x_n, D_chunkwise, s_n, n)
            x_n, n = [], n + 1
            output(y_n)                 # emit y_n once per chunk for downstream tasks

Given a stream of 2D keypoints, the model processes the stream in the chunkwise recurrent representation every $T_{s}$ frames, based on the current chunk x_n as well as an explicit cross-chunk state s_n. This cross-chunk state carries information about previous chunks and makes the model insensitive to the number of future frames. Therefore, although the chunk is smaller than the training chunks, the model can still extract stable temporal features.
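
For concreteness, the chunk-level computation hidden behind `model.forward_chunkwise` can be sketched for a single retention head. The code below is our reconstruction of Eq. (3) with the non-causal mask of Eq. (4); it omits the projections, xPos, and multi-head handling, and is not the authors' released implementation:

```python
import numpy as np

def retention_chunk_step(Q, K, V, S_prev, gamma):
    """Q, K, V: (T, d) for the current chunk; S_prev: (d, d) cross-chunk state."""
    T, d = Q.shape
    r = np.arange(T)
    D = gamma ** np.abs(r[:, None] - r[None, :])   # Eq. (4): non-causal decay
    zeta = (gamma ** (T - r - 1))[:, None]         # per-row decay for the state update
    xi = (gamma ** (r + 1))[:, None]               # decay applied to the cross-chunk term
    inner = (Q @ K.T * D) @ V                      # inner-chunk term (past and future frames)
    cross = (Q @ S_prev) * xi                      # cross-chunk term (past chunks only)
    S = K.T @ (V * zeta) + (gamma ** T) * S_prev   # updated state for the next chunk
    return inner + cross, S

# Streaming usage over chunks of size T_s, as in Algorithm 2:
T_s, d, gamma = 4, 8, 0.95
S = np.zeros((d, d))
for _ in range(3):
    Q, K, V = (np.random.randn(T_s, d) for _ in range(3))
    y, S = retention_chunk_step(Q, K, V, S, gamma)
```

Each step costs $O(T^{2}d + Td^{2})$ and carries only a $d \times d$ state between chunks, so the waiting time is governed by $T_{s}$ alone.
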
# 3.4 Implementation Details

We implement our idea on top of the state-of-the-art seq2seq method MixSTE [46] by replacing its temporal encoder with RetNet. Since the movement of distal joints is more erratic than that of torso joints, the estimation of these distal joints should rely on more local temporal information. Therefore, we assign different decay coefficients to different human joints, which we refer to as joint-related decay coefficients. The chunkwise recurrent representation with joint-related decay is formulated in Eq. (5), where $p$ is the index of human joints.

$$
S_{i,p} = K_{[i],p}^{T}\left(V_{[i],p} \odot \zeta_{p}\right) + \gamma_{p}^{T} S_{i-1,p}
$$

$$
\mathrm{Inner}_{i,p} = \left(Q_{[i],p} K_{[i],p}^{T} \odot D_{p}\right) V_{[i],p} \tag{5}
$$

$$
\mathrm{Cross}_{i,p} = \left(Q_{[i],p} S_{i-1,p}\right) \odot \xi_{p}
$$

$$
\operatorname{Retention}\left(X_{[i]}\right) = \operatorname{Concat}_{p}\left(\mathrm{Inner}_{i,p} + \mathrm{Cross}_{i,p}\right)
$$

The loss function and training strategies are the same as in MixSTE. The training chunk size is 243 on the Human3.6M dataset and 81 on the MPI-INF-3DHP dataset; we then test the model with different chunk sizes. The overall architecture of our model is given in the Supp. Mat. We also implement our idea on MotionBERT [51]; those results can likewise be found in the Supp. Mat.

# 4 Experiments

# 4.1 Datasets and Evaluation Protocols

Experiments are conducted on two human pose estimation datasets: Human3.6M [14] and MPI-INF-3DHP [23]. Human3.6M is the most widely used indoor dataset for single-person 3D human pose estimation, containing about 3.6 million images collected from 11 professional actors. Following common practice [2,27,46,50], we use the samples of subjects S1, S5, S6, S7, and S8 for training and evaluate on subjects S9 and S11. Mean Per-Joint Position Error (MPJPE) and Procrustes-Aligned MPJPE (PA-MPJPE) are evaluated on this dataset. We also report Mean Per-Joint Velocity Error (MPJVE) results, which reflect the continuity of the predicted results. MPI-INF-3DHP is a more challenging 3D human pose estimation dataset because it includes both indoor and outdoor scenes. The samples are collected from 8 subjects, each performing 8 actions, and the test set consists of 6 subjects in different scenes. We follow the setup in [2,39,46,50] and report MPJPE, the Percentage of Correct Keypoints (PCK) within the $150\mathrm{mm}$ range, and the Area Under Curve (AUC), following [18,38,50].
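
For orientation, the reported metrics can be written down compactly. The sketch below uses the definitions as they are commonly implemented for these benchmarks and is not the official evaluation code; PA-MPJPE additionally applies a Procrustes alignment of the prediction to the ground truth before the error is computed, which we omit for brevity.

```python
import numpy as np

def mpjpe(pred, gt):
    """Mean per-joint position error. pred, gt: (F, J, 3) arrays in mm."""
    return np.linalg.norm(pred - gt, axis=-1).mean()

def mpjve(pred, gt):
    """Mean per-joint velocity error: MPJPE of the first-order temporal
    differences, which penalizes jitter and reflects continuity."""
    return mpjpe(np.diff(pred, axis=0), np.diff(gt, axis=0))

def pck_auc(pred, gt, thresh=150.0, steps=31):
    """PCK within `thresh` mm, and AUC computed (as is common for
    MPI-INF-3DHP) as the mean PCK over thresholds from 0 to `thresh`."""
    err = np.linalg.norm(pred - gt, axis=-1)
    pck = (err <= thresh).mean()
    auc = np.mean([(err <= t).mean() for t in np.linspace(0.0, thresh, steps)])
    return pck, auc
```
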
# 4.2 Quantitative Comparison

Results on Human3.6M We first use the 2D keypoints detected by CPN [3] as input; the results are shown in Tab. 1. Our method achieves performance comparable to the state of the art when the chunk size for inference is large ($T = 243$). Moreover, it outperforms previous methods by a clear margin when the chunk size is small ($T = 27$, 81), as its accuracy decreases only slightly when the chunk size is reduced. Furthermore, our MPJPE at $T = 27$ is comparable to that of the previous state-of-the-art method at $T = 81$ (42.1 mm vs. 42.0 mm), indicating that our method can provide similarly accurate predictions with much lower inference latency. Additionally, our method exhibits significantly better continuity than previous methods, with a 0.2 mm-per-frame improvement on the MPJVE metric.

We further use the ground-truth 2D keypoints of the Human3.6M dataset as input to test the upper bound of our method, as shown in Tab. 2. The results indicate that using the model trained with 243 frames to infer with a chunk size of 81 improves the MPJPE metric by $3.3\mathrm{mm}$ over the previous state of the art ($22.4\mathrm{mm}$ vs. $25.7\mathrm{mm}$). Moreover, the MPJPE of our method at $T = 27$ is remarkably lower than that of previous methods at $T = 81$, demonstrating our method's efficient use of transferred knowledge to achieve higher accuracy with lower inference latency.

Table 1: Comparison of MPJPE, PA-MPJPE and MPJVE on the Human3.6M dataset using 2D keypoints detected by CPN [3] as input. $T$ is the chunk size when testing.
| MPJPE | T | Dir. | Disc. | Eat. | Greet | Phone | Photo | Pose | Purch. | Sit | SitD. | Smoke | Wait | WalkD. | Walk | WalkT. | Average |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| MixSTE [46] | 27 | - | - | - | - | - | - | - | - | - | - | - | - | - | - | - | 45.1 |
| STCFormer [38] | 27 | 40.7 | 44.6 | 41.2 | 41.9 | 45.8 | 53.7 | 41.5 | 40.9 | 55.9 | 63.8 | 44.6 | 41.5 | 44.7 | 29.5 | 30.8 | 44.1 |
| Ours | 27 | 38.0 | 41.5 | 40.0 | 40.0 | 44.1 | 51.3 | 39.8 | 41.7 | 53.1 | 58.3 | 43.5 | 39.8 | 42.0 | 28.4 | 29.6 | 42.1 |
| Anatomy [2] | 81 | 42.1 | 43.8 | 41.0 | 43.8 | 46.1 | 53.5 | 42.4 | 43.1 | 53.9 | 60.5 | 45.7 | 42.1 | 46.2 | 32.2 | 33.8 | 44.6 |
| PoseFormer [50] | 81 | 41.5 | 44.8 | 39.8 | 42.5 | 46.5 | 51.6 | 42.1 | 42.0 | 53.3 | 60.7 | 45.5 | 43.3 | 46.1 | 31.8 | 32.2 | 44.3 |
| Xue et al. [43] | 81 | 42.1 | 45.3 | 40.9 | 42.9 | 45.4 | 52.7 | 42.6 | 42.5 | 55.3 | 61.8 | 44.9 | 41.7 | 44.9 | 29.9 | 30.8 | 44.2 |
| P-STMO [31] | 81 | 41.7 | 44.5 | 41.0 | 42.9 | 46.0 | 51.3 | 42.8 | 41.3 | 54.9 | 61.8 | 45.1 | 42.8 | 43.8 | 30.8 | 30.7 | 44.1 |
| MixSTE [46] | 81 | 39.8 | 43.0 | 38.6 | 40.1 | 43.4 | 50.6 | 40.6 | 41.4 | 52.2 | 56.7 | 43.8 | 40.8 | 43.9 | 29.4 | 30.3 | 42.4 |
| STCFormer [38] | 81 | 40.6 | 43.0 | 38.3 | 40.2 | 43.5 | 52.6 | 40.3 | 40.1 | 51.8 | 57.7 | 42.8 | 39.8 | 42.3 | 28.0 | 29.5 | 42.0 |
| Ours | 81 | 36.9 | 40.5 | 39.0 | 38.6 | 43.3 | 49.6 | 38.8 | 40.2 | 52.6 | 56.5 | 42.6 | 38.8 | 40.5 | 26.8 | 28.4 | 40.9 |
| VideoPose3D [27] | 243 | 45.2 | 46.7 | 43.3 | 45.6 | 48.1 | 55.1 | 44.6 | 44.3 | 57.3 | 65.8 | 47.1 | 44.0 | 49.0 | 32.8 | 33.9 | 46.8 |
| Anatomy [2] | 243 | 41.4 | 43.5 | 40.1 | 42.9 | 46.6 | 51.9 | 41.7 | 42.3 | 53.9 | 60.2 | 45.4 | 41.7 | 46.0 | 31.5 | 32.7 | 44.1 |
| Xue et al. [43] | 243 | 39.9 | 42.7 | 40.3 | 42.3 | 45.0 | 52.8 | 40.4 | 39.3 | 56.9 | 61.2 | 44.1 | 41.3 | 42.8 | 28.4 | 29.3 | 43.1 |
| MHFormer [18] | 351 | 39.2 | 43.1 | 40.1 | 40.9 | 44.9 | 51.2 | 40.6 | 41.3 | 53.5 | 60.3 | 43.7 | 41.1 | 43.8 | 29.8 | 30.6 | 43.0 |
| P-STMO [31] | 243 | 38.9 | 42.7 | 40.4 | 41.1 | 45.6 | 49.7 | 40.9 | 39.9 | 55.5 | 59.4 | 44.9 | 42.2 | 42.7 | 29.4 | 29.4 | 42.8 |
| MixSTE [46] | 243 | 37.6 | 40.9 | 37.3 | 39.7 | 42.3 | 49.9 | 40.1 | 39.8 | 51.7 | 55.0 | 42.1 | 39.8 | 41.0 | 27.9 | 27.9 | 40.9 |
| STCFormer [38] | 243 | 38.4 | 41.2 | 36.8 | 38.0 | 42.7 | 50.5 | 38.7 | 38.2 | 52.5 | 56.8 | 41.8 | 38.4 | 40.2 | 26.2 | 27.7 | 40.5 |
| Ours | 243 | 36.9 | 40.1 | 38.7 | 38.3 | 42.9 | 48.6 | 38.2 | 40.0 | 52.5 | 55.4 | 42.3 | 38.7 | 39.7 | 26.2 | 27.8 | 40.4 |

| PA-MPJPE | T | Dir. | Disc. | Eat. | Greet | Phone | Photo | Pose | Purch. | Sit | SitD. | Smoke | Wait | WalkD. | Walk | WalkT. | Average |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| STCFormer [38] | 27 | 31.9 | 35.1 | 32.7 | 34.1 | 34.9 | 41.3 | 32.1 | 31.6 | 45.0 | 50.6 | 36.0 | 31.7 | 35.5 | 23.6 | 25.1 | 34.8 |
| Ours | 27 | 31.7 | 33.9 | 32.3 | 33.3 | 35.2 | 39.1 | 31.0 | 31.9 | 44.0 | 48.7 | 36.0 | 31.0 | 34.6 | 23.0 | 24.8 | 34.0 |
| Anatomy [2] | 81 | 33.1 | 35.3 | 33.4 | 35.9 | 36.1 | 41.7 | 32.8 | 33.3 | 42.6 | 49.4 | 37.0 | 32.7 | 36.5 | 25.5 | 27.9 | 35.6 |
| PoseFormer [50] | 81 | 32.5 | 34.8 | 32.6 | 34.6 | 35.3 | 39.5 | 32.1 | 32.0 | 42.8 | 48.5 | 34.8 | 32.4 | 35.3 | 24.5 | 26.0 | 34.6 |
| Xue et al. [43] | 81 | 31.6 | 35.5 | 32.3 | 34.2 | 35.1 | 40.3 | 32.3 | 32.3 | 44.5 | 49.6 | 35.8 | 31.6 | 35.0 | 23.7 | 24.7 | 34.6 |
| MixSTE [46] | 81 | 32.0 | 34.2 | 31.7 | 33.7 | 34.4 | 39.2 | 32.0 | 31.8 | 42.9 | 46.9 | 35.5 | 32.0 | 34.4 | 23.6 | 25.2 | 33.9 |
| STCFormer [38] | 81 | 30.4 | 33.8 | 31.1 | 31.7 | 33.5 | 39.5 | 30.8 | 30.0 | 41.8 | 45.8 | 34.3 | 30.1 | 32.8 | 21.9 | 23.4 | 32.7 |
| Ours | 81 | 30.5 | 33.1 | 31.4 | 31.6 | 33.0 | 38.4 | 29.8 | 30.6 | 43.6 | 45.4 | 34.4 | 30.3 | 32.4 | 21.5 | 22.2 | 32.6 |
| VideoPose3D [27] | 243 | 34.1 | 36.1 | 34.4 | 37.2 | 36.4 | 42.2 | 34.4 | 33.6 | 45.0 | 52.5 | 37.4 | 33.8 | 37.8 | 25.6 | 27.3 | 36.5 |
| Anatomy [2] | 243 | 32.6 | 35.1 | 32.8 | 35.4 | 36.3 | 40.4 | 32.4 | 32.3 | 42.7 | 49.0 | 36.8 | 32.4 | 36.0 | 24.9 | 26.5 | 35.0 |
| Xue et al. [43] | 243 | 31.2 | 34.1 | 31.9 | 33.8 | 33.9 | 39.5 | 31.6 | 30.0 | 45.4 | 48.1 | 35.0 | 31.1 | 33.5 | 22.4 | 23.6 | 33.7 |
| P-STMO [31] | 243 | 31.3 | 35.2 | 32.9 | 33.9 | 35.4 | 39.3 | 32.5 | 31.5 | 44.6 | 48.2 | 36.3 | 32.9 | 34.4 | 23.8 | 23.9 | 34.4 |
| MixSTE [46] | 243 | 30.8 | 33.1 | 30.3 | 31.8 | 33.1 | 39.1 | 31.1 | 30.5 | 42.5 | 44.5 | 34.0 | 30.8 | 32.7 | 22.1 | 22.9 | 32.6 |
| STCFormer [38] | 243 | 29.3 | 33.0 | 30.7 | 30.6 | 32.7 | 38.2 | 29.7 | 28.8 | 42.2 | 45.0 | 33.3 | 29.4 | 31.5 | 20.9 | 22.3 | 31.8 |
| Ours | 243 | 30.8 | 33.1 | 31.3 | 31.8 | 33.4 | 37.7 | 30.1 | 30.5 | 43.4 | 45.5 | 34.3 | 30.3 | 31.5 | 21.4 | 22.7 | 32.5 |

| MPJVE | T | Dir. | Disc. | Eat. | Greet | Phone | Photo | Pose | Purch. | Sit | SitD. | Smoke | Wait | WalkD. | Walk | WalkT. | Average |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| VideoPose3D [27] | 243 | 3.0 | 3.1 | 2.2 | 3.4 | 2.3 | 2.7 | 2.7 | 3.1 | 2.1 | 2.9 | 2.3 | 2.4 | 3.7 | 3.1 | 2.8 | 2.8 |
| Anatomy [2] | 243 | 2.7 | 2.8 | 2.0 | 3.1 | 2.0 | 2.4 | 2.4 | 2.8 | 1.8 | 2.4 | 2.0 | 2.1 | 3.4 | 2.7 | 2.4 | 2.5 |
| PoseFormer [50] | 81 | 3.2 | 3.4 | 2.6 | 3.6 | 2.6 | 3.0 | 2.9 | 3.2 | 2.6 | 3.3 | 2.7 | 2.7 | 3.8 | 3.2 | 2.9 | 3.1 |
| StridedFormer [17] | 351 | 2.4 | 2.5 | 1.8 | 2.8 | 1.8 | 2.2 | 2.2 | 2.5 | 1.5 | 2.0 | 1.8 | 1.9 | 3.2 | 2.5 | 2.1 | 2.2 |
| MixSTE [46] | 243 | 2.5 | 2.7 | 1.9 | 2.8 | 1.9 | 2.2 | 2.3 | 2.6 | 1.6 | 2.2 | 1.9 | 2.0 | 3.1 | 2.6 | 2.2 | 2.3 |
| Ours | 81 | 2.3 | 2.4 | 1.8 | 2.6 | 1.7 | 2.1 | 2.1 | 2.5 | 1.5 | 2.1 | 1.8 | 1.9 | 3.0 | 2.4 | 2.0 | 2.2 |
| Ours | 243 | 2.3 | 2.4 | 1.8 | 2.6 | 1.7 | 2.1 | 2.1 | 2.5 | 1.5 | 2.1 | 1.8 | 1.9 | 3.0 | 2.4 | 2.0 | 2.0 |

Results on MPI-INF-3DHP The results on the MPI-INF-3DHP dataset are shown in Tab. 3. An improvement of $0.9\mathrm{mm}$ on the MPJPE metric is achieved at $T = 81$, and the improvement becomes more remarkable as the chunk size decreases. In particular, our method significantly outperforms previous methods, with an improvement of $4.1\mathrm{mm}$ on the MPJPE metric when $T$ is 9.

Table 2: Comparison of MPJPE on the Human3.6M dataset using 2D ground truth keypoints as input.
| MPJPE | T | Dir. | Disc. | Eat. | Greet | Phone | Photo | Pose | Purch. | Sit | SitD. | Smoke | Wait | WalkD. | Walk | WalkT. | Average |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Ours | 27 | 23.7 | 24.8 | 23.5 | 24.4 | 23.6 | 28.1 | 27.2 | 25.3 | 26.7 | 27.9 | 25.0 | 23.5 | 23.6 | 17.2 | 18.7 | 24.2 |
| PoseFormer [50] | 81 | 30.0 | 33.6 | 29.9 | 31.0 | 30.2 | 33.3 | 34.8 | 31.4 | 37.8 | 38.6 | 31.7 | 31.5 | 29.0 | 23.3 | 23.1 | 31.3 |
| Xue et al. [43] | 81 | 27.6 | 28.8 | 24.9 | 25.7 | 26.7 | 30.6 | 30.8 | 26.4 | 35.8 | 32.7 | 27.1 | 26.2 | 25.6 | 19.2 | 20.6 | 27.2 |
| MixSTE [46] | 81 | 25.6 | 27.8 | 24.5 | 25.7 | 24.9 | 29.9 | 28.6 | 27.4 | 29.9 | 29.0 | 26.1 | 25.0 | 25.2 | 18.7 | 19.9 | 25.9 |
| STCFormer [38] | 81 | 26.2 | 26.5 | 23.4 | 24.6 | 25.0 | 28.6 | 28.3 | 24.6 | 30.9 | 33.7 | 25.7 | 25.3 | 24.6 | 18.6 | 19.7 | 25.7 |
| Ours | 81 | 20.9 | 22.5 | 21.8 | 21.5 | 22.0 | 25.6 | 23.4 | 23.7 | 28.1 | 28.8 | 23.9 | 20.9 | 21.1 | 14.9 | 16.3 | 22.4 |
| Xue et al. [43] | 243 | 25.8 | 25.2 | 23.3 | 23.5 | 24.0 | 27.4 | 27.9 | 24.4 | 29.3 | 30.1 | 24.9 | 24.1 | 23.3 | 18.6 | 19.7 | 24.7 |
| MHFormer [18] | 351 | 27.7 | 32.1 | 29.1 | 28.9 | 30.0 | 33.9 | 33.0 | 31.2 | 37.0 | 39.3 | 30.0 | 31.0 | 29.4 | 22.2 | 23.0 | 30.5 |
| P-STMO [31] | 243 | 28.5 | 30.1 | 28.6 | 27.9 | 29.8 | 33.2 | 31.3 | 27.8 | 36.0 | 37.4 | 29.7 | 29.5 | 28.1 | 21.0 | 21.0 | 29.3 |
| MixSTE [46] | 243 | 21.6 | 22.0 | 20.4 | 21.0 | 20.8 | 24.3 | 24.7 | 21.9 | 26.9 | 24.9 | 21.2 | 21.5 | 20.8 | 14.7 | 15.7 | 21.6 |
| STCFormer [38] | 243 | 21.4 | 22.6 | 21.0 | 21.3 | 23.8 | 26.0 | 24.2 | 20.0 | 28.9 | 28.0 | 22.3 | 21.4 | 20.1 | 14.2 | 15.0 | 22.0 |
| Ours | 243 | 20.0 | 21.1 | 20.9 | 20.8 | 20.1 | 24.9 | 23.5 | 22.5 | 26.5 | 39.6 | 21.7 | 20.9 | 20.4 | 14.5 | 15.7 | 21.5 |

Similar to the phenomenon on the Human3.6M dataset, our method is comparable to or better than previous methods at an even smaller chunk size. For example, the MPJPE of our method at $T = 27$ is better than that of STCFormer at $T = 81$ (22.7 mm vs. 23.1 mm), and the MPJPE of our method at $T = 9$ is similar to that of STCFormer at $T = 27$ (24.1 mm vs. 24.2 mm). These results show that our method generalizes well across datasets.

Table 3: Comparison of quantitative results on the MPI-INF-3DHP dataset. $\uparrow$: higher is better. $\downarrow$: lower is better.
| Method | T | PCK $\uparrow$ | AUC $\uparrow$ | MPJPE $\downarrow$ |
|---|---|---|---|---|
| PoseFormer [50] | 9 | 88.6 | 56.4 | 77.1 |
| CrossFormer [10] | 9 | 89.1 | 57.5 | 76.3 |
| MHFormer [18] | 9 | 93.8 | 63.3 | 58.0 |
| STCFormer [38] | 9 | 98.2 | 81.5 | 28.2 |
| Ours | 9 | 98.9 | 83.3 | 24.1 |
| Lin et al. [20] | 25 | 83.6 | 51.4 | 79.8 |
| MixSTE [46] | 27 | 94.4 | 66.5 | 54.9 |
| STCFormer [38] | 27 | 98.4 | 83.4 | 24.2 |
| Ours | 27 | 99.1 | 84.1 | 22.7 |
| UGCN [39] | 96 | 86.9 | 62.1 | 68.1 |
| Anatomy [2] | 81 | 87.8 | 53.8 | 79.1 |
| Hu et al. [12] | 96 | 97.9 | 69.5 | 42.5 |
| Einfalt et al. [8] | 81 | 95.4 | 67.6 | 46.9 |
| P-STMO [31] | 81 | 97.9 | 75.8 | 32.2 |
| STCFormer [38] | 81 | 98.7 | 83.9 | 23.1 |
| Ours | 81 | 99.1 | 84.4 | 22.2 |

# 4.3 Qualitative Results

Visualization on Continuity We compute the MPJVE of the results predicted by MixSTE, STCFormer, and our method at different timesteps, and visualize the curves in Fig. 3. It can be seen that the MPJVE of our method is lower than that of previous methods. Our method captures temporal information using non-overlapping shifted windows, similar to MixSTE; however, it produces more continuous results at the edge between two chunks, because MixSTE estimates two consecutive chunks independently and therefore lacks continuity at the edge, whereas our method incorporates temporal information from previous chunks through the cross-chunk state. Compared to STCFormer, our method also generally produces more continuous results, because it generates multiple frames at a time, which allows continuity constraints to be imposed on the output.

![](images/d4a2a04bd5c9b75b20b917823782835ace91f8d0da2d1cac95d9ceab8033f48d.jpg)
Fig. 3: Comparison of the MPJVE curves over time between MixSTE, STCFormer and our method.

![](images/9b633f75a3a335b0c645f7cd5dd5a44eaafd41d3708e049ff26326234e45eab3.jpg)
Fig. 4: Comparison of some visualization results predicted by MixSTE [46], STCFormer [38] and our method. The black skeletons are the ground truth, and the red skeletons are the predicted results. The comparison with MixSTE is shown in green circles, while the comparison with STCFormer is shown in blue circles.

Visualization of Results We present some visualization examples in Fig. 4, where the results are predicted by MixSTE [46], STCFormer [38] and our method, respectively. It can be seen that our method predicts more accurate results, and the improvement is visually obvious. More visualization results can be found in the Supp. Mat.

# 4.4 Ablation Study

Ablations on Knowledge Transfer The knowledge transferred from large training chunks to smaller test chunks plays an important role in our method. To demonstrate this, we train the model with chunk sizes of 27 and 81, respectively, and compare the performance of these models with that of the model trained with $T = 243$. The results are shown in Tab. 4 ($2^{nd}$ to $4^{th}$ rows). Compared with the models trained with $T = 27$ and 81, inference with the model trained with the larger chunk size ($T = 243$) is significantly more accurate. This indicates that the knowledge learned with large chunks is useful for reasoning over small chunks.

Table 4: Comparison of different methods in terms of knowledge transfer.
| Method | Train T | Test T = 27 | Test T = 81 | Test T = 243 |
|---|---|---|---|---|
| Previous SOTA | same as test T | 44.1 | 42.0 | 40.5 |
| Ours | 27 | 43.7 | - | - |
| Ours | 81 | 43.0 | 41.9 | - |
| Ours | 243 | 42.1 | 40.9 | 40.4 |
| MixSTE w/ xPos | 27 | 45.3 | - | - |
| MixSTE w/ xPos | 81 | 47.0 | 42.6 | - |
| MixSTE w/ xPos | 243 | 48.8 | 44.1 | 41.1 |
| Ours w/o state | 27 | 46.3 | - | - |
| Ours w/o state | 81 | 49.1 | 44.0 | - |
| Ours w/o state | 243 | 54.2 | 49.8 | 42.5 |

Effect of Cross-Chunk State We compare our method with two baselines that do not use the cross-chunk state: the MixSTE model with xPos as the position embedding, and the model based on RetNet but without the cross-chunk state. These two baselines can handle sequences of different lengths, but can only use within-chunk information. The results are shown in Tab. 4 (bottom six rows). Lacking long-term historical information, the two baselines deteriorate rapidly as the gap between the training and test chunk sizes increases; they cannot efficiently transfer knowledge from large chunks to small chunks. Therefore, the cross-chunk state is essential for knowledge transfer in our method, and our NC-RetNet is the first method to exhibit this knowledge transfer property.

Comparison of Computational Cost The model parameters and computational cost of our method and previous methods are compared in Tab. 5. For seq2seq methods, the FLOPs are averaged over the number of frames, since a single inference yields predictions for multiple frames. Our modification of MixSTE does not increase the model parameters or FLOPs, and compared to STCFormer [38], which has performance comparable to ours at $T = 243$, the computational cost of our method is much lower (430 M vs. 78107 M FLOPs).

Table 5: Comparison of model parameters and computational cost. FLOPs for seq2seq methods and ours are averaged over the number of output frames, as is done in [46].
| Method | Params (M) | FLOPs (M) | MPJPE (T=243) |
|---|---|---|---|
| StridedFormer [17] | 4.2 | 1372 | 44.0 |
| P-STMO [31] | 6.7 | 1737 | 42.8 |
| MHFormer [18] | 24.7 | 4812 | 43.2 |
| MixSTE [46] | 33.6 | 572 | 40.9 |
| STCFormer [38] | 18.9 | 78107 | 40.5 |
| Ours | 25.2 | 430 | 40.4 |

# 5 Conclusion

In this paper, we propose the first 3D human pose estimation model based on Retentive Networks, NC-RetNet. By using non-causal masking, it effectively leverages a large number of past frames and a limited number of future frames to incorporate temporal information. Furthermore, we introduce a knowledge transfer strategy that trains the model with a larger chunk size and uses a smaller chunk size during inference, reducing inference latency without much loss in accuracy. Through extensive experiments on the Human3.6M and MPI-INF-3DHP datasets, our approach has demonstrated state-of-the-art performance even with a smaller test chunk size. In conclusion, our method achieves a good balance between high accuracy and low inference latency, making it suitable for real-time scenarios.

Limitations Admittedly, there are two limitations in our work. First, the fundamental theory behind our method's ability to transfer knowledge remains unclear, despite our study of the effect of the cross-chunk state. Second, we have only tested our method on the 2D-to-3D lifting task, although the idea of transferring knowledge from large chunks to smaller chunks applies to many kinds of sequential data in computer vision. Further work is required to explain the theory and explore more applications.

# References

1. Cai, Y., Ge, L., Liu, J., Cai, J., Cham, T.J., Yuan, J., Thalmann, N.M.: Exploiting spatial-temporal relationships for 3d pose estimation via graph convolutional networks. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 2272-2281 (2019) 2, 4
2. Chen, T., Fang, C., Shen, X., Zhu, Y., Chen, Z., Luo, J.: Anatomy-aware 3d human pose estimation with bone-based pose decomposition. IEEE Transactions on Circuits and Systems for Video Technology 32(1), 198-209 (2021) 1, 9, 10, 11
3. Chen, Y., Wang, Z., Peng, Y., Zhang, Z., Yu, G., Sun, J.: Cascaded pyramid network for multi-person pose estimation. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 7103-7112 (2018) 3, 9, 10
4. Chi, T.C., Fan, T.H., Ramadge, P.J., Rudnicky, A.: Kerple: Kernelized relative positional embedding for length extrapolation. Advances in Neural Information Processing Systems 35, 8386-8399 (2022) 5
5. Choi, H., Moon, G., Chang, J.Y., Lee, K.M.: Beyond static features for temporally consistent 3d human pose and shape from a video. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 1964-1973 (2021) 4
6. Choi, S., Choi, S., Kim, C.: Mobilehumanpose: Toward real-time 3d human pose estimation in mobile devices. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 2328-2338 (2021) 4
7. Chung, J., Gulcehre, C., Cho, K., Bengio, Y.: Empirical evaluation of gated recurrent neural networks on sequence modeling. arXiv preprint arXiv:1412.3555 (2014) 4
8. Einfalt, M., Ludwig, K., Lienhart, R.: Uplift and upsample: Efficient 3d human pose estimation with uplifting transformers. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 2903-2913 (2023) 4, 11
9. Han, K., Wang, Y., Tian, Q., Guo, J., Xu, C., Xu, C.: Ghostnet: More features from cheap operations. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 1580-1589 (2020) 4
10. Hassanin, M., Khamiss, A., Bennamoun, M., Boussaid, F., Radwan, I.: Crossformer: Cross spatio-temporal transformer for 3d human pose estimation.
arXiv preprint arXiv:2203.13387 (2022) 4, 11
11. Hesse, N., Schröder, A.S., Müller-Felber, W., Bodensteiner, C., Arens, M., Hofmann, U.G.: Body pose estimation in depth images for infant motion analysis. In: 2017 39th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC). pp. 1909-1912. IEEE (2017) 1
12. Hu, W., Zhang, C., Zhan, F., Zhang, L., Wong, T.T.: Conditional directed graph convolution for 3d human pose estimation. In: Proceedings of the 29th ACM International Conference on Multimedia. pp. 602-611 (2021) 11
13. Iandola, F.N., Han, S., Moskewicz, M.W., Ashraf, K., Dally, W.J., Keutzer, K.: SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5 MB model size. arXiv preprint arXiv:1602.07360 (2016) 4
14. Ionescu, C., Papava, D., Olaru, V., Sminchisescu, C.: Human3.6M: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE transactions on pattern analysis and machine intelligence 36(7), 1325-1339 (2013) 3, 9
15. Kipf, T.N., Welling, M.: Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907 (2016) 3, 4
16. Li, H., Shi, B., Dai, W., Zheng, H., Wang, B., Sun, Y., Guo, M., Li, C., Zou, J., Xiong, H.: Pose-oriented transformer with uncertainty-guided refinement for 2d-to-3d human pose estimation. In: Proceedings of the AAAI Conference on Artificial Intelligence. vol. 37, pp. 1296-1304 (2023) 4
17. Li, W., Liu, H., Ding, R., Liu, M., Wang, P., Yang, W.: Exploiting temporal contexts with strided transformer for 3d human pose estimation. IEEE Transactions on Multimedia 25, 1282-1293 (2022) 4, 10, 14
18. Li, W., Liu, H., Tang, H., Wang, P., Van Gool, L.: Mhformer: Multi-hypothesis transformer for 3d human pose estimation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13147-13156 (2022) 4, 9, 10, 11, 14
19. Li, Y., Zhang, S., Wang, Z., Yang, S., Yang, W., Xia, S.T., Zhou, E.: Tokenpose: Learning keypoint tokens for human pose estimation. In: Proceedings of the IEEE/CVF International conference on computer vision. pp. 11313-11322 (2021) 3
20. Lin, J., Lee, G.H.: Trajectory space factorization for deep video-based 3d human pose estimation. arXiv preprint arXiv:1908.08289 (2019) 11
21. Liu, R., Shen, J., Wang, H., Chen, C., Cheung, S.c., Asari, V.: Attention mechanism exploits temporal contexts: Real-time 3d human pose reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5064-5073 (2020) 3
22. Martinez, J., Hossain, R., Romero, J., Little, J.J.: A simple yet effective baseline for 3d human pose estimation. In: Proceedings of the IEEE international conference on computer vision. pp. 2640-2649 (2017) 1, 3
23. Mehta, D., Rhodin, H., Casas, D., Fua, P., Sotnychenko, O., Xu, W., Theobalt, C.: Monocular 3d human pose estimation in the wild using improved cnn supervision. In: 2017 international conference on 3D vision (3DV). pp. 506-516. IEEE (2017) 3, 9
24. Mehta, D., Sridhar, S., Sotnychenko, O., Rhodin, H., Shafiei, M., Seidel, H.P., Xu, W., Casas, D., Theobalt, C.: Vnect: Real-time 3d human pose estimation with a single rgb camera. Acm transactions on graphics (tog) 36(4), 1-14 (2017) 4
25. Newell, A., Yang, K., Deng, J.: Stacked hourglass networks for human pose estimation. In: Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part VIII 14. pp. 483-499. Springer (2016) 3
26.
Pavlakos, G., Zhou, X., Derpanis, K.G., Daniilidis, K.: Coarse-to-fine volumetric prediction for single-image 3d human pose. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 7025-7034 (2017) 3 +27. Pavllo, D., Feichtenhofer, C., Grangier, D., Auli, M.: 3d human pose estimation in video with temporal convolutions and semi-supervised training. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 7753-7762 (2019) 2, 4, 9, 10 +28. Press, O., Smith, N.A., Lewis, M.: Train short, test long: Attention with linear biases enables input length extrapolation. arXiv preprint arXiv:2108.12409 (2021) 5 +29. Rayat Imtiaz Hossain, M., Little, J.J.: Exploiting temporal information for 3d pose estimation. arXiv e-prints pp. arXiv-1711 (2017) 2 + +30. Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., Chen, L.C.: Mobilenetv2: Inverted residuals and linear bottlenecks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 4510-4520 (2018) 4 +31. Shan, W., Liu, Z., Zhang, X., Wang, S., Ma, S., Gao, W.: P-stmo: Pre-trained spatial temporal many-to-one model for 3d human pose estimation. In: European Conference on Computer Vision. pp. 461-478. Springer (2022) 10, 11, 14 +32. Su, J., Lu, Y., Pan, S., Murtadha, A., Wen, B., Liu, Y.: Roformer: Enhanced transformer with rotary position embedding. arXiv preprint arXiv:2104.09864 (2021) 4 +33. Sun, K., Xiao, B., Liu, D., Wang, J.: Deep high-resolution representation learning for human pose estimation. In: CVPR (2019) 3 +34. Sun, X., Shang, J., Liang, S., Wei, Y.: Compositional human pose regression. In: Proceedings of the IEEE international conference on computer vision. pp. 2602-2611 (2017) 3 +35. Sun, Y., Dong, L., Huang, S., Ma, S., Xia, Y., Xue, J., Wang, J., Wei, F.: Retentive network: A successor to transformer for large language models. arXiv preprint arXiv:2307.08621 (2023) 2, 5 +36. Sun, Y., Dong, L., Patra, B., Ma, S., Huang, S., Benhaim, A., Chaudhary, V., Song, X., Wei, F.: A length-extrapolatable transformer. arXiv preprint arXiv:2212.10554 (2022) 4 +37. Svenstrup, M., Tranberg, S., Andersen, H.J., Bak, T.: Pose estimation and adaptive robot behaviour for human-robot interaction. In: 2009 IEEE International Conference on Robotics and Automation. pp. 3571-3576. IEEE (2009) 1 +38. Tang, Z., Qiu, Z., Hao, Y., Hong, R., Yao, T.: 3d human pose estimation with spatio-temporal criss-cross attention. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4790-4799 (2023) 9, 10, 11, 12, 13, 14 +39. Wang, J., Yan, S., Xiong, Y., Lin, D.: Motion guided 3d pose estimation from videos. In: European Conference on Computer Vision. pp. 764-780. Springer (2020) 1, 9, 11 +40. Wehrbein, T., Rudolph, M., Rosenhahn, B., Wandt, B.: Probabilistic monocular 3d human pose estimation with normalizing flows. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11199-11208 (2021) 1 +41. Xu, T., Takano, W.: Graph stacked hourglass networks for 3d human pose estimation. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 16105-16114 (2021) 1 +42. Xu, Y., Zhang, J., Zhang, Q., Tao, D.: Vitpose: Simple vision transformer baselines for human pose estimation. Advances in Neural Information Processing Systems 35, 38571-38584 (2022) 3 +43. Xue, Y., Chen, J., Gu, X., Ma, H., Ma, H.: Boosting monocular 3d human pose estimation with part aware attention. 
IEEE Transactions on Image Processing 31, 4278-4291 (2022) 2, 10, 11 +44. Yan, S., Xiong, Y., Lin, D.: Spatial temporal graph convolutional networks for skeleton-based action recognition. In: Proceedings of the AAAI conference on artificial intelligence. vol. 32 (2018) 1 +45. Zeng, A., Sun, X., Yang, L., Zhao, N., Liu, M., Xu, Q.: Learning skeletal graph neural networks for hard 3d pose estimation. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11436-11445 (2021) 1 +46. Zhang, J., Tu, Z., Yang, J., Chen, Y., Yuan, J.: Mixste: Seq2seq mixed spatiotemporal encoder for 3d human pose estimation in video. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 13232-13242 (2022) 2, 8, 9, 10, 11, 12, 13, 14 + +47. Zhang, X., Zhou, X., Lin, M., Sun, J.: Shufflenet: An extremely efficient convolutional neural network for mobile devices. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 6848-6856 (2018) 4 +48. Zhao, Q., Zheng, C., Liu, M., Chen, C.: A single 2d pose with context is worth hundreds for 3d human pose estimation. Advances in Neural Information Processing Systems 36 (2024) 1, 3 +49. Zhao, Q., Zheng, C., Liu, M., Wang, P., Chen, C.: Poseformerv2: Exploring frequency domain for efficient and robust 3d human pose estimation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8877-8886 (2023) 2 +50. Zheng, C., Zhu, S., Mendieta, M., Yang, T., Chen, C., Ding, Z.: 3d human pose estimation with spatial and temporal transformers. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 11656-11665 (2021) 2, 4, 9, 10, 11 +51. Zhu, W., Ma, X., Liu, Z., Liu, L., Wu, W., Wang, Y.: Motionbert: A unified perspective on learning human motion representations. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 
15085-15099 (2023) 4, 9 \ No newline at end of file diff --git a/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/images.zip b/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..b42eff179f2292f790e815d7bdd5c05d76b9299a --- /dev/null +++ b/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b35c546d617236d3fc8ff7ce5ebc4f5b709cf93625dc0b83b1cd5e68a3cbe7a +size 704656 diff --git a/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/layout.json b/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..db6eb1a867afe635cd17562c1cd893148c9079c4 --- /dev/null +++ b/2024/3D Human Pose Estimation via Non-Causal Retentive Networks/layout.json @@ -0,0 +1,8711 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 153, + 111, + 462, + 146 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 111, + 462, + 146 + ], + "spans": [ + { + "bbox": [ + 153, + 111, + 462, + 146 + ], + "type": "text", + "content": "3D Human Pose Estimation via Non-Causal Retentive Networks" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 149, + 167, + 465, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 167, + 465, + 194 + ], + "spans": [ + { + "bbox": [ + 149, + 167, + 465, + 194 + ], + "type": "text", + "content": "Kaili Zheng" + }, + { + "bbox": [ + 149, + 167, + 465, + 194 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 149, + 167, + 465, + 194 + ], + "type": "text", + "content": ", Feixiang Lu" + }, + { + "bbox": [ + 149, + 167, + 465, + 194 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 149, + 167, + 465, + 194 + ], + "type": "text", + "content": ", Yihao Lv" + }, + { + "bbox": [ + 149, + 167, + 465, + 194 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 149, + 167, + 465, + 194 + ], + "type": "text", + "content": ", Liangjun Zhang" + }, + { + "bbox": [ + 149, + 167, + 465, + 194 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 149, + 167, + 465, + 194 + ], + "type": "text", + "content": ", Chenyi Guo" + }, + { + "bbox": [ + 149, + 167, + 465, + 194 + ], + "type": "inline_equation", + "content": "^{1\\boxtimes}" + }, + { + "bbox": [ + 149, + 167, + 465, + 194 + ], + "type": "text", + "content": ", and Ji Wu" + }, + { + "bbox": [ + 149, + 167, + 465, + 194 + ], + "type": "inline_equation", + "content": "^{1,3,4\\boxtimes}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 181, + 201, + 432, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 201, + 432, + 222 + ], + "spans": [ + { + "bbox": [ + 181, + 201, + 432, + 222 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 181, + 201, + 432, + 222 + ], + "type": "text", + "content": " Department of Electronic Engineering, Tsinghua University " + }, + { + "bbox": [ + 181, + 201, + 432, + 222 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 181, + 201, + 432, + 222 + ], + "type": "text", + "content": " Baidu Research" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 151, + 223, + 461, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 223, + 461, + 257 + ], + "spans": [ + { + "bbox": 
[ + 151, + 223, + 461, + 257 + ], + "type": "text", + "content": "3 College of AI, Tsinghua University \n4 Beijing National Research Center for Information Science and Technology \nzk122@mails.tsinghua.edu.cn" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 160, + 298, + 455, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 298, + 455, + 474 + ], + "spans": [ + { + "bbox": [ + 160, + 298, + 455, + 474 + ], + "type": "text", + "content": "Abstract. Temporal dependencies are essential in 3D human pose estimation to mitigate depth ambiguity. Previous methods typically use a fixed-length sliding window to capture these dependencies. However, they treat past and future frames equally, ignoring the fact that relying on too many future frames increases the inference latency. In this paper, we present a 3D human pose estimation model based on Retentive Networks (RetNet) that incorporates temporal information by utilizing a large number of past frames and a few future frames. The Non-Causal RetNet (NC-RetNet) is designed to allow the originally causal RetNet to be aware of future information. Additionally, we propose a knowledge transfer strategy, i.e., training the model with a larger chunk size and using a smaller chunk size during inference, to reduce latency while maintaining comparable accuracy. Extensive experiments have been conducted on the Human3.6M and MPI-INF-3DHP datasets, and the results demonstrate that our method achieves state-of-the-art performance. Code and models are available at https://github.com/Kelly510/PoseRetNet." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 160, + 483, + 453, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 483, + 453, + 506 + ], + "spans": [ + { + "bbox": [ + 160, + 483, + 453, + 506 + ], + "type": "text", + "content": "Keywords: 3D Human Pose Estimation " + }, + { + "bbox": [ + 160, + 483, + 453, + 506 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 483, + 453, + 506 + ], + "type": "text", + "content": " Temporal Dependency " + }, + { + "bbox": [ + 160, + 483, + 453, + 506 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 483, + 453, + 506 + ], + "type": "text", + "content": " Retentive Networks" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 527, + 230, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 527, + 230, + 540 + ], + "spans": [ + { + "bbox": [ + 132, + 527, + 230, + 540 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 551, + 482, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 551, + 482, + 647 + ], + "spans": [ + { + "bbox": [ + 130, + 551, + 482, + 647 + ], + "type": "text", + "content": "Monocular 3D Human Pose Estimation (HPE) aims to reconstruct the 3D positions of human body joints based on monocular observations. This popular computer vision task has a wide range of applications, including action recognition [44], human-robot interaction [37] and motion analysis [11]. Most of the previous works [2,22,39-41,45,48] adopt the 2D-to-3D lifting pipeline which predicts 3D human pose based on 2D keypoint detection results. It is challenging due to the depth ambiguity issue, namely, one 2D detection result may correspond to multiple 3D human skeletons." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 132, + 653, + 265, + 665 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 653, + 265, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 653, + 265, + 665 + ], + "type": "inline_equation", + "content": "\\boxtimes" + }, + { + "bbox": [ + 132, + 653, + 265, + 665 + ], + "type": "text", + "content": " denotes corresponding author." + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 116, + 482, + 247 + ], + "blocks": [ + { + "bbox": [ + 133, + 116, + 482, + 247 + ], + "lines": [ + { + "bbox": [ + 133, + 116, + 482, + 247 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 482, + 247 + ], + "type": "image", + "image_path": "d43a6bc9dcd8f883044c0b3e11f2c13a09273476a056116aaf2462a2aebd94b8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 259, + 482, + 326 + ], + "lines": [ + { + "bbox": [ + 130, + 259, + 482, + 326 + ], + "spans": [ + { + "bbox": [ + 130, + 259, + 482, + 326 + ], + "type": "text", + "content": "Fig. 1: (Left) The framework of our method, which utilizes long-term historical information from the cross-chunk state and relies on only a few future frames within the chunk. The past, current, and future frames are denoted by blue, green, and red borders, respectively. (Right) Comparison of Mean Per-Joint Position Error (MPJPE) on the Human3.6M dataset under different test chunk sizes. Our method outperforms previous state-of-the-art remarkably, especially under small chunk sizes." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 353, + 482, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 353, + 482, + 555 + ], + "spans": [ + { + "bbox": [ + 130, + 353, + 482, + 555 + ], + "type": "text", + "content": "To mitigate the depth ambiguity, monocular 3D human pose estimation models usually take multiple frames as the input and exploit additional temporal dependencies of human pose to reduce the ambiguity [1, 27, 29, 43, 46, 49, 50]. Specifically, a sliding-window of fixed length is usually adopted to capture the temporal dependencies, where the length of window is referred to as the number of frames or chunk size. A larger chunk size typically results in better accuracy performance as it allows for the perception of more long-range temporal information. However, previous methods treat past and future frames equally, and a larger chunk size also means that the model relies on the arrival of more future frames before inference, which significantly increases the inference latency. For instance, consider the seq2frame framework, which aims to predict the 3D pose of the center frame among the input frames. If the chunk size is 243 and the input frame rate is " + }, + { + "bbox": [ + 130, + 353, + 482, + 555 + ], + "type": "inline_equation", + "content": "10\\mathrm{Hz}" + }, + { + "bbox": [ + 130, + 353, + 482, + 555 + ], + "type": "text", + "content": ", the inference latency will be " + }, + { + "bbox": [ + 130, + 353, + 482, + 555 + ], + "type": "inline_equation", + "content": "(243 - 1)\\div 2\\div 10 = 12.1" + }, + { + "bbox": [ + 130, + 353, + 482, + 555 + ], + "type": "text", + "content": " seconds. 
For seq2seq framework in the same case, the inference latency for the first frame within the chunk is " + }, + { + "bbox": [ + 130, + 353, + 482, + 555 + ], + "type": "inline_equation", + "content": "(243 - 1)\\div 10 = 24.2" + }, + { + "bbox": [ + 130, + 353, + 482, + 555 + ], + "type": "text", + "content": " seconds and that for the last frame is zero. The average latency is 12.1 seconds as well. This is considerably longer than the forward time of the model itself." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 558, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 558, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 558, + 482, + 665 + ], + "type": "text", + "content": "To address this problem, we propose a 3D human pose estimation model based on Retentive Networks (RetNet) [35]. Fig. 1(left) illustrates the framework of our method. Different from previous methods that use similar amounts of past and future frames to incorporate temporal information, our method mainly extracts temporal information from past frames (blue) and uses only a few future frames (red) within the current chunk for refinement. The RetNet can easily capture long-term historical information by using the cross-chunk state, and the Non-Causal RetNet (NC-RetNet) is further designed to make the originally causal RetNet be aware of the future frames. Moreover, we develop a knowl" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 228, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 228, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 228, + 102 + ], + "type": "text", + "content": "K. Zheng et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 174 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 174 + ], + "type": "text", + "content": "edge transfer strategy of training the model with a large chunk size and using a small chunk size during inference. Thanks to the long-term historical information brought by the cross-chunk state, decreasing the test chunk size does not significantly affect performance, as shown in Fig. 1(right), but greatly reduces the inference latency." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 175, + 479, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 175, + 479, + 258 + ], + "spans": [ + { + "bbox": [ + 130, + 175, + 479, + 258 + ], + "type": "text", + "content": "Extensive experiments have been conducted on two datasets, Human3.6M [14] and MPI-INF-3DHP [23], both quantitatively and qualitatively. The results demonstrate that our method outperforms state-of-the-art with a clear margin in terms of accuracy and continuity, especially when the model infers with a small chunk size. Our method even surpasses state-of-the-art with a smaller chunk size during inference. The ablation study also validates the efficacy of the components in our method. Our main contributions can be summarized as follows." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 135, + 269, + 480, + 387 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 136, + 269, + 479, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 269, + 479, + 315 + ], + "spans": [ + { + "bbox": [ + 136, + 269, + 479, + 315 + ], + "type": "text", + "content": "1. This is the first study to investigate the potential of RetNet in 3D human pose estimation. And we introduce NC-RetNet to extract temporal information, which leverages past frames through the cross-chunk state and a limited number of future frames within the chunk." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 135, + 316, + 480, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 316, + 480, + 350 + ], + "spans": [ + { + "bbox": [ + 135, + 316, + 480, + 350 + ], + "type": "text", + "content": "2. The NC-RetNet can be trained using a large chunk size and infer using a small chunk size without significant performance deterioration, but with a notable decrease in inference latency." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 135, + 352, + 479, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 352, + 479, + 387 + ], + "spans": [ + { + "bbox": [ + 135, + 352, + 479, + 387 + ], + "type": "text", + "content": "3. Extensive experiments have been conducted and the results demonstrate that our method is the state-of-the-art in terms of accuracy and continuity, especially when the test chunk size is small." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 405, + 237, + 418 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 405, + 237, + 418 + ], + "spans": [ + { + "bbox": [ + 132, + 405, + 237, + 418 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 430, + 301, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 430, + 301, + 441 + ], + "spans": [ + { + "bbox": [ + 132, + 430, + 301, + 441 + ], + "type": "text", + "content": "2.1 3D Human Pose Estimation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 449, + 480, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 449, + 480, + 605 + ], + "spans": [ + { + "bbox": [ + 130, + 449, + 480, + 605 + ], + "type": "text", + "content": "Monocular 3D human pose estimation is a fundamental computer vision task with a broad range of applications. Direct estimation of the 3D positions of human joints from raw image pixels [26, 34] is difficult not only because of the complexity of extracting image features, but also due to the lack of image-3D data pairs. For these reasons, Martinez et al. [22] propose to estimate 3D human pose in a two-stage manner: detect 2D keypoints from images first and then lift 2D to 3D. Since this approach can utilize existing 2D pose estimation systems [3, 19, 25, 33, 42] and a large amount of 3D motion capture data, it has received a lot of attention. In this paper, we also focus on the 2D-to-3D lifting task. Although there are methods such as [48] propose to leverage visual cues only to mitigate depth ambiguity, these methods are unable to produce reconstructions with good continuity. Therefore, temporal dependencies are very crucial for monocular human pose estimation models." 
2.2 Exploitation of Temporal Dependencies

Previous methods mostly adopt four architectures to exploit temporal dependencies: CNNs, RNNs, GCNs [15] and transformers [21]. For example, to model the temporal dependencies of human motion, Pavllo et al. [27] propose a temporal convolution model that uses dilated temporal convolutions to capture long-term information; its temporal receptive field depends on the dilation ratio and the number of layers. Similarly, Choi et al. [5] utilize GRUs [7] to extract features from past and future frames within a fixed-length window before integrating them. Cai et al. [1] exploit graph convolutions [15] to model the graph structure of different human joints; along the time axis, the joints at different time steps are treated as graph nodes, with any two consecutive joints adjacent in the graph. PoseFormer [50], proposed by Zheng et al., is the first work to introduce transformers to the 3D human pose estimation task; it incorporates a Spatial Transformer Module that encodes the geometric structure of the human pose in a single frame into a token, and a Temporal Transformer Encoder that models temporal dependencies between frames. Since then, many works [8,10,16-18,51] have explored the potential of transformers in 3D human pose estimation.

Although these methods leverage different architectures to extract spatial-temporal information from 2D sequences, they share a common framework that employs a fixed number of frames to predict the result. Moreover, the chunk size has a significant impact on accuracy, and a larger chunk size is usually beneficial for performance.
However, previous works have not taken into account that a larger chunk size also significantly increases the inference delay. This motivates us to develop a method that better balances accuracy and inference latency.

2.3 Real-time Human Pose Estimation

In addition to accuracy, low inference latency is desired for human pose estimation models in many scenarios, and significant effort has been devoted to reducing it. On the one hand, since human pose estimation models typically use a backbone network to extract image features, general-purpose lightweight backbones [9,13,30,47] can directly replace the backbone in HPE models [6]. On the other hand, simplifying the pipeline can improve efficiency; for example, VNect [24] combines bounding-box detection, 2D keypoint detection, and 2D-to-3D lifting into one model. However, existing methods only focus on decreasing the forward time of HPE models and, unlike our method, do not consider the inference latency caused by large chunk sizes.

2.4 Length Extrapolation

Models in natural language processing are expected to generalize across sequences of varying lengths, particularly sequences longer than the training samples. This desired property is called length extrapolation. Achieving it requires relative position embeddings, such as RoPE [32] and xPos [36], because they do not require input sequences of fixed length.
Additionally, some methods [4,28] modify the attention module to achieve length extrapolation. For example, ALiBi [28] subtracts the absolute temporal distance between two tokens from their attention score, which improves performance on extremely long sequences. Our knowledge transfer strategy is similar to length extrapolation, except that we concentrate on the model's transition from large chunks to smaller ones.

3 Method

3.1 Preliminary

RetNet [35] is a sequence modeling network that produces a contextualized feature sequence of length $L$ given an input sequence $X \in \mathbb{R}^{L \times d}$. The basic module of RetNet is retention, which has three mathematically equivalent representations: parallel, recurrent, and chunkwise recurrent. We explain these representations below.

Parallel. Given the input sequence $X$, the query $Q$ and key $K$ are derived by applying a linear projection and RoPE. $P_q$ and $P_k$ are the rotary position embeddings for the query and the key respectively; the value $V$ is obtained by a linear projection only. $D \in \mathbb{R}^{L \times L}$ combines causal masking with exponential decay with respect to the relative distance, and $\odot$ denotes the element-wise product. Since an entry of the mask is non-zero only when the reference token (the $m$-th) is not later than the target token (the $n$-th), RetNet is a fully causal model.

$$Q = P_q(XW_Q), \quad K = P_k(XW_K), \quad V = XW_V$$

$$D_{nm} = \begin{cases} \gamma^{n-m}, & n \ge m \\ 0, & n < m \end{cases} \tag{1}$$

$$\operatorname{Retention}(X) = \left(QK^T \odot D\right)V$$
Recurrent. $S_n \in \mathbb{R}^{d \times d}$ represents the state at time step $n$, and $Q_n, K_n, V_n$ are the values of the $Q, K, V$ of Eq. (1) at time step $n$. This representation again shows that RetNet is entirely causal: the output for the $n$-th frame depends solely on the previous state $S_{n-1}$ and the $n$-th input.

$$S_n = \gamma S_{n-1} + K_n^T V_n \tag{2}$$

$$\operatorname{Retention}(X_n) = Q_n S_n, \quad n = 1, \dots, L$$
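Under the same simplifications, the recurrent form of Eq. (2) is a frame-by-frame state update; with the rotations omitted, iterating it reproduces the output of the parallel sketch above.

```python
import torch

def recurrent_retention(X, W_Q, W_K, W_V, gamma):
    # S_n = gamma * S_{n-1} + K_n^T V_n,  out_n = Q_n S_n   (Eq. 2)
    d = W_K.shape[1]
    S = torch.zeros(d, d)
    outs = []
    for n in range(X.shape[0]):
        x_n = X[n:n + 1]                   # current frame as a 1 x d row
        Q_n, K_n, V_n = x_n @ W_Q, x_n @ W_K, x_n @ W_V
        S = gamma * S + K_n.T @ V_n        # rank-1 update of the running state
        outs.append(Q_n @ S)
    return torch.cat(outs, dim=0)
```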
Chunkwise Recurrent. This representation is a hybrid of the two above. Suppose the input sequence is segmented into chunks of length $T$, and denote $X_{iT:(i+1)T}$ by $X_{[i]}$, where $[i]$ indicates the $i$-th chunk. Within a chunk the model follows the parallel representation, while cross-chunk information is passed following the recurrent representation. The $D$ here is similar to that in Eq. (1), but its shape changes from $L \times L$ to $T \times T$; the $D$'s of Eq. (1) and Eq. (3) are illustrated in Fig. 2(left). $\zeta$ and $\xi$ are both $T \times d$ matrices whose $r$-th rows are $\gamma^{T-r-1}$ and $\gamma^{r+1}$ respectively.

$$S_i = K_{[i]}^T \left(V_{[i]} \odot \zeta\right) + \gamma^T S_{i-1}$$

$$\operatorname{Retention}\left(X_{[i]}\right) = \underbrace{\left(Q_{[i]} K_{[i]}^T \odot D\right) V_{[i]}}_{\text{Inner-Chunk}} + \underbrace{\left(Q_{[i]} S_{i-1}\right) \odot \xi}_{\text{Cross-Chunk}} \tag{3}$$

Fig. 2: (Left) The causal masks in the parallel and chunkwise recurrent representations of the original RetNet; the model can only perceive historical frames even though several future frames exist in the current chunk. (Right) The proposed Non-Causal RetNet (NC-RetNet) utilizes all frames within the current chunk via a full mask and can be trained in parallel with a staircase-shaped mask.

Since low inference latency is required in real-time scenarios, the parallel representation is not suitable. Moreover, the recurrent representation is the special case of the chunkwise recurrent representation with $T = 1$. We therefore build our human pose estimation model on the chunkwise recurrent representation of RetNet.
3.2 Non-Causal RetNet

Although the chunkwise recurrent representation of RetNet processes the input sequence chunk by chunk, it does not utilize all the information in the current chunk. As shown in Fig. 2(left), its mask is a lower triangular matrix, so the estimation of the current frame uses only the frames before it, regardless of the future frames within the chunk. However, leveraging some future information can be very helpful for the accuracy of human pose estimation models.

To solve this problem, we modify the causal masking in RetNet to exploit all the information within the current chunk, and propose the Non-Causal RetNet (NC-RetNet). Formally, the new $D$ in the chunkwise recurrent representation is given by Eq. (4). The new mask is a full matrix instead of a lower triangular one: when predicting the 3D pose of the $n$-th frame, we can compute the exponential decay of both past frames ($m < n$) and future frames ($m > n$)
within the chunk by using the absolute distance $|n - m|$ between the two frames. The expression of the new $D$ matrix in the parallel representation can be found in the Supp. Mat. The chunk size $T$ can be adjusted to balance accuracy and inference latency: the larger $T$ is, the more future information the model can perceive, but the longer the inference latency becomes.

$$D = \left\{D_{nm}\right\} = \left\{\gamma^{|n-m|}\right\}, \quad n, m \in \{1, \dots, T\} \tag{4}$$

Fig. 2(right) illustrates the masks in the parallel and chunkwise recurrent representations of this non-causal retention. Note that the model can still be trained in parallel using the staircase-shaped mask, but it has no recurrent representation unless $T = 1$.

With the non-causal masking, NC-RetNet exploits temporal dependencies both from the cross-chunk state, which provides long-term historical information, and from a few future frames, which provide some future information. The temporal receptive field of our method is therefore not limited by the chunk size. In fact, the chunk size only affects the amount of future information, while historical information is always adequate thanks to the cross-chunk state.
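For illustration, the full mask of Eq. (4) and one plausible reading of the staircase-shaped training mask in Fig. 2(right) (full decay within a frame's own chunk, decayed visibility of all earlier chunks) can be built as follows; both helper names are ours.

```python
import torch

def get_D_noncausal(T: int, gamma: float) -> torch.Tensor:
    # Full chunk mask of Eq. (4): D[n, m] = gamma^|n - m| for all n, m in the chunk.
    n = torch.arange(T).view(T, 1)
    m = torch.arange(T).view(1, T)
    return gamma ** (n - m).abs().float()

def get_D_staircase(L: int, T: int, gamma: float) -> torch.Tensor:
    # Staircase mask for parallel training (our reading of Fig. 2, right): frame n
    # sees every frame of its own chunk and of all earlier chunks, decayed by gamma^|n-m|.
    n = torch.arange(L).view(L, 1)
    m = torch.arange(L).view(1, L)
    visible = (m // T) <= (n // T)
    return torch.where(visible, gamma ** (n - m).abs().float(), torch.zeros(L, L))
```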
3.3 Transfer Knowledge from Large Chunks

We further develop a strategy that improves NC-RetNet's performance under small test chunk sizes: train the model with a large chunk size and infer with a small one. Since the model uses xPos, a relative position embedding, it can by construction handle 2D sequences of different lengths. In addition, the cross-chunk state $S_i$ in the chunkwise recurrent representation is updated every chunk and accumulates a lot of information from previous chunks. With this long-term historical information, the model becomes insensitive to the number of future frames. Therefore, using a smaller chunk size during inference does not significantly decrease accuracy, but greatly reduces inference latency. In this sense, knowledge is transferred from the large chunks used in training to the small chunks used during inference.

Algorithm 1: Pseudo-code for training

```
Input: training dataloader, initialized model, training chunk size T_l
Output: model after training
for input_2d, target in dataloader do
    L = input_2d.size(1)                 # total length of the input sequence
    D_parallel = get_D_parallel(L, T_l)  # staircase-shaped mask for parallel training,
                                         # given the total length and the chunk size
    pred = model.forward_parallel(input_2d, D_parallel)
    loss = loss_func(pred, target)
    loss.backward()
    optimizer.step()
end for
```

Algorithm 2: Pseudo-code for inference

```
Input: 2D stream, trained model, test chunk size T_s
Output: 3D stream {y_n}
D_chunkwise = get_D_chunkwise(T_s)       # full mask for chunkwise inference,
                                         # given the test chunk size
s_n, x_n = None, []
for x_i in stream do
    x_n.append(x_i)
    if len(x_n) == T_s then
        y_n, s_n = model.forward_chunkwise(x_n, D_chunkwise, s_n, n)
        x_n = []
        output(y_n)                      # emit y_n every chunk for downstream tasks
    end if
end for
```
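The loop of Algorithm 2 can also be written as runnable Python, with a stub standing in for the trained network; the stub's interface and the output shapes are illustrative assumptions, not the paper's actual API.

```python
import torch

class StubModel:
    # Illustrative stand-in for the trained network's chunkwise interface.
    def forward_chunkwise(self, x_chunk, D, state):
        T = x_chunk.shape[0]
        y = torch.zeros(T, 17, 3)          # T frames of 17 joints in 3D (assumed shapes)
        new_state = torch.zeros(1) if state is None else state
        return y, new_state

def stream_inference(stream, model, T_s, gamma=0.9):
    n = torch.arange(T_s).view(T_s, 1)
    D = gamma ** (n - n.T).abs().float()   # full chunk mask of Eq. (4)
    state, buffer = None, []
    for x_i in stream:                     # x_i: 2D keypoints of one frame
        buffer.append(x_i)
        if len(buffer) == T_s:             # a chunk is ready every T_s frames
            y, state = model.forward_chunkwise(torch.stack(buffer), D, state)
            buffer = []
            yield y                        # 3D poses for the whole chunk

# Example: 18 frames with T_s = 9 yield two chunks of 3D output.
frames = [torch.randn(17, 2) for _ in range(18)]
for y_chunk in stream_inference(frames, StubModel(), T_s=9):
    print(y_chunk.shape)                   # torch.Size([9, 17, 3])
```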
The details of training and testing are elaborated below. During training, we utilize RetNet's parallel representation to achieve training parallelism, and set the training chunk size to a large value $T_l$ to capture long-term patterns of human motion; the pseudo-code is shown in Algorithm 1. The parallel representation used during training implicitly incorporates the cross-chunk state, meaning that the model can theoretically observe historical information over a long period of time as well as many future frames. During inference, the test chunk size $T_s$ is set smaller than the training chunk size $T_l$ and the chunkwise recurrent representation is used; the pseudo-code is given in Algorithm 2. Given a stream of 2D keypoints, the model processes the stream every $T_s$ frames based on the current chunk x_n and an explicit cross-chunk state s_n. This state carries information about previous chunks and makes the model insensitive to the number of future frames, so even though the chunks are smaller than the training chunks, the model can still extract stable temporal features.

3.4 Implementation Details

We implement our idea on top of the state-of-the-art seq2seq method MixSTE [46] by replacing its temporal encoder with RetNet. Since the movement of distal joints is more erratic than that of torso joints, the estimation of these joints should rely on more local temporal information. We therefore assign different decay coefficients to different human joints, referred to as joint-related decay coefficients. The corresponding chunkwise recurrent representation is formulated in Eq. (5), where $p$ is the index of the human joint.
$$S_{i,p} = K_{[i],p}^T \left(V_{[i],p} \odot \zeta_p\right) + \gamma_p^T S_{i-1,p}$$

$$\operatorname{Inner}_{i,p} = \left(Q_{[i],p} K_{[i],p}^T \odot D_p\right) V_{[i],p} \tag{5}$$

$$\operatorname{Cross}_{i,p} = \left(Q_{[i],p} S_{i-1,p}\right) \odot \xi_p$$

$$\operatorname{Retention}\left(X_{[i]}\right) = \operatorname{Concat}\left(\operatorname{Inner}_{i,p} + \operatorname{Cross}_{i,p}\right)$$
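As an illustration of Eq. (5), the joint-related decay simply gives each joint index $p$ its own $\gamma_p$, so each joint carries its own mask and state; the shapes and the example values below are our assumptions.

```python
import torch

def joint_decay_masks(T: int, gammas: torch.Tensor) -> torch.Tensor:
    # One full mask per joint p: D_p[n, m] = gammas[p]^|n - m|  (cf. Eq. 4 and Eq. 5).
    n = torch.arange(T).view(1, T, 1)
    m = torch.arange(T).view(1, 1, T)
    return gammas.view(-1, 1, 1) ** (n - m).abs().float()

# Illustrative values: torso joints keep long memory, distal joints focus locally.
gammas = torch.tensor([0.98, 0.95, 0.90, 0.85])
D_p = joint_decay_masks(T=9, gammas=gammas)   # shape (4, 9, 9), one mask per joint
```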
The loss function and training strategies are the same as in MixSTE. The training chunk size is 243 on Human3.6M and 81 on MPI-INF-3DHP, and we then test the model with different chunk sizes. The overall architecture of our model is given in the Supp. Mat. We also implement our idea on MotionBERT [51]; those results can likewise be found in the Supp. Mat.

4 Experiments

4.1 Datasets and Evaluation Protocols

Experiments are conducted on two human pose estimation datasets: Human3.6M [14] and MPI-INF-3DHP [23]. Human3.6M is the most widely used indoor dataset for single-person 3D human pose estimation, containing about 3.6 million images collected from 11 professional actors. Following common practice [2,27,46,50], we use the samples of subjects S1, S5, S6, S7 and S8 for training, and evaluate on S9 and S11. Mean Per-Joint Position Error (MPJPE) and Procrustes-Aligned MPJPE (PA-MPJPE) are evaluated on this dataset. We also report Mean Per-Joint Velocity Error (MPJVE), which reflects the continuity of the predicted results. MPI-INF-3DHP is a more challenging 3D human pose estimation dataset because it includes both indoor and outdoor scenes. Its samples are collected from 8 subjects, each performing 8 actions, and the test set consists of 6 subjects in different scenes. We follow the setup of [2,39,46,50] and report MPJPE, the Percentage of Correct Keypoints (PCK) within a 150 mm range, and the Area Under the Curve (AUC), following [18,38,50].
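For reference, minimal sketches of MPJPE and MPJVE as used in these protocols; PA-MPJPE is the same position error computed after aligning each predicted pose to the ground truth with a similarity transform. The function names are ours.

```python
import torch

def mpjpe(pred: torch.Tensor, gt: torch.Tensor) -> torch.Tensor:
    # Mean per-joint position error; pred and gt are (frames, joints, 3), e.g. in mm.
    return (pred - gt).norm(dim=-1).mean()

def mpjve(pred: torch.Tensor, gt: torch.Tensor) -> torch.Tensor:
    # Mean per-joint velocity error: MPJPE of the frame-to-frame differences,
    # which is why it reflects the continuity of a predicted sequence.
    return mpjpe(pred[1:] - pred[:-1], gt[1:] - gt[:-1])
```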
4.2 Quantitative Comparison

Results on Human3.6M. We first use the 2D keypoints detected by CPN [3] as input; the results are shown in Tab. 1. Our method achieves performance comparable to the state of the art when the inference chunk size is large ($T = 243$), and outperforms previous methods by a clear margin when the chunk size is small ($T = 27$, 81), since its accuracy decreases only slightly as the chunk size is reduced. Furthermore, the MPJPE at $T = 27$ is comparable to that of the previous state-of-the-art method at $T = 81$ (42.1 mm vs. 42.0 mm), indicating that our method provides similarly accurate predictions with much lower inference latency. Additionally, our method exhibits a significant improvement in continuity over previous methods, with a 0.2 mm-per-frame gain on the MPJVE metric.

We further use the ground-truth 2D keypoints of Human3.6M as input to test the upper bound of our method, as shown in Tab. 2. Using the model trained with 243 frames to infer with a chunk size of 81 improves the MPJPE metric by 2.3 mm over the previous state of the art (22.4 mm vs. 25.7 mm). Moreover, the MPJPE of our method at $T = 27$
+ } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 134, + 205, + 481, + 352 + ], + "blocks": [ + { + "bbox": [ + 132, + 174, + 481, + 198 + ], + "lines": [ + { + "bbox": [ + 132, + 174, + 481, + 198 + ], + "spans": [ + { + "bbox": [ + 132, + 174, + 481, + 198 + ], + "type": "text", + "content": "Table 1: Comparison of MPJPE, PA-MPJPE and MPJVE on the Human3.6M dataset using 2D keypoints detected by CPN [3] as input. " + }, + { + "bbox": [ + 132, + 174, + 481, + 198 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 132, + 174, + 481, + 198 + ], + "type": "text", + "content": " is the chunk size when testing." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 134, + 205, + 481, + 352 + ], + "lines": [ + { + "bbox": [ + 134, + 205, + 481, + 352 + ], + "spans": [ + { + "bbox": [ + 134, + 205, + 481, + 352 + ], + "type": "table", + "html": "
MPJPETDir.Disc.Eat.GreetPhonePhotoPosePurch.SitSitD.SmokeWaitWalkD.WalkWalkT.Average
MixSTE [46]27---------------45.1
STCFformer [38]2740.744.641.241.945.853.741.540.955.963.844.641.544.729.530.844.1
Ours2738.041.540.040.044.151.339.841.753.158.343.539.842.028.429.642.1
Anatomy [2]8142.143.841.043.846.153.542.443.153.960.545.742.146.232.233.844.6
PoseFormer [50]8141.544.839.842.546.551.642.142.053.360.745.543.346.131.832.244.3
Xue et al. [43]8142.145.340.942.945.452.742.642.555.361.844.941.744.929.930.844.2
P-STMO [31]8141.744.541.042.946.051.342.841.354.961.845.142.843.830.830.744.1
MixSTE [46]8139.843.038.640.143.450.640.641.452.256.743.840.843.929.430.342.4
STCFformer [38]8140.643.038.340.243.552.640.340.151.857.742.839.842.328.029.542.0
Ours8136.940.539.038.643.349.638.840.252.656.542.638.840.526.828.440.9
VideoPose3D [27]24345.246.743.345.648.155.144.644.357.365.847.144.049.032.833.946.8
Anatomy [2]24341.443.540.142.946.651.941.742.353.960.245.441.746.031.532.744.1
Xue et al. [43]24339.942.740.342.345.052.840.439.356.961.244.141.342.828.429.343.1
MHFormer [18]35139.243.140.140.944.951.240.641.353.560.343.741.143.829.830.643.0
P-STMO [31]24338.942.740.441.145.649.740.939.955.559.444.942.242.729.429.442.8
MixSTE [46]24337.640.937.339.742.349.940.139.851.755.042.139.841.027.927.940.9
STCFformer [38]24338.441.236.838.042.750.538.738.252.556.841.838.440.226.227.740.5
Ours24336.940.138.738.342.948.638.240.052.555.442.338.739.726.227.840.4
", + "image_path": "917480d22f541b00740c288666abde74a1ab509f3792d0033f57d0fa2d4f470f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 134, + 357, + 481, + 480 + ], + "blocks": [ + { + "bbox": [ + 134, + 357, + 481, + 480 + ], + "lines": [ + { + "bbox": [ + 134, + 357, + 481, + 480 + ], + "spans": [ + { + "bbox": [ + 134, + 357, + 481, + 480 + ], + "type": "table", + "html": "
PA-MPJPETDir.Disc.Eat.GreetPhonePhotoPosePurch.SitSitD.SmokeWaitWalkD.WalkWalkT.Average
STCFormer [38]2731.935.132.734.134.941.332.131.645.050.636.031.735.523.625.134.8
Ours2731.733.932.333.335.239.131.031.944.048.736.031.034.623.024.834.0
Anatomy [2]8133.135.333.435.936.141.732.833.342.649.437.032.736.525.527.935.6
PoseFormer [50]8132.534.832.634.635.339.532.132.042.848.534.832.435.324.526.034.6
Xue et al. [43]8131.635.532.334.235.140.332.332.344.549.635.831.635.023.724.734.6
MixSTE [46]8132.034.231.733.734.439.232.031.842.946.935.532.034.423.625.233.9
STCFormer [38]8130.433.831.131.733.539.530.830.041.845.834.330.132.821.923.432.7
Ours8130.533.131.431.633.038.429.830.643.645.434.430.332.421.522.232.6
VideoPose3D [27]24334.136.134.437.236.442.234.433.645.052.537.433.837.825.627.336.5
Anatomy [2]24332.635.132.835.436.340.432.432.342.749.036.832.436.024.926.535.0
Xue et al. [43]24331.234.131.933.833.939.531.630.045.448.135.031.133.522.423.633.7
P-STMO [31]24331.335.232.933.935.439.332.531.544.648.236.332.934.423.823.934.4
MixSTE [46]24330.833.130.331.833.139.131.130.542.544.534.030.832.722.122.932.6
STCFormer [38]24329.333.030.730.632.738.229.728.842.245.033.329.431.520.922.331.8
Ours24330.833.131.331.833.437.730.130.543.445.534.330.331.521.422.732.5
", + "image_path": "1091134b2efadcdd1b22b2846411347d506e3d0049143c03eeca5f2c60a79a6a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 134, + 485, + 481, + 548 + ], + "blocks": [ + { + "bbox": [ + 134, + 485, + 481, + 548 + ], + "lines": [ + { + "bbox": [ + 134, + 485, + 481, + 548 + ], + "spans": [ + { + "bbox": [ + 134, + 485, + 481, + 548 + ], + "type": "table", + "html": "
MPJVETDir.Disc.Eat.GreetPhonePhotoPosePurch.SitSitD.SmokeWaitWalkD.WalkWalkT.Average
VideoPose3D [27]2433.03.12.23.42.32.72.73.12.12.92.32.43.73.12.82.8
Anatomy [2]2432.72.82.03.12.02.42.42.81.82.42.02.13.42.72.42.5
PoseFormer [50]813.23.42.63.62.63.02.93.22.63.32.72.73.83.22.93.1
StridedFormer [17]3512.42.51.82.81.82.22.22.51.52.01.81.93.22.52.12.2
MixSTE [46]2432.52.71.92.81.92.22.32.61.62.21.92.03.12.62.22.3
Ours812.32.41.82.61.72.12.12.51.52.11.81.93.02.42.02.2
Ours2432.32.41.82.61.72.12.12.51.52.11.81.93.02.42.02.0
", + "image_path": "bbc80ed98c3768cf51eab5477c82c524f09a91df87951cab66fe8ca6b6cca1df.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "text", + "content": "Results on MPI-INF-3DHP The results on the MPI-INF-3DHP datasets are shown in Tab. 3. An improvement of " + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "inline_equation", + "content": "0.9\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "text", + "content": " on the MPJPE metric is achieved at " + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "inline_equation", + "content": "T = 81" + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "text", + "content": ", and the improvement becomes more remarkable as the chunk size decreases. In particular, our method outperforms previous methods very significantly with an improvement of " + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "inline_equation", + "content": "4.1\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "text", + "content": " on the MPJPE metric when " + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "text", + "content": " is 9. Similar to the phenomenon on the Human3.6M dataset, our method is" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 228, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 228, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 228, + 102 + ], + "type": "text", + "content": "K. Zheng et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 134, + 146, + 481, + 249 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 480, + 137 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 480, + 137 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 480, + 137 + ], + "type": "text", + "content": "Table 2: Comparison of MPJPE on the Human3.6M dataset using 2D ground truth keypoints as input." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 134, + 146, + 481, + 249 + ], + "lines": [ + { + "bbox": [ + 134, + 146, + 481, + 249 + ], + "spans": [ + { + "bbox": [ + 134, + 146, + 481, + 249 + ], + "type": "table", + "html": "
<tr><td>MPJPE</td><td>T</td><td>Dir.</td><td>Disc.</td><td>Eat.</td><td>Greet</td><td>Phone</td><td>Photo</td><td>Pose</td><td>Purch.</td><td>Sit</td><td>SitD.</td><td>Smoke</td><td>Wait</td><td>WalkD.</td><td>Walk</td><td>WalkT.</td><td>Average</td></tr>
<tr><td>Ours</td><td>27</td><td>23.7</td><td>24.8</td><td>23.5</td><td>24.4</td><td>23.6</td><td>28.1</td><td>27.2</td><td>25.3</td><td>26.7</td><td>27.9</td><td>25.0</td><td>23.5</td><td>23.6</td><td>17.2</td><td>18.7</td><td>24.2</td></tr>
<tr><td>PoseFormer [50]</td><td>81</td><td>30.0</td><td>33.6</td><td>29.9</td><td>31.0</td><td>30.2</td><td>33.3</td><td>34.8</td><td>31.4</td><td>37.8</td><td>38.6</td><td>31.7</td><td>31.5</td><td>29.0</td><td>23.3</td><td>23.1</td><td>31.3</td></tr>
<tr><td>Xue et al. [43]</td><td>81</td><td>27.6</td><td>28.8</td><td>24.9</td><td>25.7</td><td>26.7</td><td>30.6</td><td>30.8</td><td>26.4</td><td>35.8</td><td>32.7</td><td>27.1</td><td>26.2</td><td>25.6</td><td>19.2</td><td>20.6</td><td>27.2</td></tr>
<tr><td>MixSTE [46]</td><td>81</td><td>25.6</td><td>27.8</td><td>24.5</td><td>25.7</td><td>24.9</td><td>29.9</td><td>28.6</td><td>27.4</td><td>29.9</td><td>29.0</td><td>26.1</td><td>25.0</td><td>25.2</td><td>18.7</td><td>19.9</td><td>25.9</td></tr>
<tr><td>STCFormer [38]</td><td>81</td><td>26.2</td><td>26.5</td><td>23.4</td><td>24.6</td><td>25.0</td><td>28.6</td><td>28.3</td><td>24.6</td><td>30.9</td><td>33.7</td><td>25.7</td><td>25.3</td><td>24.6</td><td>18.6</td><td>19.7</td><td>25.7</td></tr>
<tr><td>Ours</td><td>81</td><td>20.9</td><td>22.5</td><td>21.8</td><td>21.5</td><td>22.0</td><td>25.6</td><td>23.4</td><td>23.7</td><td>28.1</td><td>28.8</td><td>23.9</td><td>20.9</td><td>21.1</td><td>14.9</td><td>16.3</td><td>22.4</td></tr>
<tr><td>Xue et al. [43]</td><td>243</td><td>25.8</td><td>25.2</td><td>23.3</td><td>23.5</td><td>24.0</td><td>27.4</td><td>27.9</td><td>24.4</td><td>29.3</td><td>30.1</td><td>24.9</td><td>24.1</td><td>23.3</td><td>18.6</td><td>19.7</td><td>24.7</td></tr>
<tr><td>MHFormer [18]</td><td>351</td><td>27.7</td><td>32.1</td><td>29.1</td><td>28.9</td><td>30.0</td><td>33.9</td><td>33.0</td><td>31.2</td><td>37.0</td><td>39.3</td><td>30.0</td><td>31.0</td><td>29.4</td><td>22.2</td><td>23.0</td><td>30.5</td></tr>
<tr><td>P-STMO [31]</td><td>243</td><td>28.5</td><td>30.1</td><td>28.6</td><td>27.9</td><td>29.8</td><td>33.2</td><td>31.3</td><td>27.8</td><td>36.0</td><td>37.4</td><td>29.7</td><td>29.5</td><td>28.1</td><td>21.0</td><td>21.0</td><td>29.3</td></tr>
<tr><td>MixSTE [46]</td><td>243</td><td>21.6</td><td>22.0</td><td>20.4</td><td>21.0</td><td>20.8</td><td>24.3</td><td>24.7</td><td>21.9</td><td>26.9</td><td>24.9</td><td>21.2</td><td>21.5</td><td>20.8</td><td>14.7</td><td>15.7</td><td>21.6</td></tr>
<tr><td>STCFormer [38]</td><td>243</td><td>21.4</td><td>22.6</td><td>21.0</td><td>21.3</td><td>23.8</td><td>26.0</td><td>24.2</td><td>20.0</td><td>28.9</td><td>28.0</td><td>22.3</td><td>21.4</td><td>20.1</td><td>14.2</td><td>15.0</td><td>22.0</td></tr>
<tr><td>Ours</td><td>243</td><td>20.0</td><td>21.1</td><td>20.9</td><td>20.8</td><td>20.1</td><td>24.9</td><td>23.5</td><td>22.5</td><td>26.5</td><td>39.6</td><td>21.7</td><td>20.9</td><td>20.4</td><td>14.5</td><td>15.7</td><td>21.5</td></tr>
", + "image_path": "5cfd6856267fbab9aaeaa33c9a663ac0cfb4409b5f810b5795b0c3c22c1b7183.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 273, + 480, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 273, + 480, + 331 + ], + "spans": [ + { + "bbox": [ + 130, + 273, + 480, + 331 + ], + "type": "text", + "content": "comparable to or better than previous methods with an even smaller chunk size. For example, the MPJPE of our method at " + }, + { + "bbox": [ + 130, + 273, + 480, + 331 + ], + "type": "inline_equation", + "content": "T = 27" + }, + { + "bbox": [ + 130, + 273, + 480, + 331 + ], + "type": "text", + "content": " is better than STCFformer at " + }, + { + "bbox": [ + 130, + 273, + 480, + 331 + ], + "type": "inline_equation", + "content": "T = 81" + }, + { + "bbox": [ + 130, + 273, + 480, + 331 + ], + "type": "text", + "content": " (22.7 mm vs. 23.1 mm). And the MPJPE of our method at " + }, + { + "bbox": [ + 130, + 273, + 480, + 331 + ], + "type": "inline_equation", + "content": "T = 9" + }, + { + "bbox": [ + 130, + 273, + 480, + 331 + ], + "type": "text", + "content": " is similar to STCFformer at " + }, + { + "bbox": [ + 130, + 273, + 480, + 331 + ], + "type": "inline_equation", + "content": "T = 27" + }, + { + "bbox": [ + 130, + 273, + 480, + 331 + ], + "type": "text", + "content": " (24.1 mm vs. 24.2 mm). These results show that our method generalizes well on different datasets." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 214, + 384, + 394, + 575 + ], + "blocks": [ + { + "bbox": [ + 130, + 353, + 480, + 374 + ], + "lines": [ + { + "bbox": [ + 130, + 353, + 480, + 374 + ], + "spans": [ + { + "bbox": [ + 130, + 353, + 480, + 374 + ], + "type": "text", + "content": "Table 3: Comparison of quantitative results on the MPI-INF-3DHP dataset. " + }, + { + "bbox": [ + 130, + 353, + 480, + 374 + ], + "type": "inline_equation", + "content": "\\uparrow" + }, + { + "bbox": [ + 130, + 353, + 480, + 374 + ], + "type": "text", + "content": ": higher is better. " + }, + { + "bbox": [ + 130, + 353, + 480, + 374 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 130, + 353, + 480, + 374 + ], + "type": "text", + "content": ": lower is better." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 214, + 384, + 394, + 575 + ], + "lines": [ + { + "bbox": [ + 214, + 384, + 394, + 575 + ], + "spans": [ + { + "bbox": [ + 214, + 384, + 394, + 575 + ], + "type": "table", + "html": "
<tr><td>Method</td><td>T</td><td>PCK↑</td><td>AUC↑</td><td>MPJPE↓</td></tr>
<tr><td>PoseFormer [50]</td><td>9</td><td>88.6</td><td>56.4</td><td>77.1</td></tr>
<tr><td>CrossFormer [10]</td><td>9</td><td>89.1</td><td>57.5</td><td>76.3</td></tr>
<tr><td>MHFormer [18]</td><td>9</td><td>93.8</td><td>63.3</td><td>58.0</td></tr>
<tr><td>STCFormer [38]</td><td>9</td><td>98.2</td><td>81.5</td><td>28.2</td></tr>
<tr><td>Ours</td><td>9</td><td>98.9</td><td>83.3</td><td>24.1</td></tr>
<tr><td>Lin et al. [20]</td><td>25</td><td>83.6</td><td>51.4</td><td>79.8</td></tr>
<tr><td>MixSTE [46]</td><td>27</td><td>94.4</td><td>66.5</td><td>54.9</td></tr>
<tr><td>STCFormer [38]</td><td>27</td><td>98.4</td><td>83.4</td><td>24.2</td></tr>
<tr><td>Ours</td><td>27</td><td>99.1</td><td>84.1</td><td>22.7</td></tr>
<tr><td>UGCN [39]</td><td>96</td><td>86.9</td><td>62.1</td><td>68.1</td></tr>
<tr><td>Anatomy [2]</td><td>81</td><td>87.8</td><td>53.8</td><td>79.1</td></tr>
<tr><td>Hu et al. [12]</td><td>96</td><td>97.9</td><td>69.5</td><td>42.5</td></tr>
<tr><td>Einfalt et al. [8]</td><td>81</td><td>95.4</td><td>67.6</td><td>46.9</td></tr>
<tr><td>P-STMO [31]</td><td>81</td><td>97.9</td><td>75.8</td><td>32.2</td></tr>
<tr><td>STCFormer [38]</td><td>81</td><td>98.7</td><td>83.9</td><td>23.1</td></tr>
<tr><td>Ours</td><td>81</td><td>99.1</td><td>84.4</td><td>22.2</td></tr>
", + "image_path": "37b15a4eb1ca9f3d84460b62dd7d67465d5c8fe74652c1efdce46bee2741738e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 609, + 257, + 621 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 609, + 257, + 621 + ], + "spans": [ + { + "bbox": [ + 132, + 609, + 257, + 621 + ], + "type": "text", + "content": "4.3 Qualitative Results" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 629, + 480, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 480, + 665 + ], + "type": "text", + "content": "Visualization on Continuity We compute the MPJVE of the results predicted by MixSTE, STCFormer and our method at different timesteps, and visualize the curves in Fig. 3. It can be seen that the MPJVE of our method is lower than" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 264, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 264, + 91, + 447, + 101 + ], + "type": "text", + "content": "3D HPE via Non-Causal Retentive Networks" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "type": "text", + "content": "that of previous methods. Our method captures temporal information using non-overlapping shift windows, similar to MixSTE. However, our method produces more continuous results at the edge between two chunks compared to MixSTE. MixSTE independently estimates two consecutive chunks, which results in a lack of continuity at the edge. In contrast, our method incorporates temporal information from previous chunks through the cross-chunk state, which improves the continuity. Compared to STCFoer, our method generally produces more continuous results. This is because our method generates multiple frames each time, allowing for the continuity constraints to the output." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 185, + 246, + 428, + 344 + ], + "blocks": [ + { + "bbox": [ + 185, + 246, + 428, + 344 + ], + "lines": [ + { + "bbox": [ + 185, + 246, + 428, + 344 + ], + "spans": [ + { + "bbox": [ + 185, + 246, + 428, + 344 + ], + "type": "image", + "image_path": "d4a2a04bd5c9b75b20b917823782835ace91f8d0da2d1cac95d9ceab8033f48d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 354, + 482, + 375 + ], + "lines": [ + { + "bbox": [ + 131, + 354, + 482, + 375 + ], + "spans": [ + { + "bbox": [ + 131, + 354, + 482, + 375 + ], + "type": "text", + "content": "Fig. 3: Comparison of the MPJVE curves over time between MixSTE, STCFormer and our method." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 136, + 424, + 478, + 608 + ], + "blocks": [ + { + "bbox": [ + 136, + 424, + 478, + 608 + ], + "lines": [ + { + "bbox": [ + 136, + 424, + 478, + 608 + ], + "spans": [ + { + "bbox": [ + 136, + 424, + 478, + 608 + ], + "type": "image", + "image_path": "9b633f75a3a335b0c645f7cd5dd5a44eaafd41d3708e049ff26326234e45eab3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 617, + 482, + 662 + ], + "lines": [ + { + "bbox": [ + 130, + 617, + 482, + 662 + ], + "spans": [ + { + "bbox": [ + 130, + 617, + 482, + 662 + ], + "type": "text", + "content": "Fig. 4: Comparison of some visualization results predicted by MixSTE [46], STC-Former [38] and our method. The black skeletons are the ground truth, and the red skeletons are the predicted results. The comparison with MixSTE is shown in green circles, while the comparison with STC-Former is shown in blue circles." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 228, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 228, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 228, + 102 + ], + "type": "text", + "content": "K. Zheng et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "text", + "content": "Visualization of Results We present some visualization examples in Fig. 4, where the results are predicted by MixSTE [46], STCFoer [38] and our method, respectively. It can be seen that our method predicts more accurate results, and the improvement is visually obvious. More visualization results can be found in the Supp. Mat." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 195, + 238, + 208 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 195, + 238, + 208 + ], + "spans": [ + { + "bbox": [ + 132, + 195, + 238, + 208 + ], + "type": "text", + "content": "4.4 Ablation Study" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 216, + 482, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 216, + 482, + 324 + ], + "spans": [ + { + "bbox": [ + 130, + 216, + 482, + 324 + ], + "type": "text", + "content": "Ablations on Knowledge Transfer The knowledge transferred from large training chunks to smaller test chunks plays an important role in our method. To demonstrate this, we train the model with chunk sizes of 27 and 81, respectively, and compare the performance of these models with that of the model trained with " + }, + { + "bbox": [ + 130, + 216, + 482, + 324 + ], + "type": "inline_equation", + "content": "T = 243" + }, + { + "bbox": [ + 130, + 216, + 482, + 324 + ], + "type": "text", + "content": ". The results are shown in Tab. 
4 (" + }, + { + "bbox": [ + 130, + 216, + 482, + 324 + ], + "type": "inline_equation", + "content": "2^{nd}" + }, + { + "bbox": [ + 130, + 216, + 482, + 324 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 130, + 216, + 482, + 324 + ], + "type": "inline_equation", + "content": "4^{th}" + }, + { + "bbox": [ + 130, + 216, + 482, + 324 + ], + "type": "text", + "content": " rows). It can be seen that compared with the models trained with " + }, + { + "bbox": [ + 130, + 216, + 482, + 324 + ], + "type": "inline_equation", + "content": "T = 27" + }, + { + "bbox": [ + 130, + 216, + 482, + 324 + ], + "type": "text", + "content": " and 81, using the models trained with a larger chunk size (" + }, + { + "bbox": [ + 130, + 216, + 482, + 324 + ], + "type": "inline_equation", + "content": "T = 243" + }, + { + "bbox": [ + 130, + 216, + 482, + 324 + ], + "type": "text", + "content": ") for inference is significantly better. This indicates that the knowledge learned with large chunks is useful for reasoning about small chunks." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 153, + 366, + 458, + 492 + ], + "blocks": [ + { + "bbox": [ + 154, + 345, + 457, + 357 + ], + "lines": [ + { + "bbox": [ + 154, + 345, + 457, + 357 + ], + "spans": [ + { + "bbox": [ + 154, + 345, + 457, + 357 + ], + "type": "text", + "content": "Table 4: Comparison of different methods in terms of knowledge transfer." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 153, + 366, + 458, + 492 + ], + "lines": [ + { + "bbox": [ + 153, + 366, + 458, + 492 + ], + "spans": [ + { + "bbox": [ + 153, + 366, + 458, + 492 + ], + "type": "table", + "html": "
<tr><td>Method</td><td>Train T</td><td>Test T = 27</td><td>Test T = 81</td><td>Test T = 243</td></tr>
<tr><td>Previous SOTA</td><td>Same as test T</td><td>44.1</td><td>42.0</td><td>40.5</td></tr>
<tr><td>Ours</td><td>27</td><td>43.7</td><td>-</td><td>-</td></tr>
<tr><td>Ours</td><td>81</td><td>43.0</td><td>41.9</td><td>-</td></tr>
<tr><td>Ours</td><td>243</td><td>42.1</td><td>40.9</td><td>40.4</td></tr>
<tr><td>MixSTE w/ xPos</td><td>27</td><td>45.3</td><td>-</td><td>-</td></tr>
<tr><td>MixSTE w/ xPos</td><td>81</td><td>47.0</td><td>42.6</td><td>-</td></tr>
<tr><td>MixSTE w/ xPos</td><td>243</td><td>48.8</td><td>44.1</td><td>41.1</td></tr>
<tr><td>Ours w/o state</td><td>27</td><td>46.3</td><td>-</td><td>-</td></tr>
<tr><td>Ours w/o state</td><td>81</td><td>49.1</td><td>44.0</td><td>-</td></tr>
<tr><td>Ours w/o state</td><td>243</td><td>54.2</td><td>49.8</td><td>42.5</td></tr>
", + "image_path": "5356be9cd0c8ff29e349005839e1938e7ee2099b25bee0f3ddbc3b70096411ab.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "type": "text", + "content": "Effect of Cross-Chunk State We compare our methods with two baselines that do not use the cross-chunk state: the MixSTE model with xPos as the position embedding, and the model based on RetNet but without the cross-chunk state. These two baselines are able to handle sequences of different lengths, but can only use within-chunk information. The results are shown in Tab. 4 (bottom six rows). It can be seen that the two baselines without long-term historical information deteriorate rapidly as the gap between the training and test chunk sizes increases. This means that they cannot efficiently transfer knowledge from large chunks to small chunks. Therefore, the cross-chunk state is essential for knowledge transfer in our method, and our NC-RetNet is the first method to have this knowledge transfer property." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 264, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 264, + 91, + 447, + 101 + ], + "type": "text", + "content": "3D HPE via Non-Causal Retentive Networks" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "text", + "content": "Comparison of Computational Cost The comparison of the model parameters and computational cost of our method and previous methods is as shown in Tab. 5. For seq2seq methods, the FLOPs are averaged over the number of frames, since the prediction of a single inference yields results over multiple frames. It can be seen that our modification of MixSTE does not bring any increase in the model parameters or FLOPs. And compared to STCFormer [38], which has comparable performance to our method at " + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "inline_equation", + "content": "T = 243" + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "text", + "content": ", the computational cost of our method is much lower (430 M vs. 78107 M)." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 177, + 267, + 434, + 350 + ], + "blocks": [ + { + "bbox": [ + 131, + 235, + 482, + 259 + ], + "lines": [ + { + "bbox": [ + 131, + 235, + 482, + 259 + ], + "spans": [ + { + "bbox": [ + 131, + 235, + 482, + 259 + ], + "type": "text", + "content": "Table 5: Comparison of model parameters, computational cost. FLOPs for seq2seq and our methods is averaged over the number of output frames, as is done in [46]." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 177, + 267, + 434, + 350 + ], + "lines": [ + { + "bbox": [ + 177, + 267, + 434, + 350 + ], + "spans": [ + { + "bbox": [ + 177, + 267, + 434, + 350 + ], + "type": "table", + "html": "
<tr><td>Method</td><td>Params (M)</td><td>FLOPs (M)</td><td>MPJPE (T=243)</td></tr>
<tr><td>StridedFormer [17]</td><td>4.2</td><td>1372</td><td>44.0</td></tr>
<tr><td>P-STMO [31]</td><td>6.7</td><td>1737</td><td>42.8</td></tr>
<tr><td>MHFormer [18]</td><td>24.7</td><td>4812</td><td>43.2</td></tr>
<tr><td>MixSTE [46]</td><td>33.6</td><td>572</td><td>40.9</td></tr>
<tr><td>STCFormer [38]</td><td>18.9</td><td>78107</td><td>40.5</td></tr>
<tr><td>Ours</td><td>25.2</td><td>430</td><td>40.4</td></tr>
", + "image_path": "518dfffa3d6f9d4c06c124f1b18a40f4a3e50f0f357388c475b92e4faeef0a2c.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 396, + 220, + 408 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 396, + 220, + 408 + ], + "spans": [ + { + "bbox": [ + 131, + 396, + 220, + 408 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 426, + 482, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 426, + 482, + 559 + ], + "spans": [ + { + "bbox": [ + 130, + 426, + 482, + 559 + ], + "type": "text", + "content": "In this paper, we propose the first 3D human pose estimation model based on Retentive Networks, NC-RetNet. By using the non-causal masking, it effectively leverages a large number of past frames and a limited number of future frames to incorporate temporal information. Furthermore, we introduce a knowledge transfer strategy that involves training the model with a larger chunk size and using a smaller chunk size during inference, resulting in reduced inference latency without too much loss in accuracy. Through extensive experiments on the Human3.6M and MPI-INF-3DHP datasets, our approach has demonstrated state-of-the-art performance even with a smaller test chunk size. In conclusion, our method achieves a good balance between high accuracy and low inference latency, making it suitable for real-time scenarios." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "type": "text", + "content": "Limitations Admittedly, there are two limitations in our work. Firstly, the fundamental theory behind our method's ability to transfer knowledge is unclear, despite our study of the effect of the cross-chunk state. Secondly, we have only tested our method in the 2D-to-3D lifting task. However, the idea of transferring knowledge from large chunks to smaller chunks is universal to many sequential data in computer vision. Further work is required to explain the theory and explore more applications." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 228, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 228, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 228, + 102 + ], + "type": "text", + "content": "K. Zheng et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 114, + 197, + 126 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 114, + 197, + 126 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 197, + 126 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 142, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 138, + 142, + 481, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 142, + 481, + 186 + ], + "spans": [ + { + "bbox": [ + 138, + 142, + 481, + 186 + ], + "type": "text", + "content": "1. Cai, Y., Ge, L., Liu, J., Cai, J., Cham, T.J., Yuan, J., Thalmann, N.M.: Exploiting spatial-temporal relationships for 3d pose estimation via graph convolutional networks. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 2272-2281 (2019) 2, 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 186, + 481, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 186, + 481, + 220 + ], + "spans": [ + { + "bbox": [ + 138, + 186, + 481, + 220 + ], + "type": "text", + "content": "2. Chen, T., Fang, C., Shen, X., Zhu, Y., Chen, Z., Luo, J.: Anatomy-aware 3d human pose estimation with bone-based pose decomposition. IEEE Transactions on Circuits and Systems for Video Technology 32(1), 198-209 (2021) 1, 9, 10, 11" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 220, + 481, + 254 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 220, + 481, + 254 + ], + "spans": [ + { + "bbox": [ + 138, + 220, + 481, + 254 + ], + "type": "text", + "content": "3. Chen, Y., Wang, Z., Peng, Y., Zhang, Z., Yu, G., Sun, J.: Cascaded pyramid network for multi-person pose estimation. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 7103-7112 (2018) 3, 9, 10" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 254, + 481, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 254, + 481, + 287 + ], + "spans": [ + { + "bbox": [ + 138, + 254, + 481, + 287 + ], + "type": "text", + "content": "4. Chi, T.C., Fan, T.H., Ramadge, P.J., Rudnicky, A.: Kerple: Kernelized relative positional embedding for length extrapolation. Advances in Neural Information Processing Systems 35, 8386-8399 (2022) 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 288, + 481, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 288, + 481, + 331 + ], + "spans": [ + { + "bbox": [ + 138, + 288, + 481, + 331 + ], + "type": "text", + "content": "5. Choi, H., Moon, G., Chang, J.Y., Lee, K.M.: Beyond static features for temporally consistent 3d human pose and shape from a video. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 1964-1973 (2021) 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 332, + 481, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 332, + 481, + 365 + ], + "spans": [ + { + "bbox": [ + 138, + 332, + 481, + 365 + ], + "type": "text", + "content": "6. Choi, S., Choi, S., Kim, C.: Mobilehumanpose: Toward real-time 3d human pose estimation in mobile devices. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 
2328-2338 (2021) 4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 365, + 481, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 365, + 481, + 397 + ], + "spans": [ + { + "bbox": [ + 138, + 365, + 481, + 397 + ], + "type": "text", + "content": "7. Chung, J., Gulcehre, C., Cho, K., Bengio, Y.: Empirical evaluation of gated recurrent neural networks on sequence modeling. arXiv preprint arXiv:1412.3555 (2014) 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 399, + 481, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 399, + 481, + 441 + ], + "spans": [ + { + "bbox": [ + 138, + 399, + 481, + 441 + ], + "type": "text", + "content": "8. Einfalt, M., Ludwig, K., Lienhart, R.: Uplift and upsample: Efficient 3d human pose estimation with uplifting transformers. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 2903-2913 (2023) 4, 11" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 443, + 481, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 443, + 481, + 476 + ], + "spans": [ + { + "bbox": [ + 138, + 443, + 481, + 476 + ], + "type": "text", + "content": "9. Han, K., Wang, Y., Tian, Q., Guo, J., Xu, C., Xu, C.: Ghostnet: More features from cheap operations. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 1580-1589 (2020) 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 477, + 481, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 477, + 481, + 510 + ], + "spans": [ + { + "bbox": [ + 138, + 477, + 481, + 510 + ], + "type": "text", + "content": "0. Hassanin, M., Khamiss, A., Bennamoun, M., Boussaid, F., Radwan, I.: Crossformer: Cross spatio-temporal transformer for 3d human pose estimation. arXiv preprint arXiv:2203.13387 (2022) 4, 11" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 510, + 481, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 510, + 481, + 553 + ], + "spans": [ + { + "bbox": [ + 138, + 510, + 481, + 553 + ], + "type": "text", + "content": "1. Hesse, N., Schröder, A.S., Müller-Felber, W., Bodensteiner, C., Arens, M., Hofmann, U.G.: Body pose estimation in depth images for infant motion analysis. In: 2017 39th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC). pp. 1909-1912. IEEE (2017) 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 555, + 481, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 555, + 481, + 587 + ], + "spans": [ + { + "bbox": [ + 138, + 555, + 481, + 587 + ], + "type": "text", + "content": "2. Hu, W., Zhang, C., Zhan, F., Zhang, L., Wong, T.T.: Conditional directed graph convolution for 3d human pose estimation. In: Proceedings of the 29th ACM International Conference on Multimedia. pp. 602-611 (2021) 11" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 588, + 481, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 588, + 481, + 620 + ], + "spans": [ + { + "bbox": [ + 138, + 588, + 481, + 620 + ], + "type": "text", + "content": "3. 
Iandola, F.N., Han, S., Moskewicz, M.W., Ashraf, K., Dally, W.J., Keutzer, K.: SqueezeNet: Alexnet-level accuracy with 50x fewer parameters and " + }, + { + "bbox": [ + 138, + 588, + 481, + 620 + ], + "type": "inline_equation", + "content": "< 0.5" + }, + { + "bbox": [ + 138, + 588, + 481, + 620 + ], + "type": "text", + "content": " mb model size. arXiv preprint arXiv:1602.07360 (2016) 4" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 621, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 621, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 621, + 481, + 665 + ], + "type": "text", + "content": "4. Ionescu, C., Papava, D., Olaru, V., Sminchisescu, C.: Human3. 6m: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE transactions on pattern analysis and machine intelligence 36(7), 1325-1339 (2013) 3, 9" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 264, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 264, + 91, + 447, + 101 + ], + "type": "text", + "content": "3D HPE via Non-Causal Retentive Networks" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 133, + 116, + 480, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 116, + 480, + 138 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 480, + 138 + ], + "type": "text", + "content": "15. Kipf, T.N., Welling, M.: Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907 (2016) 3, 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 140, + 481, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 140, + 481, + 183 + ], + "spans": [ + { + "bbox": [ + 133, + 140, + 481, + 183 + ], + "type": "text", + "content": "16. Li, H., Shi, B., Dai, W., Zheng, H., Wang, B., Sun, Y., Guo, M., Li, C., Zou, J., Xiong, H.: Pose-oriented transformer with uncertainty-guided refinement for 2d-to-3d human pose estimation. In: Proceedings of the AAAI Conference on Artificial Intelligence. vol. 37, pp. 1296-1304 (2023) 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 184, + 480, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 184, + 480, + 217 + ], + "spans": [ + { + "bbox": [ + 132, + 184, + 480, + 217 + ], + "type": "text", + "content": "17. Li, W., Liu, H., Ding, R., Liu, M., Wang, P., Yang, W.: Exploiting temporal contexts with strided transformer for 3d human pose estimation. IEEE Transactions on Multimedia 25, 1282-1293 (2022) 4, 10, 14" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 218, + 480, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 218, + 480, + 261 + ], + "spans": [ + { + "bbox": [ + 132, + 218, + 480, + 261 + ], + "type": "text", + "content": "18. 
Li, W., Liu, H., Tang, H., Wang, P., Van Gool, L.: Mhformer: Multi-hypothesis transformer for 3d human pose estimation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13147-13156 (2022) 4, 9, 10, 11, 14" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 262, + 480, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 262, + 480, + 305 + ], + "spans": [ + { + "bbox": [ + 132, + 262, + 480, + 305 + ], + "type": "text", + "content": "19. Li, Y., Zhang, S., Wang, Z., Yang, S., Yang, W., Xia, S.T., Zhou, E.: Tokenpose: Learning keypoint tokens for human pose estimation. In: Proceedings of the IEEE/CVF International conference on computer vision. pp. 11313-11322 (2021) 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 307, + 480, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 307, + 480, + 329 + ], + "spans": [ + { + "bbox": [ + 132, + 307, + 480, + 329 + ], + "type": "text", + "content": "20. Lin, J., Lee, G.H.: Trajectory space factorization for deep video-based 3d human pose estimation. arXiv preprint arXiv:1908.08289 (2019) 11" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 331, + 480, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 331, + 480, + 373 + ], + "spans": [ + { + "bbox": [ + 132, + 331, + 480, + 373 + ], + "type": "text", + "content": "21. Liu, R., Shen, J., Wang, H., Chen, C., Cheung, S.c., Asari, V.: Attention mechanism exploits temporal contexts: Real-time 3d human pose reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5064-5073 (2020) 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 375, + 480, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 375, + 480, + 407 + ], + "spans": [ + { + "bbox": [ + 132, + 375, + 480, + 407 + ], + "type": "text", + "content": "22. Martinez, J., Hossain, R., Romero, J., Little, J.J.: A simple yet effective baseline for 3d human pose estimation. In: Proceedings of the IEEE international conference on computer vision. pp. 2640-2649 (2017) 1, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 409, + 480, + 451 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 409, + 480, + 451 + ], + "spans": [ + { + "bbox": [ + 132, + 409, + 480, + 451 + ], + "type": "text", + "content": "23. Mehta, D., Rhodin, H., Casas, D., Fua, P., Sotnychenko, O., Xu, W., Theobalt, C.: Monocular 3d human pose estimation in the wild using improved cnn supervision. In: 2017 international conference on 3D vision (3DV). pp. 506-516. IEEE (2017) 3, 9" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 453, + 480, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 453, + 480, + 485 + ], + "spans": [ + { + "bbox": [ + 132, + 453, + 480, + 485 + ], + "type": "text", + "content": "24. Mehta, D., Sridhar, S., Sotnychenko, O., Rhodin, H., Shafiei, M., Seidel, H.P., Xu, W., Casas, D., Theobalt, C.: Vnect: Real-time 3d human pose estimation with a single rgb camera. Acm transactions on graphics (tog) 36(4), 1-14 (2017) 4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 487, + 480, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 487, + 480, + 529 + ], + "spans": [ + { + "bbox": [ + 132, + 487, + 480, + 529 + ], + "type": "text", + "content": "25. 
Newell, A., Yang, K., Deng, J.: Stacked hourglass networks for human pose estimation. In: Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part VIII 14. pp. 483-499. Springer (2016) 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 531, + 480, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 531, + 480, + 564 + ], + "spans": [ + { + "bbox": [ + 132, + 531, + 480, + 564 + ], + "type": "text", + "content": "26. Pavlakos, G., Zhou, X., Derpanis, K.G., Daniilidis, K.: Coarse-to-fine volumetric prediction for single-image 3d human pose. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 7025-7034 (2017) 3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 565, + 480, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 565, + 480, + 608 + ], + "spans": [ + { + "bbox": [ + 132, + 565, + 480, + 608 + ], + "type": "text", + "content": "27. Pavllo, D., Feichtenhofer, C., Grangier, D., Auli, M.: 3d human pose estimation in video with temporal convolutions and semi-supervised training. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 7753-7762 (2019) 2, 4, 9, 10" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 609, + 480, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 609, + 480, + 640 + ], + "spans": [ + { + "bbox": [ + 132, + 609, + 480, + 640 + ], + "type": "text", + "content": "28. Press, O., Smith, N.A., Lewis, M.: Train short, test long: Attention with linear biases enables input length extrapolation. arXiv preprint arXiv:2108.12409 (2021) 5" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 643, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 643, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 643, + 480, + 665 + ], + "type": "text", + "content": "29. Rayat Imtiaz Hossain, M., Little, J.J.: Exploiting temporal information for 3d pose estimation. arXiv e-prints pp. arXiv-1711 (2017) 2" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 228, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 228, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 228, + 101 + ], + "type": "text", + "content": "K. Zheng et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 665 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "type": "text", + "content": "30. Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., Chen, L.C.: Mobilenetv2: Inverted residuals and linear bottlenecks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 
4510-4520 (2018) 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 149, + 482, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 149, + 482, + 182 + ], + "spans": [ + { + "bbox": [ + 130, + 149, + 482, + 182 + ], + "type": "text", + "content": "31. Shan, W., Liu, Z., Zhang, X., Wang, S., Ma, S., Gao, W.: P-stmo: Pre-trained spatial temporal many-to-one model for 3d human pose estimation. In: European Conference on Computer Vision. pp. 461-478. Springer (2022) 10, 11, 14" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 182, + 481, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 182, + 481, + 203 + ], + "spans": [ + { + "bbox": [ + 130, + 182, + 481, + 203 + ], + "type": "text", + "content": "32. Su, J., Lu, Y., Pan, S., Murtadha, A., Wen, B., Liu, Y.: Roformer: Enhanced transformer with rotary position embedding. arXiv preprint arXiv:2104.09864 (2021) 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 203, + 481, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 203, + 481, + 224 + ], + "spans": [ + { + "bbox": [ + 130, + 203, + 481, + 224 + ], + "type": "text", + "content": "33. Sun, K., Xiao, B., Liu, D., Wang, J.: Deep high-resolution representation learning for human pose estimation. In: CVPR (2019) 3" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 224, + 481, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 224, + 481, + 256 + ], + "spans": [ + { + "bbox": [ + 130, + 224, + 481, + 256 + ], + "type": "text", + "content": "34. Sun, X., Shang, J., Liang, S., Wei, Y.: Compositional human pose regression. In: Proceedings of the IEEE international conference on computer vision. pp. 2602-2611 (2017) 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 256, + 481, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 256, + 481, + 289 + ], + "spans": [ + { + "bbox": [ + 130, + 256, + 481, + 289 + ], + "type": "text", + "content": "35. Sun, Y., Dong, L., Huang, S., Ma, S., Xia, Y., Xue, J., Wang, J., Wei, F.: Retentive network: A successor to transformer for large language models. arXiv preprint arXiv:2307.08621 (2023) 2, 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 289, + 481, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 289, + 481, + 320 + ], + "spans": [ + { + "bbox": [ + 130, + 289, + 481, + 320 + ], + "type": "text", + "content": "36. Sun, Y., Dong, L., Patra, B., Ma, S., Huang, S., Benhaim, A., Chaudhary, V., Song, X., Wei, F.: A length-extrapolatable transformer. arXiv preprint arXiv:2212.10554 (2022) 4" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 320, + 481, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 320, + 481, + 353 + ], + "spans": [ + { + "bbox": [ + 130, + 320, + 481, + 353 + ], + "type": "text", + "content": "37. Svenstrup, M., Tranberg, S., Andersen, H.J., Bak, T.: Pose estimation and adaptive robot behaviour for human-robot interaction. In: 2009 IEEE International Conference on Robotics and Automation. pp. 3571-3576. IEEE (2009) 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 353, + 481, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 353, + 481, + 396 + ], + "spans": [ + { + "bbox": [ + 130, + 353, + 481, + 396 + ], + "type": "text", + "content": "38. 
Tang, Z., Qiu, Z., Hao, Y., Hong, R., Yao, T.: 3d human pose estimation with spatio-temporal criss-cross attention. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4790-4799 (2023) 9, 10, 11, 12, 13, 14" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 396, + 481, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 396, + 481, + 429 + ], + "spans": [ + { + "bbox": [ + 130, + 396, + 481, + 429 + ], + "type": "text", + "content": "39. Wang, J., Yan, S., Xiong, Y., Lin, D.: Motion guided 3d pose estimation from videos. In: European Conference on Computer Vision. pp. 764-780. Springer (2020) 1, 9, 11" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 428, + 481, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 428, + 481, + 460 + ], + "spans": [ + { + "bbox": [ + 130, + 428, + 481, + 460 + ], + "type": "text", + "content": "40. Wehrbein, T., Rudolph, M., Rosenhahn, B., Wandt, B.: Probabilistic monocular 3d human pose estimation with normalizing flows. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11199-11208 (2021) 1" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 460, + 481, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 460, + 481, + 493 + ], + "spans": [ + { + "bbox": [ + 130, + 460, + 481, + 493 + ], + "type": "text", + "content": "41. Xu, T., Takano, W.: Graph stacked hourglass networks for 3d human pose estimation. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 16105-16114 (2021) 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 130, + 492, + 481, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 492, + 481, + 525 + ], + "spans": [ + { + "bbox": [ + 130, + 492, + 481, + 525 + ], + "type": "text", + "content": "42. Xu, Y., Zhang, J., Zhang, Q., Tao, D.: Vitpose: Simple vision transformer baselines for human pose estimation. Advances in Neural Information Processing Systems 35, 38571-38584 (2022) 3" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 525, + 481, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 525, + 481, + 557 + ], + "spans": [ + { + "bbox": [ + 130, + 525, + 481, + 557 + ], + "type": "text", + "content": "43. Xue, Y., Chen, J., Gu, X., Ma, H., Ma, H.: Boosting monocular 3d human pose estimation with part aware attention. IEEE Transactions on Image Processing 31, 4278-4291 (2022) 2, 10, 11" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 130, + 557, + 481, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 557, + 481, + 590 + ], + "spans": [ + { + "bbox": [ + 130, + 557, + 481, + 590 + ], + "type": "text", + "content": "44. Yan, S., Xiong, Y., Lin, D.: Spatial temporal graph convolutional networks for skeleton-based action recognition. In: Proceedings of the AAAI conference on artificial intelligence. vol. 32 (2018) 1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 130, + 590, + 481, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 590, + 481, + 622 + ], + "spans": [ + { + "bbox": [ + 130, + 590, + 481, + 622 + ], + "type": "text", + "content": "45. Zeng, A., Sun, X., Yang, L., Zhao, N., Liu, M., Xu, Q.: Learning skeletal graph neural networks for hard 3d pose estimation. 
In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11436-11445 (2021) 1" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 130, + 622, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 622, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 622, + 481, + 665 + ], + "type": "text", + "content": "46. Zhang, J., Tu, Z., Yang, J., Chen, Y., Yuan, J.: Mixste: Seq2seq mixed spatiotemporal encoder for 3d human pose estimation in video. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 13232-13242 (2022) 2, 8, 9, 10, 11, 12, 13, 14" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 264, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 264, + 91, + 447, + 100 + ], + "type": "text", + "content": "3D HPE via Non-Causal Retentive Networks" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 304 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 133, + 116, + 481, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 116, + 481, + 149 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 481, + 149 + ], + "type": "text", + "content": "47. Zhang, X., Zhou, X., Lin, M., Sun, J.: Shufflenet: An extremely efficient convolutional neural network for mobile devices. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 6848-6856 (2018) 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 150, + 481, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 150, + 481, + 182 + ], + "spans": [ + { + "bbox": [ + 132, + 150, + 481, + 182 + ], + "type": "text", + "content": "48. Zhao, Q., Zheng, C., Liu, M., Chen, C.: A single 2d pose with context is worth hundreds for 3d human pose estimation. Advances in Neural Information Processing Systems 36 (2024) 1, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 183, + 481, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 183, + 481, + 225 + ], + "spans": [ + { + "bbox": [ + 132, + 183, + 481, + 225 + ], + "type": "text", + "content": "49. Zhao, Q., Zheng, C., Liu, M., Wang, P., Chen, C.: Poseformerv2: Exploring frequency domain for efficient and robust 3d human pose estimation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8877-8886 (2023) 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 227, + 481, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 227, + 481, + 269 + ], + "spans": [ + { + "bbox": [ + 132, + 227, + 481, + 269 + ], + "type": "text", + "content": "50. Zheng, C., Zhu, S., Mendieta, M., Yang, T., Chen, C., Ding, Z.: 3d human pose estimation with spatial and temporal transformers. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 
11656-11665 (2021) 2, 4, 9, 10, 11" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 270, + 481, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 270, + 481, + 304 + ], + "spans": [ + { + "bbox": [ + 132, + 270, + 481, + 304 + ], + "type": "text", + "content": "51. Zhu, W., Ma, X., Liu, Z., Liu, L., Wu, W., Wang, Y.: Motionbert: A unified perspective on learning human motion representations. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 15085-15099 (2023) 4, 9" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 229, + 102 + ], + "type": "text", + "content": "K. Zheng et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/eb0bea0f-431f-4835-9237-239fd0d64e99_content_list.json b/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/eb0bea0f-431f-4835-9237-239fd0d64e99_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..e6ad70bc926e285a8e409ad2340b99127d219a25 --- /dev/null +++ b/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/eb0bea0f-431f-4835-9237-239fd0d64e99_content_list.json @@ -0,0 +1,1837 @@ +[ + { + "type": "text", + "text": "3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation", + "text_level": 1, + "bbox": [ + 217, + 140, + 785, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zihao Xiao $^{1*}$ , Longlong Jing $^{2}$ , Shangxuan Wu $^{2}$ , Alex Zihao Zhu $^{2}$ , Jingwei Ji $^{2}$ , Chiyu Max Jiang $^{2}$ , Wei-Chih Hung $^{2}$ , Thomas Funkhouser $^{3}$ , Weicheng Kuo $^{4}$ , Anelia Angelova $^{4}$ , Yin Zhou $^{2}$ , and Shiwei Sheng $^{2*}$", + "bbox": [ + 220, + 210, + 781, + 258 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Johns Hopkins University, 2 Waymo, 3 Google Research, 4 Google DeepMind", + "bbox": [ + 238, + 268, + 764, + 284 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. 3D panoptic segmentation is a challenging perception task, especially in autonomous driving. It aims to predict both semantic and instance annotations for 3D points in a scene. Although prior 3D panoptic segmentation approaches have achieved great performance on closed-set benchmarks, generalizing these approaches to unseen things and unseen stuff categories remains an open problem. For unseen object categories, 2D open-vocabulary segmentation has achieved promising results that solely rely on frozen CLIP backbones and assembling multiple classification outputs. However, we find that simply extending these 2D models to 3D does not guarantee good performance due to poor per-mask classification quality, especially for novel stuff categories. 
In this paper, we propose the first method to tackle 3D open-vocabulary panoptic segmentation. Our model takes advantage of the fusion between learnable LiDAR features and dense frozen vision CLIP features, using a single classification head to make predictions for both base and novel classes. To further improve the classification performance on novel classes and leverage the CLIP model, we propose two novel loss functions: object-level distillation loss and voxel-level distillation loss. Our experiments on the nuScenes and SemanticKITTI datasets show that our method outperforms the strong baseline by a large margin.", + "bbox": [ + 259, + 316, + 743, + 595 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: Autonomous driving $\\cdot$ 3D panoptic segmentation $\\cdot$ Vision-language", + "bbox": [ + 259, + 607, + 740, + 635 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 681, + 375, + 696 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D panoptic segmentation is a crucial task in computer vision with many real-world applications, most notably in autonomous driving. It combines 3D semantic and instance segmentation to produce per-point predictions for two different types of objects: things (e.g., car) and stuff (e.g., road). To date, there has been significant progress in 3D panoptic segmentation [27, 40, 42, 47, 52, 58]. Most recently, methods such as [47] produce panoptic segmentation predictions directly from point clouds by leveraging learned queries to represent objects and", + "bbox": [ + 212, + 710, + 787, + 818 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Work done while at Waymo", + "bbox": [ + 230, + 825, + 433, + 840 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Transformer-based [45] architectures [2, 4] to perform the modeling. However, existing models only predict panoptic segmentation results for a closed-set of objects. They fail to create predictions for the majority of unseen object categories in the scene, hindering the application of these algorithms to real-world scenarios, especially for autonomous driving. In this work, we focus on segmenting unseen things and unseen stuff objects in autonomous driving scenarios. We follow [10, 53] and develop models under the open-vocabulary setting: we divide the object categories into base (seen) categories and novel (unseen) categories, and evaluate models that are only trained on base categories.", + "bbox": [ + 212, + 146, + 787, + 282 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Such open-world computer vision tasks [3] benefit from the recent advancements in vision-language (V-L) models [22, 39]. In 2D vision, there are many successful methods in open-vocabulary object detection [12, 15, 24] and segmentation [11, 50, 54]. These methods make predictions in a shared image-text embedding space, where predictions for unseen categories are produced by comparing the similarity of an object with the text embedding of the category. 
However, these methods are only possible due to the vast amounts of paired image-text data available, making it difficult to train similar models for 3D data.", + "bbox": [ + 212, + 286, + 787, + 407 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Instead, researchers have continued to leverage the effectiveness of these 2D vision-language models for 3D with the help of pixel-point correspondences by running inference on 2D images and then aligning with the 3D features. These methods have achieved promising results on open-vocabulary semantic segmentation [10,35,53,55] and instance segmentation [10,43,53], individually. However, there are no methods that address the problem of 3D open-vocabulary panoptic segmentation, i.e., addressing both open-vocabulary semantic segmentation and open-vocabulary instance segmentation at the same time. The challenge lies in how to handle segmentation for novel things and stuff objects simultaneously.", + "bbox": [ + 212, + 410, + 787, + 546 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3D open-vocabulary panoptic segmentation is a challenging problem, due to both the significant domain gaps between the camera and LiDAR modalities and unsolved problems in open-vocabulary segmentation. Many existing open-vocabulary works rely on similarities between text embeddings of class names and pre-trained V-L features to obtain associations between predictions and classes [35,43,55]. However, while projecting 2D V-L features to 3D can account for a large part of the scene, there are often many points unaccounted for due to unmatched pixel/point distributions and differing fields of view between sensors. Some 3D open-vocabulary works [10,53] apply contrastive learning to obtain better association between language and points, but they require extra captioning models and do not address the difficulties of detecting novel stuff classes.", + "bbox": [ + 212, + 549, + 787, + 715 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we aim to address these two issues with a novel architecture for 3D open-vocabulary panoptic segmentation. Building on existing 3D closed-set panoptic segmentation methods, we train a learned LiDAR feature encoder in parallel with a frozen, pre-trained camera CLIP model. By fusing the 3D LiDAR features with the 2D CLIP features, our model is able to learn rich features throughout the entire 3D sensing volume, even if there are no camera features in certain regions. In addition, we apply a pair of novel distillation losses that allow the 3D encoder to learn both object-level and voxel-level features which", + "bbox": [ + 212, + 719, + 787, + 839 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Z. Xiao et al.", + "bbox": [ + 271, + 114, + 361, + 127 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "live inside the CLIP feature space. This provides a learned module in 3D space which can directly be compared with text embeddings. These losses also provide useful training supervision to unknown parts of the scene where there would otherwise be no loss gradient.", + "bbox": [ + 212, + 146, + 782, + 205 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "With the proposed model and loss functions, our method significantly outperforms the strong baseline on multiple datasets. 
Our contributions are summarized as follows:", + "bbox": [ + 212, + 207, + 784, + 251 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We present the first approach for 3D open-vocabulary panoptic segmentation in autonomous driving.", + "- We propose two novel loss functions, object-level distillation loss and voxel-level distillation loss to help segment novel things and novel stuff objects.", + "- We experimentally show that our proposed method significantly outperforms that strong baseline model on both nuScenes and SemanticKITTI datasets." + ], + "bbox": [ + 225, + 260, + 782, + 349 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 215, + 371, + 387, + 386 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This work is closely related to 3D panoptic segmentation, 2D open-vocabulary segmentation, and 3D open-vocabulary segmentation.", + "bbox": [ + 212, + 402, + 782, + 431 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3D panoptic segmentation. The goal of 3D panoptic segmentation is to group 3D points according to their semantics and identities. This is a challenging task and relies on a good representation of the 3D data [1,20,36,37,44,46,48]. Most panoptic segmentation models have separate branches for instance segmentation and semantic segmentation [19,27,44,58]. By following DETR [5], the recently proposed P3Former [47] uses learnable queries and a transformer architecture to obtain state-of-the-art performance on multiple panoptic segmentation benchmarks. Although those closed-set methods achieve incredible results, they cannot predict the labels and masks for novel classes.", + "bbox": [ + 212, + 431, + 784, + 566 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2D open-vocabulary segmentation. 2D open-vocabulary segmentation aims to group image pixels according to their semantics or identities for base (seen) or novel (unseen) categories. The prediction on novel categories is usually done by leveraging large V-L models [22,39]. There are many works that focus on open vocabulary semantic segmentation [14,17,26,29,31,34,49,51,56,57,59]. Some work has also explored open-vocabulary panoptic segmentation [11,38,50]. Recently, FC-CLIP [54] proposes a single-stage framework based on a frozen convolutional CLIP backbone [21,32,39] for 2D open-vocabulary panoptic segmentation that achieves state-of-the-art performance. However, due to the camera-LiDAR domain gap, we show that simply extending it to 3D leads to poor performance.", + "bbox": [ + 212, + 568, + 784, + 718 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3D open-vocabulary segmentation. 3D open-vocabulary segmentation is less explored due to the lack of 3D point-to-text association. One common practice is to utilize V-L models and use 2D-3D pairings to obtain rich, structured information in 3D [7,8,10,16,18,35,41,43,53,55]. Notably, CLIP2Scene [7] proposes a semantic-driven cross-modal contrastive learning framework. PLA [10] leverages images as a bridge and builds hierarchical 3D-caption pairs for contrastive learning. 
OpenScene [35] extracts per-pixel CLIP features using a pre-trained V-L model [14,26], then derives dense 3D features by projecting 3D points onto", + "bbox": [ + 212, + 719, + 784, + 839 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "3D Open-Vocabulary Panoptic Segmentation", + "bbox": [ + 429, + 114, + 730, + 128 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/c2ed55e4ce883f6a1e5a8a4609c300937e9486cfdde370a2bb9dc79354a37c65.jpg", + "image_caption": [ + "Fig. 1: Overview of our method. Given a LiDAR point cloud and the corresponding camera images, LiDAR features are extracted with a learnable LiDAR encoder, while vision features are extracted by a frozen CLIP vision model. The extracted LiDAR features and the frozen CLIP vision features are then fused and fed to a query-based transformer model to predict instance masks and semantic classes." + ], + "image_footnote": [], + "bbox": [ + 218, + 143, + 787, + 297 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "image planes. One concurrent work, RegionPLC [53], utilizes regional visual prompts to create dense captions and perform point-discriminative contrastive learning, which is used for semantic segmentation or instance segmentation, individually. In contrast, our work does not rely on any captioning model or extra contrastive learning, but only depends on pre-trained CLIP features. Our model also handles semantic segmentation and instance segmentation simultaneously.", + "bbox": [ + 212, + 420, + 787, + 512 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 215, + 534, + 330, + 550 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This section is organized as follows. First, we define the 3D open-vocabulary panoptic segmentation task. Then we provide detailed descriptions of the model architecture as well as the proposed loss functions. An overview of our method is presented in Fig. 1, and the two proposed loss functions are illustrated in Fig. 2 (a) and Fig. 2 (b).", + "bbox": [ + 212, + 566, + 787, + 643 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Problem Definition", + "text_level": 1, + "bbox": [ + 215, + 662, + 421, + 679 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In 3D panoptic segmentation, the goal is to annotate every point in a point cloud. For stuff classes (e.g., road, vegetation), a category label is assigned according to its semantics. For things classes (e.g., cars, pedestrians), an instance label is assigned to an object in addition to its semantic label.", + "bbox": [ + 212, + 689, + 787, + 748 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In open-vocabulary panoptic segmentation, the models are trained on $C_B$ base (seen) categories. At test time, besides these $C_B$ base categories, the data will contain $C_N$ novel (unseen) categories. Following the settings of prior work [15, 24, 54], we assume the availability of the names of the novel categories during inference, but the novel categories are not present in the training data and their names are not known during training. Note that we do not apply any prompt engineering, as", + "bbox": [ + 212, + 750, + 787, + 840 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Z.
Xiao et al.", + "bbox": [ + 271, + 114, + 362, + 126 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "this is not the focus of this paper. We follow OpenScene [35] to obtain the CLIP text embedding for each category.", + "bbox": [ + 212, + 146, + 782, + 176 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 3D Open-Vocabulary Panoptic Segmentation", + "text_level": 1, + "bbox": [ + 214, + 196, + 632, + 214 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Most of the previous 3D open-vocabulary works only address semantic segmentation [7,8,10,16,18,35,41,53,55] or instance segmentation [43,53] separately, and there is no existing work for the 3D open-vocabulary panoptic segmentation task, which handles novel things and novel stuff objects simultaneously. A natural idea would be extending the 2D open vocabulary segmentation methods to build the 3D counterpart. We start with P3Former [47], a state-of-the-art transformer-based 3D closed-set panoptic segmentation model, and add the essential components to support open-vocabulary capability by following FC-CLIP [54], a 2D open-vocabulary segmentation model that achieves state-of-the-art performance on multiple datasets. However, we found that this simple extension leads to poor performance in our experiments, and in this work we propose several new features to improve the performance of our model. More implementation details for this baseline can be found in the supplementary material.", + "bbox": [ + 212, + 220, + 787, + 416 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In order to improve the open vocabulary capability of our model, we propose significant changes to the P3Former architecture, as well as two new loss functions. The architecture of our method is shown in Fig. 1 and mainly consists of multimodal feature fusion, a segmentation head, and input text embeddings for open-vocabulary classification.", + "bbox": [ + 212, + 417, + 787, + 492 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Multimodal feature fusion. The core idea of many recent 2D open-vocabulary works is to leverage the features of large-scale vision-language models [22, 39]. These methods [54] mainly rely on frozen CLIP features and use a transformer model to perform the 2D panoptic segmentation task. However, this is not optimal for 3D tasks since many points do not have corresponding valid camera pixels, leading to invalid features preventing meaningful predictions. To fully exploit the power of the CLIP vision features and learn complementary features from both CLIP features from camera and features from LiDAR, we generate predictions from the fusion of CLIP features extracted by a frozen CLIP model and learned LiDAR features from a LiDAR encoder.", + "bbox": [ + 212, + 492, + 787, + 643 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Fig. 1, there are three major components for the multimodal feature fusion including a LiDAR encoder, a vision CLIP encoder, and voxel-level feature fusion. The LiDAR encoder is a model which takes an unordered set of points as input and extracts per-point features. We apply voxelization to the features from the LiDAR encoder, producing output features $F_{lidar} \\in \\mathbb{R}^{V \\times D_{lidar}}$ , where $V$ is the number of the voxels and $D_{lidar}$ is the dimension of the learned LiDAR feature. The Vision CLIP encoder is a pre-trained V-L segmentation model [14] which extracts pixel-wise CLIP features from each camera image. 
Within each voxel, every LiDAR point is projected into the camera image plane based on the intrinsic and extrinsic calibration parameters to index into the corresponding vision CLIP features, then the vision CLIP features of all the points belonging to each voxel are averaged to represent that voxel. Zero padding is used for points which do not have any valid corresponding camera pixels. The", + "bbox": [ + 212, + 643, + 787, + 840 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "3D Open-Vocabulary Panoptic Segmentation", + "bbox": [ + 429, + 114, + 730, + 128 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "voxel CLIP features will be referred to as $F_{vclip} \\in \\mathbb{R}^{V \\times D_{emb}}$, where $V$ is the number of voxels after voxelization and $D_{emb}$ is the dimension of the CLIP features. Finally, the learned per-voxel LiDAR features and frozen per-voxel vision CLIP features are concatenated together to be used as input to the transformer decoder in the segmentation head. This feature fusion enables our model to learn complementary information from both the LiDAR and CLIP features, allowing us to fine-tune our backbone for each dataset's specific data distribution.", + "bbox": [ + 212, + 145, + 787, + 252 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Segmentation head. The segmentation head is a transformer [45] model that takes the fused LiDAR-vision features as input to produce panoptic segmentation results. Prior works, including existing 2D open-vocabulary works such as FC-CLIP [54], typically use learnable queries $q$ to represent each instance or thing, and they contain a mask prediction head $f_{mask}$ to produce the corresponding mask for each individual object and a classification head $f_{cls}$ to predict the per-mask class score for each known class. However, as a result, they also need to rely on another classifier to handle novel categories. Our goal is to use a single model to handle the prediction for both base and novel categories. Thus, we predict a class embedding instead of a class score for each mask. During training, the model learns to regress an analogue of the CLIP vision embedding for each mask, and the category prediction can be obtained by calculating its similarity with the CLIP text embedding of text queries during the inference stage. The class embedding predicted by $f_{cls}$ is defined as:", + "bbox": [ + 212, + 252, + 787, + 464 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nv _ {q} = f _ {c l s} (q) \\in \\mathbb {R} ^ {D _ {e m b}}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 426, + 470, + 785, + 489 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $v_{q}$ is in the CLIP embedding space. The predicted class logits are then computed from the cosine similarity between the predicted class embedding and the text embedding of every category name from the evaluation set using a frozen CLIP model.
The classification logits are defined as:", + "bbox": [ + 212, + 494, + 787, + 556 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\ns _ {v _ {q}} = \\frac {1}{T} \\left[ \\cos \\left(v _ {q}, t _ {1}\\right), \\cos \\left(v _ {q}, t _ {2}\\right), \\dots , \\cos \\left(v _ {q}, t _ {C}\\right) \\right] \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 338, + 561, + 785, + 590 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $t_i \\in \\mathbb{R}^{D_{emb}}$, $i \\in \\{1, 2, \\dots, C\\}$, is the text embedding of category $i$, $C$ is the number of categories ($C_B$ in training and $C_B + C_N$ in testing), and $T$ is a learnable temperature term that controls the concentration of the distribution.", + "bbox": [ + 212, + 597, + 787, + 643 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Query assignment. A common practice [9, 54] for transformer-based panoptic segmentation models is to utilize a single set of queries to make predictions for both things and stuff classes jointly. In contrast, P3Former uses one query set to represent things classes after bipartite matching and one fixed query set for stuff classes. We have found that this separation of things queries and stuff queries makes our model converge faster and improves overall performance, and a similar pattern has been observed in other tasks [28]. However, the fixed set of queries for stuff classes is not applicable to the open-vocabulary setting due to the unknown number of novel stuff classes. To take advantage of the benefits of separating things queries and stuff queries, we propose to predict the base stuff classes with a fixed set of queries and utilize a set of learnable queries to target base things classes and all novel (things and stuff) classes. More details of the query assignment can be found in the supplementary materials.", + "bbox": [ + 212, + 643, + 789, + 840 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Z. Xiao et al.", + "bbox": [ + 271, + 114, + 362, + 127 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/fe8337038a0e671baf59695920bf28d56480a51f0114959972774dceeffc93bd.jpg", + "image_caption": [ + "(a) Object-Level Distillation Loss." + ], + "image_footnote": [], + "bbox": [ + 218, + 143, + 506, + 253 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1030de4f7afa5dff5320a7deefbb48e8a91f284f5fa6543f0a3dbeb766d7bd9f.jpg", + "image_caption": [ + "(b) Voxel-Level Distillation Loss.", + "Fig. 2: (a) The proposed object-level distillation loss, and (b) the proposed voxel-level distillation loss." + ], + "image_footnote": [], + "bbox": [ + 511, + 145, + 782, + 255 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3 Loss Function", + "text_level": 1, + "bbox": [ + 215, + 345, + 375, + 359 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Closed-set panoptic segmentation models [47] are typically optimized with objective functions consisting of a classification loss $L_{cls}$ and a mask prediction loss $L_{mask}$. We follow P3Former [47] for these two losses: the classification loss $L_{cls}$ optimizes the focal loss [30] between the class predictions and the category labels, while the mask loss $L_{mask}$ optimizes the voxel-query classification loss.
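For concreteness, the logits that $L_{cls}$ operates on come straight from Eqs. (1)-(2). The following minimal numpy sketch uses random stand-in arrays in place of real query and CLIP text embeddings; it illustrates the equations and is not the authors' code:

```python
import numpy as np

def class_logits(v_q, text_embs, temperature=0.07):
    """Eq. (2): temperature-scaled cosine similarity between predicted class
    embeddings v_q (Q, D) and the text embeddings of C category names (C, D).
    T is learnable in the model; a fixed stand-in value is used here.
    """
    v = v_q / np.linalg.norm(v_q, axis=1, keepdims=True)
    t = text_embs / np.linalg.norm(text_embs, axis=1, keepdims=True)
    return (v @ t.T) / temperature  # (Q, C) logits

rng = np.random.default_rng(0)
logits = class_logits(rng.normal(size=(128, 512)),  # Q = 128 queries
                      rng.normal(size=(16, 512)))   # C = 16 class names
```

At test time the same head is reused unchanged; only the text-embedding matrix grows from the $C_B$ base categories to $C_B + C_N$.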
Besides the two standard loss functions, we propose two simple yet effective losses to apply distillation from the CLIP model at different levels.", + "bbox": [ + 212, + 369, + 784, + 474 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Object-level distillation loss. Similar to previous methods [50, 54], we use the cosine similarity between predicted class embeddings and class text CLIP embeddings to produce classification scores. However, the classification loss applied to Eq. (2) only enforces similarity to known classes. In this work, we make the assumption that the frozen CLIP features are discriminative with respect to open-vocabulary classes and have good out-of-distribution generalization. We propose an additional training loss which forces our predicted object-level class embeddings to be similar to the CLIP embeddings within their corresponding masks after matching. Similar to [54], we utilize the voxel vision CLIP features to obtain an embedding for each query $q$ by mask-pooling the vision CLIP features:", + "bbox": [ + 212, + 476, + 785, + 628 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nw _ {q} = \\frac {1}{\\left| M _ {q} \\right|} \\sum_ {p} \\mathbb {1} (p \\in M _ {q}) F _ {v c l i p} (p) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 379, + 635, + 785, + 672 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $M_q$ is the set of points $p$ belonging to the mask for query $q$. Our object-level distillation loss is then defined as:", + "bbox": [ + 214, + 681, + 784, + 710 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nL _ {O} = \\frac {1}{\\left| Q _ {\\text {m a t c h e d}} \\right|} \\sum_ {q \\in Q _ {\\text {m a t c h e d}}} 1 - \\cos \\left(v _ {q}, w _ {q}\\right), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 343, + 717, + 785, + 755 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $Q_{\\text{matched}}$ is the set of queries matched with ground truth objects during training, $v$ is the set of predicted class embeddings, and $w$ is the set of mask-pooled CLIP embeddings. This loss forces the model to directly distill object-level camera CLIP features and improves model performance for novel things classes. We also experimented with applying $L_O$ to all predicted masks, but we", + "bbox": [ + 214, + 763, + 787, + 840 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "3D Open-Vocabulary Panoptic Segmentation", + "bbox": [ + 429, + 114, + 730, + 128 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 114, + 784, + 126 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "found that this slightly reduced model performance, likely due to the presence of masks that do not correspond to any objects in the scene.", + "bbox": [ + 212, + 146, + 782, + 176 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Voxel-level distillation loss. While the object-level distillation loss distills the per-object features from the CLIP model, it does not provide any supervision for the mask prediction head, which would otherwise only receive supervision for known classes. We found this particularly problematic for unknown stuff classes, which tend to be more spread out and cover larger and more diverse parts of the scene. In addition, it is only applied to queries with relatively accurate mask predictions in order to learn useful CLIP features.
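To make the object-level loss concrete, Eqs. (3)-(4) amount to mask pooling followed by a cosine distance. The sketch below is a hedged numpy illustration with hard 0/1 masks and assumed shapes (matched masks are assumed non-empty); it is not the actual training code:

```python
import numpy as np

def object_distillation_loss(v, masks, f_vclip, matched):
    """Eqs. (3)-(4): cosine distance between each matched query's predicted
    class embedding and the mask-pooled frozen voxel CLIP features.

    v:       (Q, D) predicted class embeddings v_q
    masks:   (Q, V) binary voxel masks per query (non-empty for matched q)
    f_vclip: (V, D) frozen per-voxel CLIP features F_vclip
    matched: indices of queries matched to ground-truth objects
    """
    loss = 0.0
    for q in matched:
        w_q = f_vclip[masks[q] > 0].mean(axis=0)  # Eq. (3): mask pooling
        cos = (v[q] @ w_q) / (np.linalg.norm(v[q]) * np.linalg.norm(w_q) + 1e-8)
        loss += 1.0 - cos                         # Eq. (4): cosine distance
    return loss / max(len(matched), 1)
```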
To target these issues, we propose the voxel-level distillation loss to explicitly learn voxel-level CLIP features, which does not depend on any labels and can be applied to all queries. In particular, the voxel-level distillation loss is defined as:", + "bbox": [ + 212, + 176, + 787, + 328 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nF _ {r e c} = M _ {Q} ^ {T} F _ {Q e m b} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 439, + 335, + 785, + 354 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $Q$ is the number of queries, $F_{Qemb} \\in \\mathbb{R}^{Q \\times D_{emb}}$ is the predicted embedding for all queries, and $M_Q \\in \\mathbb{R}^{Q \\times V}$ contains the predicted per-voxel mask probabilities for all queries. The reconstructed features can be regarded as the weighted sum of all queries for each voxel. We supervise these features with the voxel CLIP features:", + "bbox": [ + 212, + 363, + 787, + 425 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nL _ {V} = L _ {1} \\left(F _ {\\text {r e c}}, F _ {\\text {v c l i p}}\\right) \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 424, + 439, + 785, + 455 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Unlike the object-level distillation loss, which is only applied to queries with matched ground truth, this loss is applied to all predicted mask scores and queries. In our experiments, we found that this loss significantly improves performance on novel stuff categories in particular, likely as it does not require exact matches with the ground truth, which can be difficult for large stuff classes. However, this loss is still susceptible to noisy or low-quality mask scores, and we found that larger weights for this loss can disrupt training.", + "bbox": [ + 212, + 459, + 787, + 566 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To summarize, $L_{O}$ removes the need for the ensemble of classifiers in [14, 15, 24, 50, 54] and enables open-vocabulary capability with one trainable classifier. $L_{V}$ uses a scene-level representation formed by the embeddings of all queries, while previous methods only consider object-level representations. Combining $L_{O}$ with $L_{V}$ enables segmenting novel things and novel stuff objects simultaneously. Our final objective function can be written as:", + "bbox": [ + 212, + 566, + 787, + 656 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nL = w _ {\\alpha} * L _ {c l s} + w _ {\\beta} * L _ {m a s k} + w _ {\\lambda} * L _ {O} + w _ {\\gamma} * L _ {V} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 326, + 667, + 785, + 683 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $w_{\\alpha}, w_{\\beta}, w_{\\lambda}, w_{\\gamma}$ are weights for the corresponding objective functions.", + "bbox": [ + 214, + 691, + 781, + 707 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.4 Implementation Details", + "text_level": 1, + "bbox": [ + 214, + 726, + 455, + 739 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "For the LiDAR encoder and segmentation head, we follow the implementation of the state-of-the-art closed-set 3D panoptic segmentation method P3Former [47]. For the Vision CLIP encoder, we use OpenSeg [14], due to its remarkable performance on the recent open-vocabulary 3D semantic segmentation task [35].
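Stepping back to Sec. 3.3 for a moment, the voxel-level distillation of Eqs. (5)-(6) reduces to two lines of tensor algebra. A minimal numpy sketch with assumed shapes, given as an illustration of the equations rather than the actual training code:

```python
import numpy as np

def voxel_distillation_loss(m_q, f_qemb, f_vclip):
    """Eqs. (5)-(6): reconstruct per-voxel features as the mask-probability-
    weighted sum of all query embeddings, then apply an L1 penalty against
    the frozen voxel CLIP features.

    m_q:     (Q, V) predicted per-voxel mask probabilities M_Q
    f_qemb:  (Q, D) predicted embeddings F_Qemb for all queries
    f_vclip: (V, D) frozen voxel CLIP features F_vclip
    """
    f_rec = m_q.T @ f_qemb                    # Eq. (5): (V, D)
    return np.abs(f_rec - f_vclip).mean()     # Eq. (6): L1 loss
```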
For the Text CLIP encoder, we use CLIP [39] with a ViT-L/14 [45] backbone, following other state-of-the-art open-vocabulary works [35].", + "bbox": [ + 212, + 750, + 787, + 840 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Z. Xiao et al.", + "bbox": [ + 271, + 114, + 362, + 127 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 215, + 143, + 375, + 162 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.1 Experimental Setting", + "text_level": 1, + "bbox": [ + 215, + 171, + 439, + 186 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Following the state-of-the-art closed-set 3D panoptic segmentation work [27,40, 42,47,52,58], we conduct experiments and ablation studies on the nuScenes [4] and SemanticKITTI [2,13] datasets.", + "bbox": [ + 212, + 191, + 782, + 236 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "nuScenes. The nuScenes dataset [4] is a public benchmark for autonomous driving. It consists of 1000 run segments and is further divided into prescribed train/val/test splits. We use all key frames with panoptic labels in the training set (28130 frames) to train the model. Following the most recent state-of-the-art model P3Former [47], we evaluate the models on the validation set (6019 frames). There are 16 semantic classes, including 10 things classes and 6 stuff classes.", + "bbox": [ + 212, + 237, + 784, + 327 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "SemanticKITTI. SemanticKITTI [2, 13] is the first large dataset for LiDAR panoptic segmentation for autonomous driving. We conduct experiments on the training and validation sets, where panoptic segmentation labels are available. 3D open-vocabulary methods often require point and pixel pairing. In the SemanticKITTI dataset, however, the ego-vehicle is only equipped with frontal cameras. Thus, we filter out the points that are not visible in the camera view based on the provided camera parameters for both training and evaluation. There are 19 semantic classes, including 8 things classes and 11 stuff classes.", + "bbox": [ + 212, + 327, + 784, + 446 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Data split. Neither the nuScenes nor the SemanticKITTI dataset provides official base and novel class splits. Following the state-of-the-art 3D open-vocabulary segmentation work [6,10,53], we randomly split the classes into base and novel, while keeping the ratio between base and novel classes around $3:1$. For nuScenes, the numbers of base and novel classes are 12 and 4, respectively, and this setting will be referred to as B12/N4. For SemanticKITTI, the numbers of base and novel classes are 14 and 5, and this setting will be referred to as B14/N5. We use the same splits in the main comparison with prior methods, and provide the results of more variations in the ablation studies and supplementary materials.", + "bbox": [ + 212, + 448, + 784, + 583 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Training details. We follow most of the architecture configurations in the official P3Former [47] implementation. We set $w_{\\alpha} = 1$, $w_{\\beta} = 1$, $w_{\\lambda} = 1$, $w_{\\gamma} = 0.1$ for both datasets. We use the AdamW [23, 33] optimizer with a weight decay of 0.01. We set the initial learning rate to 0.0008 with a multi-step decay schedule.
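In a PyTorch-style codebase, this optimizer setup reduces to a few lines. The sketch below is hypothetical: the model is a stand-in and the decay milestones are assumptions (the milestone epochs are not specified here); only the numeric settings come from the text:

```python
import torch

model = torch.nn.Linear(8, 8)  # stand-in for the actual network
optimizer = torch.optim.AdamW(model.parameters(), lr=8e-4, weight_decay=0.01)
# Multi-step decay; these milestone epochs are assumed, not from the paper.
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[24, 36], gamma=0.1)
# Loss weights for Eq. (7): w_alpha = w_beta = w_lambda = 1, w_gamma = 0.1.
LOSS_WEIGHTS = dict(cls=1.0, mask=1.0, obj_distill=1.0, voxel_distill=0.1)
```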
The models are trained for 40 epochs, and we use the checkpoint of the last epoch for evaluation. To avoid ambiguous class names and better utilize the CLIP text embedding, we follow [25, 35, 54] and apply multi-label mapping for the text queries. During inference, if there are multiple labels for one class, we derive the class score by taking the maximum score among these labels.", + "bbox": [ + 212, + 584, + 784, + 718 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Evaluation metrics. We use panoptic quality $(PQ)$ as the major evaluation metric for the panoptic segmentation task. $PQ$ is formulated as:", + "bbox": [ + 212, + 720, + 782, + 750 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {P Q} = \\underbrace {\\frac {\\sum_ {T P} \\operatorname {I o U}}{| T P |}} _ {\\mathrm {S Q}} \\times \\underbrace {\\frac {| T P |}{| T P | + \\frac {1}{2} | F P | + \\frac {1}{2} | F N |}} _ {\\mathrm {R Q}}. \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 753, + 784, + 805 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "$PQ$ is the product of segmentation quality $(SQ)$ and recognition quality $(RQ)$. We report all three metrics $(PQ, RQ, SQ)$ for all classes. We also", + "bbox": [ + 215, + 809, + 784, + 840 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "3D Open-Vocabulary Panoptic Segmentation", + "bbox": [ + 429, + 114, + 730, + 130 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/46ccebcc6142168e389b257a212aa0a4cc27967f146ad85411e4e19968399726.jpg", + "image_caption": [ + "PFC" + ], + "image_footnote": [], + "bbox": [ + 222, + 146, + 359, + 252 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/03a482f0bf7d8ecb5f721997e9f60b65493a6d973e51d6094b58fba088eda3d0.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 361, + 146, + 496, + 252 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/b5c327dfdf5140a46ce816482806940a69ec57f903f122fe338734a3ceb83214.jpg", + "image_caption": [ + "PFC", + "Fig. 3: Open-vocabulary panoptic segmentation results from PFC and our method on nuScenes. PFC predicts inaccurate categories and masks for the novel pedestrian (red), bus (yellow) and vegetation (green), while ours makes correct predictions." + ], + "image_footnote": [], + "bbox": [ + 506, + 146, + 645, + 252 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/f49229697b6a698e03b4b3e1da595b07cbdfaa5efe782dd2c74ed82ad758bd8a.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 645, + 146, + 782, + 252 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "report $PQ$, $RQ$, $SQ$ for novel things objects and novel stuff objects separately. In particular, $PQ_{N}^{Th}$ denotes $PQ$ for novel things classes and $PQ_{N}^{St}$ denotes $PQ$ for novel stuff classes. We also report the mean Intersection over Union (mIoU) for all classes to measure semantic segmentation quality.", + "bbox": [ + 212, + 361, + 784, + 422 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.2 P3Former-FC-CLIP Baseline", + "text_level": 1, + "bbox": [ + 215, + 446, + 501, + 459 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "As a baseline for novel-class panoptic segmentation, we construct a model from a fusion of P3Former [47] and FC-CLIP [54].
This baseline will be called P3Former-FC-CLIP (PFC). The baseline model takes the frozen voxel vision CLIP features as input, and the final prediction is obtained by geometric ensembling [14, 15, 24, 50, 54] of the results from the classification head $f_{cls}$ and another frozen classifier based on the similarity between the average-pooled class embedding $w_{q}$ and the CLIP text embedding. Following FC-CLIP [54], the same set of learnable queries is used to represent both things and stuff classes. In summary, this baseline provides a comparison against our proposed method without the multimodal feature fusion module, the unified segmentation head, and the distillation losses. More information on this baseline can be found in the supplementary material.", + "bbox": [ + 212, + 472, + 787, + 638 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.3 Main Results", + "text_level": 1, + "bbox": [ + 215, + 662, + 372, + 676 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Since there are no existing methods for the 3D open-vocabulary panoptic segmentation task, we mainly compare with three methods to demonstrate the capability of our method: (1) the strong open-vocabulary baseline method PFC to fairly demonstrate the strength of our method, (2) the closed-set state-of-the-art 3D panoptic segmentation method P3Former to understand the headroom of our method, and (3) the open-set, zero-shot state-of-the-art method for 3D semantic segmentation, OpenScene [35]. Comparisons on the nuScenes and SemanticKITTI datasets are shown in Tab. 1 and Tab. 3.", + "bbox": [ + 212, + 688, + 787, + 808 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Results on nuScenes dataset. Table 1 shows the quantitative comparison on the validation set of the nuScenes dataset. Our method significantly outperforms", + "bbox": [ + 214, + 809, + 787, + 839 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Z. Xiao et al.", + "bbox": [ + 271, + 114, + 361, + 126 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/56683cb3423c13f51450df98b6110b095568fbfc49c7222ff603428fa4e43207.jpg", + "table_caption": [ + "Table 1: Quantitative results of panoptic segmentation on nuScenes. We compare the performance of open-vocabulary and fully supervised models. All open-vocabulary models share the same randomly picked base/novel split: B12/N4. The novel things classes are bus, pedestrian and motorcycle. The novel stuff class is vegetation." + ], + "table_footnote": [], + "table_body": "
ModelTypeSupervision\\( {PQ} \\)\\( P{Q}_{N}^{Th} \\)\\( P{Q}_{N}^{St} \\)\\( {RQ} \\)\\( R{Q}_{N}^{Th} \\)\\( R{Q}_{N}^{St} \\)\\( {SQ} \\)\\( S{Q}_{N}^{Th} \\)\\( S{Q}_{N}^{St} \\)mIoU
P3Former [47]closed-setfull75.985.182.984.789.995.989.894.786.576.8
OpenScene [35]open-voczero-shot---------42.1
PFCopen-vocpartial54.837.30.563.642.10.884.289.360.455.5
Oursopen-vocpartial62.049.635.270.955.646.087.089.176.760.1
", + "bbox": [ + 220, + 214, + 782, + 308 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/d8337e3f981d6cefd95141df4521fae0648cabe99c68e757b8f71f25eba394c9.jpg", + "table_caption": [ + "Table 2: Performance for base classes on nuScenes. We report the performance on base classes for models in Tab. 1. A gap still exists between open and closed-set methods for base classes. We show that this is due to lack of supervision of the whole scene as P3Former achieves similar performance when only trained on base categories." + ], + "table_footnote": [], + "table_body": "
ModelSupervisionTraining DataBase ThingsBase Stuff
\\( PQ_{B}^{Th} \\)\\( RQ_{B}^{Th} \\)\\( SQ_{B}^{Th} \\)\\( PQ_{B}^{St} \\)\\( RQ_{B}^{St} \\)\\( SQ_{B}^{St} \\)
P3Former [47]fullbase+novel73.480.590.973.985.385.9
P3Former [47]partialbase65.271.388.064.277.481.8
PFCpartialbase65.673.389.061.075.483.7
Ourspartialbase66.773.789.869.282.183.7
", + "bbox": [ + 246, + 395, + 751, + 503 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "the strong baseline PFC across all metrics. PFC works relatively well for the novel things classes, but performance on the novel stuff class collapses. This is likely because stuff classes tend to cover large parts of the scene, leading to diverse per-voxel CLIP features which may not be good representatives for their respective classes. Qualitative comparison is provided in Fig. 3.", + "bbox": [ + 215, + 535, + 785, + 611 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "To further understand the headroom of our method, we also compare our model with the closed-set P3Former. Note that the comparison here is deliberately unfair since the supervision signals are different. Compared with the closed-set P3Former, our segmentation quality $(SQ)$ is good while there is a large gap on mask classification quality $(RQ)$ . The gap is largely due to regressions in the novel classes, where precise supervision is not available for open-vocabulary models. For base classes, as shown in Tab. 2, the gap is relatively small except for a drop in $RQ_{B}^{Th}$ . We believe the closed-set P3Former sees ground truth supervision for the entire scene, while open-set methods do not receive supervision in the 'unknown class' regions. In fact, when P3Former is only trained on base categories, the performance is worse than our proposed method. Besides the comparison with the closed-set method, we also compare with the zero-shot state-of-the-art method OpenScene [35] which does not use any labels for training. In this comparison, our model significantly outperforms OpenScene in the mIoU metric for semantic segmentation. Note that this comparison is not en", + "bbox": [ + 215, + 613, + 787, + 839 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "3D Open-Vocabulary Panoptic Segmentation", + "bbox": [ + 431, + 114, + 730, + 128 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 114, + 782, + 126 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/87db42f18713dcb53c4a14bd54942c5e524f129aad48e26945869d3ce14efe29.jpg", + "table_caption": [ + "Table 3: Quantitative results of panoptic segmentation on SemanticKITTI. We compare the performance different models. All open vocabulary models share the same randomly picked base/novel split: B14/N5. The novel things classes are bicycle and truck. The novel stuff classes are sidewalk, building and trunk." + ], + "table_footnote": [], + "table_body": "
ModelTypeSupervision\\( {PQ} \\)\\( P{Q}_{N}^{Th} \\)\\( P{Q}_{N}^{St} \\)\\( {RQ} \\)\\( R{Q}_{N}^{Th} \\)\\( R{Q}_{N}^{St} \\)\\( {SQ} \\)\\( S{Q}_{N}^{Th} \\)\\( S{Q}_{N}^{St} \\)mIoU
P3Former [47]closed-setfull62.165.974.271.374.886.877.188.383.961.6
PFCopen-vocpartial33.712.00.440.115.00.667.681.147.333.4
Oursopen-vocpartial42.213.117.850.416.226.773.084.067.244.6
", + "bbox": [ + 220, + 214, + 782, + 292 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/066d5c16d0c51e486d4a77afdee8c44fe6ba3f3fd74b392da69e0b32a20b5487.jpg", + "table_caption": [ + "Table 4: Impact of each component. We evaluate the impact of each component using the base/novel split in Tab. 1. We observe that each component can provide improvements over the PCF baseline. Noticeably, $L_{V}$ brings the biggest improvement." + ], + "table_footnote": [], + "table_body": "
Components\\(PQ\\)\\(PQ^{Th}_{N}\\)\\(PQ^{St}_{N}\\)\\(RQ\\)\\(RQ^{Th}_{N}\\)\\(RQ^{St}_{N}\\)\\(SQ\\)\\(SQ^{Th}_{N}\\)\\(SQ^{St}_{N}\\)mIoU
QAFusion\\(L_{O}\\)\\(L_{V}\\)
54.837.30.563.642.10.884.289.360.455.5
55.535.70.464.040.80.784.387.456.556.6
56.438.10.465.043.50.684.687.461.356.4
56.343.80.264.849.20.385.188.964.054.0
62.049.635.270.955.646.087.089.176.760.1
", + "bbox": [ + 220, + 359, + 782, + 474 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "tirely fair, as our method is trained with partial labels. Instead, the comparison is useful to understand the gap between the two types of open-vocabulary methods. The concurrent work RegionPLC [53] also reports open-vocabulary results for the semantic segmentation task on the nuScenes dataset. However, we cannot directly compare with this method since it removes one class (other-flat) and does not provide its base/novel split.", + "bbox": [ + 212, + 505, + 784, + 594 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Results on SemanticKITTI dataset. To demonstrate the generalization ability of our method across different datasets, we report the results on SemanticKITTI dataset in Tab. 3. Overall, we observe similar patterns as on the nuScenes dataset. The baseline achieves relatively poor overall performance and struggles with the novel stuff classes. Using our architecture and loss functions, our model significantly outperforms PFC on $PQ$ , with the largest margin for novel stuff classes. Note that the gap between the open-vocabulary methods (ours and PFC) and the closed-set method is larger on SemanticKITTI, likely due to the smaller dataset limiting performance.", + "bbox": [ + 212, + 595, + 785, + 731 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.4 Ablation Studies and Analysis", + "text_level": 1, + "bbox": [ + 215, + 753, + 511, + 768 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "To better understand the effectiveness of each component, we conduct ablation studies for each design choice and loss function on the nuScenes dataset. These results are shown in Tab. 4. We conduct five sets of experiments, starting with the PFC baseline and build upon it four ablations with different combinations.", + "bbox": [ + 212, + 779, + 785, + 839 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Z. Xiao et al.", + "bbox": [ + 271, + 114, + 361, + 126 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/b7f28a71c3230e0be03f6febdfa47f433e56ac1930ff57f76671763c77f88ddc.jpg", + "table_caption": [ + "Table 5: Performance on a different split. We compare the performance with a split with 5 novel classes (B11/N5). The novel things classes are bicycle, car and construction vehicle. The novel stuff classes are terrain and man-made. Our method consistently outperforms the PFC baseline across all the metrics by a large margin." + ], + "table_footnote": [], + "table_body": "
ModelTypeSupervision\\( {PQ} \\)\\( P{Q}_{N}^{Th} \\)\\( P{Q}_{N}^{St} \\)\\( {RQ} \\)\\( R{Q}_{N}^{Th} \\)\\( R{Q}_{N}^{St} \\)\\( {SQ} \\)\\( S{Q}_{N}^{Th} \\)\\( S{Q}_{N}^{St} \\)mIoU
P3Former [47]closed-setfull75.870.571.783.876.485.590.191.683.675.0
PFCopen-vocpartial43.927.70.651.733.21.080.282.462.745.2
Oursopen-vocpartial52.856.016.460.561.822.684.989.768.749.9
", + "bbox": [ + 220, + 213, + 784, + 281 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Impact of query assignment. Starting from the PFC baseline model, we add our proposed fixed query assignment for stuff categories. As shown in the second row of Tab. 4, with query assignment, the overall $PQ$ improves by 0.7. The performance for the novel classes drop slightly, but improvement on the base classes overcomes this for the overall PQ.", + "bbox": [ + 212, + 310, + 785, + 385 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Impact of feature fusion. The third row of Tab. 4 shows the impact of feature fusion. Without feature fusion, our model already achieves 55.5 $PQ$ , demonstrating the power of the CLIP vision features. The third row shows that the performance with feature fusion for the model input improves the overall $PQ$ by 0.9. This slightly improved the overall performance, but the improvement on the novel things class is the most significant, demonstrating that the learned LiDAR features and CLIP vision features are indeed complementary for the task.", + "bbox": [ + 212, + 386, + 787, + 491 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Impact of object-level distillation loss. The fourth row of the results in Tab. 4 shows the impact of the proposed object-level distillation loss. Note that for models with the object-level distillation loss, we remove the frozen class classification head and the ensemble in the PFC baseline, consolidating to a single class embedding head. Although the $RQ_N^{St}$ slightly dips by 0.3 for the novel stuff classes, this loss can significantly improve the $RQ_N^{Th}$ for the novel things class by 5.7.", + "bbox": [ + 212, + 492, + 789, + 583 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Impact of voxel-level distillation loss. We study the impact of the voxel-level distillation loss to see if it can further improve the performance given all of our designs. The results are shown in the last row of Tab. 4. With this loss function, $PQ$ significantly improves by 5.7. The improvement on the novel split is particularly large, especially for the novel stuff classes. The $PQ_N^{St}$ of the novel stuff class improves from 0.2 to 35.2, which demonstrates the importance of the voxel-level supervision to the performance of the novel stuff class.", + "bbox": [ + 212, + 583, + 787, + 688 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Performance of different splits. To validate the generalizability of our method, we conduct experiments on a different split (B11/N5) for the nuScenes dataset. As shown in Tab. 5, our proposed method consistently and significantly outperforms the strong baseline method. This again demonstrates the effectiveness of our design and the proposed loss functions.", + "bbox": [ + 212, + 688, + 790, + 763 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Open-vocabulary exploration. In previous experiments, we follow other 3D open-vocabulary works [6,10,53] and provide analytical results on pre-defined object categories, mainly due to the limited categories in current panoptic segmentation datasets. In practice, our model goes beyond detecting these object categories: we can take class embeddings $v_{q}$ in Eq. 
(1) and compute the cosine", + "bbox": [ + 212, + 763, + 787, + 842 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "3D Open-Vocabulary Panoptic Segmentation", + "bbox": [ + 429, + 114, + 730, + 128 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/98d9e5be7fd79d582fc7a56707fae5615716791e0acada95c535e0be4d8becf1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 218, + 145, + 356, + 213 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/67d6879f793a2c079aad88c809f56ee7be42a073290d78abaea033f4de792bae.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 357, + 146, + 496, + 214 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/c29b3c5ba8912dc03032330f6556c5b2b7e2609239965d6e1ae4d15a6aeb5f35.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 218, + 215, + 356, + 281 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/1bd89946be43210e51bf955c8d074dc8f6ef220a519079704c674fa2b468bd4d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 357, + 215, + 496, + 281 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/189fe77276a8fd53569ca1b701fccaabaf91e8e3d916f3622e9bd57eee51143c.jpg", + "image_caption": [ + "Fig. 4: Open-vocabulary exploration. We show the novel materials/objects in blue. The orientation of the ego vehicle is fixed in the LiDAR point visualization, while the reference images come from one of the surrounding cameras of the ego vehicle." + ], + "image_footnote": [], + "bbox": [ + 218, + 282, + 356, + 349 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/2c6392ed18f73f379dd6ae65d57e4e23b6037b79c328fbefda6b84d74c6e4ae6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 357, + 282, + 496, + 349 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/65c583fc9c1aecf7263b48fe32370c3921ab2cdbac5ef60aa2aab84d17fbd290.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 146, + 643, + 213 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/670ad5ad40c8b587e753530c04d562565b21367f3fc399552966e167b2e48ebb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 643, + 146, + 782, + 214 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/957ab75cf34867d2c124ea033aa633e725104e5483e822d50b0b944fa672a87f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 214, + 643, + 281 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/64973897491e7f7083474880ff5c2fc0d4bf2ae35f9cd447c7ce3d2dfbb90046.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 643, + 214, + 782, + 281 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/9bad14e1b3f7f7c649d6d116003dabc4f014c63f585d6e6c6b356011b9700377.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 282, + 643, + 349 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/19a7b86e73f80b45740874c952f9a4126ea7596c98fa72fa60aeecac97d071e8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 643, + 282, + 782, + 349 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "similarity with the CLIP embedding of any text. Fig. 4 shows that we can detect novel materials/objects that are not in the predefined category list.
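Operationally, this exploration is a single dot product per mask. A hedged numpy sketch (the threshold and shapes are illustrative assumptions, and a real CLIP text embedding would replace the stand-in prompt vector):

```python
import numpy as np

def find_masks_matching(v, prompt_emb, threshold=0.6):
    """Score every predicted mask embedding v_q (Eq. (1)) against the CLIP
    text embedding of one free-form phrase and return indices of masks
    whose cosine similarity exceeds an (illustrative) threshold.

    v:          (Q, D) predicted class embeddings for all masks
    prompt_emb: (D,)   CLIP text embedding of an arbitrary phrase
    """
    vn = v / np.linalg.norm(v, axis=1, keepdims=True)
    p = prompt_emb / (np.linalg.norm(prompt_emb) + 1e-8)
    return np.nonzero(vn @ p > threshold)[0]
```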
Note that the concept of open vocabulary is very different from domain adaptation, as open vocabulary refers to the ability to deal with novel inputs in a scene, while domain adaptation addresses the difference in data distributions in different scenes.", + "bbox": [ + 212, + 449, + 787, + 523 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Limitations. Our models are only evaluated on current autonomous driving panoptic segmentation benchmarks, with a limited number of annotated categories. To further evaluate open-vocabulary performance, a large-scale autonomous driving benchmark with more diverse object categories is greatly desired.", + "bbox": [ + 212, + 525, + 790, + 585 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 215, + 609, + 359, + 626 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this paper, we present the first approach for the open-vocabulary 3D panoptic segmentation task in autonomous driving by leveraging large vision-language models. We experimentally verified that simply extending a 2D open-vocabulary segmentation method to 3D does not yield good performance, and demonstrated that our proposed model design and loss functions significantly boost performance for this task. Our method significantly outperformed the strong baseline on multiple well-established benchmarks. We hope our work can shed light on future studies of 3D open-vocabulary panoptic segmentation.", + "bbox": [ + 212, + 643, + 790, + 765 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgements. We would like to thank Mahyar Najibi, Chao Jia, Zhenyao Zhu, Yolanda Wang, Charles R. Qi, Dragomir Anguelov, Tom Ouyang, Ruichi Yu, Chris Sweeney, Colin Graber, Yingwei Li, Sangjin Lee, Weilong Yang, and Congcong Li for their help with the project.", + "bbox": [ + 214, + 779, + 787, + 840 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Z. Xiao et al.", + "bbox": [ + 271, + 114, + 361, + 126 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 215, + 143, + 321, + 159 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Alonso, I., Riazuelo, L., Montesano, L., Murillo, A.C.: 3d-mininet: Learning a 2d representation from point clouds for fast and efficient 3d lidar semantic segmentation. IEEE Robotics and Automation Letters 5(4), 5432-5439 (2020)", + "2. Behley, J., Garbade, M., Milioto, A., Quenzel, J., Behnke, S., Stachniss, C., Gall, J.: SemanticKITTI: A Dataset for Semantic Scene Understanding of LiDAR Sequences. In: ICCV (2019)", + "3. Bendale, A., Boult, T.: Towards open world recognition. In: CVPR (2015)", + "4. Caesar, H., Bankiti, V., Lang, A.H., Vora, S., Liong, V.E., Xu, Q., Krishnan, A., Pan, Y., Baldan, G., Beijbom, O.: nuscenes: A multimodal dataset for autonomous driving. In: CVPR (2020)", + "5. Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: ECCV (2020)", + "6. Cen, J., Yun, P., Zhang, S., Cai, J., Luan, D., Wang, M.Y., Liu, M., Tang, M.: Open-world semantic segmentation for LIDAR point clouds. In: ECCV (2022)", + "7. Chen, R., Liu, Y., Kong, L., Zhu, X., Ma, Y., Li, Y., Hou, Y., Qiao, Y., Wang, W.: Clip2scene: Towards label-efficient 3d scene understanding by clip. In: CVPR (2023)", + "8.
Chen, Z., Li, B.: Bridging the domain gap: Self-supervised 3d scene understanding with foundation models. arXiv preprint arXiv:2305.08776 (2023)", + "9. Cheng, B., Schwing, A., Kirillov, A.: Per-pixel classification is not all you need for semantic segmentation. In: NeurIPS (2021)", + "10. Ding, R., Yang, J., Xue, C., Zhang, W., Bai, S., Qi, X.: Pla: Language-driven open-vocabulary 3d scene understanding. In: CVPR (2023)", + "11. Ding, Z., Wang, J., Tu, Z.: Open-vocabulary universal image segmentation with maskclip. In: ICML (2023)", + "12. Du, Y., Wei, F., Zhang, Z., Shi, M., Gao, Y., Li, G.: Learning to prompt for open-vocabulary object detection with vision-language model. In: CVPR (2022)", + "13. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for Autonomous Driving? The KITTI Vision Benchmark Suite. In: CVPR (2012)", + "14. Ghiasi, G., Gu, X., Cui, Y., Lin, T.Y.: Scaling open-vocabulary image segmentation with image-level labels. In: ECCV (2022)", + "15. Gu, X., Lin, T.Y., Kuo, W., Cui, Y.: Open-vocabulary object detection via vision and language knowledge distillation. ICLR (2022)", + "16. Ha, H., Song, S.: Semantic abstraction: Open-world 3d scene understanding from 2d vision-language models. In: CoRL (2022)", + "17. He, W., Jamonnak, S., Gou, L., Ren, L.: Clip-s4: Language-guided self-supervised semantic segmentation. In: CVPR (2023)", + "18. Hegde, D., Valanarasu, J.M.J., Patel, V.M.: Clip goes 3d: Leveraging prompt tuning for language grounded 3d recognition. arXiv preprint arXiv:2303.11313 (2023)", + "19. Hong, F., Zhou, H., Zhu, X., Li, H., Liu, Z.: Lidar-based panoptic segmentation via dynamic shifting network. In: CVPR (2021)", + "20. Hu, Q., Yang, B., Xie, L., Rosa, S., Guo, Y., Wang, Z., Trigoni, N., Markham, A.: Learning semantic segmentation of large-scale point clouds with random sampling. IEEE Transactions on Pattern Analysis and Machine Intelligence 44(11), 8338-8354 (2021)", + "21. Ilharco, G., Wortsman, M., Wightman, R., Gordon, C., Carlini, N., Taori, R., Dave, A., Shankar, V., Namkoong, H., Miller, J., Hajishirzi, H., Farhadi, A., Schmidt, L.: Openclip (Jul 2021). https://doi.org/10.5281/zenodo.5143773" + ], + "bbox": [ + 225, + 172, + 784, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "3D Open-Vocabulary Panoptic Segmentation", + "bbox": [ + 429, + 114, + 730, + 128 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "22. Jia, C., Yang, Y., Xia, Y., Chen, Y.T., Parekh, Z., Pham, H., Le, Q., Sung, Y.H., Li, Z., Duerig, T.: Scaling up visual and vision-language representation learning with noisy text supervision. In: ICML (2021)", + "23. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. In: ICLR (2015)", + "24. Kuo, W., Cui, Y., Gu, X., Piergiovanni, A., Angelova, A.: F-vlm: Open-vocabulary object detection upon frozen vision and language models. In: ICLR (2023)", + "25. Lambert, J., Liu, Z., Sener, O., Hays, J., Koltun, V.: Mseg: A composite dataset for multi-domain semantic segmentation. In: CVPR (2020)", + "26. Li, B., Weinberger, K.Q., Belongie, S., Koltun, V., Ranftl, R.: Language-driven semantic segmentation. In: ICLR (2022)", + "27. Li, J., He, X., Wen, Y., Gao, Y., Cheng, X., Zhang, D.: Panoptic-phnet: Towards real-time and high-precision lidar panoptic segmentation via clustering pseudo heatmap. In: CVPR (2022)", + "28.
Li, Z., Wang, W., Xie, E., Yu, Z., Anandkumar, A., Alvarez, J.M., Luo, P., Lu, T.: Panoptic segformer: Delving deeper into panoptic segmentation with transformers. In: CVPR (2022)", + "29. Liang, F., Wu, B., Dai, X., Li, K., Zhao, Y., Zhang, H., Zhang, P., Vajda, P., Marculescu, D.: Open-vocabulary semantic segmentation with mask-adapted clip. In: CVPR (2023)", + "30. Lin, T.Y., Goyal, P., Girshick, R., He, K., Dollar, P.: Focal loss for dense object detection. In: ICCV (2017)", + "31. Liu, Q., Wen, Y., Han, J., Xu, C., Xu, H., Liang, X.: Open-world semantic segmentation via contrasting and clustering vision-language embedding. In: ECCV (2022)", + "32. Liu, Z., Mao, H., Wu, C.Y., Feichtenhofer, C., Darrell, T., Xie, S.: A convnet for the 2020s. In: CVPR (2022)", + "33. Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: ICLR (2019)", + "34. Ma, C., Yang, Y., Wang, Y., Zhang, Y., Xie, W.: Open-vocabulary semantic segmentation with frozen vision-language models. BMVC (2022)", + "35. Peng, S., Genova, K., Jiang, C., Tagliasacchi, A., Pollefeys, M., Funkhouser, T., et al.: Openscene: 3d scene understanding with open vocabularies. In: CVPR (2023)", + "36. Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: Deep learning on point sets for 3d classification and segmentation. In: CVPR (2017)", + "37. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. NeurIPS (2017)", + "38. Qin, J., Wu, J., Yan, P., Li, M., Yuxi, R., Xiao, X., Wang, Y., Wang, R., Wen, S., Pan, X., et al.: Freeseg: Unified, universal and open-vocabulary image segmentation. In: CVPR (2023)", + "39. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: ICML (2021)", + "40. Razani, R., Cheng, R., Li, E., Taghavi, E., Ren, Y., Bingbing, L.: Gp-s3net: Graph-based panoptic sparse semantic segmentation network. In: ICCV (2021)", + "41. Rozenberszki, D., Litany, O., Dai, A.: Language-grounded indoor 3d semantic segmentation in the wild. In: ECCV (2022)", + "42. Sirohi, K., Mohan, R., Buscher, D., Burgard, W., Valada, A.: Efficientlps: Efficient lidar panoptic segmentation. IEEE Transactions on Robotics 38(3), 1894-1914 (2021)", + "43. Takmaz, A., Fedele, E., Sumner, R.W., Pollefeys, M., Tombari, F., Engelmann, F.: Openmask3d: Open-vocabulary 3d instance segmentation. In: NeuRIPS (2023)" + ], + "bbox": [ + 215, + 146, + 785, + 840 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Z. Xiao et al.", + "bbox": [ + 271, + 114, + 361, + 126 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "44. Tang, H., Liu, Z., Zhao, S., Lin, Y., Lin, J., Wang, H., Han, S.: Searching efficient 3d architectures with sparse point-voxel convolution. In: ECCV (2020)", + "45. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: NeurIPS (2017)", + "46. Wu, W., Fuxin, L., Shan, Q.: Pointconvformer: Revenge of the point-based convolution. In: CVPR (2023)", + "47. Xiao, Z., Zhang, W., Wang, T., Loy, C.C., Lin, D., Pang, J.: Position-guided point cloud panoptic segmentation transformer. arXiv preprint (2023)", + "48. 
Xu, J., Zhang, R., Dou, J., Zhu, Y., Sun, J., Pu, S.: Rpvnet: A deep and efficient range-point-voxel fusion network for lidar point cloud segmentation. In: ICCV (2021)", + "49. Xu, J., De Mello, S., Liu, S., Byeon, W., Breuel, T., Kautz, J., Wang, X.: Groupvit: Semantic segmentation emerges from text supervision. In: CVPR (2022)", + "50. Xu, J., Liu, S., Vahdat, A., Byeon, W., Wang, X., De Mello, S.: Open-vocabulary panoptic segmentation with text-to-image diffusion models. In: CVPR (2023)", + "51. Xu, M., Zhang, Z., Wei, F., Lin, Y., Cao, Y., Hu, H., Bai, X.: A simple baseline for open-vocabulary semantic segmentation with pre-trained vision-language model. In: ECCV (2022)", + "52. Xu, S., Wan, R., Ye, M., Zou, X., Cao, T.: Sparse cross-scale attention network for efficient lidar panoptic segmentation. In: AAAI (2022)", + "53. Yang, J., Ding, R., Wang, Z., Qi, X.: Regionplc: Regional point-language contrastive learning for open-world 3d scene understanding. In: CVPR (2024)", + "54. Yu, Q., He, J., Deng, X., Shen, X., Chen, L.C.: Convolutions die hard: Open-vocabulary segmentation with single frozen convolutional clip. In: NeurIPS (2023)", + "55. Zhang, J., Dong, R., Ma, K.: Clip-fo3d: Learning free open-world 3d scene representations from 2d dense clip. In: ICCV (2023)", + "56. Zhou, C., Loy, C.C., Dai, B.: Extract free dense labels from clip. In: ECCV (2022)", + "57. Zhou, Z., Lei, Y., Zhang, B., Liu, L., Liu, Y.: Zegclip: Towards adapting clip for zero-shot semantic segmentation. In: CVPR (2023)", + "58. Zhou, Z., Zhang, Y., Foroosh, H.: Panoptic-polarnet: Proposal-free lidar point cloud panoptic segmentation. In: CVPR (2021)", + "59. Zou, X., Dou, Z.Y., Yang, J., Gan, Z., Li, L., Li, C., Dai, X., Behl, H., Wang, J., Yuan, L., et al.: Generalized decoding for pixel, image, and language. 
In: CVPR (2023)" + ], + "bbox": [ + 212, + 146, + 787, + 617 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "3D Open-Vocabulary Panoptic Segmentation", + "bbox": [ + 429, + 114, + 730, + 128 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 16 + } +] \ No newline at end of file diff --git a/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/eb0bea0f-431f-4835-9237-239fd0d64e99_model.json b/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/eb0bea0f-431f-4835-9237-239fd0d64e99_model.json new file mode 100644 index 0000000000000000000000000000000000000000..eaacdb178e03d9fbef4fe432242254d95c80c4de --- /dev/null +++ b/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/eb0bea0f-431f-4835-9237-239fd0d64e99_model.json @@ -0,0 +1,2544 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.218, + 0.141, + 0.787, + 0.187 + ], + "angle": 0, + "content": "3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.212, + 0.782, + 0.26 + ], + "angle": 0, + "content": "Zihao Xiao\\(^{1*}\\), Longlong Jing\\(^{2}\\), Shangxuan Wu\\(^{2}\\), Alex Zihao Zhu\\(^{2}\\), Jingwei Ji\\(^{2}\\), Chiyu Max Jiang\\(^{2}\\), Wei-Chih Hung\\(^{2}\\), Thomas Funkhouser\\(^{3}\\), Weicheng Kuo\\(^{4}\\), Anelia Angelova\\(^{4}\\), Yin Zhou\\(^{2}\\), and Shiwei Sheng\\(^{2*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.239, + 0.269, + 0.765, + 0.285 + ], + "angle": 0, + "content": "1 Johns Hopkins University, 2 Waymo, 3 Google Research, 4 Google DeepMind" + }, + { + "type": "text", + "bbox": [ + 0.261, + 0.318, + 0.744, + 0.596 + ], + "angle": 0, + "content": "Abstract. 3D panoptic segmentation is a challenging perception task, especially in autonomous driving. It aims to predict both semantic and instance annotations for 3D points in a scene. Although prior 3D panoptic segmentation approaches have achieved great performance on closed-set benchmarks, generalizing these approaches to unseen things and unseen stuff categories remains an open problem. For unseen object categories, 2D open-vocabulary segmentation has achieved promising results that solely rely on frozen CLIP backbones and assembling multiple classification outputs. However, we find that simply extending these 2D models to 3D does not guarantee good performance due to poor per-mask classification quality, especially for novel stuff categories. In this paper, we propose the first method to tackle 3D open-vocabulary panoptic segmentation. Our model takes advantage of the fusion between learnable LiDAR features and dense frozen vision CLIP features, using a single classification head to make predictions for both base and novel classes. To further improve the classification performance on novel classes and leverage the CLIP model, we propose two novel loss functions: object-level distillation loss and voxel-level distillation loss. Our experiments on the nuScenes and SemanticKITTI datasets show that our method outperforms the strong baseline by a large margin." 
+ }, + { + "type": "text", + "bbox": [ + 0.261, + 0.608, + 0.741, + 0.636 + ], + "angle": 0, + "content": "Keywords: Autonomous driving \\(\\cdot\\) 3D panoptic segmentation \\(\\cdot\\) Vision-language" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.682, + 0.377, + 0.698 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.712, + 0.788, + 0.819 + ], + "angle": 0, + "content": "3D panoptic segmentation is a crucial task in computer vision with many real-world applications, most notably in autonomous driving. It combines 3D semantic and instance segmentation to produce per-point predictions for two different types of objects: things (e.g., car) and stuff (e.g., road). To date, there has been significant progress in 3D panoptic segmentation [27, 40, 42, 47, 52, 58]. Most recently, methods such as [47] produce panoptic segmentation predictions directly from point clouds by leveraging learned queries to represent objects and" + }, + { + "type": "page_footnote", + "bbox": [ + 0.232, + 0.826, + 0.434, + 0.841 + ], + "angle": 0, + "content": "* Work done while at Waymo" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.362, + 0.128 + ], + "angle": 0, + "content": "Z. Xiao et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.284 + ], + "angle": 0, + "content": "Transformer-based [45] architectures [2, 4] to perform the modeling. However, existing models only predict panoptic segmentation results for a closed-set of objects. They fail to create predictions for the majority of unseen object categories in the scene, hindering the application of these algorithms to real-world scenarios, especially for autonomous driving. In this work, we focus on segmenting unseen things and unseen stuff objects in autonomous driving scenarios. We follow [10, 53] and develop models under the open-vocabulary setting: we divide the object categories into base (seen) categories and novel (unseen) categories, and evaluate models that are only trained on base categories." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.287, + 0.788, + 0.408 + ], + "angle": 0, + "content": "Such open-world computer vision tasks [3] benefit from the recent advancements in vision-language (V-L) models [22, 39]. In 2D vision, there are many successful methods in open-vocabulary object detection [12, 15, 24] and segmentation [11, 50, 54]. These methods make predictions in a shared image-text embedding space, where predictions for unseen categories are produced by comparing the similarity of an object with the text embedding of the category. However, these methods are only possible due to the vast amounts of paired image-text data available, making it difficult to train similar models for 3D data." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.411, + 0.788, + 0.547 + ], + "angle": 0, + "content": "Instead, researchers have continued to leverage the effectiveness of these 2D vision-language models for 3D with the help of pixel-point correspondences by running inference on 2D images and then aligning with the 3D features. These methods have achieved promising results on open-vocabulary semantic segmentation [10,35,53,55] and instance segmentation [10,43,53], individually. 
However, there are no methods that address the problem of 3D open-vocabulary panoptic segmentation, i.e., addressing both open-vocabulary semantic segmentation and open-vocabulary instance segmentation at the same time. The challenge lies in how to handle segmentation for novel things and stuff objects simultaneously." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.55, + 0.788, + 0.716 + ], + "angle": 0, + "content": "3D open-vocabulary panoptic segmentation is a challenging problem, due to both the significant domain gaps between the camera and LiDAR modalities and unsolved problems in open-vocabulary segmentation. Many existing open-vocabulary works rely on similarities between text embeddings of class names and pre-trained V-L features to obtain associations between predictions and classes [35,43,55]. However, while projecting 2D V-L features to 3D can account for a large part of the scene, there are often many points unaccounted for due to unmatched pixel/point distributions and differing fields of view between sensors. Some 3D open-vocabulary works [10,53] apply contrastive learning to obtain better association between language and points, but they require extra captioning models and do not address the difficulties of detecting novel stuff classes." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.72, + 0.788, + 0.84 + ], + "angle": 0, + "content": "In this work, we aim to address these two issues with a novel architecture for 3D open-vocabulary panoptic segmentation. Building on existing 3D closed-set panoptic segmentation methods, we train a learned LiDAR feature encoder in parallel with a frozen, pre-trained camera CLIP model. By fusing the 3D LiDAR features with the 2D CLIP features, our model is able to learn rich features throughout the entire 3D sensing volume, even if there are no camera features in certain regions. In addition, we apply a pair of novel distillation losses that allow the 3D encoder to learn both object-level and voxel-level features which" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.431, + 0.115, + 0.732, + 0.13 + ], + "angle": 0, + "content": "3D Open-Vocabulary Panoptic Segmentation" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.207 + ], + "angle": 0, + "content": "live inside the CLIP feature space. This provides a learned module in 3D space which can directly be compared with text embeddings. These losses also provide useful training supervision to unknown parts of the scene where there would otherwise be no loss gradient." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.208, + 0.785, + 0.252 + ], + "angle": 0, + "content": "With the proposed model and loss functions, our method significantly outperforms the strong baseline on multiple datasets. Our contributions are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.261, + 0.784, + 0.29 + ], + "angle": 0, + "content": "- We present the first approach for 3D open-vocabulary panoptic segmentation in autonomous driving." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.291, + 0.784, + 0.32 + ], + "angle": 0, + "content": "- We propose two novel loss functions, object-level distillation loss and voxel-level distillation loss to help segment novel things and novel stuff objects." 
+ }, + { + "type": "text", + "bbox": [ + 0.226, + 0.321, + 0.784, + 0.35 + ], + "angle": 0, + "content": "- We experimentally show that our proposed method significantly outperforms the strong baseline model on both the nuScenes and SemanticKITTI datasets." + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.261, + 0.784, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.372, + 0.388, + 0.387 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.403, + 0.784, + 0.432 + ], + "angle": 0, + "content": "This work is closely related to 3D panoptic segmentation, 2D open-vocabulary segmentation, and 3D open-vocabulary segmentation." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.433, + 0.785, + 0.568 + ], + "angle": 0, + "content": "3D panoptic segmentation. The goal of 3D panoptic segmentation is to group 3D points according to their semantics and identities. This is a challenging task and relies on a good representation of the 3D data [1,20,36,37,44,46,48]. Most panoptic segmentation models have separate branches for instance segmentation and semantic segmentation [19,27,44,58]. Following DETR [5], the recently proposed P3Former [47] uses learnable queries and a transformer architecture to obtain state-of-the-art performance on multiple panoptic segmentation benchmarks. Although those closed-set methods achieve impressive results, they cannot predict the labels and masks for novel classes." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.569, + 0.785, + 0.719 + ], + "angle": 0, + "content": "2D open-vocabulary segmentation. 2D open-vocabulary segmentation aims to group image pixels according to their semantics or identities for base (seen) or novel (unseen) categories. The prediction on novel categories is usually done by leveraging large V-L models [22,39]. There are many works that focus on open-vocabulary semantic segmentation [14,17,26,29,31,34,49,51,56,57,59]. Some work has also explored open-vocabulary panoptic segmentation [11,38,50]. Recently, FC-CLIP [54] proposes a single-stage framework based on a frozen convolutional CLIP backbone [21,32,39] for 2D open-vocabulary panoptic segmentation that achieves state-of-the-art performance. However, due to the camera-LiDAR domain gap, we show that simply extending it to 3D leads to poor performance." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.72, + 0.785, + 0.84 + ], + "angle": 0, + "content": "3D open-vocabulary segmentation. 3D open-vocabulary segmentation is less explored due to the lack of 3D point-to-text association. One common practice is to utilize V-L models and use 2D-3D pairings to obtain rich, structured information in 3D [7,8,10,16,18,35,41,43,53,55]. Notably, CLIP2Scene [7] proposes a semantic-driven cross-modal contrastive learning framework. PLA [10] leverages images as a bridge and builds hierarchical 3D-caption pairs for contrastive learning. OpenScene [35] extracts per-pixel CLIP features using a pre-trained V-L model [14,26] and then derives dense 3D features by projecting 3D points onto" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.363, + 0.127 + ], + "angle": 0, + "content": "Z. Xiao et al."
+ }, + { + "type": "image", + "bbox": [ + 0.22, + 0.144, + 0.788, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.322, + 0.788, + 0.392 + ], + "angle": 0, + "content": "Fig. 1: Overview of our method. Given a LiDAR point cloud and the corresponding camera images, LiDAR features are extracted with a learnable LiDAR encoder, while vision features are extracted by a frozen CLIP vision model. The extracted LiDAR features and the frozen CLIP vision features are then fused and fed to a query-based transformer model to predict instance masks and semantic classes." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.421, + 0.788, + 0.513 + ], + "angle": 0, + "content": "image planes. One concurrent work, RegionPLC [53], utilizes regional visual prompts to create dense captions and perform point-discriminative contrastive learning, which is used for semantic segmentation or instance segmentation, individually. In contrast, our work does not rely on any captioning model or extra contrastive learning, but only depends on pre-trained CLIP features. Our model also handles semantic segmentation and instance segmentation simultaneously." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.535, + 0.331, + 0.551 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.567, + 0.788, + 0.645 + ], + "angle": 0, + "content": "This section is organized as follows. First, we define the 3D open-vocabulary panoptic segmentation task. Then we provide detailed descriptions of the model architecture as well as the proposed loss functions. The overview of our method is presented in Fig. 1, and the two proposed loss functions are illustrated in Fig. 2 (a) and Fig. 2 (b)." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.664, + 0.422, + 0.68 + ], + "angle": 0, + "content": "3.1 Problem Definition" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.69, + 0.788, + 0.749 + ], + "angle": 0, + "content": "In 3D panoptic segmentation, the goal is to annotate every point in a point cloud. For stuff classes (e.g., road, vegetation), a category label is assigned according to its semantics. For things classes (e.g., cars, pedestrians), an instance label is assigned to an object in addition to its semantic label." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.788, + 0.841 + ], + "angle": 0, + "content": "In open-vocabulary panoptic segmentation, the models are trained on \\( C_B \\) base (seen) categories. At test time, besides these \\( C_B \\) base categories, the data will contain \\( C_N \\) novel (unseen) categories. Following the settings of prior work [15, 24, 54], we assume the availability of the names of the novel categories during inference, but the novel categories are not present in the training data and their names are not known during training. Note that we do not apply any prompt engineering, as
+ }, + { + "type": "title", + "bbox": [ + 0.215, + 0.198, + 0.633, + 0.215 + ], + "angle": 0, + "content": "3.2 3D Open-Vocabulary Panoptic Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.222, + 0.788, + 0.417 + ], + "angle": 0, + "content": "Most of the previous 3D open-vocabulary works only address semantic segmentation [7,8,10,16,18,35,41,53,55] or instance segmentation [43,53] separately, and there is no existing work for the 3D open-vocabulary panoptic segmentation task, which handles novel things and novel stuff objects simultaneously. A natural idea would be to extend 2D open-vocabulary segmentation methods into a 3D counterpart. We start with P3Former [47], a state-of-the-art transformer-based 3D closed-set panoptic segmentation model, and add the essential components to support open-vocabulary capability by following FC-CLIP [54], a 2D open-vocabulary segmentation model that achieves state-of-the-art performance on multiple datasets. However, we found that this simple extension leads to poor performance in our experiments, and in this work we propose several new features to improve the performance of our model. More implementation details for this baseline can be found in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.418, + 0.788, + 0.493 + ], + "angle": 0, + "content": "In order to improve the open-vocabulary capability of our model, we propose significant changes to the P3Former architecture, as well as two new loss functions. The architecture of our method is shown in Fig. 1 and mainly consists of multimodal feature fusion, a segmentation head, and input text embeddings for open-vocabulary classification." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.493, + 0.788, + 0.644 + ], + "angle": 0, + "content": "Multimodal feature fusion. The core idea of many recent 2D open-vocabulary works is to leverage the features of large-scale vision-language models [22, 39]. These methods [54] mainly rely on frozen CLIP features and use a transformer model to perform the 2D panoptic segmentation task. However, this is not optimal for 3D tasks since many points do not have corresponding valid camera pixels, leading to invalid features that prevent meaningful predictions. To fully exploit the power of the CLIP vision features and to learn complementary features from both the camera CLIP features and the LiDAR features, we generate predictions from the fusion of CLIP features extracted by a frozen CLIP model and learned LiDAR features from a LiDAR encoder." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.645, + 0.788, + 0.841 + ], + "angle": 0, + "content": "As shown in Fig. 1, there are three major components for the multimodal feature fusion, including a LiDAR encoder, a vision CLIP encoder, and voxel-level feature fusion. The LiDAR encoder is a model which takes an unordered set of points as input and extracts per-point features. We apply voxelization to the features from the LiDAR encoder, producing output features \\( F_{lidar} \\in \\mathbb{R}^{V \\times D_{lidar}} \\), where \\( V \\) is the number of voxels and \\( D_{lidar} \\) is the dimension of the learned LiDAR feature. The Vision CLIP encoder is a pre-trained V-L segmentation model [14] which extracts pixel-wise CLIP features from each camera image. 
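The per-voxel pooling of frozen CLIP features described in the remainder of this paragraph can be sketched as follows. This is a minimal illustration, not the authors' released code: the `project_to_image` helper, all tensor shapes, and the use of `index_add_` are assumptions.

```python
import torch

def pool_clip_features_per_voxel(points_xyz, voxel_ids, pixel_clip,
                                 intrinsics, extrinsics, num_voxels, d_emb):
    """Average pixel-wise CLIP features over the LiDAR points of each voxel.

    points_xyz: (N, 3) LiDAR points; voxel_ids: (N,) long, voxel index per point;
    pixel_clip: (H, W, D_emb) frozen CLIP features for one camera image.
    """
    # Hypothetical helper: project points into the image plane using the
    # intrinsic/extrinsic calibration; `valid` marks points inside the image.
    uv, valid = project_to_image(points_xyz, intrinsics, extrinsics)

    f_vclip = torch.zeros(num_voxels, d_emb)            # zero padding by default
    counts = torch.zeros(num_voxels, 1)
    u, v = uv[valid, 0].long(), uv[valid, 1].long()
    feats = pixel_clip[v, u]                            # (N_valid, D_emb)
    f_vclip.index_add_(0, voxel_ids[valid], feats)      # sum features per voxel
    counts.index_add_(0, voxel_ids[valid], torch.ones_like(feats[:, :1]))
    return f_vclip / counts.clamp(min=1)                # mean; zeros if uncovered

# The decoder input would then be the concatenation of learned LiDAR features
# and these frozen voxel CLIP features: torch.cat([f_lidar, f_vclip], dim=-1).
```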
Within each voxel, every LiDAR point is projected into the camera image plane based on the intrinsic and extrinsic calibration parameters to index into the corresponding vision CLIP features; the vision CLIP features of all points belonging to each voxel are then averaged to represent that voxel. Zero padding is used for points which do not have any valid corresponding camera pixels. The" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.363, + 0.128 + ], + "angle": 0, + "content": "Z. Xiao et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.146, + 0.788, + 0.253 + ], + "angle": 0, + "content": "voxel CLIP features will be referred to as \\( F_{vclip} \\in \\mathbb{R}^{V \\times D_{emb}} \\), where \\( V \\) is the number of voxels after voxelization and \\( D_{emb} \\) is the dimension of the CLIP features. Finally, the learned per-voxel LiDAR features and frozen per-voxel vision CLIP features are concatenated and used as input to the transformer decoder in the segmentation head. This feature fusion enables our model to learn complementary information from both the LiDAR and CLIP features, allowing us to fine-tune our backbone for each dataset's specific data distribution." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.253, + 0.789, + 0.465 + ], + "angle": 0, + "content": "Segmentation head. The segmentation head is a transformer [45] model that takes the LiDAR-Vision fused feature as input to produce panoptic segmentation results. Prior works, including existing 2D open-vocabulary works such as FC-CLIP [54], typically use learnable queries \\( q \\) to represent each instance or thing, and they contain a mask prediction head \\( f_{mask} \\) to produce the corresponding mask for each individual object and a classification head \\( f_{cls} \\) to predict the per-mask class score for each known class. However, as a result, they also need to rely on another classifier to handle novel categories. Our goal is to use a single model to handle the prediction for both base and novel categories. Thus, we predict a class embedding instead of a class score for each mask. During training, the model learns to regress an analog of the CLIP vision embedding for each mask, and the category prediction can be obtained by calculating its similarity with the CLIP text embedding of text queries during the inference stage. The class embedding \\( f_{cls} \\) prediction is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.427, + 0.472, + 0.786, + 0.49 + ], + "angle": 0, + "content": "\\[\nv_{q} = f_{cls}(q) \\in \\mathbb{R}^{D_{emb}}, \\tag{1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.496, + 0.788, + 0.557 + ], + "angle": 0, + "content": "where \\( v_{q} \\) is in the CLIP embedding space. The predicted class logits are then computed from the cosine similarity between the predicted class embedding and the text embedding of every category name from the evaluation set using a frozen CLIP model. 
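Before the formal definition in Eq. (2) below, a minimal sketch of this classification step; the exp-parameterized learnable temperature and the tensor shapes are illustrative assumptions, not the authors' implementation:

```python
import torch
import torch.nn.functional as F

def open_vocab_logits(query_feats, cls_head, text_emb, log_T):
    """Eqs. (1)-(2): predict a class embedding per query, then score it
    against the CLIP text embedding of every category name.

    query_feats: (Q, D_q) decoder query features
    cls_head:    small head mapping D_q -> D_emb (f_cls in Eq. (1))
    text_emb:    (C, D_emb) frozen CLIP text embeddings; C = C_B during
                 training and C_B + C_N during testing
    log_T:       learnable scalar; T = exp(log_T) keeps the temperature positive
    """
    v_q = cls_head(query_feats)                                       # Eq. (1)
    cos = F.normalize(v_q, dim=-1) @ F.normalize(text_emb, dim=-1).T  # (Q, C)
    return cos / torch.exp(log_T)                                     # Eq. (2)
```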
The classification logits are defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.339, + 0.563, + 0.786, + 0.592 + ], + "angle": 0, + "content": "\\[\ns_{v_{q}} = \\frac{1}{T} \\left[ \\cos(v_{q}, t_{1}), \\cos(v_{q}, t_{2}), \\dots, \\cos(v_{q}, t_{C}) \\right] \\tag{2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.598, + 0.788, + 0.644 + ], + "angle": 0, + "content": "where \\( t_i \\in \\mathbb{R}^{D_{emb}} \\), \\( i \\in \\{1, 2, \\dots, C\\} \\), is the text embedding of the \\( i \\)-th category, \\( C \\) is the number of categories (\\( C_B \\) in training and \\( C_B + C_N \\) in testing), and \\( T \\) is a learnable temperature term that controls the concentration of the distribution." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.645, + 0.79, + 0.841 + ], + "angle": 0, + "content": "Query assignment. A common practice [9, 54] for transformer-based panoptic segmentation models is to utilize a single set of queries to make predictions for both things and stuff classes jointly. In contrast, P3Former uses one query set to represent things classes after bipartite matching and one fixed query set for stuff classes. We have found that this separation of things queries and stuff queries makes our model converge faster and improves overall performance, and a similar pattern has been observed in other tasks [28]. However, the fixed set of queries for stuff classes is not applicable to the open-vocabulary setting due to the unknown number of novel stuff classes. To take advantage of the benefits of separating things queries and stuff queries, we propose to predict the base stuff classes with a fixed set of queries and utilize a set of learnable queries to target base things classes and all novel (things and stuff) classes. More details of the query assignment can be found in the supplementary materials." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.431, + 0.115, + 0.732, + 0.13 + ], + "angle": 0, + "content": "3D Open-Vocabulary Panoptic Segmentation" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "image", + "bbox": [ + 0.219, + 0.145, + 0.508, + 0.255 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.297, + 0.256, + 0.433, + 0.266 + ], + "angle": 0, + "content": "(a) Object-Level Distillation Loss." + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.146, + 0.784, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.581, + 0.256, + 0.713, + 0.266 + ], + "angle": 0, + "content": "(b) Voxel-Level Distillation Loss." + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.29, + 0.785, + 0.318 + ], + "angle": 0, + "content": "Fig. 2: (a) the proposed object-level distillation loss, and (b) the proposed voxel-level distillation loss." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.347, + 0.377, + 0.361 + ], + "angle": 0, + "content": "3.3 Loss Function" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.371, + 0.785, + 0.476 + ], + "angle": 0, + "content": "Closed-set panoptic segmentation models [47] are typically optimized with objective functions consisting of a classification loss \\( L_{cls} \\) and a mask prediction loss \\( L_{mask} \\). 
We follow P3Former [47] for these two losses: the classification loss \\( L_{cls} \\) optimizes the focal loss [30] between the class predictions and the category labels, while the mask loss \\( L_{mask} \\) optimizes the voxel-query classification loss. Besides the two standard loss functions, we propose two simple yet effective losses to apply distillation from the CLIP model at different levels." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.477, + 0.787, + 0.629 + ], + "angle": 0, + "content": "Object-level distillation loss. Similar to previous methods [50, 54], we use the cosine similarity between predicted class embeddings and class text CLIP embeddings to produce classification scores. However, the classification loss applied to Eq. (2) only enforces similarity to known classes. In this work, we make the assumption that the frozen CLIP features are discriminative with respect to open-vocabulary classes and have good out-of-distribution generalization. We propose an additional training loss which forces our predicted object-level class embeddings to be similar to the CLIP embeddings within their corresponding masks after matching. Similar to [54], we utilize voxel vision CLIP features to get an embedding for each query \\( q \\) by mask pooling the vision CLIP features:" + }, + { + "type": "equation", + "bbox": [ + 0.38, + 0.636, + 0.787, + 0.673 + ], + "angle": 0, + "content": "\\[\nw_{q} = \\frac{1}{\\left| M_{q} \\right|} \\sum_{p} \\mathbb{1}(p \\in M_{q}) \\, F_{vclip}(p) \\tag{3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.682, + 0.785, + 0.711 + ], + "angle": 0, + "content": "where \\( M_q \\) is the set of points \\( p \\) belonging to the mask for query \\( q \\). Our object-level distillation loss is then defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.344, + 0.718, + 0.787, + 0.756 + ], + "angle": 0, + "content": "\\[\nL_{O} = \\frac{1}{\\left| Q_{\\text{matched}} \\right|} \\sum_{q \\in Q_{\\text{matched}}} \\left( 1 - \\cos(v_{q}, w_{q}) \\right), \\tag{4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.765, + 0.788, + 0.841 + ], + "angle": 0, + "content": "where \\( Q_{\\text{matched}} \\) is the set of queries matched with ground truth objects during training, \\( v \\) is the set of predicted class embeddings, and \\( w \\) is the set of mask-pooled CLIP embeddings. This loss forces the model to directly distill object-level camera CLIP features and improves model performance for novel things classes. We also experimented with applying \\( L_O \\) to all predicted masks, but we" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.363, + 0.128 + ], + "angle": 0, + "content": "Z. Xiao et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.177 + ], + "angle": 0, + "content": "found that this slightly reduced model performance, likely due to the presence of masks that do not correspond to any objects in the scene." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.178, + 0.788, + 0.329 + ], + "angle": 0, + "content": "Voxel-level distillation loss. While the object-level distillation loss distills the per-object features from the CLIP model, it does not provide any supervision for the mask prediction head, which would otherwise only receive supervision for known classes. 
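Before returning to the voxel-level loss, a sketch of Eqs. (3)-(4); binarizing the predicted masks with a 0.5 threshold and the tensor shapes are assumptions, not the paper's published implementation:

```python
import torch
import torch.nn.functional as F

def object_level_distillation_loss(v_q, mask_probs, f_vclip, matched_idx):
    """Eqs. (3)-(4): pull each matched query's class embedding toward the
    mask-pooled frozen CLIP features.

    v_q:         (Q, D_emb) predicted class embeddings
    mask_probs:  (Q, V) predicted per-voxel mask probabilities
    f_vclip:     (V, D_emb) frozen per-voxel CLIP features
    matched_idx: long tensor of queries matched to ground-truth objects
    """
    masks = (mask_probs[matched_idx] > 0.5).float()          # (Q_m, V)
    # Eq. (3): average the voxel CLIP features inside each predicted mask.
    w_q = masks @ f_vclip / masks.sum(dim=1, keepdim=True).clamp(min=1)
    # Eq. (4): one minus cosine similarity, averaged over matched queries.
    return (1 - F.cosine_similarity(v_q[matched_idx], w_q, dim=-1)).mean()
```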
We found this particularly problematic for unknown stuff classes, which tend to be more spread out and cover larger and more diverse parts of the scene. In addition, the object-level loss is only applied to queries with relatively accurate mask predictions in order to learn useful CLIP features. To target these issues, we propose the voxel-level distillation loss to explicitly learn voxel-level CLIP features, which does not depend on any labels and can be applied to all queries. In particular, the voxel-level distillation loss is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.441, + 0.337, + 0.786, + 0.356 + ], + "angle": 0, + "content": "\\[\nF_{rec} = M_{Q}^{T} F_{Qemb} \\tag{5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.364, + 0.788, + 0.426 + ], + "angle": 0, + "content": "where \\(Q\\) is the number of queries, \\(F_{Qemb} \\in \\mathbb{R}^{Q \\times D_{emb}}\\) is the matrix of predicted embeddings for all queries, and \\(M_Q \\in \\mathbb{R}^{Q \\times V}\\) is the matrix of predicted per-voxel mask probabilities for all queries. The reconstructed features can be regarded as the weighted sum of all queries for each voxel. We supervise these features with the voxel CLIP features:" + }, + { + "type": "equation", + "bbox": [ + 0.426, + 0.44, + 0.786, + 0.457 + ], + "angle": 0, + "content": "\\[\nL_{V} = L_{1}\\left(F_{rec}, F_{vclip}\\right) \\tag{6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.46, + 0.788, + 0.567 + ], + "angle": 0, + "content": "Unlike the object-level distillation loss, which is only applied to queries with matched ground truth, this loss is applied to all predicted mask scores and queries. In our experiments, we found that this loss significantly improves performance on novel stuff categories in particular, likely as it does not require exact matches with the ground truth, which can be difficult for large stuff classes. However, this loss is still susceptible to noisy or low quality mask scores, and we found that larger weights for this loss can disrupt training." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.567, + 0.788, + 0.657 + ], + "angle": 0, + "content": "To summarize, \\( L_{O} \\) removes the need for the ensemble of classifiers used in [14, 15, 24, 50, 54] and enables open-vocabulary classification with a single trainable classifier. \\( L_{V} \\) uses a scene-level representation formed by the embeddings of all queries, while previous methods only consider object-level representations. Combining \\( L_{O} \\) with \\( L_{V} \\) enables segmenting novel things and novel stuff objects simultaneously. Our final objective function can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.327, + 0.668, + 0.786, + 0.684 + ], + "angle": 0, + "content": "\\[\nL = w_{\\alpha} L_{cls} + w_{\\beta} L_{mask} + w_{\\lambda} L_{O} + w_{\\gamma} L_{V} \\tag{7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.692, + 0.782, + 0.708 + ], + "angle": 0, + "content": "where \\(w_{\\alpha}, w_{\\beta}, w_{\\lambda}, w_{\\gamma}\\) are weights for the corresponding objective functions." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.727, + 0.457, + 0.741 + ], + "angle": 0, + "content": "3.4 Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.788, + 0.842 + ], + "angle": 0, + "content": "For the LiDAR encoder and segmentation head, we follow the implementation of the state-of-the-art closed-set 3D panoptic segmentation method P3Former [47]. 
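As a companion sketch, Eqs. (5)-(6) and the combined objective of Eq. (7) could be written as follows; the function and variable names are illustrative, while the loss weights are the values reported in Sec. 4.1:

```python
import torch
import torch.nn.functional as F

def voxel_level_distillation_loss(mask_probs, f_qemb, f_vclip):
    """Eqs. (5)-(6): reconstruct per-voxel features as the mask-weighted sum of
    all query embeddings and supervise them with the frozen voxel CLIP features.

    mask_probs: (Q, V) predicted per-voxel mask probabilities (M_Q)
    f_qemb:     (Q, D_emb) predicted embeddings for all queries (F_Qemb)
    f_vclip:    (V, D_emb) frozen voxel CLIP features
    """
    f_rec = mask_probs.T @ f_qemb       # Eq. (5): (V, D_emb)
    return F.l1_loss(f_rec, f_vclip)    # Eq. (6)

# Eq. (7) with w_alpha = w_beta = w_lambda = 1 and w_gamma = 0.1 (Sec. 4.1):
# loss = l_cls + l_mask + l_obj + 0.1 * voxel_level_distillation_loss(m, q, f)
```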
For the Vision CLIP encoder, we use OpenSeg [14] due to its remarkable performance on the recent open-vocabulary 3D semantic segmentation task [35]. For the Text CLIP encoder, we use CLIP [39] with a ViT-L/14 [45] backbone, following other state-of-the-art open-vocabulary works [35]." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.431, + 0.115, + 0.732, + 0.131 + ], + "angle": 0, + "content": "3D Open-Vocabulary Panoptic Segmentation" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.145, + 0.376, + 0.163 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.172, + 0.44, + 0.188 + ], + "angle": 0, + "content": "4.1 Experimental Setting" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.192, + 0.784, + 0.237 + ], + "angle": 0, + "content": "Following the state-of-the-art closed-set 3D panoptic segmentation work [27,40, 42,47,52,58], we conduct experiments and ablation studies on the nuScenes [4] and SemanticKITTI [2,13] datasets." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.238, + 0.785, + 0.328 + ], + "angle": 0, + "content": "nuScenes. The nuScenes dataset [4] is a public benchmark for autonomous driving. It consists of 1000 run segments and is further divided into prescribed train/val/test splits. We use all key frames with panoptic labels in the training set (28,130 frames) to train the model. Following the most recent state-of-the-art model P3Former [47], we evaluate the models on the validation set (6,019 frames). There are 16 semantic classes, including 10 things classes and 6 stuff classes." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.328, + 0.785, + 0.448 + ], + "angle": 0, + "content": "SemanticKITTI. SemanticKITTI [2, 13] is the first large dataset for LiDAR panoptic segmentation for autonomous driving. We conduct experiments on the training and validation sets, where panoptic segmentation labels are available. 3D open-vocabulary methods often require point and pixel pairing. In the SemanticKITTI dataset, however, the ego-vehicle is only equipped with frontal cameras. Thus, we filter out the points that are not visible in the camera view based on the provided camera parameters for both training and evaluation. There are 19 semantic classes, including 8 things classes and 11 stuff classes." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.449, + 0.785, + 0.584 + ], + "angle": 0, + "content": "Data split. Neither the nuScenes nor the SemanticKITTI dataset provides official base and novel class splits. Following the state-of-the-art 3D open-vocabulary segmentation work [6,10,53], we randomly split the classes into base and novel, while keeping the ratio between base and novel classes around \\(3:1\\). For nuScenes, the numbers of base and novel classes are 12 and 4, respectively, and this setting will be referred to as B12/N4. For SemanticKITTI, the numbers of base and novel classes are 14 and 5, and this setting will be referred to as B14/N5. We use the same splits in the main comparison with prior methods, and provide the results of more variations in the ablation studies and supplementary materials." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.585, + 0.785, + 0.719 + ], + "angle": 0, + "content": "Training details. We follow most of the architecture configurations in the official P3Former [47] implementation. 
We set \\( w_{\\alpha} = 1 \\), \\( w_{\\beta} = 1 \\), \\( w_{\\lambda} = 1 \\), \\( w_{\\gamma} = 0.1 \\) for both datasets. We use the AdamW [23, 33] optimizer with a weight decay of 0.01. We set the initial learning rate to 0.0008 with a multi-step decay schedule. The models are trained for 40 epochs, and we use the checkpoint of the last epoch for evaluation. To avoid ambiguous class names and better utilize the CLIP text embedding, we follow [25, 35, 54] and apply multi-label mapping for the text queries. During inference, if there are multiple labels for one class, we derive the class score by taking the maximum score among these labels." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.721, + 0.784, + 0.751 + ], + "angle": 0, + "content": "Evaluation metrics. We use panoptic quality \\((PQ)\\) as the major evaluation metric for the panoptic segmentation task. \\(PQ\\) is formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.755, + 0.785, + 0.806 + ], + "angle": 0, + "content": "\\[\n\\mathrm{PQ} = \\underbrace{\\frac{\\sum_{TP} \\mathrm{IoU}}{|TP|}}_{\\mathrm{SQ}} \\times \\underbrace{\\frac{|TP|}{|TP| + \\frac{1}{2}|FP| + \\frac{1}{2}|FN|}}_{\\mathrm{RQ}}. \\tag{8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.81, + 0.785, + 0.842 + ], + "angle": 0, + "content": "\\(PQ\\) is the product of segmentation quality \\((SQ)\\) and recognition quality \\((RQ)\\). We report all three metrics \\((PQ, RQ, SQ)\\) for all classes. We also" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.362, + 0.127 + ], + "angle": 0, + "content": "Z. Xiao et al." + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.147, + 0.36, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.28, + 0.255, + 0.302, + 0.263 + ], + "angle": 0, + "content": "PFC" + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.147, + 0.498, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.416, + 0.255, + 0.442, + 0.264 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.147, + 0.646, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.567, + 0.255, + 0.59, + 0.264 + ], + "angle": 0, + "content": "PFC" + }, + { + "type": "image", + "bbox": [ + 0.647, + 0.147, + 0.784, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.704, + 0.255, + 0.729, + 0.264 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.29, + 0.785, + 0.332 + ], + "angle": 0, + "content": "Fig. 3: Open-vocabulary panoptic segmentation results from PFC and our method on nuScenes. PFC predicts inaccurate categories and masks for the novel pedestrian (red), bus (yellow) and vegetation (green), while ours makes correct predictions." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.362, + 0.785, + 0.424 + ], + "angle": 0, + "content": "report \\( PQ \\), \\( RQ \\), \\( SQ \\) for novel things objects and novel stuff objects separately. In particular, \\( PQ_{N}^{Th} \\) denotes \\( PQ \\) for novel things classes and \\( PQ_{N}^{St} \\) denotes \\( PQ \\) for novel stuff classes. 
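As a worked reference for Eq. (8), PQ can be computed from matched segment pairs as in the sketch below; the IoU > 0.5 rule for counting a match as a true positive is the standard panoptic-quality convention rather than something stated in this excerpt:

```python
def panoptic_quality(tp_ious, num_fp, num_fn):
    """Eq. (8): PQ = SQ x RQ from the IoUs of true-positive matches.

    tp_ious: IoU values of matched prediction/ground-truth segment pairs (TP)
    num_fp:  number of unmatched predicted segments (FP)
    num_fn:  number of unmatched ground-truth segments (FN)
    """
    tp = len(tp_ious)
    if tp == 0:
        return 0.0, 0.0, 0.0
    sq = sum(tp_ious) / tp                        # segmentation quality
    rq = tp / (tp + 0.5 * num_fp + 0.5 * num_fn)  # recognition quality
    return sq * rq, sq, rq                        # PQ, SQ, RQ
```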
We also report the mean Intersection over Union (mIoU) for all classes to measure semantic segmentation quality." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.447, + 0.502, + 0.46 + ], + "angle": 0, + "content": "4.2 P3Former-FC-CLIP Baseline" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.473, + 0.788, + 0.64 + ], + "angle": 0, + "content": "As a baseline for novel-class panoptic segmentation, we construct a model from a fusion of P3Former [47] and FC-CLIP [54]. This baseline will be called P3Former-FC-CLIP (PFC). The baseline model takes the frozen voxel vision CLIP features as input, and the final prediction is obtained by geometric ensembling [14,15,24, 50,54] of the results from the classification head \\( f_{cls} \\) and another frozen classifier based on the similarity between the average-pooled class embedding \\( w_{q} \\) and the CLIP text embedding. Following FC-CLIP [54], the same set of learnable queries is used to represent both things and stuff classes. In summary, this baseline provides a comparison against our proposed method without the multimodal feature fusion module, the unified segmentation head, and the distillation losses. More information on this baseline can be found in the supplementary material." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.663, + 0.373, + 0.677 + ], + "angle": 0, + "content": "4.3 Main Results" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.689, + 0.788, + 0.809 + ], + "angle": 0, + "content": "Since there are no existing methods for the 3D open-vocabulary panoptic segmentation task, we mainly compare with three methods to demonstrate the capability of our method: (1) the strong open-vocabulary baseline method PFC to fairly demonstrate the strength of our method, (2) the closed-set state-of-the-art 3D panoptic segmentation method P3Former to understand the headroom of our method, and (3) the open-set, zero-shot state-of-the-art method for 3D semantic segmentation, OpenScene [35]. Comparisons on the nuScenes and SemanticKITTI datasets are shown in Tab. 1 and Tab. 3." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.81, + 0.788, + 0.84 + ], + "angle": 0, + "content": "Results on nuScenes dataset. Table 1 shows the quantitative comparison on the validation set of the nuScenes dataset. Our method significantly outperforms" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.432, + 0.115, + 0.732, + 0.13 + ], + "angle": 0, + "content": "3D Open-Vocabulary Panoptic Segmentation" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "table_caption", + "bbox": [ + 0.217, + 0.145, + 0.788, + 0.201 + ], + "angle": 0, + "content": "Table 1: Quantitative results of panoptic segmentation on nuScenes. We compare the performance of open-vocabulary and fully supervised models. All open-vocabulary models share the same randomly picked base/novel split: B12/N4. The novel things classes are bus, pedestrian and motorcycle. The novel stuff class is vegetation." + }, + { + "type": "table", + "bbox": [ + 0.222, + 0.215, + 0.784, + 0.309 + ], + "angle": 0, + "content": "<table><tr><td>
ModelTypeSupervision\\( {PQ} \\)\\( P{Q}_{N}^{Th} \\)\\( P{Q}_{N}^{St} \\)\\( {RQ} \\)\\( R{Q}_{N}^{Th} \\)\\( R{Q}_{N}^{St} \\)\\( {SQ} \\)\\( S{Q}_{N}^{Th} \\)\\( S{Q}_{N}^{St} \\)mIoU
P3Former [47]closed-setfull75.985.182.984.789.995.989.894.786.576.8
OpenScene [35]open-voczero-shot---------42.1
PFCopen-vocpartial54.837.30.563.642.10.884.289.360.455.5
Oursopen-vocpartial62.049.635.270.955.646.087.089.176.760.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.217, + 0.326, + 0.788, + 0.381 + ], + "angle": 0, + "content": "Table 2: Performance for base classes on nuScenes. We report the performance on base classes for models in Tab. 1. A gap still exists between open and closed-set methods for base classes. We show that this is due to lack of supervision of the whole scene as P3Former achieves similar performance when only trained on base categories." + }, + { + "type": "table", + "bbox": [ + 0.248, + 0.396, + 0.753, + 0.505 + ], + "angle": 0, + "content": "
ModelSupervisionTraining DataBase ThingsBase Stuff
\\( PQ_{B}^{Th} \\)\\( RQ_{B}^{Th} \\)\\( SQ_{B}^{Th} \\)\\( PQ_{B}^{St} \\)\\( RQ_{B}^{St} \\)\\( SQ_{B}^{St} \\)
P3Former [47]fullbase+novel73.480.590.973.985.385.9
P3Former [47]partialbase65.271.388.064.277.481.8
PFCpartialbase65.673.389.061.075.483.7
Ourspartialbase66.773.789.869.282.183.7
" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.536, + 0.787, + 0.612 + ], + "angle": 0, + "content": "the strong baseline PFC across all metrics. PFC works relatively well for the novel things classes, but performance on the novel stuff class collapses. This is likely because stuff classes tend to cover large parts of the scene, leading to diverse per-voxel CLIP features which may not be good representatives for their respective classes. Qualitative comparison is provided in Fig. 3." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.614, + 0.788, + 0.84 + ], + "angle": 0, + "content": "To further understand the headroom of our method, we also compare our model with the closed-set P3Former. Note that the comparison here is deliberately unfair since the supervision signals are different. Compared with the closed-set P3Former, our segmentation quality \\((SQ)\\) is good while there is a large gap on mask classification quality \\((RQ)\\). The gap is largely due to regressions in the novel classes, where precise supervision is not available for open-vocabulary models. For base classes, as shown in Tab. 2, the gap is relatively small except for a drop in \\(RQ_{B}^{Th}\\). We believe the closed-set P3Former sees ground truth supervision for the entire scene, while open-set methods do not receive supervision in the 'unknown class' regions. In fact, when P3Former is only trained on base categories, the performance is worse than our proposed method. Besides the comparison with the closed-set method, we also compare with the zero-shot state-of-the-art method OpenScene [35] which does not use any labels for training. In this comparison, our model significantly outperforms OpenScene in the mIoU metric for semantic segmentation. Note that this comparison is not en" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.362, + 0.127 + ], + "angle": 0, + "content": "Z. Xiao et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.787, + 0.201 + ], + "angle": 0, + "content": "Table 3: Quantitative results of panoptic segmentation on SemanticKITTI. We compare the performance different models. All open vocabulary models share the same randomly picked base/novel split: B14/N5. The novel things classes are bicycle and truck. The novel stuff classes are sidewalk, building and trunk." + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.215, + 0.784, + 0.294 + ], + "angle": 0, + "content": "
ModelTypeSupervision\\( {PQ} \\)\\( P{Q}_{N}^{Th} \\)\\( P{Q}_{N}^{St} \\)\\( {RQ} \\)\\( R{Q}_{N}^{Th} \\)\\( R{Q}_{N}^{St} \\)\\( {SQ} \\)\\( S{Q}_{N}^{Th} \\)\\( S{Q}_{N}^{St} \\)mIoU
P3Former [47]closed-setfull62.165.974.271.374.886.877.188.383.961.6
PFCopen-vocpartial33.712.00.440.115.00.667.681.147.333.4
Oursopen-vocpartial42.213.117.850.416.226.773.084.067.244.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.305, + 0.787, + 0.348 + ], + "angle": 0, + "content": "Table 4: Impact of each component. We evaluate the impact of each component using the base/novel split in Tab. 1. We observe that each component can provide improvements over the PCF baseline. Noticeably, \\( L_{V} \\) brings the biggest improvement." + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.361, + 0.784, + 0.476 + ], + "angle": 0, + "content": "
Components\\(PQ\\)\\(PQ^{Th}_{N}\\)\\(PQ^{St}_{N}\\)\\(RQ\\)\\(RQ^{Th}_{N}\\)\\(RQ^{St}_{N}\\)\\(SQ\\)\\(SQ^{Th}_{N}\\)\\(SQ^{St}_{N}\\)mIoU
QAFusion\\(L_{O}\\)\\(L_{V}\\)
54.837.30.563.642.10.884.289.360.455.5
55.535.70.464.040.80.784.387.456.556.6
56.438.10.465.043.50.684.687.461.356.4
56.343.80.264.849.20.385.188.964.054.0
62.049.635.270.955.646.087.089.176.760.1
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.506, + 0.785, + 0.595 + ], + "angle": 0, + "content": "tirely fair, as our method is trained with partial labels. Instead, the comparison is useful to understand the gap between the two types of open-vocabulary methods. The concurrent work RegionPLC [53] also reports open-vocabulary results for the semantic segmentation task on the nuScenes dataset. However, we cannot directly compare with this method since it removes one class (other-flat) and does not provide its base/novel split." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.596, + 0.787, + 0.732 + ], + "angle": 0, + "content": "Results on SemanticKITTI dataset. To demonstrate the generalization ability of our method across different datasets, we report the results on SemanticKITTI dataset in Tab. 3. Overall, we observe similar patterns as on the nuScenes dataset. The baseline achieves relatively poor overall performance and struggles with the novel stuff classes. Using our architecture and loss functions, our model significantly outperforms PFC on \\( PQ \\), with the largest margin for novel stuff classes. Note that the gap between the open-vocabulary methods (ours and PFC) and the closed-set method is larger on SemanticKITTI, likely due to the smaller dataset limiting performance." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.755, + 0.512, + 0.77 + ], + "angle": 0, + "content": "4.4 Ablation Studies and Analysis" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.78, + 0.787, + 0.84 + ], + "angle": 0, + "content": "To better understand the effectiveness of each component, we conduct ablation studies for each design choice and loss function on the nuScenes dataset. These results are shown in Tab. 4. We conduct five sets of experiments, starting with the PFC baseline and build upon it four ablations with different combinations." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.431, + 0.115, + 0.732, + 0.13 + ], + "angle": 0, + "content": "3D Open-Vocabulary Panoptic Segmentation" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.145, + 0.788, + 0.203 + ], + "angle": 0, + "content": "Table 5: Performance on a different split. We compare the performance with a split with 5 novel classes (B11/N5). The novel things classes are bicycle, car and construction vehicle. The novel stuff classes are terrain and man-made. Our method consistently outperforms the PFC baseline across all the metrics by a large margin." + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.214, + 0.785, + 0.282 + ], + "angle": 0, + "content": "
ModelTypeSupervision\\( {PQ} \\)\\( P{Q}_{N}^{Th} \\)\\( P{Q}_{N}^{St} \\)\\( {RQ} \\)\\( R{Q}_{N}^{Th} \\)\\( R{Q}_{N}^{St} \\)\\( {SQ} \\)\\( S{Q}_{N}^{Th} \\)\\( S{Q}_{N}^{St} \\)mIoU
P3Former [47]closed-setfull75.870.571.783.876.485.590.191.683.675.0
PFCopen-vocpartial43.927.70.651.733.21.080.282.462.745.2
Oursopen-vocpartial52.856.016.460.561.822.684.989.768.749.9
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.311, + 0.787, + 0.386 + ], + "angle": 0, + "content": "Impact of query assignment. Starting from the PFC baseline model, we add our proposed fixed query assignment for stuff categories. As shown in the second row of Tab. 4, with query assignment, the overall \\( PQ \\) improves by 0.7. The performance for the novel classes drop slightly, but improvement on the base classes overcomes this for the overall PQ." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.387, + 0.788, + 0.492 + ], + "angle": 0, + "content": "Impact of feature fusion. The third row of Tab. 4 shows the impact of feature fusion. Without feature fusion, our model already achieves 55.5 \\( PQ \\), demonstrating the power of the CLIP vision features. The third row shows that the performance with feature fusion for the model input improves the overall \\( PQ \\) by 0.9. This slightly improved the overall performance, but the improvement on the novel things class is the most significant, demonstrating that the learned LiDAR features and CLIP vision features are indeed complementary for the task." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.493, + 0.79, + 0.584 + ], + "angle": 0, + "content": "Impact of object-level distillation loss. The fourth row of the results in Tab. 4 shows the impact of the proposed object-level distillation loss. Note that for models with the object-level distillation loss, we remove the frozen class classification head and the ensemble in the PFC baseline, consolidating to a single class embedding head. Although the \\( RQ_N^{St} \\) slightly dips by 0.3 for the novel stuff classes, this loss can significantly improve the \\( RQ_N^{Th} \\) for the novel things class by 5.7." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.584, + 0.788, + 0.689 + ], + "angle": 0, + "content": "Impact of voxel-level distillation loss. We study the impact of the voxel-level distillation loss to see if it can further improve the performance given all of our designs. The results are shown in the last row of Tab. 4. With this loss function, \\( PQ \\) significantly improves by 5.7. The improvement on the novel split is particularly large, especially for the novel stuff classes. The \\( PQ_N^{St} \\) of the novel stuff class improves from 0.2 to 35.2, which demonstrates the importance of the voxel-level supervision to the performance of the novel stuff class." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.689, + 0.792, + 0.764 + ], + "angle": 0, + "content": "Performance of different splits. To validate the generalizability of our method, we conduct experiments on a different split (B11/N5) for the nuScenes dataset. As shown in Tab. 5, our proposed method consistently and significantly outperforms the strong baseline method. This again demonstrates the effectiveness of our design and the proposed loss functions." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.788, + 0.843 + ], + "angle": 0, + "content": "Open-vocabulary exploration. In previous experiments, we follow other 3D open-vocabulary works [6,10,53] and provide analytical results on pre-defined object categories, mainly due to the limited categories in current panoptic segmentation datasets. In practice, our model goes beyond detecting these object categories: we can take class embeddings \\( v_{q} \\) in Eq. 
(1) and compute the cosine" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.362, + 0.127 + ], + "angle": 0, + "content": "Z. Xiao et al." + }, + { + "type": "image", + "bbox": [ + 0.219, + 0.146, + 0.357, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.358, + 0.147, + 0.497, + 0.215 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.219, + 0.216, + 0.357, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.358, + 0.216, + 0.497, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.219, + 0.283, + 0.357, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.358, + 0.283, + 0.497, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.147, + 0.644, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.645, + 0.147, + 0.783, + 0.215 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.215, + 0.644, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.645, + 0.215, + 0.783, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.283, + 0.644, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.645, + 0.283, + 0.783, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.377, + 0.788, + 0.42 + ], + "angle": 0, + "content": "Fig. 4: Open-vocabulary exploration. We show the novel materials/objects in blue color. The orientation of the ego vehicle is fixed in the LiDAR point visualization while the reference images come from on of the surrounding cameras of the ego vehicle." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.45, + 0.788, + 0.525 + ], + "angle": 0, + "content": "similarity with CLIP embedding of any text. Fig. 4 shows that we can detect novel materials/objects that are not in the predefined category list. Note that the concept of open vocabulary is very different from domain adaptation, as open vocabulary refers to the ability to deal with novel inputs in a scene while domain adaptation addresses the difference in data distributions in different scenes." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.526, + 0.792, + 0.587 + ], + "angle": 0, + "content": "Limitations. Our models are only evaluated on current autonomous driving panoptic segmentation benchmarks, with limited number of category annotations. To further evaluate open-vocabulary performance, a large-scale autonomous driving benchmark with more diverse object categories is greatly desired." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.611, + 0.36, + 0.627 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.644, + 0.791, + 0.766 + ], + "angle": 0, + "content": "In this paper, we present the first approach for the open-vocabulary 3D panoptic segmentation task in autonomous driving by leveraging large vision-language models. We experimentally verified that simply extending the 2D open-vocabulary segmentation method into 3D does not yield good performance, and demonstrated that our proposed model design and loss functions significantly boost performance for this task. 
Our method significantly outperformed the strong baseline on multiple well-established benchmarks. We hope our work can shed light on the future studies of the 3D open-vocabulary panoptic segmentation." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.78, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Acknowledgements. We would like to thank Mahyar Najibi, Chao Jia, Zhenyao Zhu, Yolanda Wang, Charles R. Qi, Dragomir Anguelov, Tom Ouyang, Ruichi Yu, Chris Sweeney, Colin Graber, Yingwei Li, Sangjin Lee, Weilong Yang, and Congcong Li for the help to the project." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.431, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D Open-Vocabulary Panoptic Segmentation" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.145, + 0.323, + 0.16 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.174, + 0.785, + 0.215 + ], + "angle": 0, + "content": "1. Alonso, I., Riazuelo, L., Montesano, L., Murillo, A.C.: 3d-mininet: Learning a 2d representation from point clouds for fast and efficient 3d lidar semantic segmentation. IEEE Robotics and Automation Letters 5(4), 5432-5439 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.216, + 0.785, + 0.257 + ], + "angle": 0, + "content": "2. Behley, J., Garbade, M., Milioto, A., Quenzel, J., Behnke, S., Stachniss, C., Gall, J.: SemanticKITTI: A Dataset for Semantic Scene Understanding of LiDAR Sequences. In: ICCV (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.257, + 0.737, + 0.27 + ], + "angle": 0, + "content": "3. Bendale, A., Boult, T.: Towards open world recognition. In: CVPR (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.27, + 0.785, + 0.311 + ], + "angle": 0, + "content": "4. Caesar, H., Bankiti, V., Lang, A.H., Vora, S., Liong, V.E., Xu, Q., Krishnan, A., Pan, Y., Baldan, G., Beijbom, O.: nuscenes: A multimodal dataset for autonomous driving. In: CVPR (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.311, + 0.785, + 0.338 + ], + "angle": 0, + "content": "5. Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: ECCV (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.338, + 0.785, + 0.365 + ], + "angle": 0, + "content": "6. Cen, J., Yun, P., Zhang, S., Cai, J., Luan, D., Wang, M.Y., Liu, M., Tang, M.: Open-world semantic segmentation for LIDAR point clouds. In: ECCV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.365, + 0.785, + 0.405 + ], + "angle": 0, + "content": "7. Chen, R., Liu, Y., Kong, L., Zhu, X., Ma, Y., Li, Y., Hou, Y., Qiao, Y., Wang, W.: Clip2scene: Towards label-efficient 3d scene understanding by clip. In: CVPR (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.406, + 0.785, + 0.433 + ], + "angle": 0, + "content": "8. Chen, Z., Li, B.: Bridging the domain gap: Self-supervised 3d scene understanding with foundation models. arXiv preprint arXiv:2305.08776 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.433, + 0.785, + 0.46 + ], + "angle": 0, + "content": "9. Cheng, B., Schwing, A., Kirillov, A.: Per-pixel classification is not all you need for semantic segmentation. In: NeurIPS (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.46, + 0.785, + 0.487 + ], + "angle": 0, + "content": "10. 
Ding, R., Yang, J., Xue, C., Zhang, W., Bai, S., Qi, X.: Pla: Language-driven open-vocabulary 3d scene understanding. In: CVPR (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.488, + 0.785, + 0.514 + ], + "angle": 0, + "content": "1. Ding, Z., Wang, J., Tu, Z.: Open-vocabulary universal image segmentation with maskclip. In: ICML (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.514, + 0.785, + 0.542 + ], + "angle": 0, + "content": "2. Du, Y., Wei, F., Zhang, Z., Shi, M., Gao, Y., Li, G.: Learning to prompt for open-vocabulary object detection with vision-language model. In: CVPR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.542, + 0.785, + 0.568 + ], + "angle": 0, + "content": "3. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for Autonomous Driving? The KITTI Vision Benchmark Suite. In: CVPR (2012)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.568, + 0.785, + 0.595 + ], + "angle": 0, + "content": "4. Ghiasi, G., Gu, X., Cui, Y., Lin, T.Y.: Scaling open-vocabulary image segmentation with image-level labels. In: ECCV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.595, + 0.785, + 0.622 + ], + "angle": 0, + "content": "5. Gu, X., Lin, T.Y., Kuo, W., Cui, Y.: Open-vocabulary object detection via vision and language knowledge distillation. ICLR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.622, + 0.785, + 0.649 + ], + "angle": 0, + "content": "6. Ha, H., Song, S.: Semantic abstraction: Open-world 3d scene understanding from 2d vision-language models. In: CoRL (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.649, + 0.785, + 0.676 + ], + "angle": 0, + "content": "7. He, W., Jamonnak, S., Gou, L., Ren, L.: Clip-s4: Language-guided self-supervised semantic segmentation. In: CVPR (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.677, + 0.785, + 0.704 + ], + "angle": 0, + "content": "8. Hegde, D., Valanarasu, J.M.J., Patel, V.M.: Clip goes 3d: Leveraging prompt tuning for language grounded 3d recognition. arXiv preprint arXiv:2303.11313 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.704, + 0.785, + 0.731 + ], + "angle": 0, + "content": "9. Hong, F., Zhou, H., Zhu, X., Li, H., Liu, Z.: Lidar-based panoptic segmentation via dynamic shifting network. In: CVPR (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.731, + 0.785, + 0.785 + ], + "angle": 0, + "content": "20. Hu, Q., Yang, B., Xie, L., Rosa, S., Guo, Y., Wang, Z., Trigoni, N., Markham, A.: Learning semantic segmentation of large-scale point clouds with random sampling. IEEE Transactions on Pattern Analysis and Machine Intelligence 44(11), 8338-8354 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.785, + 0.785, + 0.84 + ], + "angle": 0, + "content": "21. Ilharco, G., Wortsman, M., Wightman, R., Gordon, C., Carlini, N., Taori, R., Dave, A., Shankar, V., Namkoong, H., Miller, J., Hajishirzi, H., Farhadi, A., Schmidt, L.: Openclip (Jul 2021). https://doi.org/10.5281/zenodo.5143773, https://doi.org/10.5281/zenodo.5143773" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.174, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.362, + 0.127 + ], + "angle": 0, + "content": "Z. Xiao et al." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.147, + 0.786, + 0.189 + ], + "angle": 0, + "content": "22. Jia, C., Yang, Y., Xia, Y., Chen, Y.T., Parekh, Z., Pham, H., Le, Q., Sung, Y.H., Li, Z., Duerig, T.: Scaling up visual and vision-language representation learning with noisy text supervision. In: ICML (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.189, + 0.786, + 0.217 + ], + "angle": 0, + "content": "23. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. In: ICLR (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.217, + 0.786, + 0.243 + ], + "angle": 0, + "content": "24. Kuo, W., Cui, Y., Gu, X., Piergiovanni, A., Angelova, A.: F-vlm: Open-vocabulary object detection upon frozen vision and language models. In: ICLR (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.243, + 0.786, + 0.271 + ], + "angle": 0, + "content": "25. Lambert, J., Liu, Z., Sener, O., Hays, J., Koltun, V.: Mseg: A composite dataset for multi-domain semantic segmentation. In: CVPR (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.271, + 0.786, + 0.297 + ], + "angle": 0, + "content": "26. Li, B., Weinberger, K.Q., Belongie, S., Koltun, V., Ranftl, R.: Language-driven semantic segmentation. In: ICLR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.297, + 0.786, + 0.338 + ], + "angle": 0, + "content": "27. Li, J., He, X., Wen, Y., Gao, Y., Cheng, X., Zhang, D.: Panoptic-phenet: Towards real-time and high-precision lidar panoptic segmentation via clustering pseudo heatmap. In: CVPR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.338, + 0.786, + 0.379 + ], + "angle": 0, + "content": "28. Li, Z., Wang, W., Xie, E., Yu, Z., Anandkumar, A., Alvarez, J.M., Luo, P., Lu, T.: Panoptic segformer: Delving deeper into panoptic segmentation with transformers. In: CVPR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.379, + 0.786, + 0.42 + ], + "angle": 0, + "content": "29. Liang, F., Wu, B., Dai, X., Li, K., Zhao, Y., Zhang, H., Zhang, P., Vajda, P., Marculescu, D.: Open-vocabulary semantic segmentation with mask-adapted clip. In: CVPR (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.42, + 0.786, + 0.447 + ], + "angle": 0, + "content": "30. Lin, T.Y., Goyal, P., Girshick, R., He, K., Dollar, P.: Focal loss for dense object detection. In: ICCV (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.447, + 0.786, + 0.488 + ], + "angle": 0, + "content": "31. Liu, Q., Wen, Y., Han, J., Xu, C., Xu, H., Liang, X.: Open-world semantic segmentation via contrasting and clustering vision-language embedding. In: ECCV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.488, + 0.786, + 0.515 + ], + "angle": 0, + "content": "32. Liu, Z., Mao, H., Wu, C.Y., Feichtenhofer, C., Darrell, T., Xie, S.: A convnet for the 2020s. In: CVPR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.515, + 0.786, + 0.529 + ], + "angle": 0, + "content": "33. Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: ICLR (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.529, + 0.786, + 0.555 + ], + "angle": 0, + "content": "34. Ma, C., Yang, Y., Wang, Y., Zhang, Y., Xie, W.: Open-vocabulary semantic segmentation with frozen vision-language models. BMVC (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.555, + 0.786, + 0.582 + ], + "angle": 0, + "content": "35. 
Peng, S., Genova, K., Jiang, C., Tagliasacchi, A., Pollefeys, M., Funkhouser, T., et al.: Openscene: 3d scene understanding with open vocabularies. In: CVPR (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.582, + 0.786, + 0.609 + ], + "angle": 0, + "content": "36. Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: Deep learning on point sets for 3d classification and segmentation. In: CVPR (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.609, + 0.786, + 0.637 + ], + "angle": 0, + "content": "37. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. NeurIPS (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.637, + 0.786, + 0.677 + ], + "angle": 0, + "content": "38. Qin, J., Wu, J., Yan, P., Li, M., Yuxi, R., Xiao, X., Wang, Y., Wang, R., Wen, S., Pan, X., et al.: Freeseg: Unified, universal and open-vocabulary image segmentation. In: CVPR (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.677, + 0.786, + 0.718 + ], + "angle": 0, + "content": "39. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: ICML (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.718, + 0.786, + 0.746 + ], + "angle": 0, + "content": "40. Razani, R., Cheng, R., Li, E., Taghavi, E., Ren, Y., Bingbing, L.: Gp-s3net: Graph-based panoptic sparse semantic segmentation network. In: ICCV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.746, + 0.786, + 0.772 + ], + "angle": 0, + "content": "41. Rozenberszki, D., Litany, O., Dai, A.: Language-grounded indoor 3d semantic segmentation in the wild. In: ECCV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.772, + 0.786, + 0.813 + ], + "angle": 0, + "content": "42. Sirohi, K., Mohan, R., Buscher, D., Burgard, W., Valada, A.: Efficientlps: Efficient lidar panoptic segmentation. IEEE Transactions on Robotics 38(3), 1894-1914 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.813, + 0.786, + 0.841 + ], + "angle": 0, + "content": "43. Takmaz, A., Fedele, E., Sumner, R.W., Pollefeys, M., Tombari, F., Engelmann, F.: Openmask3d: Open-vocabulary 3d instance segmentation. In: NeuRIPS (2023)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.786, + 0.841 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.431, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D Open-Vocabulary Panoptic Segmentation" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.175 + ], + "angle": 0, + "content": "44. Tang, H., Liu, Z., Zhao, S., Lin, Y., Lin, J., Wang, H., Han, S.: Searching efficient 3d architectures with sparse point-voxel convolution. In: ECCV (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.175, + 0.787, + 0.203 + ], + "angle": 0, + "content": "45. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: NeurIPS (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.203, + 0.787, + 0.231 + ], + "angle": 0, + "content": "46. Wu, W., Fuxin, L., Shan, Q.: Pointconvformer: Revenge of the point-based convolution. 
In: CVPR (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.231, + 0.787, + 0.259 + ], + "angle": 0, + "content": "47. Xiao, Z., Zhang, W., Wang, T., Loy, C.C., Lin, D., Pang, J.: Position-guided point cloud panoptic segmentation transformer. arXiv preprint (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.259, + 0.787, + 0.3 + ], + "angle": 0, + "content": "48. Xu, J., Zhang, R., Dou, J., Zhu, Y., Sun, J., Pu, S.: Rpvnet: A deep and efficient range-point-voxel fusion network for lidar point cloud segmentation. In: ICCV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.3, + 0.787, + 0.328 + ], + "angle": 0, + "content": "49. Xu, J., De Mello, S., Liu, S., Byeon, W., Breuel, T., Kautz, J., Wang, X.: Groupvit: Semantic segmentation emerges from text supervision. In: CVPR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.328, + 0.787, + 0.356 + ], + "angle": 0, + "content": "50. Xu, J., Liu, S., Vahdat, A., Byeon, W., Wang, X., De Mello, S.: Open-vocabulary panoptic segmentation with text-to-image diffusion models. In: CVPR (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.356, + 0.787, + 0.397 + ], + "angle": 0, + "content": "51. Xu, M., Zhang, Z., Wei, F., Lin, Y., Cao, Y., Hu, H., Bai, X.: A simple baseline for open-vocabulary semantic segmentation with pre-trained vision-language model. In: ECCV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.397, + 0.787, + 0.425 + ], + "angle": 0, + "content": "52. Xu, S., Wan, R., Ye, M., Zou, X., Cao, T.: Sparse cross-scale attention network for efficient lidar panoptic segmentation. In: AAAI (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.425, + 0.787, + 0.452 + ], + "angle": 0, + "content": "53. Yang, J., Ding, R., Wang, Z., Qi, X.: Regionplc: Regional point-language contrastive learning for open-world 3d scene understanding. In: CVPR (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.452, + 0.787, + 0.481 + ], + "angle": 0, + "content": "54. Yu, Q., He, J., Deng, X., Shen, X., Chen, L.C.: Convolutions die hard: Open-vocabulary segmentation with single frozen convolutional clip. In: NeurIPS (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.481, + 0.787, + 0.508 + ], + "angle": 0, + "content": "55. Zhang, J., Dong, R., Ma, K.: Clip-fo3d: Learning free open-world 3d scene representations from 2d dense clip. In: ICCV (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.508, + 0.787, + 0.522 + ], + "angle": 0, + "content": "56. Zhou, C., Loy, C.C., Dai, B.: Extract free dense labels from clip. In: ECCV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.522, + 0.787, + 0.55 + ], + "angle": 0, + "content": "57. Zhou, Z., Lei, Y., Zhang, B., Liu, L., Liu, Y.: Zegclip: Towards adapting clip for zero-shot semantic segmentation. In: CVPR (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.55, + 0.787, + 0.577 + ], + "angle": 0, + "content": "58. Zhou, Z., Zhang, Y., Foroosh, H.: Panoptic-polarnet: Proposal-free lidar point cloud panoptic segmentation. In: CVPR (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.577, + 0.787, + 0.618 + ], + "angle": 0, + "content": "59. Zou, X., Dou, Z.Y., Yang, J., Gan, Z., Li, L., Li, C., Dai, X., Behl, H., Wang, J., Yuan, L., et al.: Generalized decoding for pixel, image, and language. 
In: CVPR (2023)" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.618 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/eb0bea0f-431f-4835-9237-239fd0d64e99_origin.pdf b/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/eb0bea0f-431f-4835-9237-239fd0d64e99_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a970cd77afb71ac8cc2cd37afc34ff714a124050 --- /dev/null +++ b/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/eb0bea0f-431f-4835-9237-239fd0d64e99_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9222969de34c5258fe38f7ed87a1a30560b0b0f058e2c334532146b2e4eb29ef +size 5039103 diff --git a/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/full.md b/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..d82a0665af6e1df19bbe8079c4f0ea8d2b22bd04 --- /dev/null +++ b/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/full.md @@ -0,0 +1,329 @@ +# 3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation + +Zihao Xiao $^{1*}$ , Longlong Jing $^{2}$ , Shangxuan Wu $^{2}$ , Alex Zihao Zhu $^{2}$ , Jingwei Ji $^{2}$ , Chiyu Max Jiang $^{2}$ , Wei-Chih Hung $^{2}$ , Thomas Funkhouser $^{3}$ , Weicheng Kuo $^{4}$ , Anelia Angelova $^{4}$ , Yin Zhou $^{2}$ , and Shiwei Sheng $^{2*}$ + +1 Johns Hopkins University, 2 Waymo, 3 Google Research, 4 Google DeepMind + +Abstract. 3D panoptic segmentation is a challenging perception task, especially in autonomous driving. It aims to predict both semantic and instance annotations for 3D points in a scene. Although prior 3D panoptic segmentation approaches have achieved great performance on closed-set benchmarks, generalizing these approaches to unseen things and unseen stuff categories remains an open problem. For unseen object categories, 2D open-vocabulary segmentation has achieved promising results that solely rely on frozen CLIP backbones and assembling multiple classification outputs. However, we find that simply extending these 2D models to 3D does not guarantee good performance due to poor per-mask classification quality, especially for novel stuff categories. In this paper, we propose the first method to tackle 3D open-vocabulary panoptic segmentation. Our model takes advantage of the fusion between learnable LiDAR features and dense frozen vision CLIP features, using a single classification head to make predictions for both base and novel classes. To further improve the classification performance on novel classes and leverage the CLIP model, we propose two novel loss functions: object-level distillation loss and voxel-level distillation loss. Our experiments on the nuScenes and SemanticKITTI datasets show that our method outperforms the strong baseline by a large margin. + +Keywords: Autonomous driving $\cdot$ 3D panoptic segmentation $\cdot$ Vision-language + +# 1 Introduction + +3D panoptic segmentation is a crucial task in computer vision with many real-world applications, most notably in autonomous driving. It combines 3D semantic and instance segmentation to produce per-point predictions for two different types of objects: things (e.g., car) and stuff (e.g., road). 
To date, there has been significant progress in 3D panoptic segmentation [27, 40, 42, 47, 52, 58]. Most recently, methods such as [47] produce panoptic segmentation predictions directly from point clouds by leveraging learned queries to represent objects and Transformer-based [45] architectures [2, 4] to perform the modeling. However, existing models only predict panoptic segmentation results for a closed set of objects. They fail to create predictions for the majority of unseen object categories in the scene, hindering the application of these algorithms to real-world scenarios, especially in autonomous driving. In this work, we focus on segmenting unseen things and unseen stuff objects in autonomous driving scenarios. We follow [10, 53] and develop models under the open-vocabulary setting: we divide the object categories into base (seen) categories and novel (unseen) categories, and evaluate models that are trained only on base categories.

Such open-world computer vision tasks [3] benefit from the recent advancements in vision-language (V-L) models [22, 39]. In 2D vision, there are many successful methods in open-vocabulary object detection [12, 15, 24] and segmentation [11, 50, 54]. These methods make predictions in a shared image-text embedding space, where predictions for unseen categories are produced by comparing the similarity of an object with the text embedding of the category. However, these methods are only possible due to the vast amounts of paired image-text data available, making it difficult to train similar models for 3D data.

Instead, researchers have continued to leverage the effectiveness of these 2D vision-language models for 3D with the help of pixel-point correspondences, running inference on 2D images and then aligning the results with the 3D features. These methods have achieved promising results on open-vocabulary semantic segmentation [10,35,53,55] and instance segmentation [10,43,53] individually. However, there are no methods that address the problem of 3D open-vocabulary panoptic segmentation, i.e., both open-vocabulary semantic segmentation and open-vocabulary instance segmentation at the same time. The challenge lies in how to handle segmentation for novel things and stuff objects simultaneously.

3D open-vocabulary panoptic segmentation is a challenging problem, due to both the significant domain gaps between the camera and LiDAR modalities and unsolved problems in open-vocabulary segmentation. Many existing open-vocabulary works rely on similarities between text embeddings of class names and pre-trained V-L features to obtain associations between predictions and classes [35,43,55]. However, while projecting 2D V-L features to 3D can account for a large part of the scene, there are often many points unaccounted for, due to unmatched pixel/point distributions and differing fields of view between sensors. Some 3D open-vocabulary works [10,53] apply contrastive learning to obtain a better association between language and points, but they require extra captioning models and do not address the difficulties of detecting novel stuff classes.

In this work, we aim to address these two issues with a novel architecture for 3D open-vocabulary panoptic segmentation. Building on existing 3D closed-set panoptic segmentation methods, we train a learned LiDAR feature encoder in parallel with a frozen, pre-trained camera CLIP model.
By fusing the 3D LiDAR features with the 2D CLIP features, our model is able to learn rich features throughout the entire 3D sensing volume, even if there are no camera features in certain regions. In addition, we apply a pair of novel distillation losses that allow the 3D encoder to learn both object-level and voxel-level features which live inside the CLIP feature space. This provides a learned module in 3D space which can directly be compared with text embeddings. These losses also provide useful training supervision for unknown parts of the scene where there would otherwise be no loss gradient.

With the proposed model and loss functions, our method significantly outperforms the strong baseline on multiple datasets. Our contributions are summarized as follows:

- We present the first approach for 3D open-vocabulary panoptic segmentation in autonomous driving.
- We propose two novel loss functions, an object-level distillation loss and a voxel-level distillation loss, to help segment novel things and novel stuff objects.
- We experimentally show that our proposed method significantly outperforms the strong baseline model on both the nuScenes and SemanticKITTI datasets.

# 2 Related Work

This work is closely related to 3D panoptic segmentation, 2D open-vocabulary segmentation, and 3D open-vocabulary segmentation.

3D panoptic segmentation. The goal of 3D panoptic segmentation is to group 3D points according to their semantics and identities. This is a challenging task and relies on a good representation of the 3D data [1,20,36,37,44,46,48]. Most panoptic segmentation models have separate branches for instance segmentation and semantic segmentation [19,27,44,58]. Following DETR [5], the recently proposed P3Former [47] uses learnable queries and a transformer architecture to obtain state-of-the-art performance on multiple panoptic segmentation benchmarks. Although these closed-set methods achieve impressive results, they cannot predict the labels and masks for novel classes.

2D open-vocabulary segmentation. 2D open-vocabulary segmentation aims to group image pixels according to their semantics or identities for base (seen) or novel (unseen) categories. The prediction on novel categories is usually done by leveraging large V-L models [22,39]. Many works focus on open-vocabulary semantic segmentation [14,17,26,29,31,34,49,51,56,57,59]. Some work has also explored open-vocabulary panoptic segmentation [11,38,50]. Recently, FC-CLIP [54] proposes a single-stage framework based on a frozen convolutional CLIP backbone [21,32,39] for 2D open-vocabulary panoptic segmentation that achieves state-of-the-art performance. However, due to the camera-LiDAR domain gap, we show that simply extending it to 3D leads to poor performance.

3D open-vocabulary segmentation. 3D open-vocabulary segmentation is less explored due to the lack of 3D point-to-text associations. One common practice is to utilize V-L models and use 2D-3D pairings to obtain rich, structured information in 3D [7,8,10,16,18,35,41,43,53,55]. Notably, CLIP2Scene [7] proposes a semantic-driven cross-modal contrastive learning framework. PLA [10] leverages images as a bridge and builds hierarchical 3D-caption pairs for contrastive learning. OpenScene [35] extracts per-pixel CLIP features using a pre-trained V-L model [14,26] and then derives dense 3D features by projecting 3D points onto

![](images/c2ed55e4ce883f6a1e5a8a4609c300937e9486cfdde370a2bb9dc79354a37c65.jpg)
Fig. 1: Overview of our method.
Given a LiDAR point cloud and the corresponding camera images, LiDAR features are extracted with a learnable LiDAR encoder, while vision features are extracted by a frozen CLIP vision model. The extracted LiDAR features and the frozen CLIP vision features are then fused and fed to a query-based transformer model to predict instance masks and semantic classes.

image planes. One concurrent work, RegionPLC [53], utilizes regional visual prompts to create dense captions and performs point-discriminative contrastive learning, which is used for semantic segmentation or instance segmentation individually. In contrast, our work does not rely on any captioning model or extra contrastive learning, but only depends on pre-trained CLIP features. Our model also handles semantic segmentation and instance segmentation simultaneously.

# 3 Method

This section is organized as follows. First, we define the 3D open-vocabulary panoptic segmentation task. Then we provide detailed descriptions of the model architecture as well as the proposed loss functions. The overview of our method is presented in Fig. 1, and the two proposed loss functions are illustrated in Fig. 2 (a) and Fig. 2 (b).

# 3.1 Problem Definition

In 3D panoptic segmentation, the goal is to annotate every point in a point cloud. For stuff classes (e.g., road, vegetation), a category label is assigned according to its semantics. For things classes (e.g., cars, pedestrians), an instance label is assigned to an object in addition to its semantic label.

In open-vocabulary panoptic segmentation, the models are trained on $C_B$ base (seen) categories. At test time, besides these $C_B$ base categories, the data will contain $C_N$ novel (unseen) categories. Following the settings of prior work [15, 24, 54], we assume the names of the novel categories are available during inference; the novel categories are not present in the training data, and their names are not known at training time. Note that we do not apply any prompt engineering, as this is not the focus of this paper. We follow OpenScene [35] to obtain the CLIP text embedding for each category.

# 3.2 3D Open-Vocabulary Panoptic Segmentation

Most of the previous 3D open-vocabulary works only address semantic segmentation [7,8,10,16,18,35,41,53,55] or instance segmentation [43,53] separately, and there is no existing work for the 3D open-vocabulary panoptic segmentation task, which handles novel things and novel stuff objects simultaneously. A natural idea would be to extend the 2D open-vocabulary segmentation methods to build the 3D counterpart. We start with P3Former [47], a state-of-the-art transformer-based 3D closed-set panoptic segmentation model, and add the essential components to support open-vocabulary capability by following FC-CLIP [54], a 2D open-vocabulary segmentation model that achieves state-of-the-art performance on multiple datasets. However, we found that this simple extension leads to poor performance in our experiments, and in this work we propose several new features to improve the performance of our model. More implementation details for this baseline can be found in the supplementary material.

In order to improve the open-vocabulary capability of our model, we propose significant changes to the P3Former architecture, as well as two new loss functions. The architecture of our method is shown in Fig. 1 and mainly consists of multimodal feature fusion, a segmentation head, and input text embeddings for open-vocabulary classification.
Multimodal feature fusion. The core idea of many recent 2D open-vocabulary works is to leverage the features of large-scale vision-language models [22, 39]. These methods [54] mainly rely on frozen CLIP features and use a transformer model to perform the 2D panoptic segmentation task. However, this is not optimal for 3D tasks since many points do not have corresponding valid camera pixels, leading to invalid features that prevent meaningful predictions. To fully exploit the power of the CLIP vision features and learn complementary features from both the camera CLIP features and the LiDAR features, we generate predictions from the fusion of CLIP features extracted by a frozen CLIP model and learned LiDAR features from a LiDAR encoder.

As shown in Fig. 1, there are three major components in the multimodal feature fusion: a LiDAR encoder, a vision CLIP encoder, and voxel-level feature fusion. The LiDAR encoder is a model which takes an unordered set of points as input and extracts per-point features. We apply voxelization to the features from the LiDAR encoder, producing output features $F_{lidar} \in \mathbb{R}^{V \times D_{lidar}}$, where $V$ is the number of voxels and $D_{lidar}$ is the dimension of the learned LiDAR feature. The vision CLIP encoder is a pre-trained V-L segmentation model [14] which extracts pixel-wise CLIP features from each camera image. Within each voxel, every LiDAR point is projected into the camera image plane based on the intrinsic and extrinsic calibration parameters to index into the corresponding vision CLIP features; the vision CLIP features of all the points belonging to each voxel are then averaged to represent that voxel. Zero padding is used for points which do not have any valid corresponding camera pixels. The voxel CLIP features will be referred to as $F_{vclip} \in \mathbb{R}^{V \times D_{emb}}$, where $V$ is the number of voxels after voxelization and $D_{emb}$ is the dimension of the CLIP features. Finally, the learned per-voxel LiDAR features and the frozen per-voxel vision CLIP features are concatenated together to be used as input to the transformer decoder in the segmentation head. This feature fusion enables our model to learn complementary information from both the LiDAR and CLIP features, allowing us to fine-tune our backbone for each dataset's specific data distribution.
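To make the fusion step concrete, the following is a minimal sketch of the per-voxel pooling and concatenation described above. It is written in PyTorch; the helper itself and all tensor names and shapes are illustrative assumptions, not the authors' implementation.

```python
# Minimal sketch of the per-voxel LiDAR/CLIP feature fusion described above.
# Tensor names and shapes are illustrative assumptions, not the authors' code.
import torch

def fuse_voxel_features(lidar_feat, point_clip_feat, point_valid,
                        point_to_voxel, num_voxels):
    """
    lidar_feat:      (V, D_lidar) learned per-voxel LiDAR features F_lidar.
    point_clip_feat: (P, D_emb)   per-point CLIP features gathered from image
                                  pixels (zeros where no pixel corresponds).
    point_valid:     (P,) float   1.0 if the point projects into a camera.
    point_to_voxel:  (P,) long    voxel index of each point.
    """
    d_emb = point_clip_feat.shape[1]
    # Sum the CLIP features of the valid points falling into each voxel ...
    clip_sum = torch.zeros(num_voxels, d_emb).index_add_(
        0, point_to_voxel, point_clip_feat * point_valid[:, None])
    # ... and average them; voxels with no valid point keep all-zero features,
    # which matches the zero padding described in the text.
    counts = torch.zeros(num_voxels).index_add_(0, point_to_voxel, point_valid)
    voxel_clip = clip_sum / counts.clamp(min=1.0)[:, None]    # F_vclip
    # Concatenate learned LiDAR features with frozen voxel CLIP features.
    return torch.cat([lidar_feat, voxel_clip], dim=-1)        # (V, D_lidar+D_emb)
```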
Segmentation head. The segmentation head is a transformer [45] model that takes the LiDAR-vision fused feature as input to produce panoptic segmentation results. Prior works, including existing 2D open-vocabulary works such as FC-CLIP [54], typically use learnable queries $q$ to represent each instance or thing; they contain a mask prediction head $f_{mask}$ to produce the corresponding mask for each individual object and a classification head $f_{cls}$ to predict the per-mask class score for each known class. However, as a result, they also need to rely on another classifier to handle novel categories. Our goal is to use a single model to handle the prediction for both base and novel categories. Thus, we predict a class embedding instead of a class score for each mask. During training, the model learns to regress an analog of the CLIP vision embedding for each mask, and the category prediction can be obtained by calculating its similarity with the CLIP text embeddings of text queries during the inference stage. The class embedding predicted by $f_{cls}$ is defined as:

$$
v_{q} = f_{cls}(q) \in \mathbb{R}^{D_{emb}}, \tag{1}
$$

where $v_{q}$ is in the CLIP embedding space. The predicted class logits are then computed from the cosine similarity between the predicted class embedding and the text embedding of every category name from the evaluation set, obtained with a frozen CLIP model. The classification logits are defined as:

$$
s_{v_{q}} = \frac{1}{T} \left[ \cos\left(v_{q}, t_{1}\right), \cos\left(v_{q}, t_{2}\right), \dots, \cos\left(v_{q}, t_{C}\right) \right], \tag{2}
$$

where $t_{i} \in \mathbb{R}^{D_{emb}}$, $i \in \{1, 2, \dots, C\}$ are the text embeddings, $C$ is the number of categories ($C_B$ in training and $C_B + C_N$ in testing), and $T$ is a learnable temperature term that controls the concentration of the distribution.

Query assignment. A common practice [9, 54] for transformer-based panoptic segmentation models is to utilize a single set of queries to make predictions for both things and stuff classes jointly. In contrast, P3Former uses one query set to represent things classes after bipartite matching and one fixed query set for stuff classes. We have found that this separation of things queries and stuff queries makes our model converge faster and improves overall performance, and a similar pattern has been observed in other tasks [28]. However, the fixed set of queries for stuff classes is not applicable to the open-vocabulary setting due to the unknown number of novel stuff classes. To take advantage of the benefits of separating things queries and stuff queries, we propose to predict the base stuff classes with a fixed set of queries and utilize a set of learnable queries to target base things classes and all novel (things and stuff) classes. More details of the query assignment can be found in the supplementary materials.

![](images/fe8337038a0e671baf59695920bf28d56480a51f0114959972774dceeffc93bd.jpg)
(a) Object-Level Distillation Loss.

![](images/1030de4f7afa5dff5320a7deefbb48e8a91f284f5fa6543f0a3dbeb766d7bd9f.jpg)
(b) Voxel-Level Distillation Loss.
Fig. 2: (a) The proposed object-level distillation loss, and (b) the proposed voxel-level distillation loss.

# 3.3 Loss Function

Closed-set panoptic segmentation models [47] are typically optimized with objective functions consisting of a classification loss $L_{cls}$ and a mask prediction loss $L_{mask}$. We follow P3Former [47] for these two losses: the classification loss $L_{cls}$ optimizes the focal loss [30] between the class predictions and the category labels, while the mask loss $L_{mask}$ optimizes the voxel-query classification loss. Besides these two standard loss functions, we propose two simple yet effective losses to apply distillation from the CLIP model at different levels.

Object-level distillation loss. Similar to previous methods [50, 54], we use the cosine similarity between predicted class embeddings and class text CLIP embeddings to produce classification scores. However, the classification loss applied to Eq. (2) only enforces similarity to known classes. In this work, we make the assumption that the frozen CLIP features are discriminative with respect to open-vocabulary classes and have good out-of-distribution generalization. We propose an additional training loss which forces our predicted object-level class embeddings to be similar to the CLIP embeddings within their corresponding masks after matching.
Similar to [54], we utilize voxel vision CLIP features to get an embedding for each query $q$ by mask pooling the vision CLIP features:

$$
w_{q} = \frac{1}{\left| M_{q} \right|} \sum_{p} \mathbb{1}(p \in M_{q}) F_{vclip}(p), \tag{3}
$$

where $M_{q}$ is the set of points $p$ belonging to the mask for query $q$. Our object-level distillation loss is then defined as:

$$
L_{O} = \frac{1}{\left| Q_{\text{matched}} \right|} \sum_{q \in Q_{\text{matched}}} \left( 1 - \cos\left(v_{q}, w_{q}\right) \right), \tag{4}
$$

where $Q_{\text{matched}}$ is the set of queries matched with ground-truth objects during training, $v$ is the set of predicted class embeddings, and $w$ is the set of mask-pooled CLIP embeddings. This loss forces the model to directly distill object-level camera CLIP features and improves model performance for novel things classes. We also experimented with applying $L_{O}$ to all predicted masks, but we found that this slightly reduced model performance, likely due to the presence of masks that do not correspond to any objects in the scene.

Voxel-level distillation loss. While the object-level distillation loss distills the per-object features from the CLIP model, it does not provide any supervision for the mask prediction head, which would otherwise only receive supervision for known classes. We found this particularly problematic for unknown stuff classes, which tend to be more spread out and cover larger and more diverse parts of the scene. In addition, the object-level loss is only applied to queries with relatively accurate mask predictions, limiting where useful CLIP features can be learned. To target these issues, we propose the voxel-level distillation loss to explicitly learn voxel-level CLIP features; it does not depend on any labels and can be applied to all queries. In particular, the voxel-level distillation loss first reconstructs per-voxel features:

$$
F_{rec} = M_{Q}^{T} F_{Qemb}, \tag{5}
$$

where $Q$ is the number of queries, $F_{Qemb} \in \mathbb{R}^{Q \times D_{emb}}$ is the predicted embedding for all queries, and $M_{Q} \in \mathbb{R}^{Q \times V}$ contains the predicted per-voxel mask probabilities for all queries. The reconstructed features can be regarded as a weighted sum over all queries for each voxel. We supervise these features with the voxel CLIP features:

$$
L_{V} = L_{1}\left(F_{rec}, F_{vclip}\right). \tag{6}
$$

Unlike the object-level distillation loss, which is only applied to queries with matched ground truth, this loss is applied to all predicted mask scores and queries. In our experiments, we found that this loss significantly improves performance on novel stuff categories in particular, likely because it does not require exact matches with the ground truth, which can be difficult for large stuff classes. However, this loss is still susceptible to noisy or low-quality mask scores, and we found that larger weights for this loss can disrupt training.

To summarize, $L_{O}$ removes the need for the ensemble of classifiers used in [14, 15, 24, 50, 54] and enables open-vocabulary ability with one trainable classifier. $L_{V}$ uses a scene-level representation formed by the embeddings of all queries, while previous methods only consider object-level representations. Combining $L_{O}$ with $L_{V}$ enables segmenting novel things and novel stuff objects simultaneously.
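Before assembling the final objective, a minimal sketch of how Eqs. (3)-(6) could be implemented is given below. The tensor names and the two helper functions are illustrative assumptions, not the released training code.

```python
# Minimal sketch of the two distillation losses, Eqs. (3)-(6). Variable names
# are illustrative assumptions; the actual training code may differ in detail.
import torch
import torch.nn.functional as F

def object_level_distillation(v_q, masks, f_vclip, matched):
    """
    v_q:     (Q, D_emb) predicted class embeddings, Eq. (1).
    masks:   (Q, V) float binary masks over voxels for each query.
    f_vclip: (V, D_emb) frozen per-voxel vision CLIP features.
    matched: (Q,) bool, True for queries matched to ground-truth objects.
    """
    # Eq. (3): mask-pool the voxel CLIP features into one embedding per query.
    w_q = (masks @ f_vclip) / masks.sum(dim=1, keepdim=True).clamp(min=1.0)
    # Eq. (4): 1 - cosine similarity, averaged over matched queries only
    # (assumes at least one query is matched).
    cos = F.cosine_similarity(v_q[matched], w_q[matched], dim=-1)
    return (1.0 - cos).mean()

def voxel_level_distillation(mask_probs, q_emb, f_vclip):
    """
    mask_probs: (Q, V) predicted per-voxel mask probabilities M_Q.
    q_emb:      (Q, D_emb) predicted query embeddings F_Qemb.
    """
    # Eq. (5): reconstruct per-voxel features as a query-weighted sum,
    # then Eq. (6): L1 distance to the frozen voxel CLIP features.
    f_rec = mask_probs.transpose(0, 1) @ q_emb                # (V, D_emb)
    return F.l1_loss(f_rec, f_vclip)
```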
Our final objective function can be written as:

$$
L = w_{\alpha} L_{cls} + w_{\beta} L_{mask} + w_{\lambda} L_{O} + w_{\gamma} L_{V}, \tag{7}
$$

where $w_{\alpha}$, $w_{\beta}$, $w_{\lambda}$, and $w_{\gamma}$ are the weights of the corresponding loss terms.

# 3.4 Implementation Details

For the LiDAR encoder and segmentation head, we follow the implementation of the state-of-the-art closed-set 3D panoptic segmentation method P3Former [47]. For the vision CLIP encoder, we use OpenSeg [14], due to its remarkable performance on the recent open-vocabulary 3D semantic segmentation task [35]. For the text CLIP encoder, we use CLIP [39] with a ViT-L/14 [45] backbone, following other state-of-the-art open-vocabulary works [35].

# 4 Experiments

# 4.1 Experimental Setting

Following the state-of-the-art closed-set 3D panoptic segmentation work [27,40, 42,47,52,58], we conduct experiments and ablation studies on the nuScenes [4] and SemanticKITTI [2,13] datasets.

nuScenes. The nuScenes dataset [4] is a public benchmark for autonomous driving. It consists of 1000 run segments and is further divided into prescribed train/val/test splits. We use all key frames with panoptic labels in the training set (28,130 frames) to train the model. Following the most recent state-of-the-art model P3Former [47], we evaluate the models on the validation set (6,019 frames). There are 16 semantic classes, including 10 things classes and 6 stuff classes.

SemanticKITTI. SemanticKITTI [2, 13] is the first large dataset for LiDAR panoptic segmentation in autonomous driving. We conduct experiments on the training and validation sets, where panoptic segmentation labels are available. 3D open-vocabulary methods often require point-pixel pairing. In the SemanticKITTI dataset, however, the ego-vehicle is only equipped with frontal cameras. Thus, we filter out the points that are not visible in the camera view based on the provided camera parameters, for both training and evaluation. There are 19 semantic classes, including 8 things classes and 11 stuff classes.

Data split. Neither the nuScenes nor the SemanticKITTI dataset provides official base and novel class splits. Following the state-of-the-art 3D open-vocabulary segmentation work [6,10,53], we randomly split the classes into base and novel, while keeping the ratio between base and novel classes around $3:1$. For nuScenes, the base and novel splits contain 12 and 4 classes, respectively; this setting will be referred to as B12/N4. For SemanticKITTI, the base and novel splits contain 14 and 5 classes; this setting will be referred to as B14/N5. We use the same splits in the main comparison with prior methods, and provide the results of more variations in the ablation studies and supplementary materials.

Training details. We follow most of the architecture configurations in the official P3Former [47] implementation. We set $w_{\alpha} = 1$, $w_{\beta} = 1$, $w_{\lambda} = 1$, $w_{\gamma} = 0.1$ for both datasets. We use the AdamW [23, 33] optimizer with a weight decay of 0.01. We set the initial learning rate to 0.0008 with a multi-step decay schedule. The models are trained for 40 epochs, and we use the checkpoint of the last epoch for evaluation. To avoid ambiguous class names and better utilize the CLIP text embeddings, we follow [25, 35, 54] and apply multi-label mapping for the text queries.
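To illustrate how Eq. (2) interacts with this multi-label mapping, here is a small hypothetical sketch; `label_to_class` and both helper names are assumptions, and the max-over-labels reduction it performs is described in the next paragraph.

```python
# Hypothetical sketch of the open-vocabulary classification of Eq. (2) with
# multi-label mapping; label lists and helper names are assumptions.
import torch
import torch.nn.functional as F

def label_logits(v_q, text_emb, temperature):
    """
    v_q:      (Q, D_emb) predicted class embeddings.
    text_emb: (L, D_emb) frozen CLIP text embeddings, one per label; a class
              may own several labels (e.g. 'vegetation' -> 'tree', 'bush').
    """
    # Cosine similarity between every query and every label, scaled by 1/T.
    sims = F.cosine_similarity(v_q[:, None, :], text_emb[None, :, :], dim=-1)
    return sims / temperature                                 # (Q, L)

def class_scores(logits, label_to_class, num_classes):
    """Per-class score = maximum over that class's labels (see next paragraph)."""
    scores = torch.full((logits.shape[0], num_classes), float('-inf'))
    for label_idx, class_idx in enumerate(label_to_class):
        scores[:, class_idx] = torch.maximum(scores[:, class_idx],
                                             logits[:, label_idx])
    return scores                                             # (Q, C)
```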
During inference, if there are multiple labels for one class, we derive the class score by taking the maximum score among these labels.

Evaluation metrics. We use panoptic quality ($PQ$) as the major evaluation metric for the panoptic segmentation task. $PQ$ is formulated as:

$$
\mathrm{PQ} = \underbrace{\frac{\sum_{TP} \mathrm{IoU}}{|TP|}}_{\mathrm{SQ}} \times \underbrace{\frac{|TP|}{|TP| + \frac{1}{2}|FP| + \frac{1}{2}|FN|}}_{\mathrm{RQ}}. \tag{8}
$$

$PQ$ is the product of segmentation quality ($SQ$) and recognition quality ($RQ$). We report all three metrics ($PQ$, $RQ$, $SQ$) for all classes. We also report $PQ$, $RQ$, and $SQ$ for novel things objects and novel stuff objects separately. In particular, $PQ_{N}^{Th}$ means $PQ$ for novel things classes and $PQ_{N}^{St}$ stands for $PQ$ for novel stuff classes. We also report the mean Intersection over Union (mIoU) for all classes to measure semantic segmentation quality.

![](images/46ccebcc6142168e389b257a212a4cc27967f146ad85411e4e19968399726.jpg)
PFC

![](images/03a482f0bf7d8ecb5f721997e9f60b65493a6d973e51d6094b58fba088eda3d0.jpg)
Ours

![](images/b5c327dfdf5140a46ce816482806940a69ec57f903f122fe338734a3ceb83214.jpg)
PFC

![](images/f49229697b6a698e03b4b3e1da595b07cbdfaa5efe782dd2c74ed82ad758bd8a.jpg)
Ours

Fig. 3: Open-vocabulary panoptic segmentation results from PFC and our method on nuScenes. PFC predicts inaccurate categories and masks for the novel pedestrian (red), bus (yellow), and vegetation (green), while ours makes correct predictions.

# 4.2 P3Former-FC-CLIP Baseline

As a baseline for novel-class panoptic segmentation, we construct a model from a fusion of P3Former [47] and FC-CLIP [54]. This baseline will be called P3Former-FC-CLIP (PFC). The baseline model takes the frozen voxel vision CLIP features as input, and the final prediction is obtained by geometric ensembling [14,15,24, 50,54] of the results from the classification head $f_{cls}$ and another frozen classifier based on the similarity between the mask-pooled class embedding $w_{q}$ and the CLIP text embeddings. Following FC-CLIP [54], the same set of learnable queries is used to represent both things and stuff classes. In summary, this baseline provides a comparison against our proposed method without the multimodal feature fusion module, the unified segmentation head, and the distillation losses. More information on this baseline can be found in the supplementary material.

# 4.3 Main Results

Since there are no existing methods for the 3D open-vocabulary panoptic segmentation task, we mainly compare with three methods to demonstrate the capability of our method: (1) the strong open-vocabulary baseline method PFC, to fairly demonstrate the strength of our method, (2) the closed-set state-of-the-art 3D panoptic segmentation method P3Former, to understand the headroom of our method, and (3) the open-set, zero-shot state-of-the-art method for 3D semantic segmentation, OpenScene [35]. Comparisons on the nuScenes and SemanticKITTI datasets are shown in Tab. 1 and Tab. 3.

Results on nuScenes dataset. Table 1 shows the quantitative comparison on the validation set of the nuScenes dataset. Our method significantly outperforms

Table 1: Quantitative results of panoptic segmentation on nuScenes. We compare the performance of open-vocabulary and fully supervised models. All open-vocabulary models share the same randomly picked base/novel split: B12/N4.
The novel things classes are bus, pedestrian and motorcycle. The novel stuff class is vegetation. + +
| Model | Type | Supervision | $PQ$ | $PQ_{N}^{Th}$ | $PQ_{N}^{St}$ | $RQ$ | $RQ_{N}^{Th}$ | $RQ_{N}^{St}$ | $SQ$ | $SQ_{N}^{Th}$ | $SQ_{N}^{St}$ | mIoU |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| P3Former [47] | closed-set | full | 75.9 | 85.1 | 82.9 | 84.7 | 89.9 | 95.9 | 89.8 | 94.7 | 86.5 | 76.8 |
| OpenScene [35] | open-voc | zero-shot | - | - | - | - | - | - | - | - | - | 42.1 |
| PFC | open-voc | partial | 54.8 | 37.3 | 0.5 | 63.6 | 42.1 | 0.8 | 84.2 | 89.3 | 60.4 | 55.5 |
| Ours | open-voc | partial | 62.0 | 49.6 | 35.2 | 70.9 | 55.6 | 46.0 | 87.0 | 89.1 | 76.7 | 60.1 |
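For reference, the $PQ$, $SQ$, and $RQ$ columns in these tables follow the decomposition of Eq. (8). Below is a minimal per-class sketch of that decomposition, assuming the matching step has already produced the IoUs of true-positive pairs and the FP/FN counts:

```python
# Minimal per-class sketch of Eq. (8); the IoUs of matched (TP) segment pairs
# and the FP/FN counts are assumed to be precomputed by the matching step.
def panoptic_quality(tp_ious, num_fp, num_fn):
    tp = len(tp_ious)
    if tp == 0:
        return 0.0, 0.0, 0.0
    sq = sum(tp_ious) / tp                          # segmentation quality
    rq = tp / (tp + 0.5 * num_fp + 0.5 * num_fn)    # recognition quality
    return sq * rq, sq, rq                          # PQ = SQ x RQ

# Example: three matches with IoUs 0.9, 0.8, 0.7, plus one FP and one FN:
# panoptic_quality([0.9, 0.8, 0.7], 1, 1) == (0.6, 0.8, 0.75)
```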
Table 2: Performance for base classes on nuScenes. We report the performance on base classes for the models in Tab. 1. A gap still exists between open- and closed-set methods for base classes. We show that this is due to the lack of supervision over the whole scene, as P3Former achieves similar performance when trained only on base categories.
| Model | Supervision | Training Data | $PQ_{B}^{Th}$ | $RQ_{B}^{Th}$ | $SQ_{B}^{Th}$ | $PQ_{B}^{St}$ | $RQ_{B}^{St}$ | $SQ_{B}^{St}$ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| P3Former [47] | full | base+novel | 73.4 | 80.5 | 90.9 | 73.9 | 85.3 | 85.9 |
| P3Former [47] | partial | base | 65.2 | 71.3 | 88.0 | 64.2 | 77.4 | 81.8 |
| PFC | partial | base | 65.6 | 73.3 | 89.0 | 61.0 | 75.4 | 83.7 |
| Ours | partial | base | 66.7 | 73.7 | 89.8 | 69.2 | 82.1 | 83.7 |
the strong baseline PFC across all metrics. PFC works relatively well for the novel things classes, but its performance on the novel stuff class collapses. This is likely because stuff classes tend to cover large parts of the scene, leading to diverse per-voxel CLIP features which may not be good representatives of their respective classes. A qualitative comparison is provided in Fig. 3.

To further understand the headroom of our method, we also compare our model with the closed-set P3Former. Note that the comparison here is deliberately unfair since the supervision signals are different. Compared with the closed-set P3Former, our segmentation quality ($SQ$) is good, while there is a large gap in mask classification quality ($RQ$). The gap is largely due to regressions on the novel classes, where precise supervision is not available for open-vocabulary models. For base classes, as shown in Tab. 2, the gap is relatively small except for a drop in $RQ_{B}^{Th}$. We attribute this to the closed-set P3Former seeing ground-truth supervision for the entire scene, while open-set methods do not receive supervision in the 'unknown class' regions. In fact, when P3Former is only trained on base categories, its performance is worse than that of our proposed method. Besides the comparison with the closed-set method, we also compare with the zero-shot state-of-the-art method OpenScene [35], which does not use any labels for training. In this comparison, our model significantly outperforms OpenScene in the mIoU metric for semantic segmentation. Note that this comparison is not entirely fair, as our method is trained with partial labels.

Table 3: Quantitative results of panoptic segmentation on SemanticKITTI. We compare the performance of different models. All open-vocabulary models share the same randomly picked base/novel split: B14/N5. The novel things classes are bicycle and truck. The novel stuff classes are sidewalk, building and trunk.
| Model | Type | Supervision | $PQ$ | $PQ_{N}^{Th}$ | $PQ_{N}^{St}$ | $RQ$ | $RQ_{N}^{Th}$ | $RQ_{N}^{St}$ | $SQ$ | $SQ_{N}^{Th}$ | $SQ_{N}^{St}$ | mIoU |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| P3Former [47] | closed-set | full | 62.1 | 65.9 | 74.2 | 71.3 | 74.8 | 86.8 | 77.1 | 88.3 | 83.9 | 61.6 |
| PFC | open-voc | partial | 33.7 | 12.0 | 0.4 | 40.1 | 15.0 | 0.6 | 67.6 | 81.1 | 47.3 | 33.4 |
| Ours | open-voc | partial | 42.2 | 13.1 | 17.8 | 50.4 | 16.2 | 26.7 | 73.0 | 84.0 | 67.2 | 44.6 |
Table 4: Impact of each component. We evaluate the impact of each component using the base/novel split in Tab. 1. We observe that each component provides improvements over the PFC baseline. Notably, $L_{V}$ brings the biggest improvement.
| QA | Fusion | $L_{O}$ | $L_{V}$ | $PQ$ | $PQ_{N}^{Th}$ | $PQ_{N}^{St}$ | $RQ$ | $RQ_{N}^{Th}$ | $RQ_{N}^{St}$ | $SQ$ | $SQ_{N}^{Th}$ | $SQ_{N}^{St}$ | mIoU |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
|  |  |  |  | 54.8 | 37.3 | 0.5 | 63.6 | 42.1 | 0.8 | 84.2 | 89.3 | 60.4 | 55.5 |
| ✓ |  |  |  | 55.5 | 35.7 | 0.4 | 64.0 | 40.8 | 0.7 | 84.3 | 87.4 | 56.5 | 56.6 |
| ✓ | ✓ |  |  | 56.4 | 38.1 | 0.4 | 65.0 | 43.5 | 0.6 | 84.6 | 87.4 | 61.3 | 56.4 |
| ✓ | ✓ | ✓ |  | 56.3 | 43.8 | 0.2 | 64.8 | 49.2 | 0.3 | 85.1 | 88.9 | 64.0 | 54.0 |
| ✓ | ✓ | ✓ | ✓ | 62.0 | 49.6 | 35.2 | 70.9 | 55.6 | 46.0 | 87.0 | 89.1 | 76.7 | 60.1 |
Instead, the comparison is useful to understand the gap between the two types of open-vocabulary methods. The concurrent work RegionPLC [53] also reports open-vocabulary results for the semantic segmentation task on the nuScenes dataset. However, we cannot directly compare with this method since it removes one class (other-flat) and does not provide its base/novel split.

Results on SemanticKITTI dataset. To demonstrate the generalization ability of our method across different datasets, we report the results on the SemanticKITTI dataset in Tab. 3. Overall, we observe similar patterns as on the nuScenes dataset. The baseline achieves relatively poor overall performance and struggles with the novel stuff classes. Using our architecture and loss functions, our model significantly outperforms PFC on $PQ$, with the largest margin for novel stuff classes. Note that the gap between the open-vocabulary methods (ours and PFC) and the closed-set method is larger on SemanticKITTI, likely due to the smaller dataset limiting performance.

# 4.4 Ablation Studies and Analysis

To better understand the effectiveness of each component, we conduct ablation studies for each design choice and loss function on the nuScenes dataset. These results are shown in Tab. 4. We conduct five sets of experiments, starting with the PFC baseline and building upon it four ablations with different combinations of components.

Table 5: Performance on a different split. We compare performance on a split with 5 novel classes (B11/N5). The novel things classes are bicycle, car and construction vehicle. The novel stuff classes are terrain and man-made. Our method consistently outperforms the PFC baseline across all metrics by a large margin.
| Model | Type | Supervision | $PQ$ | $PQ_{N}^{Th}$ | $PQ_{N}^{St}$ | $RQ$ | $RQ_{N}^{Th}$ | $RQ_{N}^{St}$ | $SQ$ | $SQ_{N}^{Th}$ | $SQ_{N}^{St}$ | mIoU |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| P3Former [47] | closed-set | full | 75.8 | 70.5 | 71.7 | 83.8 | 76.4 | 85.5 | 90.1 | 91.6 | 83.6 | 75.0 |
| PFC | open-voc | partial | 43.9 | 27.7 | 0.6 | 51.7 | 33.2 | 1.0 | 80.2 | 82.4 | 62.7 | 45.2 |
| Ours | open-voc | partial | 52.8 | 56.0 | 16.4 | 60.5 | 61.8 | 22.6 | 84.9 | 89.7 | 68.7 | 49.9 |
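The ablation discussion below repeatedly refers to mask classification through text embeddings. As a minimal sketch (again under our own naming assumptions, with `clip_text_embed` standing in for the frozen CLIP text tower, which we do not implement here), the cosine-similarity classification of Eq. (2) can be written as:

```python
import torch
import torch.nn.functional as F

def open_vocab_logits(v_q: torch.Tensor, text_emb: torch.Tensor,
                      temperature: float) -> torch.Tensor:
    """Eq. (2): classification logits as temperature-scaled cosine similarity
    between a predicted class embedding v_q (D_emb,) and the CLIP text
    embeddings of the C category names, text_emb (C, D_emb)."""
    v = F.normalize(v_q, dim=-1)
    t = F.normalize(text_emb, dim=-1)
    return (t @ v) / temperature  # (C,) cosine similarities scaled by 1/T

# At test time the category list simply grows to include the novel class names
# (or any free-form text, as in the exploration of Fig. 4), e.g.:
#   logits = open_vocab_logits(v_q, clip_text_embed(base_names + novel_names), T)
#   pred_class = logits.argmax()
```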
+ +Impact of query assignment. Starting from the PFC baseline model, we add our proposed fixed query assignment for stuff categories. As shown in the second row of Tab. 4, query assignment improves the overall $PQ$ by 0.7. The performance on the novel classes drops slightly, but the improvement on the base classes outweighs this in the overall $PQ$. + +Impact of feature fusion. The third row of Tab. 4 shows the impact of feature fusion. Without feature fusion, our model already achieves 55.5 $PQ$, demonstrating the power of the CLIP vision features. Adding feature fusion to the model input improves the overall $PQ$ by 0.9. While the overall gain is modest, the improvement on the novel things classes is the most significant, demonstrating that the learned LiDAR features and the CLIP vision features are indeed complementary for this task. + +Impact of object-level distillation loss. The fourth row of Tab. 4 shows the impact of the proposed object-level distillation loss. Note that for models with the object-level distillation loss, we remove the frozen class classification head and the ensemble in the PFC baseline, consolidating to a single class embedding head. Although $RQ_N^{St}$ dips slightly by 0.3 on the novel stuff classes, this loss significantly improves $RQ_N^{Th}$ on the novel things classes by 5.7. + +Impact of voxel-level distillation loss. We study the impact of the voxel-level distillation loss to see whether it can further improve performance on top of all of our designs. The results are shown in the last row of Tab. 4. With this loss function, $PQ$ improves significantly by 5.7. The improvement on the novel split is particularly large, especially for the novel stuff classes: $PQ_N^{St}$ improves from 0.2 to 35.2, which demonstrates the importance of voxel-level supervision for the novel stuff classes. + +Performance on different splits. To validate the generalizability of our method, we conduct experiments on a different split (B11/N5) of the nuScenes dataset. As shown in Tab. 5, our proposed method consistently and significantly outperforms the strong baseline. This again demonstrates the effectiveness of our design and the proposed loss functions. + +Open-vocabulary exploration. In the previous experiments, we follow other 3D open-vocabulary works [6,10,53] and provide analytical results on pre-defined object categories, mainly due to the limited categories in current panoptic segmentation datasets. In practice, our model goes beyond detecting these pre-defined categories: we can take the class embeddings $v_{q}$ in Eq. (1) and compute their cosine similarity with the CLIP embedding of arbitrary text. + +![](images/98d9e5be7fd79d582fc7a56707fae5615716791e0acada95c535e0be4d8becf1.jpg) + +![](images/67d6879f793a2c079aad88c809f56ee7be42a073290d78abaea033f4de792bae.jpg) + +![](images/c29b3c5ba8912dc03032330f6556c5b2b7e2609239965d6e1ae4d15a6aeb5f35.jpg) + +![](images/1bd89946be43210e51bf955c8d074dc8f6ef220a519079704c674fa2b468bd4d.jpg) + +![](images/189fe77276a8fd53569ca1b701fccaabaf91e8e3d916f3622e9bd57eee51143c.jpg) +Fig. 4: Open-vocabulary exploration. We show the novel materials/objects in blue. The orientation of the ego vehicle is fixed in the LiDAR point visualization, while the reference images come from one of the surrounding cameras of the ego vehicle. 
+ +![](images/2c6392ed18f73f379dd6ae65d57e4e23b6037b79c328fbefda6b84d74c6e4ae6.jpg) + +![](images/65c583fc9c1aecf7263b48fe32370c3921ab2cdbac5ef60aa2aab84d17fbd290.jpg) + +![](images/670ad5ad40c8b587e753530c04d562565b21367f3fc399552966e167b2e48ebb.jpg) + +![](images/957ab75cf34867d2c124ea033aa633e725104e5483e822d50b0b944fa672a87f.jpg) + +![](images/64973897491e7f7083474880ff5c2fc0d4bf2ae35f9cd447c7ce3d2dfbb90046.jpg) + +![](images/9bad14e1b3f7f7c649d6d116003dabc4f014c63f585d6e6c6b356011b9700377.jpg) + +![](images/19a7b86e73f80b45740874c952f9a4126ea7596c98fa72fa60aeecac97d071e8.jpg) + +Fig. 4 shows that we can detect novel materials/objects that are not in the pre-defined category list. Note that the concept of open vocabulary is very different from domain adaptation: open vocabulary refers to the ability to handle novel inputs within a scene, while domain adaptation addresses differences in data distribution across scenes. + +Limitations. Our models are only evaluated on current autonomous driving panoptic segmentation benchmarks, which provide a limited number of category annotations. To further evaluate open-vocabulary performance, a large-scale autonomous driving benchmark with more diverse object categories is greatly desired. + +# 5 Conclusion + +In this paper, we present the first approach for the open-vocabulary 3D panoptic segmentation task in autonomous driving by leveraging large vision-language models. We experimentally verified that simply extending a 2D open-vocabulary segmentation method to 3D does not yield good performance, and demonstrated that our proposed model design and loss functions significantly boost performance on this task. Our method significantly outperformed the strong baseline on multiple well-established benchmarks. We hope our work can shed light on future studies of 3D open-vocabulary panoptic segmentation. + +Acknowledgements. We would like to thank Mahyar Najibi, Chao Jia, Zhenyao Zhu, Yolanda Wang, Charles R. Qi, Dragomir Anguelov, Tom Ouyang, Ruichi Yu, Chris Sweeney, Colin Graber, Yingwei Li, Sangjin Lee, Weilong Yang, and Congcong Li for their help with the project. + +# References + +1. Alonso, I., Riazuelo, L., Montesano, L., Murillo, A.C.: 3d-mininet: Learning a 2d representation from point clouds for fast and efficient 3d lidar semantic segmentation. IEEE Robotics and Automation Letters 5(4), 5432-5439 (2020) +2. Behley, J., Garbade, M., Milioto, A., Quenzel, J., Behnke, S., Stachniss, C., Gall, J.: SemanticKITTI: A Dataset for Semantic Scene Understanding of LiDAR Sequences. In: ICCV (2019) +3. Bendale, A., Boult, T.: Towards open world recognition. In: CVPR (2015) +4. Caesar, H., Bankiti, V., Lang, A.H., Vora, S., Liong, V.E., Xu, Q., Krishnan, A., Pan, Y., Baldan, G., Beijbom, O.: nuscenes: A multimodal dataset for autonomous driving. In: CVPR (2020) +5. Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: ECCV (2020) +6. Cen, J., Yun, P., Zhang, S., Cai, J., Luan, D., Wang, M.Y., Liu, M., Tang, M.: Open-world semantic segmentation for LIDAR point clouds. In: ECCV (2022) +7. Chen, R., Liu, Y., Kong, L., Zhu, X., Ma, Y., Li, Y., Hou, Y., Qiao, Y., Wang, W.: Clip2scene: Towards label-efficient 3d scene understanding by clip. In: CVPR (2023) +8. Chen, Z., Li, B.: Bridging the domain gap: Self-supervised 3d scene understanding with foundation models. arXiv preprint arXiv:2305.08776 (2023) +9. 
Cheng, B., Schwing, A., Kirillov, A.: Per-pixel classification is not all you need for semantic segmentation. In: NeurIPS (2021) +10. Ding, R., Yang, J., Xue, C., Zhang, W., Bai, S., Qi, X.: Pla: Language-driven open-vocabulary 3d scene understanding. In: CVPR (2023) +11. Ding, Z., Wang, J., Tu, Z.: Open-vocabulary universal image segmentation with maskclip. In: ICML (2023) +12. Du, Y., Wei, F., Zhang, Z., Shi, M., Gao, Y., Li, G.: Learning to prompt for open-vocabulary object detection with vision-language model. In: CVPR (2022) +13. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for Autonomous Driving? The KITTI Vision Benchmark Suite. In: CVPR (2012) +14. Ghiasi, G., Gu, X., Cui, Y., Lin, T.Y.: Scaling open-vocabulary image segmentation with image-level labels. In: ECCV (2022) +15. Gu, X., Lin, T.Y., Kuo, W., Cui, Y.: Open-vocabulary object detection via vision and language knowledge distillation. In: ICLR (2022) +16. Ha, H., Song, S.: Semantic abstraction: Open-world 3d scene understanding from 2d vision-language models. In: CoRL (2022) +17. He, W., Jamonnak, S., Gou, L., Ren, L.: Clip-s4: Language-guided self-supervised semantic segmentation. In: CVPR (2023) +18. Hegde, D., Valanarasu, J.M.J., Patel, V.M.: Clip goes 3d: Leveraging prompt tuning for language grounded 3d recognition. arXiv preprint arXiv:2303.11313 (2023) +19. Hong, F., Zhou, H., Zhu, X., Li, H., Liu, Z.: Lidar-based panoptic segmentation via dynamic shifting network. In: CVPR (2021) +20. Hu, Q., Yang, B., Xie, L., Rosa, S., Guo, Y., Wang, Z., Trigoni, N., Markham, A.: Learning semantic segmentation of large-scale point clouds with random sampling. IEEE Transactions on Pattern Analysis and Machine Intelligence 44(11), 8338-8354 (2021) +21. Ilharco, G., Wortsman, M., Wightman, R., Gordon, C., Carlini, N., Taori, R., Dave, A., Shankar, V., Namkoong, H., Miller, J., Hajishirzi, H., Farhadi, A., Schmidt, L.: Openclip (Jul 2021). https://doi.org/10.5281/zenodo.5143773 + +22. Jia, C., Yang, Y., Xia, Y., Chen, Y.T., Parekh, Z., Pham, H., Le, Q., Sung, Y.H., Li, Z., Duerig, T.: Scaling up visual and vision-language representation learning with noisy text supervision. In: ICML (2021) +23. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. In: ICLR (2015) +24. Kuo, W., Cui, Y., Gu, X., Piergiovanni, A., Angelova, A.: F-vlm: Open-vocabulary object detection upon frozen vision and language models. In: ICLR (2023) +25. Lambert, J., Liu, Z., Sener, O., Hays, J., Koltun, V.: Mseg: A composite dataset for multi-domain semantic segmentation. In: CVPR (2020) +26. Li, B., Weinberger, K.Q., Belongie, S., Koltun, V., Ranftl, R.: Language-driven semantic segmentation. In: ICLR (2022) +27. Li, J., He, X., Wen, Y., Gao, Y., Cheng, X., Zhang, D.: Panoptic-phnet: Towards real-time and high-precision lidar panoptic segmentation via clustering pseudo heatmap. In: CVPR (2022) +28. Li, Z., Wang, W., Xie, E., Yu, Z., Anandkumar, A., Alvarez, J.M., Luo, P., Lu, T.: Panoptic segformer: Delving deeper into panoptic segmentation with transformers. In: CVPR (2022) +29. Liang, F., Wu, B., Dai, X., Li, K., Zhao, Y., Zhang, H., Zhang, P., Vajda, P., Marculescu, D.: Open-vocabulary semantic segmentation with mask-adapted clip. In: CVPR (2023) +30. Lin, T.Y., Goyal, P., Girshick, R., He, K., Dollar, P.: Focal loss for dense object detection. In: ICCV (2017) +31. Liu, Q., Wen, Y., Han, J., Xu, C., Xu, H., Liang, X.: Open-world semantic segmentation via contrasting and clustering vision-language embedding. 
In: ECCV (2022) +32. Liu, Z., Mao, H., Wu, C.Y., Feichtenhofer, C., Darrell, T., Xie, S.: A convnet for the 2020s. In: CVPR (2022) +33. Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: ICLR (2019) +34. Ma, C., Yang, Y., Wang, Y., Zhang, Y., Xie, W.: Open-vocabulary semantic segmentation with frozen vision-language models. In: BMVC (2022) +35. Peng, S., Genova, K., Jiang, C., Tagliasacchi, A., Pollefeys, M., Funkhouser, T., et al.: Openscene: 3d scene understanding with open vocabularies. In: CVPR (2023) +36. Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: Deep learning on point sets for 3d classification and segmentation. In: CVPR (2017) +37. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In: NeurIPS (2017) +38. Qin, J., Wu, J., Yan, P., Li, M., Yuxi, R., Xiao, X., Wang, Y., Wang, R., Wen, S., Pan, X., et al.: Freeseg: Unified, universal and open-vocabulary image segmentation. In: CVPR (2023) +39. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: ICML (2021) +40. Razani, R., Cheng, R., Li, E., Taghavi, E., Ren, Y., Bingbing, L.: Gp-s3net: Graph-based panoptic sparse semantic segmentation network. In: ICCV (2021) +41. Rozenberszki, D., Litany, O., Dai, A.: Language-grounded indoor 3d semantic segmentation in the wild. In: ECCV (2022) +42. Sirohi, K., Mohan, R., Buscher, D., Burgard, W., Valada, A.: Efficientlps: Efficient lidar panoptic segmentation. IEEE Transactions on Robotics 38(3), 1894-1914 (2021) +43. Takmaz, A., Fedele, E., Sumner, R.W., Pollefeys, M., Tombari, F., Engelmann, F.: Openmask3d: Open-vocabulary 3d instance segmentation. In: NeurIPS (2023) + +44. Tang, H., Liu, Z., Zhao, S., Lin, Y., Lin, J., Wang, H., Han, S.: Searching efficient 3d architectures with sparse point-voxel convolution. In: ECCV (2020) +45. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: NeurIPS (2017) +46. Wu, W., Fuxin, L., Shan, Q.: Pointconvformer: Revenge of the point-based convolution. In: CVPR (2023) +47. Xiao, Z., Zhang, W., Wang, T., Loy, C.C., Lin, D., Pang, J.: Position-guided point cloud panoptic segmentation transformer. arXiv preprint (2023) +48. Xu, J., Zhang, R., Dou, J., Zhu, Y., Sun, J., Pu, S.: Rpvnet: A deep and efficient range-point-voxel fusion network for lidar point cloud segmentation. In: ICCV (2021) +49. Xu, J., De Mello, S., Liu, S., Byeon, W., Breuel, T., Kautz, J., Wang, X.: Groupvit: Semantic segmentation emerges from text supervision. In: CVPR (2022) +50. Xu, J., Liu, S., Vahdat, A., Byeon, W., Wang, X., De Mello, S.: Open-vocabulary panoptic segmentation with text-to-image diffusion models. In: CVPR (2023) +51. Xu, M., Zhang, Z., Wei, F., Lin, Y., Cao, Y., Hu, H., Bai, X.: A simple baseline for open-vocabulary semantic segmentation with pre-trained vision-language model. In: ECCV (2022) +52. Xu, S., Wan, R., Ye, M., Zou, X., Cao, T.: Sparse cross-scale attention network for efficient lidar panoptic segmentation. In: AAAI (2022) +53. Yang, J., Ding, R., Wang, Z., Qi, X.: Regionplc: Regional point-language contrastive learning for open-world 3d scene understanding. In: CVPR (2024) +54. Yu, Q., He, J., Deng, X., Shen, X., Chen, L.C.: Convolutions die hard: Open-vocabulary segmentation with single frozen convolutional clip. In: NeurIPS (2023) +55. 
Zhang, J., Dong, R., Ma, K.: Clip-fo3d: Learning free open-world 3d scene representations from 2d dense clip. In: ICCV (2023) +56. Zhou, C., Loy, C.C., Dai, B.: Extract free dense labels from clip. In: ECCV (2022) +57. Zhou, Z., Lei, Y., Zhang, B., Liu, L., Liu, Y.: Zegclip: Towards adapting clip for zero-shot semantic segmentation. In: CVPR (2023) +58. Zhou, Z., Zhang, Y., Foroosh, H.: Panoptic-polarnet: Proposal-free lidar point cloud panoptic segmentation. In: CVPR (2021) +59. Zou, X., Dou, Z.Y., Yang, J., Gan, Z., Li, L., Li, C., Dai, X., Behl, H., Wang, J., Yuan, L., et al.: Generalized decoding for pixel, image, and language. In: CVPR (2023) \ No newline at end of file diff --git a/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/images.zip b/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..2de6a91d0a386af04526a753fad2c5a226bdc4a2 --- /dev/null +++ b/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5fff90d37e907cee3846b8cd2d48ed07f4dfb7f987d279b7340722b60d012873 +size 456591 diff --git a/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/layout.json b/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..f3099b9bc00c87de280930cb69b4aa54db9e42f1 --- /dev/null +++ b/2024/3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation/layout.json @@ -0,0 +1,9662 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 133, + 111, + 481, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 111, + 481, + 148 + ], + "spans": [ + { + "bbox": [ + 133, + 111, + 481, + 148 + ], + "type": "text", + "content": "3D Open-Vocabulary Panoptic Segmentation with 2D-3D Vision-Language Distillation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "spans": [ + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "text", + "content": "Zihao Xiao" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "text", + "content": ", Longlong Jing" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "text", + "content": ", Shangxuan Wu" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "text", + "content": ", Alex Zihao Zhu" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "text", + "content": ", Jingwei Ji" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "text", + "content": ", Chiyu Max Jiang" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": 
"text", + "content": ", Wei-Chih Hung" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "text", + "content": ", Thomas Funkhouser" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "text", + "content": ", Weicheng Kuo" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "text", + "content": ", Anelia Angelova" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "text", + "content": ", Yin Zhou" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "text", + "content": ", and Shiwei Sheng" + }, + { + "bbox": [ + 135, + 167, + 478, + 205 + ], + "type": "inline_equation", + "content": "^{2*}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 146, + 213, + 468, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 213, + 468, + 225 + ], + "spans": [ + { + "bbox": [ + 146, + 213, + 468, + 225 + ], + "type": "text", + "content": "1 Johns Hopkins University, 2 Waymo, 3 Google Research, 4 Google DeepMind" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 159, + 251, + 455, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 251, + 455, + 472 + ], + "spans": [ + { + "bbox": [ + 159, + 251, + 455, + 472 + ], + "type": "text", + "content": "Abstract. 3D panoptic segmentation is a challenging perception task, especially in autonomous driving. It aims to predict both semantic and instance annotations for 3D points in a scene. Although prior 3D panoptic segmentation approaches have achieved great performance on closed-set benchmarks, generalizing these approaches to unseen things and unseen stuff categories remains an open problem. For unseen object categories, 2D open-vocabulary segmentation has achieved promising results that solely rely on frozen CLIP backbones and assembling multiple classification outputs. However, we find that simply extending these 2D models to 3D does not guarantee good performance due to poor per-mask classification quality, especially for novel stuff categories. In this paper, we propose the first method to tackle 3D open-vocabulary panoptic segmentation. Our model takes advantage of the fusion between learnable LiDAR features and dense frozen vision CLIP features, using a single classification head to make predictions for both base and novel classes. To further improve the classification performance on novel classes and leverage the CLIP model, we propose two novel loss functions: object-level distillation loss and voxel-level distillation loss. Our experiments on the nuScenes and SemanticKITTI datasets show that our method outperforms the strong baseline by a large margin." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 159, + 481, + 453, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 481, + 453, + 503 + ], + "spans": [ + { + "bbox": [ + 159, + 481, + 453, + 503 + ], + "type": "text", + "content": "Keywords: Autonomous driving " + }, + { + "bbox": [ + 159, + 481, + 453, + 503 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 159, + 481, + 453, + 503 + ], + "type": "text", + "content": " 3D panoptic segmentation " + }, + { + "bbox": [ + 159, + 481, + 453, + 503 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 159, + 481, + 453, + 503 + ], + "type": "text", + "content": " Vision-language" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 540, + 230, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 540, + 230, + 552 + ], + "spans": [ + { + "bbox": [ + 132, + 540, + 230, + 552 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 563, + 482, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 563, + 482, + 648 + ], + "spans": [ + { + "bbox": [ + 130, + 563, + 482, + 648 + ], + "type": "text", + "content": "3D panoptic segmentation is a crucial task in computer vision with many real-world applications, most notably in autonomous driving. It combines 3D semantic and instance segmentation to produce per-point predictions for two different types of objects: things (e.g., car) and stuff (e.g., road). To date, there has been significant progress in 3D panoptic segmentation [27, 40, 42, 47, 52, 58]. Most recently, methods such as [47] produce panoptic segmentation predictions directly from point clouds by leveraging learned queries to represent objects and" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 141, + 654, + 265, + 666 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 654, + 265, + 666 + ], + "spans": [ + { + "bbox": [ + 141, + 654, + 265, + 666 + ], + "type": "text", + "content": "* Work done while at Waymo" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "type": "text", + "content": "Transformer-based [45] architectures [2, 4] to perform the modeling. However, existing models only predict panoptic segmentation results for a closed-set of objects. They fail to create predictions for the majority of unseen object categories in the scene, hindering the application of these algorithms to real-world scenarios, especially for autonomous driving. In this work, we focus on segmenting unseen things and unseen stuff objects in autonomous driving scenarios. We follow [10, 53] and develop models under the open-vocabulary setting: we divide the object categories into base (seen) categories and novel (unseen) categories, and evaluate models that are only trained on base categories." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 227, + 482, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 227, + 482, + 323 + ], + "spans": [ + { + "bbox": [ + 130, + 227, + 482, + 323 + ], + "type": "text", + "content": "Such open-world computer vision tasks [3] benefit from the recent advancements in vision-language (V-L) models [22, 39]. In 2D vision, there are many successful methods in open-vocabulary object detection [12, 15, 24] and segmentation [11, 50, 54]. These methods make predictions in a shared image-text embedding space, where predictions for unseen categories are produced by comparing the similarity of an object with the text embedding of the category. However, these methods are only possible due to the vast amounts of paired image-text data available, making it difficult to train similar models for 3D data." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 325, + 482, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 325, + 482, + 433 + ], + "spans": [ + { + "bbox": [ + 130, + 325, + 482, + 433 + ], + "type": "text", + "content": "Instead, researchers have continued to leverage the effectiveness of these 2D vision-language models for 3D with the help of pixel-point correspondences by running inference on 2D images and then aligning with the 3D features. These methods have achieved promising results on open-vocabulary semantic segmentation [10,35,53,55] and instance segmentation [10,43,53], individually. However, there are no methods that address the problem of 3D open-vocabulary panoptic segmentation, i.e., addressing both open-vocabulary semantic segmentation and open-vocabulary instance segmentation at the same time. The challenge lies in how to handle segmentation for novel things and stuff objects simultaneously." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 435, + 482, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 435, + 482, + 567 + ], + "spans": [ + { + "bbox": [ + 130, + 435, + 482, + 567 + ], + "type": "text", + "content": "3D open-vocabulary panoptic segmentation is a challenging problem, due to both the significant domain gaps between the camera and LiDAR modalities and unsolved problems in open-vocabulary segmentation. Many existing open-vocabulary works rely on similarities between text embeddings of class names and pre-trained V-L features to obtain associations between predictions and classes [35,43,55]. However, while projecting 2D V-L features to 3D can account for a large part of the scene, there are often many points unaccounted for due to unmatched pixel/point distributions and differing fields of view between sensors. Some 3D open-vocabulary works [10,53] apply contrastive learning to obtain better association between language and points, but they require extra captioning models and do not address the difficulties of detecting novel stuff classes." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 570, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 570, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 570, + 482, + 665 + ], + "type": "text", + "content": "In this work, we aim to address these two issues with a novel architecture for 3D open-vocabulary panoptic segmentation. Building on existing 3D closed-set panoptic segmentation methods, we train a learned LiDAR feature encoder in parallel with a frozen, pre-trained camera CLIP model. 
By fusing the 3D LiDAR features with the 2D CLIP features, our model is able to learn rich features throughout the entire 3D sensing volume, even if there are no camera features in certain regions. In addition, we apply a pair of novel distillation losses that allow the 3D encoder to learn both object-level and voxel-level features which" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 221, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 221, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 221, + 101 + ], + "type": "text", + "content": "Z. Xiao et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 163 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 163 + ], + "type": "text", + "content": "live inside the CLIP feature space. This provides a learned module in 3D space which can directly be compared with text embeddings. These losses also provide useful training supervision to unknown parts of the scene where there would otherwise be no loss gradient." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 164, + 480, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 164, + 480, + 199 + ], + "spans": [ + { + "bbox": [ + 130, + 164, + 480, + 199 + ], + "type": "text", + "content": "With the proposed model and loss functions, our method significantly outperforms the strong baseline on multiple datasets. Our contributions are summarized as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 206, + 479, + 277 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 138, + 206, + 479, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 206, + 479, + 229 + ], + "spans": [ + { + "bbox": [ + 138, + 206, + 479, + 229 + ], + "type": "text", + "content": "- We present the first approach for 3D open-vocabulary panoptic segmentation in autonomous driving." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 230, + 479, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 230, + 479, + 253 + ], + "spans": [ + { + "bbox": [ + 138, + 230, + 479, + 253 + ], + "type": "text", + "content": "- We propose two novel loss functions, object-level distillation loss and voxel-level distillation loss to help segment novel things and novel stuff objects." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 254, + 479, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 254, + 479, + 277 + ], + "spans": [ + { + "bbox": [ + 138, + 254, + 479, + 277 + ], + "type": "text", + "content": "- We experimentally show that our proposed method significantly outperforms that strong baseline model on both nuScenes and SemanticKITTI datasets." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 294, + 237, + 306 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 294, + 237, + 306 + ], + "spans": [ + { + "bbox": [ + 132, + 294, + 237, + 306 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 319, + 479, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 319, + 479, + 342 + ], + "spans": [ + { + "bbox": [ + 130, + 319, + 479, + 342 + ], + "type": "text", + "content": "This work is closely related to 3D panoptic segmentation, 2D open-vocabulary segmentation, and 3D open-vocabulary segmentation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 342, + 480, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 342, + 480, + 449 + ], + "spans": [ + { + "bbox": [ + 130, + 342, + 480, + 449 + ], + "type": "text", + "content": "3D panoptic segmentation. The goal of 3D panoptic segmentation is to group 3D points according to their semantics and identities. This is a challenging task and relies on a good representation of the 3D data [1,20,36,37,44,46,48]. Most panoptic segmentation models have separate branches for instance segmentation and semantic segmentation [19,27,44,58]. By following DETR [5], the recently proposed P3Former [47] uses learnable queries and a transformer architecture to obtain state-of-the-art performance on multiple panoptic segmentation benchmarks. Although those closed-set methods achieve incredible results, they cannot predict the labels and masks for novel classes." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 450, + 480, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 450, + 480, + 569 + ], + "spans": [ + { + "bbox": [ + 130, + 450, + 480, + 569 + ], + "type": "text", + "content": "2D open-vocabulary segmentation. 2D open-vocabulary segmentation aims to group image pixels according to their semantics or identities for base (seen) or novel (unseen) categories. The prediction on novel categories is usually done by leveraging large V-L models [22,39]. There are many works that focus on open vocabulary semantic segmentation [14,17,26,29,31,34,49,51,56,57,59]. Some work has also explored open-vocabulary panoptic segmentation [11,38,50]. Recently, FC-CLIP [54] proposes a single-stage framework based on a frozen convolutional CLIP backbone [21,32,39] for 2D open-vocabulary panoptic segmentation that achieves state-of-the-art performance. However, due to the camera-LiDAR domain gap, we show that simply extending it to 3D leads to poor performance." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 570, + 480, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 570, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 570, + 480, + 665 + ], + "type": "text", + "content": "3D open-vocabulary segmentation. 3D open-vocabulary segmentation is less explored due to the lack of 3D point-to-text association. One common practice is to utilize V-L models and use 2D-3D pairings to obtain rich, structured information in 3D [7,8,10,16,18,35,41,43,53,55]. Notably, CLIP2Scene [7] proposes a semantic-driven cross-modal contrastive learning framework. PLA [10] leverages images as a bridge and builds hierarchical 3D-caption pairs for contrastive learning. 
OpenScene [35] extracts per-pixel CLIP features using a pre-trained V-L model [14,26] then derives dense 3D features by projecting 3D points onto" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Open-Vocabulary Panoptic Segmentation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 134, + 114, + 482, + 236 + ], + "blocks": [ + { + "bbox": [ + 134, + 114, + 482, + 236 + ], + "lines": [ + { + "bbox": [ + 134, + 114, + 482, + 236 + ], + "spans": [ + { + "bbox": [ + 134, + 114, + 482, + 236 + ], + "type": "image", + "image_path": "c2ed55e4ce883f6a1e5a8a4609c300937e9486cfdde370a2bb9dc79354a37c65.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 255, + 482, + 310 + ], + "lines": [ + { + "bbox": [ + 130, + 255, + 482, + 310 + ], + "spans": [ + { + "bbox": [ + 130, + 255, + 482, + 310 + ], + "type": "text", + "content": "Fig. 1: Overview of our method. Given a LiDAR point cloud and the corresponding camera images, LiDAR features are extracted with a learnable LiDAR encoder, while vision features are extracted by a frozen CLIP vision model. The extracted LiDAR features and the frozen CLIP vision features are then fused and fed to a query-based transformer model to predict instance masks and semantic classes." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 333, + 482, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 333, + 482, + 406 + ], + "spans": [ + { + "bbox": [ + 130, + 333, + 482, + 406 + ], + "type": "text", + "content": "image planes. One concurrent work, RegionPLC [53], utilizes regional visual prompts to create dense captions and perform point-discriminative contrastive learning, which is used for semantic segmentation or instance segmentation, individually. In contrast, our work does not rely on any captioning model or extra contrastive learning, but only depends on pre-trained CLIP features. Our model also handles semantic segmentation and instance segmentation simultaneously." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 423, + 202, + 436 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 423, + 202, + 436 + ], + "spans": [ + { + "bbox": [ + 132, + 423, + 202, + 436 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 449, + 482, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 449, + 482, + 510 + ], + "spans": [ + { + "bbox": [ + 130, + 449, + 482, + 510 + ], + "type": "text", + "content": "This section is organized as follows. First, we define the 3D open-vocabulary panoptic segmentation task. Then we provide detailed descriptions of the model architecture as well as the proposed loss functions. The overview of our method is presented in Fig. 
1, and the two proposed loss functions are illustrated in Fig. 2 (a) and Fig. 2 (b)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 525, + 258, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 525, + 258, + 538 + ], + "spans": [ + { + "bbox": [ + 132, + 525, + 258, + 538 + ], + "type": "text", + "content": "3.1 Problem Definition" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 546, + 482, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 546, + 482, + 593 + ], + "spans": [ + { + "bbox": [ + 130, + 546, + 482, + 593 + ], + "type": "text", + "content": "In 3D panoptic segmentation, the goal is to annotate every point in a point cloud. For stuff classes, (e.g. road, vegetation), a category label is assigned according to its semantics. For things classes (e.g. cars, pedestrians), an instance label is assigned to an object in addition to its semantic label." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": "In open-vocabulary panoptic segmentation, the models are trained on " + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "inline_equation", + "content": "C_B" + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": " base(seen) categories. At test time, besides these " + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "inline_equation", + "content": "C_B" + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": " base categories, the data will contain " + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "inline_equation", + "content": "C_N" + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": " novel(unseen) categories. Following the settings of prior work [15, 24, 54], we assume the availability of the name of the novel categories during inference, but the novel categories are not present in the training data and their names are not known. Note that we do not apply any prompt engineering, as" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 222, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 222, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 222, + 100 + ], + "type": "text", + "content": "Z. Xiao et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": "this is not the focus of this paper. We follow OpenScene [35] to obtain the CLIP text embedding for each category." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 131, + 156, + 387, + 170 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 156, + 387, + 170 + ], + "spans": [ + { + "bbox": [ + 131, + 156, + 387, + 170 + ], + "type": "text", + "content": "3.2 3D Open-Vocabulary Panoptic Segmentation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 175, + 482, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 175, + 482, + 330 + ], + "spans": [ + { + "bbox": [ + 130, + 175, + 482, + 330 + ], + "type": "text", + "content": "Most of the previous 3D open-vocabulary works only address semantic segmentation [7,8,10,16,18,35,41,53,55] or instance segmentation [43,53] separately, and there is no existing work for the 3D open-vocabulary panoptic segmentation task, which handles novel things and novel stuff objects simultaneously. A natural idea would be extending the 2D open vocabulary segmentation methods to build the 3D counterpart. We start with P3Former [47], a state-of-the-art transformer-based 3D closed-set panoptic segmentation model, and add the essential components to support open-vocabulary capability by following FC-CLIP [54], a 2D open-vocabulary segmentation model that achieves state-of-the-art performance on multiple datasets. However, we found that this simple extension leads to poor performance in our experiments, and in this work we propose several new features to improve the performance of our model. More implementation details for this baseline can be found in the supplementary material." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 331, + 482, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 331, + 482, + 390 + ], + "spans": [ + { + "bbox": [ + 130, + 331, + 482, + 390 + ], + "type": "text", + "content": "In order to improve the open vocabulary capability of our model, we propose significant changes to the P3Former architecture, as well as two new loss functions. The architecture of our method is shown in Fig. 1 and mainly consists of multimodal feature fusion, a segmentation head, and input text embeddings for open-vocabulary classification." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 390, + 482, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 390, + 482, + 510 + ], + "spans": [ + { + "bbox": [ + 130, + 390, + 482, + 510 + ], + "type": "text", + "content": "Multimodal feature fusion. The core idea of many recent 2D open-vocabulary works is to leverage the features of large-scale vision-language models [22, 39]. These methods [54] mainly rely on frozen CLIP features and use a transformer model to perform the 2D panoptic segmentation task. However, this is not optimal for 3D tasks since many points do not have corresponding valid camera pixels, leading to invalid features preventing meaningful predictions. To fully exploit the power of the CLIP vision features and learn complementary features from both CLIP features from camera and features from LiDAR, we generate predictions from the fusion of CLIP features extracted by a frozen CLIP model and learned LiDAR features from a LiDAR encoder." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": "As shown in Fig. 
1, there are three major components for the multimodal feature fusion including a LiDAR encoder, a vision CLIP encoder, and voxel-level feature fusion. The LiDAR encoder is a model which takes an unordered set of points as input and extracts per-point features. We apply voxelization to the features from the LiDAR encoder, producing output features " + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "inline_equation", + "content": "F_{lidar} \\in \\mathbb{R}^{V \\times D_{lidar}}" + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": " is the number of the voxels and " + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "inline_equation", + "content": "D_{lidar}" + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": " is the dimension of the learned LiDAR feature. The Vision CLIP encoder is a pre-trained V-L segmentation model [14] which extracts pixel-wise CLIP features from each camera image. Within each voxel, every LiDAR point is projected into the camera image plane based on the intrinsic and extrinsic calibration parameters to index into the corresponding vision CLIP features, then the vision CLIP features of all the points belonging to each voxel are averaged to represent that voxel. Zero padding is used for points which do not have any valid corresponding camera pixels. The" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Open-Vocabulary Panoptic Segmentation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 115, + 482, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 115, + 482, + 200 + ], + "spans": [ + { + "bbox": [ + 130, + 115, + 482, + 200 + ], + "type": "text", + "content": "voxel CLIP features will be referred as " + }, + { + "bbox": [ + 130, + 115, + 482, + 200 + ], + "type": "inline_equation", + "content": "F_{vclip} \\in \\mathbb{R}^{V \\times D_{emb}}" + }, + { + "bbox": [ + 130, + 115, + 482, + 200 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 115, + 482, + 200 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 130, + 115, + 482, + 200 + ], + "type": "text", + "content": " is the number of voxels after voxelization and " + }, + { + "bbox": [ + 130, + 115, + 482, + 200 + ], + "type": "inline_equation", + "content": "D_{emb}" + }, + { + "bbox": [ + 130, + 115, + 482, + 200 + ], + "type": "text", + "content": " is the dimension of the CLIP features. Finally, the learned per-voxel LiDAR features and frozen per-voxel vision CLIP features are concatenated together to be used as input into the transformer decoder in the segmentation head. 
This feature fusion enables our model to learn complementary information from both the LiDAR and CLIP features, allowing us to fine-tune our backbone for each dataset's specific data distribution." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 200, + 482, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 200, + 482, + 368 + ], + "spans": [ + { + "bbox": [ + 130, + 200, + 482, + 368 + ], + "type": "text", + "content": "Segmentation head. The segmentation head is a transformer [45] model that takes the LiDAR-Vision fused feature as input to produce panoptic segmentation results. Prior works, including existing 2D open-vocabulary works such as FC-CLIP [54], typically use learnable queries " + }, + { + "bbox": [ + 130, + 200, + 482, + 368 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 130, + 200, + 482, + 368 + ], + "type": "text", + "content": " to represent each instance or thing, and they contain a mask prediction head " + }, + { + "bbox": [ + 130, + 200, + 482, + 368 + ], + "type": "inline_equation", + "content": "f_{mask}" + }, + { + "bbox": [ + 130, + 200, + 482, + 368 + ], + "type": "text", + "content": " to produce the corresponding mask for each individual object and a classification head " + }, + { + "bbox": [ + 130, + 200, + 482, + 368 + ], + "type": "inline_equation", + "content": "f_{cls}" + }, + { + "bbox": [ + 130, + 200, + 482, + 368 + ], + "type": "text", + "content": " to predict the per-mask class score for each known class. However, as a result, they also need to rely on another classifier to handle novel categories. Our goal is to use a single model to handle the prediction for both base and novel categories. Thus, we predict a class embedding instead of a class score for each mask. During training, the model learns to regress an analogy to the CLIP vision embedding for each mask, and the category prediction can be obtained by calculating its similarity with the CLIP text embedding of text queries during the inference stage. The class embedding " + }, + { + "bbox": [ + 130, + 200, + 482, + 368 + ], + "type": "inline_equation", + "content": "f_{cls}" + }, + { + "bbox": [ + 130, + 200, + 482, + 368 + ], + "type": "text", + "content": " prediction is defined as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 261, + 373, + 481, + 388 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 373, + 481, + 388 + ], + "spans": [ + { + "bbox": [ + 261, + 373, + 481, + 388 + ], + "type": "interline_equation", + "content": "v _ {q} = f _ {c l s} (q) \\in \\mathbb {R} ^ {D _ {e m b}}, \\tag {1}", + "image_path": "68287c85a9564de41e95d50a444057657f7cef31e86887fa8bf8867893097a5f.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 392, + 482, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 392, + 482, + 441 + ], + "spans": [ + { + "bbox": [ + 130, + 392, + 482, + 441 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 392, + 482, + 441 + ], + "type": "inline_equation", + "content": "v_{q}" + }, + { + "bbox": [ + 130, + 392, + 482, + 441 + ], + "type": "text", + "content": " is in the CLIP embedding space. The predicted class logits are then computed from the cosine similarity between the predicted class embedding and the text embedding of every category name from the evaluation set using a frozen CLIP model. 
The classification logits are defined as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 207, + 445, + 481, + 468 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 445, + 481, + 468 + ], + "spans": [ + { + "bbox": [ + 207, + 445, + 481, + 468 + ], + "type": "interline_equation", + "content": "s _ {v _ {q}} = \\frac {1}{T} \\left[ \\cos \\left(v _ {q}, t _ {1}\\right), \\cos \\left(v _ {q}, t _ {2}\\right), \\dots , \\cos \\left(v _ {q}, t _ {C}\\right) \\right] \\tag {2}", + "image_path": "46a6406efe7934d7e4add9bdc8c696d9046e4984d55790842a4400706bd2d4ed.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 473, + 482, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 473, + 482, + 510 + ], + "spans": [ + { + "bbox": [ + 130, + 473, + 482, + 510 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 473, + 482, + 510 + ], + "type": "inline_equation", + "content": "t_i \\in \\mathbb{R}^{D_{emb}}" + }, + { + "bbox": [ + 130, + 473, + 482, + 510 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 473, + 482, + 510 + ], + "type": "inline_equation", + "content": "i \\in \\{1, 2, \\dots, C\\}" + }, + { + "bbox": [ + 130, + 473, + 482, + 510 + ], + "type": "text", + "content": " is the text embedding, " + }, + { + "bbox": [ + 130, + 473, + 482, + 510 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 130, + 473, + 482, + 510 + ], + "type": "text", + "content": " is the number of categories (" + }, + { + "bbox": [ + 130, + 473, + 482, + 510 + ], + "type": "inline_equation", + "content": "C_B" + }, + { + "bbox": [ + 130, + 473, + 482, + 510 + ], + "type": "text", + "content": " in training and " + }, + { + "bbox": [ + 130, + 473, + 482, + 510 + ], + "type": "inline_equation", + "content": "C_B + C_N" + }, + { + "bbox": [ + 130, + 473, + 482, + 510 + ], + "type": "text", + "content": " in testing), and " + }, + { + "bbox": [ + 130, + 473, + 482, + 510 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 130, + 473, + 482, + 510 + ], + "type": "text", + "content": " is a learnable temperature term that controls the concentration of the distribution." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 510, + 483, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 483, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 483, + 666 + ], + "type": "text", + "content": "Query assignment. A common practice [9, 54] for transformer-based panoptic segmentation models is to utilize a single set of queries to make predictions for both things and stuff classes jointly. In contrast, P3Former uses one query set to represent things classes after bipartite matching and one fixed query set for stuff classes. We have found that this separation of things queries and stuff queries makes our model converge faster and improve overall performance, and similar pattern has been observed in other tasks [28]. However, the fixed set of queries for stuff classes is not applicable to the open-vocabulary setting due to the unknown number of novel stuff classes. To take advantage of the benefits of separating things queries and stuff queries, we propose to predict the base stuff classes with a fixed set of queries and utilize a set of learnable queries to target base things classes and all novel (things and stuff) classes. More details of the query assignment can be found in the supplementary materials." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 222, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 222, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 222, + 101 + ], + "type": "text", + "content": "Z. Xiao et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 134, + 114, + 310, + 201 + ], + "blocks": [ + { + "bbox": [ + 134, + 114, + 310, + 201 + ], + "lines": [ + { + "bbox": [ + 134, + 114, + 310, + 201 + ], + "spans": [ + { + "bbox": [ + 134, + 114, + 310, + 201 + ], + "type": "image", + "image_path": "fe8337038a0e671baf59695920bf28d56480a51f0114959972774dceeffc93bd.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 181, + 202, + 264, + 210 + ], + "lines": [ + { + "bbox": [ + 181, + 202, + 264, + 210 + ], + "spans": [ + { + "bbox": [ + 181, + 202, + 264, + 210 + ], + "type": "text", + "content": "(a) Object-Level Distillation Loss." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 313, + 115, + 479, + 202 + ], + "blocks": [ + { + "bbox": [ + 313, + 115, + 479, + 202 + ], + "lines": [ + { + "bbox": [ + 313, + 115, + 479, + 202 + ], + "spans": [ + { + "bbox": [ + 313, + 115, + 479, + 202 + ], + "type": "image", + "image_path": "1030de4f7afa5dff5320a7deefbb48e8a91f284f5fa6543f0a3dbeb766d7bd9f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 355, + 202, + 436, + 210 + ], + "lines": [ + { + "bbox": [ + 355, + 202, + 436, + 210 + ], + "spans": [ + { + "bbox": [ + 355, + 202, + 436, + 210 + ], + "type": "text", + "content": "(b) Voxel-Level Distillation Loss." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 131, + 229, + 480, + 251 + ], + "lines": [ + { + "bbox": [ + 131, + 229, + 480, + 251 + ], + "spans": [ + { + "bbox": [ + 131, + 229, + 480, + 251 + ], + "type": "text", + "content": "Fig. 2: (a) the proposed object-level distillation loss, and (b) the proposed voxel-level distillation loss." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 274, + 230, + 285 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 274, + 230, + 285 + ], + "spans": [ + { + "bbox": [ + 132, + 274, + 230, + 285 + ], + "type": "text", + "content": "3.3 Loss Function" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 293, + 480, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 293, + 480, + 376 + ], + "spans": [ + { + "bbox": [ + 130, + 293, + 480, + 376 + ], + "type": "text", + "content": "Closed-set panoptic segmentation models [47] are typically optimized with objective functions consisting of a classification loss " + }, + { + "bbox": [ + 130, + 293, + 480, + 376 + ], + "type": "inline_equation", + "content": "L_{cls}" + }, + { + "bbox": [ + 130, + 293, + 480, + 376 + ], + "type": "text", + "content": " and a mask prediction loss " + }, + { + "bbox": [ + 130, + 293, + 480, + 376 + ], + "type": "inline_equation", + "content": "L_{mask}" + }, + { + "bbox": [ + 130, + 293, + 480, + 376 + ], + "type": "text", + "content": ". We follow P3Former [47] for these two losses: the classification loss " + }, + { + "bbox": [ + 130, + 293, + 480, + 376 + ], + "type": "inline_equation", + "content": "L_{cls}" + }, + { + "bbox": [ + 130, + 293, + 480, + 376 + ], + "type": "text", + "content": " optimizes the focal loss [30] between the class predictions and the category labels, while the mask loss " + }, + { + "bbox": [ + 130, + 293, + 480, + 376 + ], + "type": "inline_equation", + "content": "L_{mask}" + }, + { + "bbox": [ + 130, + 293, + 480, + 376 + ], + "type": "text", + "content": " optimizes the voxel-query classification loss. Besides the two standard loss functions, we propose two simple yet effective losses to apply distillation from the CLIP model at different levels." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 377, + 481, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 377, + 481, + 498 + ], + "spans": [ + { + "bbox": [ + 130, + 377, + 481, + 498 + ], + "type": "text", + "content": "Object-level distillation loss. Similar to previous methods [50, 54], we use the cosine similarity between predicted class embeddings and class text CLIP embeddings to produce classification scores. However, the classification loss applied to Eq. (2) only enforces similarity to known classes. In this work, we make the assumption that the frozen CLIP features are discriminative with respect to open-vocabulary classes and have good out-of-distribution generalization. We propose an additional training loss which forces our predicted object-level class embeddings to be similar to the CLIP embeddings within their corresponding masks after matching. 
Similar to [54], we utilize voxel vision CLIP features to get an embedding for each query " + }, + { + "bbox": [ + 130, + 377, + 481, + 498 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 130, + 377, + 481, + 498 + ], + "type": "text", + "content": " by mask pooling the vision CLIP features:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 232, + 503, + 481, + 533 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 503, + 481, + 533 + ], + "spans": [ + { + "bbox": [ + 232, + 503, + 481, + 533 + ], + "type": "interline_equation", + "content": "w_{q} = \\frac{1}{\\left| M_{q} \\right|} \\sum_{p} \\mathbb{1}(p \\in M_{q}) F_{vclip}(p) \\tag{3}", + "image_path": "9980e09487384f0ce7dc6e521c6b19850d4cc972d25371b3d7b1ad9fb2a7be74.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 131, + 540, + 480, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 540, + 480, + 563 + ], + "spans": [ + { + "bbox": [ + 131, + 540, + 480, + 563 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 540, + 480, + 563 + ], + "type": "inline_equation", + "content": "M_q" + }, + { + "bbox": [ + 131, + 540, + 480, + 563 + ], + "type": "text", + "content": " is the set of points " + }, + { + "bbox": [ + 131, + 540, + 480, + 563 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 131, + 540, + 480, + 563 + ], + "type": "text", + "content": " belonging to the mask for query " + }, + { + "bbox": [ + 131, + 540, + 480, + 563 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 131, + 540, + 480, + 563 + ], + "type": "text", + "content": ". Our object-level distillation loss is then defined as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 210, + 568, + 481, + 598 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 210, + 568, + 481, + 598 + ], + "spans": [ + { + "bbox": [ + 210, + 568, + 481, + 598 + ], + "type": "interline_equation", + "content": "L_{O} = \\frac{1}{\\left| Q_{\\text{matched}} \\right|} \\sum_{q \\in Q_{\\text{matched}}} \\left( 1 - \\cos\\left(v_{q}, w_{q}\\right) \\right), \\tag{4}", + "image_path": "4f70cd1857edc5c1aa3c99da23debca1f33137228aff3428e8b1bf02c96aa602.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 131, + 605, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 605, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 131, + 605, + 482, + 666 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 605, + 482, + 666 + ], + "type": "inline_equation", + "content": "Q_{\\text{matched}}" + }, + { + "bbox": [ + 131, + 605, + 482, + 666 + ], + "type": "text", + "content": " is the set of queries matched with ground truth objects during training, " + }, + { + "bbox": [ + 131, + 605, + 482, + 666 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 131, + 605, + 482, + 666 + ], + "type": "text", + "content": " is the set of predicted class embeddings, and " + }, + { + "bbox": [ + 131, + 605, + 482, + 666 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 131, + 605, + 482, + 666 + ], + "type": "text", + "content": " is the set of mask-pooled CLIP embeddings. This loss forces the model to directly distill object-level camera CLIP features and improves model performance for novel things classes. 
We also experimented with applying " + }, + { + "bbox": [ + 131, + 605, + 482, + 666 + ], + "type": "inline_equation", + "content": "L_O" + }, + { + "bbox": [ + 131, + 605, + 482, + 666 + ], + "type": "text", + "content": " to all predicted masks, but we" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Open-Vocabulary Panoptic Segmentation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": "found that this slightly reduced model performance, likely due to the presence of masks that do not correspond to any objects in the scene." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 140, + 482, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 140, + 482, + 260 + ], + "spans": [ + { + "bbox": [ + 130, + 140, + 482, + 260 + ], + "type": "text", + "content": "Voxel-level distillation loss. While the object-level distillation loss distills the per-object features from the CLIP model, it does not provide any supervision for the mask prediction head, which would otherwise only receive supervision for known classes. We found this particularly problematic for unknown stuff classes, which tend to be more spread out and cover larger and more diverse parts of the scene. In addition, it is only applied to queries with relatively accurate mask predictions in order to learn useful CLIP features. To target these issues, we propose the voxel-level distillation loss to explicitly learn voxel-level CLIP features, which does not depend on any labels and can be applied to all queries. 
In particular, the voxel-level distillation loss is defined as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 269, + 266, + 481, + 281 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 269, + 266, + 481, + 281 + ], + "spans": [ + { + "bbox": [ + 269, + 266, + 481, + 281 + ], + "type": "interline_equation", + "content": "F_{rec} = M_{Q}^{T} F_{Qemb} \\tag{5}", + "image_path": "a39ecfc053db71d025c9d136b4c31a7d31318237020d76f4f0254609b4fefca4.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 288, + 482, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 288, + 482, + 337 + ], + "spans": [ + { + "bbox": [ + 130, + 288, + 482, + 337 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 288, + 482, + 337 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 130, + 288, + 482, + 337 + ], + "type": "text", + "content": " is the number of queries, " + }, + { + "bbox": [ + 130, + 288, + 482, + 337 + ], + "type": "inline_equation", + "content": "F_{Qemb} \\in \\mathbb{R}^{Q \\times D_{emb}}" + }, + { + "bbox": [ + 130, + 288, + 482, + 337 + ], + "type": "text", + "content": " is the predicted embedding for all queries, and " + }, + { + "bbox": [ + 130, + 288, + 482, + 337 + ], + "type": "inline_equation", + "content": "M_Q \\in \\mathbb{R}^{Q \\times V}" + }, + { + "bbox": [ + 130, + 288, + 482, + 337 + ], + "type": "text", + "content": " is the matrix of predicted per-voxel mask probabilities for all queries. The reconstructed features can be regarded as the weighted sum of all queries for each voxel. We supervise these features with the voxel CLIP features:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 260, + 348, + 481, + 361 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 260, + 348, + 481, + 361 + ], + "spans": [ + { + "bbox": [ + 260, + 348, + 481, + 361 + ], + "type": "interline_equation", + "content": "L_{V} = L_{1}\\left(F_{rec}, F_{vclip}\\right) \\tag{6}", + "image_path": "66d22e5f8e46d176e9b943950a161865f43c0e50f55e9d134d9985ce1a425124.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 364, + 482, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 364, + 482, + 449 + ], + "spans": [ + { + "bbox": [ + 130, + 364, + 482, + 449 + ], + "type": "text", + "content": "Unlike the object-level distillation loss, which is only applied to queries with matched ground truth, this loss is applied to all predicted mask scores and queries. In our experiments, we found that this loss significantly improves performance on novel stuff categories in particular, likely because it does not require exact matches with the ground truth, which can be difficult for large stuff classes. However, this loss is still susceptible to noisy or low-quality mask scores, and we found that larger weights for this loss can disrupt training."
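Under the definitions of Eqs. (3)-(6), both distillation losses are short tensor computations. The following PyTorch sketch is illustrative only (tensor names and shapes are assumptions based on the notation above), not the released implementation; the last function assembles Eq. (7) using the weight values reported later in the training details (Sec. 4.1).

```python
import torch
import torch.nn.functional as F

def object_distillation_loss(v_q, masks, F_vclip):
    """Eq. (3) + Eq. (4), over the |Q_matched| queries matched to ground truth."""
    # v_q: (Qm, D) predicted class embeddings of the matched queries
    # masks: (Qm, V) binary masks over V voxels from the matched ground truth
    # F_vclip: (V, D) frozen per-voxel vision CLIP features
    mf = masks.float()
    w_q = (mf @ F_vclip) / mf.sum(dim=1, keepdim=True).clamp(min=1.0)  # Eq. (3)
    return (1.0 - F.cosine_similarity(v_q, w_q, dim=-1)).mean()        # Eq. (4)

def voxel_distillation_loss(M_Q, F_Qemb, F_vclip):
    """Eq. (5) + Eq. (6), applied to all queries and all mask scores."""
    # M_Q: (Q, V) predicted per-voxel mask probabilities for all queries
    # F_Qemb: (Q, D) predicted embeddings for all queries
    F_rec = M_Q.transpose(0, 1) @ F_Qemb      # Eq. (5): (V, D) reconstruction
    return F.l1_loss(F_rec, F_vclip)          # Eq. (6)

def total_loss(L_cls, L_mask, L_O, L_V,
               w_alpha=1.0, w_beta=1.0, w_lambda=1.0, w_gamma=0.1):
    # Eq. (7); the defaults follow the weights reported in the training details
    return w_alpha * L_cls + w_beta * L_mask + w_lambda * L_O + w_gamma * L_V
```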
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 449, + 482, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 449, + 482, + 520 + ], + "spans": [ + { + "bbox": [ + 130, + 449, + 482, + 520 + ], + "type": "text", + "content": "To summarize, " + }, + { + "bbox": [ + 130, + 449, + 482, + 520 + ], + "type": "inline_equation", + "content": "L_{O}" + }, + { + "bbox": [ + 130, + 449, + 482, + 520 + ], + "type": "text", + "content": " helps get rid of the ensemble of classifiers in [14, 15, 24, 50, 54] and enables open-vocabulary ability with one trainable classifier. " + }, + { + "bbox": [ + 130, + 449, + 482, + 520 + ], + "type": "inline_equation", + "content": "L_{V}" + }, + { + "bbox": [ + 130, + 449, + 482, + 520 + ], + "type": "text", + "content": " uses a scene-level representation represented by the embedding of all queries, while previous methods only consider object-level representation. Combining " + }, + { + "bbox": [ + 130, + 449, + 482, + 520 + ], + "type": "inline_equation", + "content": "L_{O}" + }, + { + "bbox": [ + 130, + 449, + 482, + 520 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 130, + 449, + 482, + 520 + ], + "type": "inline_equation", + "content": "L_{V}" + }, + { + "bbox": [ + 130, + 449, + 482, + 520 + ], + "type": "text", + "content": " enables segmenting novel things and novel stuff objects simultaneously. Our final objective function can be written as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 200, + 529, + 481, + 541 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 529, + 481, + 541 + ], + "spans": [ + { + "bbox": [ + 200, + 529, + 481, + 541 + ], + "type": "interline_equation", + "content": "L = w _ {\\alpha} * L _ {c l s} + w _ {\\beta} * L _ {m a s k} + w _ {\\lambda} * L _ {O} + w _ {\\gamma} * L _ {V} \\tag {7}", + "image_path": "a2bb058beb4ce28fa0ee8eb4914082279189b8b7c6aac14676430064ad34de34.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 548, + 478, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 548, + 478, + 560 + ], + "spans": [ + { + "bbox": [ + 131, + 548, + 478, + 560 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 131, + 548, + 478, + 560 + ], + "type": "inline_equation", + "content": "w_{\\alpha}, w_{\\beta}, w_{\\lambda}, w_{\\gamma}" + }, + { + "bbox": [ + 131, + 548, + 478, + 560 + ], + "type": "text", + "content": ", are weights for the corresponding objective functions." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 131, + 575, + 279, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 575, + 279, + 586 + ], + "spans": [ + { + "bbox": [ + 131, + 575, + 279, + 586 + ], + "type": "text", + "content": "3.4 Implementation Details" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": "For the LiDAR encoder and segmentation head, we follow the implementation of the state-of-the-art closed-set 3D panoptic segmentation method P3Former [47]. For the Vision CLIP encoder, we use OpenSeg [14], due to its remarkable performance on the recent open-vocabulary 3D semantic segmentation task [35]. 
For the Text CLIP encoder, we use CLIP [39] with a ViT-L/14 [45] backbone, following other state-of-the-art open-vocabulary works [35]." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 222, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 222, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 222, + 101 + ], + "type": "text", + "content": "Z. Xiao et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 114, + 230, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 114, + 230, + 129 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 230, + 129 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 136, + 269, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 136, + 269, + 148 + ], + "spans": [ + { + "bbox": [ + 132, + 136, + 269, + 148 + ], + "type": "text", + "content": "4.1 Experimental Setting" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 152, + 479, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 152, + 479, + 187 + ], + "spans": [ + { + "bbox": [ + 130, + 152, + 479, + 187 + ], + "type": "text", + "content": "Following the state-of-the-art closed-set 3D panoptic segmentation work [27, 40, 42, 47, 52, 58], we conduct experiments and ablation studies on the nuScenes [4] and SemanticKITTI [2, 13] datasets." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 188, + 480, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 188, + 480, + 259 + ], + "spans": [ + { + "bbox": [ + 130, + 188, + 480, + 259 + ], + "type": "text", + "content": "nuScenes. The nuScenes dataset [4] is a public benchmark for autonomous driving. It consists of 1000 run segments and is further divided into prescribed train/val/test splits. We use all key frames with panoptic labels in the training set (28130 frames) to train the model. Following the most recent state-of-the-art model P3Former [47], we evaluate the models on the validation set (6019 frames). There are 16 semantic classes, including 10 things classes and 6 stuff classes." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "spans": [ + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": "SemanticKITTI. SemanticKITTI [2, 13] is the first large dataset for LiDAR panoptic segmentation for autonomous driving. We conduct experiments on the training and validation sets, where panoptic segmentation labels are available. 3D open-vocabulary methods often require point-pixel pairing. In the SemanticKITTI dataset, however, the ego-vehicle is only equipped with frontal cameras. Thus, we filter out the points that are not visible in the camera view based on the provided camera parameters for both training and evaluation. There are 19 semantic classes, including 8 things classes and 11 stuff classes."
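The point filtering described above amounts to projecting each LiDAR point into the camera and keeping those that land inside the image. A minimal NumPy sketch, assuming a standard pinhole model with intrinsics `K` and a LiDAR-to-camera extrinsic `T_cam_from_lidar` (all names are hypothetical, not the datasets' APIs):

```python
import numpy as np

def visible_point_mask(points, K, T_cam_from_lidar, img_w, img_h):
    # points: (N, 3) LiDAR points; K: (3, 3) camera intrinsics;
    # T_cam_from_lidar: (4, 4) extrinsics mapping LiDAR to camera coordinates
    pts_h = np.concatenate([points, np.ones((len(points), 1))], axis=1)
    cam = (T_cam_from_lidar @ pts_h.T).T[:, :3]        # points in camera frame
    in_front = cam[:, 2] > 0                           # drop points behind the camera
    uv = (K @ cam.T).T
    uv = uv[:, :2] / np.clip(uv[:, 2:3], 1e-6, None)   # perspective projection
    in_img = (uv[:, 0] >= 0) & (uv[:, 0] < img_w) & \
             (uv[:, 1] >= 0) & (uv[:, 1] < img_h)
    return in_front & in_img
```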
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 355, + 480, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 355, + 480, + 462 + ], + "spans": [ + { + "bbox": [ + 130, + 355, + 480, + 462 + ], + "type": "text", + "content": "Data split. Both the nuScenes and SemanticKITTI datasets do not provide official base and novel class splits. Following the state-of-the-art 3D open-vocabulary segmentation work [6,10,53], we randomly split the classes into base and novel, while keeping the ratio between base and novel classes around " + }, + { + "bbox": [ + 130, + 355, + 480, + 462 + ], + "type": "inline_equation", + "content": "3:1" + }, + { + "bbox": [ + 130, + 355, + 480, + 462 + ], + "type": "text", + "content": ". For nuScenes, the number of class for base and novel split are 12 and 4 respectively, and this setting will be referred as B12/N4. For SemanticKITTI, the number of class for base and novel split are 14 and 5, and this setting will be referred as B14/N5. We use the same splits in the main comparison with prior methods, and provide the results of more variations in the ablation studies and supplementary materials." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 463, + 480, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 463, + 480, + 569 + ], + "spans": [ + { + "bbox": [ + 130, + 463, + 480, + 569 + ], + "type": "text", + "content": "Training details. We follow most of the architecture configurations in the official P3Former [47] implementation. We set " + }, + { + "bbox": [ + 130, + 463, + 480, + 569 + ], + "type": "inline_equation", + "content": "w_{\\alpha} = 1" + }, + { + "bbox": [ + 130, + 463, + 480, + 569 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 463, + 480, + 569 + ], + "type": "inline_equation", + "content": "w_{\\beta} = 1" + }, + { + "bbox": [ + 130, + 463, + 480, + 569 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 463, + 480, + 569 + ], + "type": "inline_equation", + "content": "w_{\\lambda} = 1" + }, + { + "bbox": [ + 130, + 463, + 480, + 569 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 463, + 480, + 569 + ], + "type": "inline_equation", + "content": "w_{\\gamma} = 0.1" + }, + { + "bbox": [ + 130, + 463, + 480, + 569 + ], + "type": "text", + "content": " for both datasets. We use the AdamW [23, 33] optimizer with a weight decay of 0.01. We set the initial learning rate as 0.0008 with a multi-step decay schedule. The models are trained for 40 epochs, and we use the checkpoint of the last epoch for evaluation. To avoid ambiguous class names and better utilize the CLIP text embedding, we follow [25, 35, 54] and apply multi-label mapping for the text queries. During inference, if there are multiple labels for one class, we derive the class score by getting the maximum scores among these labels." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 571, + 479, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 571, + 479, + 594 + ], + "spans": [ + { + "bbox": [ + 130, + 571, + 479, + 594 + ], + "type": "text", + "content": "Evaluation metrics. We use panoptic quality " + }, + { + "bbox": [ + 130, + 571, + 479, + 594 + ], + "type": "inline_equation", + "content": "(PQ)" + }, + { + "bbox": [ + 130, + 571, + 479, + 594 + ], + "type": "text", + "content": " as the major evaluation metric for the panoptic segmentation task. 
" + }, + { + "bbox": [ + 130, + 571, + 479, + 594 + ], + "type": "inline_equation", + "content": "PQ" + }, + { + "bbox": [ + 130, + 571, + 479, + 594 + ], + "type": "text", + "content": " is formulated as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 219, + 597, + 480, + 638 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 597, + 480, + 638 + ], + "spans": [ + { + "bbox": [ + 219, + 597, + 480, + 638 + ], + "type": "interline_equation", + "content": "\\mathrm {P Q} = \\underbrace {\\frac {\\sum_ {T P} \\operatorname {I o U}}{| T P |}} _ {\\mathrm {S Q}} \\times \\underbrace {\\frac {| T P |}{| T P | + \\frac {1}{2} | F P | + \\frac {1}{2} | F N |}} _ {\\mathrm {R Q}}. \\tag {8}", + "image_path": "b89bb3eb7ee5a103a67511f180a9dc2e65045cfc2a9f4ba7bd9bd6b2eff54676.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 641, + 480, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 641, + 480, + 666 + ], + "spans": [ + { + "bbox": [ + 132, + 641, + 480, + 666 + ], + "type": "inline_equation", + "content": "PQ" + }, + { + "bbox": [ + 132, + 641, + 480, + 666 + ], + "type": "text", + "content": " is the multiplication of segmentation quality " + }, + { + "bbox": [ + 132, + 641, + 480, + 666 + ], + "type": "inline_equation", + "content": "(SQ)" + }, + { + "bbox": [ + 132, + 641, + 480, + 666 + ], + "type": "text", + "content": " and recognition quality " + }, + { + "bbox": [ + 132, + 641, + 480, + 666 + ], + "type": "inline_equation", + "content": "(RQ)" + }, + { + "bbox": [ + 132, + 641, + 480, + 666 + ], + "type": "text", + "content": ". We report all the three metrics " + }, + { + "bbox": [ + 132, + 641, + 480, + 666 + ], + "type": "inline_equation", + "content": "(PQ, RQ, SQ)" + }, + { + "bbox": [ + 132, + 641, + 480, + 666 + ], + "type": "text", + "content": " for all classes. 
We also" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 263, + 91, + 447, + 103 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 91, + 447, + 103 + ], + "spans": [ + { + "bbox": [ + 263, + 91, + 447, + 103 + ], + "type": "text", + "content": "3D Open-Vocabulary Panoptic Segmentation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 136, + 116, + 220, + 200 + ], + "blocks": [ + { + "bbox": [ + 136, + 116, + 220, + 200 + ], + "lines": [ + { + "bbox": [ + 136, + 116, + 220, + 200 + ], + "spans": [ + { + "bbox": [ + 136, + 116, + 220, + 200 + ], + "type": "image", + "image_path": "46ccebcc6142168e389b257a212aa0a4cc27967f146ad85411e4e19968399726.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 171, + 201, + 184, + 208 + ], + "lines": [ + { + "bbox": [ + 171, + 201, + 184, + 208 + ], + "spans": [ + { + "bbox": [ + 171, + 201, + 184, + 208 + ], + "type": "text", + "content": "PFC" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 221, + 116, + 304, + 200 + ], + "blocks": [ + { + "bbox": [ + 221, + 116, + 304, + 200 + ], + "lines": [ + { + "bbox": [ + 221, + 116, + 304, + 200 + ], + "spans": [ + { + "bbox": [ + 221, + 116, + 304, + 200 + ], + "type": "image", + "image_path": "03a482f0bf7d8ecb5f721997e9f60b65493a6d973e51d6094b58fba088eda3d0.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 254, + 201, + 270, + 209 + ], + "lines": [ + { + "bbox": [ + 254, + 201, + 270, + 209 + ], + "spans": [ + { + "bbox": [ + 254, + 201, + 270, + 209 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 310, + 116, + 395, + 200 + ], + "blocks": [ + { + "bbox": [ + 310, + 116, + 395, + 200 + ], + "lines": [ + { + "bbox": [ + 310, + 116, + 395, + 200 + ], + "spans": [ + { + "bbox": [ + 310, + 116, + 395, + 200 + ], + "type": "image", + "image_path": "b5c327dfdf5140a46ce816482806940a69ec57f903f122fe338734a3ceb83214.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 347, + 201, + 361, + 209 + ], + "lines": [ + { + "bbox": [ + 347, + 201, + 361, + 209 + ], + "spans": [ + { + "bbox": [ + 347, + 201, + 361, + 209 + ], + "type": "text", + "content": "PFC" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 131, + 229, + 480, + 262 + ], + "lines": [ + { + "bbox": [ + 131, + 229, + 480, + 262 + ], + "spans": [ + { + "bbox": [ + 131, + 229, + 480, + 262 + ], + "type": "text", + "content": "Fig. 3: Open-vocabulary panoptic segmentation results from PFC and our method on nuScenes. PFC predicts inaccurate category and masks for the novel pedestrian (red), bus (yellow) and vegetation (green), while ours makes correct predictions." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 395, + 116, + 479, + 200 + ], + "blocks": [ + { + "bbox": [ + 395, + 116, + 479, + 200 + ], + "lines": [ + { + "bbox": [ + 395, + 116, + 479, + 200 + ], + "spans": [ + { + "bbox": [ + 395, + 116, + 479, + 200 + ], + "type": "image", + "image_path": "f49229697b6a698e03b4b3e1da595b07cbdfaa5efe782dd2c74ed82ad758bd8a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 430, + 201, + 446, + 209 + ], + "lines": [ + { + "bbox": [ + 430, + 201, + 446, + 209 + ], + "spans": [ + { + "bbox": [ + 430, + 201, + 446, + 209 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 286, + 480, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 286, + 480, + 335 + ], + "spans": [ + { + "bbox": [ + 130, + 286, + 480, + 335 + ], + "type": "text", + "content": "report " + }, + { + "bbox": [ + 130, + 286, + 480, + 335 + ], + "type": "inline_equation", + "content": "PQ" + }, + { + "bbox": [ + 130, + 286, + 480, + 335 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 286, + 480, + 335 + ], + "type": "inline_equation", + "content": "RQ" + }, + { + "bbox": [ + 130, + 286, + 480, + 335 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 286, + 480, + 335 + ], + "type": "inline_equation", + "content": "SQ" + }, + { + "bbox": [ + 130, + 286, + 480, + 335 + ], + "type": "text", + "content": " for novel things objects and novel stuff objects separately. In particular, " + }, + { + "bbox": [ + 130, + 286, + 480, + 335 + ], + "type": "inline_equation", + "content": "PQ_{N}^{Th}" + }, + { + "bbox": [ + 130, + 286, + 480, + 335 + ], + "type": "text", + "content": " means " + }, + { + "bbox": [ + 130, + 286, + 480, + 335 + ], + "type": "inline_equation", + "content": "PQ" + }, + { + "bbox": [ + 130, + 286, + 480, + 335 + ], + "type": "text", + "content": " for novel things classes and " + }, + { + "bbox": [ + 130, + 286, + 480, + 335 + ], + "type": "inline_equation", + "content": "PQ_{N}^{St}" + }, + { + "bbox": [ + 130, + 286, + 480, + 335 + ], + "type": "text", + "content": " stands for " + }, + { + "bbox": [ + 130, + 286, + 480, + 335 + ], + "type": "inline_equation", + "content": "PQ" + }, + { + "bbox": [ + 130, + 286, + 480, + 335 + ], + "type": "text", + "content": " for novel stuff classes. We also report the mean Intersection over Union (mIoU) for all classes to measure semantic segmentation quality." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 354, + 307, + 364 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 354, + 307, + 364 + ], + "spans": [ + { + "bbox": [ + 132, + 354, + 307, + 364 + ], + "type": "text", + "content": "4.2 P3Former-FC-CLIP Baseline" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 374, + 482, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 374, + 482, + 506 + ], + "spans": [ + { + "bbox": [ + 130, + 374, + 482, + 506 + ], + "type": "text", + "content": "As a baseline for novel-class panoptic segmentation, we construct a model from a fusion of P3Former [47] and FC-CLIP [54]. This baseline will be called P3Former-FC-CLIP (PFC). 
The baseline model takes the frozen voxel vision CLIP features as input, and the final prediction is obtained by geometric ensembling [14, 15, 24, 50, 54] of the results from the classification head " + }, + { + "bbox": [ + 130, + 374, + 482, + 506 + ], + "type": "inline_equation", + "content": "f_{cls}" + }, + { + "bbox": [ + 130, + 374, + 482, + 506 + ], + "type": "text", + "content": " and another frozen classifier based on the similarity between the average-pooled class embedding " + }, + { + "bbox": [ + 130, + 374, + 482, + 506 + ], + "type": "inline_equation", + "content": "w_{q}" + }, + { + "bbox": [ + 130, + 374, + 482, + 506 + ], + "type": "text", + "content": " and the CLIP text embedding. Following FC-CLIP [54], the same set of learnable queries was used to represent both things and stuff classes. In summary, this baseline provides a comparison against our proposed method without the multimodal feature fusion module, the unified segmentation head, and the distillation losses. More information on this baseline can be found in the supplementary material." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 525, + 228, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 525, + 228, + 536 + ], + "spans": [ + { + "bbox": [ + 132, + 525, + 228, + 536 + ], + "type": "text", + "content": "4.3 Main Results" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 545, + 482, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 545, + 482, + 640 + ], + "spans": [ + { + "bbox": [ + 130, + 545, + 482, + 640 + ], + "type": "text", + "content": "Since there are no existing methods for the 3D open-vocabulary panoptic segmentation task, we mainly compare with three methods: (1) the strong open-vocabulary baseline PFC, to fairly demonstrate the strength of our method, (2) the closed-set state-of-the-art 3D panoptic segmentation method P3Former, to understand the headroom of our method, and (3) the open-set, zero-shot state-of-the-art method for 3D semantic segmentation, OpenScene [35]. Comparisons on the nuScenes and SemanticKITTI datasets are shown in Tab. 1 and Tab. 3." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 131, + 641, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 641, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 131, + 641, + 482, + 665 + ], + "type": "text", + "content": "Results on nuScenes dataset. Table 1 shows the quantitative comparison on the validation set of the nuScenes dataset. Our method significantly outperforms" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "type": "text", + "content": "Z. Xiao et al."
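For reference, geometric ensembling in the style of [54] combines the two classifiers' probabilities as a geometric interpolation, leaning more on the frozen CLIP branch for novel classes. This is only a schematic sketch: the exponent values and names are illustrative assumptions, not the paper's or the baseline's actual settings.

```python
import torch

def geometric_ensemble(p_cls, p_clip, is_base, alpha=0.4, beta=0.8):
    # p_cls: (Q, C) probabilities from the trainable classification head f_cls
    # p_clip: (Q, C) probabilities from the frozen CLIP-similarity classifier
    # is_base: (C,) boolean mask marking base (training) classes
    w = torch.where(is_base,
                    torch.full_like(p_clip[0], alpha),
                    torch.full_like(p_clip[0], beta))
    # per-class geometric interpolation; larger exponent on novel classes
    return p_cls ** (1.0 - w) * p_clip ** w
```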
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 170, + 479, + 244 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 482, + 159 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 482, + 159 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 482, + 159 + ], + "type": "text", + "content": "Table 1: Quantitative results of panoptic segmentation on nuScenes. We compare the performance of open-vocabulary and fully supervised models. All open vocabulary models share the same randomly picked base/novel split: B12/N4. The novel things classes are bus, pedestrian and motorcycle. The novel stuff class is vegetation." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 170, + 479, + 244 + ], + "lines": [ + { + "bbox": [ + 135, + 170, + 479, + 244 + ], + "spans": [ + { + "bbox": [ + 135, + 170, + 479, + 244 + ], + "type": "table", + "html": "
<table><thead><tr><th>Model</th><th>Type</th><th>Supervision</th><th>\( PQ \)</th><th>\( PQ_{N}^{Th} \)</th><th>\( PQ_{N}^{St} \)</th><th>\( RQ \)</th><th>\( RQ_{N}^{Th} \)</th><th>\( RQ_{N}^{St} \)</th><th>\( SQ \)</th><th>\( SQ_{N}^{Th} \)</th><th>\( SQ_{N}^{St} \)</th><th>mIoU</th></tr></thead><tbody>
<tr><td>P3Former [47]</td><td>closed-set</td><td>full</td><td>75.9</td><td>85.1</td><td>82.9</td><td>84.7</td><td>89.9</td><td>95.9</td><td>89.8</td><td>94.7</td><td>86.5</td><td>76.8</td></tr>
<tr><td>OpenScene [35]</td><td>open-voc</td><td>zero-shot</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>42.1</td></tr>
<tr><td>PFC</td><td>open-voc</td><td>partial</td><td>54.8</td><td>37.3</td><td>0.5</td><td>63.6</td><td>42.1</td><td>0.8</td><td>84.2</td><td>89.3</td><td>60.4</td><td>55.5</td></tr>
<tr><td>Ours</td><td>open-voc</td><td>partial</td><td>62.0</td><td>49.6</td><td>35.2</td><td>70.9</td><td>55.6</td><td>46.0</td><td>87.0</td><td>89.1</td><td>76.7</td><td>60.1</td></tr>
</tbody></table>
", + "image_path": "56683cb3423c13f51450df98b6110b095568fbfc49c7222ff603428fa4e43207.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 151, + 313, + 460, + 399 + ], + "blocks": [ + { + "bbox": [ + 132, + 258, + 482, + 301 + ], + "lines": [ + { + "bbox": [ + 132, + 258, + 482, + 301 + ], + "spans": [ + { + "bbox": [ + 132, + 258, + 482, + 301 + ], + "type": "text", + "content": "Table 2: Performance for base classes on nuScenes. We report the performance on base classes for models in Tab. 1. A gap still exists between open and closed-set methods for base classes. We show that this is due to lack of supervision of the whole scene as P3Former achieves similar performance when only trained on base categories." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 151, + 313, + 460, + 399 + ], + "lines": [ + { + "bbox": [ + 151, + 313, + 460, + 399 + ], + "spans": [ + { + "bbox": [ + 151, + 313, + 460, + 399 + ], + "type": "table", + "html": "
<table><thead><tr><th rowspan="2">Model</th><th rowspan="2">Supervision</th><th rowspan="2">Training Data</th><th colspan="3">Base Things</th><th colspan="3">Base Stuff</th></tr>
<tr><th>\( PQ_{B}^{Th} \)</th><th>\( RQ_{B}^{Th} \)</th><th>\( SQ_{B}^{Th} \)</th><th>\( PQ_{B}^{St} \)</th><th>\( RQ_{B}^{St} \)</th><th>\( SQ_{B}^{St} \)</th></tr></thead><tbody>
<tr><td>P3Former [47]</td><td>full</td><td>base+novel</td><td>73.4</td><td>80.5</td><td>90.9</td><td>73.9</td><td>85.3</td><td>85.9</td></tr>
<tr><td>P3Former [47]</td><td>partial</td><td>base</td><td>65.2</td><td>71.3</td><td>88.0</td><td>64.2</td><td>77.4</td><td>81.8</td></tr>
<tr><td>PFC</td><td>partial</td><td>base</td><td>65.6</td><td>73.3</td><td>89.0</td><td>61.0</td><td>75.4</td><td>83.7</td></tr>
<tr><td>Ours</td><td>partial</td><td>base</td><td>66.7</td><td>73.7</td><td>89.8</td><td>69.2</td><td>82.1</td><td>83.7</td></tr>
</tbody></table>
", + "image_path": "d8337e3f981d6cefd95141df4521fae0648cabe99c68e757b8f71f25eba394c9.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 424, + 481, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 424, + 481, + 484 + ], + "spans": [ + { + "bbox": [ + 132, + 424, + 481, + 484 + ], + "type": "text", + "content": "the strong baseline PFC across all metrics. PFC works relatively well for the novel things classes, but performance on the novel stuff class collapses. This is likely because stuff classes tend to cover large parts of the scene, leading to diverse per-voxel CLIP features which may not be good representatives for their respective classes. Qualitative comparison is provided in Fig. 3." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 486, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 486, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 486, + 482, + 665 + ], + "type": "text", + "content": "To further understand the headroom of our method, we also compare our model with the closed-set P3Former. Note that the comparison here is deliberately unfair since the supervision signals are different. Compared with the closed-set P3Former, our segmentation quality " + }, + { + "bbox": [ + 132, + 486, + 482, + 665 + ], + "type": "inline_equation", + "content": "(SQ)" + }, + { + "bbox": [ + 132, + 486, + 482, + 665 + ], + "type": "text", + "content": " is good while there is a large gap on mask classification quality " + }, + { + "bbox": [ + 132, + 486, + 482, + 665 + ], + "type": "inline_equation", + "content": "(RQ)" + }, + { + "bbox": [ + 132, + 486, + 482, + 665 + ], + "type": "text", + "content": ". The gap is largely due to regressions in the novel classes, where precise supervision is not available for open-vocabulary models. For base classes, as shown in Tab. 2, the gap is relatively small except for a drop in " + }, + { + "bbox": [ + 132, + 486, + 482, + 665 + ], + "type": "inline_equation", + "content": "RQ_{B}^{Th}" + }, + { + "bbox": [ + 132, + 486, + 482, + 665 + ], + "type": "text", + "content": ". We believe the closed-set P3Former sees ground truth supervision for the entire scene, while open-set methods do not receive supervision in the 'unknown class' regions. In fact, when P3Former is only trained on base categories, the performance is worse than our proposed method. Besides the comparison with the closed-set method, we also compare with the zero-shot state-of-the-art method OpenScene [35] which does not use any labels for training. In this comparison, our model significantly outperforms OpenScene in the mIoU metric for semantic segmentation. 
Note that this comparison is not en" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 264, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 264, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Open-Vocabulary Panoptic Segmentation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 170, + 479, + 232 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 481, + 159 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 481, + 159 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 481, + 159 + ], + "type": "text", + "content": "Table 3: Quantitative results of panoptic segmentation on SemanticKITTI. We compare the performance different models. All open vocabulary models share the same randomly picked base/novel split: B14/N5. The novel things classes are bicycle and truck. The novel stuff classes are sidewalk, building and trunk." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 170, + 479, + 232 + ], + "lines": [ + { + "bbox": [ + 135, + 170, + 479, + 232 + ], + "spans": [ + { + "bbox": [ + 135, + 170, + 479, + 232 + ], + "type": "table", + "html": "
<table><thead><tr><th>Model</th><th>Type</th><th>Supervision</th><th>\( PQ \)</th><th>\( PQ_{N}^{Th} \)</th><th>\( PQ_{N}^{St} \)</th><th>\( RQ \)</th><th>\( RQ_{N}^{Th} \)</th><th>\( RQ_{N}^{St} \)</th><th>\( SQ \)</th><th>\( SQ_{N}^{Th} \)</th><th>\( SQ_{N}^{St} \)</th><th>mIoU</th></tr></thead><tbody>
<tr><td>P3Former [47]</td><td>closed-set</td><td>full</td><td>62.1</td><td>65.9</td><td>74.2</td><td>71.3</td><td>74.8</td><td>86.8</td><td>77.1</td><td>88.3</td><td>83.9</td><td>61.6</td></tr>
<tr><td>PFC</td><td>open-voc</td><td>partial</td><td>33.7</td><td>12.0</td><td>0.4</td><td>40.1</td><td>15.0</td><td>0.6</td><td>67.6</td><td>81.1</td><td>47.3</td><td>33.4</td></tr>
<tr><td>Ours</td><td>open-voc</td><td>partial</td><td>42.2</td><td>13.1</td><td>17.8</td><td>50.4</td><td>16.2</td><td>26.7</td><td>73.0</td><td>84.0</td><td>67.2</td><td>44.6</td></tr>
</tbody></table>
", + "image_path": "87db42f18713dcb53c4a14bd54942c5e524f129aad48e26945869d3ce14efe29.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 135, + 285, + 479, + 376 + ], + "blocks": [ + { + "bbox": [ + 132, + 241, + 481, + 275 + ], + "lines": [ + { + "bbox": [ + 132, + 241, + 481, + 275 + ], + "spans": [ + { + "bbox": [ + 132, + 241, + 481, + 275 + ], + "type": "text", + "content": "Table 4: Impact of each component. We evaluate the impact of each component using the base/novel split in Tab. 1. We observe that each component can provide improvements over the PCF baseline. Noticeably, " + }, + { + "bbox": [ + 132, + 241, + 481, + 275 + ], + "type": "inline_equation", + "content": "L_{V}" + }, + { + "bbox": [ + 132, + 241, + 481, + 275 + ], + "type": "text", + "content": " brings the biggest improvement." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 285, + 479, + 376 + ], + "lines": [ + { + "bbox": [ + 135, + 285, + 479, + 376 + ], + "spans": [ + { + "bbox": [ + 135, + 285, + 479, + 376 + ], + "type": "table", + "html": "
<table><thead><tr><th colspan="4">Components</th><th rowspan="2">\(PQ\)</th><th rowspan="2">\(PQ^{Th}_{N}\)</th><th rowspan="2">\(PQ^{St}_{N}\)</th><th rowspan="2">\(RQ\)</th><th rowspan="2">\(RQ^{Th}_{N}\)</th><th rowspan="2">\(RQ^{St}_{N}\)</th><th rowspan="2">\(SQ\)</th><th rowspan="2">\(SQ^{Th}_{N}\)</th><th rowspan="2">\(SQ^{St}_{N}\)</th><th rowspan="2">mIoU</th></tr>
<tr><th>QA</th><th>Fusion</th><th>\(L_{O}\)</th><th>\(L_{V}\)</th></tr></thead><tbody>
<tr><td></td><td></td><td></td><td></td><td>54.8</td><td>37.3</td><td>0.5</td><td>63.6</td><td>42.1</td><td>0.8</td><td>84.2</td><td>89.3</td><td>60.4</td><td>55.5</td></tr>
<tr><td>✓</td><td></td><td></td><td></td><td>55.5</td><td>35.7</td><td>0.4</td><td>64.0</td><td>40.8</td><td>0.7</td><td>84.3</td><td>87.4</td><td>56.5</td><td>56.6</td></tr>
<tr><td>✓</td><td>✓</td><td></td><td></td><td>56.4</td><td>38.1</td><td>0.4</td><td>65.0</td><td>43.5</td><td>0.6</td><td>84.6</td><td>87.4</td><td>61.3</td><td>56.4</td></tr>
<tr><td>✓</td><td>✓</td><td>✓</td><td></td><td>56.3</td><td>43.8</td><td>0.2</td><td>64.8</td><td>49.2</td><td>0.3</td><td>85.1</td><td>88.9</td><td>64.0</td><td>54.0</td></tr>
<tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>62.0</td><td>49.6</td><td>35.2</td><td>70.9</td><td>55.6</td><td>46.0</td><td>87.0</td><td>89.1</td><td>76.7</td><td>60.1</td></tr>
</tbody></table>
", + "image_path": "066d5c16d0c51e486d4a77afdee8c44fe6ba3f3fd74b392da69e0b32a20b5487.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 400, + 480, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 400, + 480, + 471 + ], + "spans": [ + { + "bbox": [ + 130, + 400, + 480, + 471 + ], + "type": "text", + "content": "tirely fair, as our method is trained with partial labels. Instead, the comparison is useful to understand the gap between the two types of open-vocabulary methods. The concurrent work RegionPLC [53] also reports open-vocabulary results for the semantic segmentation task on the nuScenes dataset. However, we cannot directly compare with this method since it removes one class (other-flat) and does not provide its base/novel split." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 472, + 481, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 472, + 481, + 579 + ], + "spans": [ + { + "bbox": [ + 130, + 472, + 481, + 579 + ], + "type": "text", + "content": "Results on SemanticKITTI dataset. To demonstrate the generalization ability of our method across different datasets, we report the results on SemanticKITTI dataset in Tab. 3. Overall, we observe similar patterns as on the nuScenes dataset. The baseline achieves relatively poor overall performance and struggles with the novel stuff classes. Using our architecture and loss functions, our model significantly outperforms PFC on " + }, + { + "bbox": [ + 130, + 472, + 481, + 579 + ], + "type": "inline_equation", + "content": "PQ" + }, + { + "bbox": [ + 130, + 472, + 481, + 579 + ], + "type": "text", + "content": ", with the largest margin for novel stuff classes. Note that the gap between the open-vocabulary methods (ours and PFC) and the closed-set method is larger on SemanticKITTI, likely due to the smaller dataset limiting performance." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 597, + 313, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 597, + 313, + 609 + ], + "spans": [ + { + "bbox": [ + 132, + 597, + 313, + 609 + ], + "type": "text", + "content": "4.4 Ablation Studies and Analysis" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 617, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 617, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 617, + 481, + 665 + ], + "type": "text", + "content": "To better understand the effectiveness of each component, we conduct ablation studies for each design choice and loss function on the nuScenes dataset. These results are shown in Tab. 4. We conduct five sets of experiments, starting with the PFC baseline and build upon it four ablations with different combinations." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "type": "text", + "content": "Z. Xiao et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 169, + 480, + 223 + ], + "blocks": [ + { + "bbox": [ + 130, + 114, + 482, + 160 + ], + "lines": [ + { + "bbox": [ + 130, + 114, + 482, + 160 + ], + "spans": [ + { + "bbox": [ + 130, + 114, + 482, + 160 + ], + "type": "text", + "content": "Table 5: Performance on a different split. We compare the performance with a split with 5 novel classes (B11/N5). The novel things classes are bicycle, car and construction vehicle. The novel stuff classes are terrain and man-made. Our method consistently outperforms the PFC baseline across all the metrics by a large margin." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 169, + 480, + 223 + ], + "lines": [ + { + "bbox": [ + 135, + 169, + 480, + 223 + ], + "spans": [ + { + "bbox": [ + 135, + 169, + 480, + 223 + ], + "type": "table", + "html": "
<table><thead><tr><th>Model</th><th>Type</th><th>Supervision</th><th>\( PQ \)</th><th>\( PQ_{N}^{Th} \)</th><th>\( PQ_{N}^{St} \)</th><th>\( RQ \)</th><th>\( RQ_{N}^{Th} \)</th><th>\( RQ_{N}^{St} \)</th><th>\( SQ \)</th><th>\( SQ_{N}^{Th} \)</th><th>\( SQ_{N}^{St} \)</th><th>mIoU</th></tr></thead><tbody>
<tr><td>P3Former [47]</td><td>closed-set</td><td>full</td><td>75.8</td><td>70.5</td><td>71.7</td><td>83.8</td><td>76.4</td><td>85.5</td><td>90.1</td><td>91.6</td><td>83.6</td><td>75.0</td></tr>
<tr><td>PFC</td><td>open-voc</td><td>partial</td><td>43.9</td><td>27.7</td><td>0.6</td><td>51.7</td><td>33.2</td><td>1.0</td><td>80.2</td><td>82.4</td><td>62.7</td><td>45.2</td></tr>
<tr><td>Ours</td><td>open-voc</td><td>partial</td><td>52.8</td><td>56.0</td><td>16.4</td><td>60.5</td><td>61.8</td><td>22.6</td><td>84.9</td><td>89.7</td><td>68.7</td><td>49.9</td></tr>
</tbody></table>
", + "image_path": "b7f28a71c3230e0be03f6febdfa47f433e56ac1930ff57f76671763c77f88ddc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 246, + 481, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 246, + 481, + 305 + ], + "spans": [ + { + "bbox": [ + 130, + 246, + 481, + 305 + ], + "type": "text", + "content": "Impact of query assignment. Starting from the PFC baseline model, we add our proposed fixed query assignment for stuff categories. As shown in the second row of Tab. 4, with query assignment, the overall " + }, + { + "bbox": [ + 130, + 246, + 481, + 305 + ], + "type": "inline_equation", + "content": "PQ" + }, + { + "bbox": [ + 130, + 246, + 481, + 305 + ], + "type": "text", + "content": " improves by 0.7. The performance for the novel classes drop slightly, but improvement on the base classes overcomes this for the overall PQ." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 306, + 482, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 306, + 482, + 389 + ], + "spans": [ + { + "bbox": [ + 130, + 306, + 482, + 389 + ], + "type": "text", + "content": "Impact of feature fusion. The third row of Tab. 4 shows the impact of feature fusion. Without feature fusion, our model already achieves 55.5 " + }, + { + "bbox": [ + 130, + 306, + 482, + 389 + ], + "type": "inline_equation", + "content": "PQ" + }, + { + "bbox": [ + 130, + 306, + 482, + 389 + ], + "type": "text", + "content": ", demonstrating the power of the CLIP vision features. The third row shows that the performance with feature fusion for the model input improves the overall " + }, + { + "bbox": [ + 130, + 306, + 482, + 389 + ], + "type": "inline_equation", + "content": "PQ" + }, + { + "bbox": [ + 130, + 306, + 482, + 389 + ], + "type": "text", + "content": " by 0.9. This slightly improved the overall performance, but the improvement on the novel things class is the most significant, demonstrating that the learned LiDAR features and CLIP vision features are indeed complementary for the task." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 390, + 483, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 390, + 483, + 462 + ], + "spans": [ + { + "bbox": [ + 130, + 390, + 483, + 462 + ], + "type": "text", + "content": "Impact of object-level distillation loss. The fourth row of the results in Tab. 4 shows the impact of the proposed object-level distillation loss. Note that for models with the object-level distillation loss, we remove the frozen class classification head and the ensemble in the PFC baseline, consolidating to a single class embedding head. Although the " + }, + { + "bbox": [ + 130, + 390, + 483, + 462 + ], + "type": "inline_equation", + "content": "RQ_N^{St}" + }, + { + "bbox": [ + 130, + 390, + 483, + 462 + ], + "type": "text", + "content": " slightly dips by 0.3 for the novel stuff classes, this loss can significantly improve the " + }, + { + "bbox": [ + 130, + 390, + 483, + 462 + ], + "type": "inline_equation", + "content": "RQ_N^{Th}" + }, + { + "bbox": [ + 130, + 390, + 483, + 462 + ], + "type": "text", + "content": " for the novel things class by 5.7." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 462, + 482, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 462, + 482, + 545 + ], + "spans": [ + { + "bbox": [ + 130, + 462, + 482, + 545 + ], + "type": "text", + "content": "Impact of voxel-level distillation loss. We study the impact of the voxel-level distillation loss to see if it can further improve the performance given all of our designs. The results are shown in the last row of Tab. 4. With this loss function, " + }, + { + "bbox": [ + 130, + 462, + 482, + 545 + ], + "type": "inline_equation", + "content": "PQ" + }, + { + "bbox": [ + 130, + 462, + 482, + 545 + ], + "type": "text", + "content": " significantly improves by 5.7. The improvement on the novel split is particularly large, especially for the novel stuff classes. The " + }, + { + "bbox": [ + 130, + 462, + 482, + 545 + ], + "type": "inline_equation", + "content": "PQ_N^{St}" + }, + { + "bbox": [ + 130, + 462, + 482, + 545 + ], + "type": "text", + "content": " of the novel stuff class improves from 0.2 to 35.2, which demonstrates the importance of the voxel-level supervision to the performance of the novel stuff class." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 545, + 484, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 545, + 484, + 605 + ], + "spans": [ + { + "bbox": [ + 130, + 545, + 484, + 605 + ], + "type": "text", + "content": "Performance of different splits. To validate the generalizability of our method, we conduct experiments on a different split (B11/N5) for the nuScenes dataset. As shown in Tab. 5, our proposed method consistently and significantly outperforms the strong baseline method. This again demonstrates the effectiveness of our design and the proposed loss functions." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 605, + 482, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 482, + 667 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 482, + 667 + ], + "type": "text", + "content": "Open-vocabulary exploration. In previous experiments, we follow other 3D open-vocabulary works [6,10,53] and provide analytical results on pre-defined object categories, mainly due to the limited categories in current panoptic segmentation datasets. In practice, our model goes beyond detecting these object categories: we can take class embeddings " + }, + { + "bbox": [ + 130, + 605, + 482, + 667 + ], + "type": "inline_equation", + "content": "v_{q}" + }, + { + "bbox": [ + 130, + 605, + 482, + 667 + ], + "type": "text", + "content": " in Eq. 
(1) and compute the cosine" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Open-Vocabulary Panoptic Segmentation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 134, + 115, + 218, + 169 + ], + "blocks": [ + { + "bbox": [ + 134, + 115, + 218, + 169 + ], + "lines": [ + { + "bbox": [ + 134, + 115, + 218, + 169 + ], + "spans": [ + { + "bbox": [ + 134, + 115, + 218, + 169 + ], + "type": "image", + "image_path": "98d9e5be7fd79d582fc7a56707fae5615716791e0acada95c535e0be4d8becf1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 219, + 116, + 304, + 170 + ], + "blocks": [ + { + "bbox": [ + 219, + 116, + 304, + 170 + ], + "lines": [ + { + "bbox": [ + 219, + 116, + 304, + 170 + ], + "spans": [ + { + "bbox": [ + 219, + 116, + 304, + 170 + ], + "type": "image", + "image_path": "67d6879f793a2c079aad88c809f56ee7be42a073290d78abaea033f4de792bae.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 134, + 171, + 218, + 223 + ], + "blocks": [ + { + "bbox": [ + 134, + 171, + 218, + 223 + ], + "lines": [ + { + "bbox": [ + 134, + 171, + 218, + 223 + ], + "spans": [ + { + "bbox": [ + 134, + 171, + 218, + 223 + ], + "type": "image", + "image_path": "c29b3c5ba8912dc03032330f6556c5b2b7e2609239965d6e1ae4d15a6aeb5f35.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 219, + 171, + 304, + 223 + ], + "blocks": [ + { + "bbox": [ + 219, + 171, + 304, + 223 + ], + "lines": [ + { + "bbox": [ + 219, + 171, + 304, + 223 + ], + "spans": [ + { + "bbox": [ + 219, + 171, + 304, + 223 + ], + "type": "image", + "image_path": "1bd89946be43210e51bf955c8d074dc8f6ef220a519079704c674fa2b468bd4d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 134, + 224, + 218, + 277 + ], + "blocks": [ + { + "bbox": [ + 134, + 224, + 218, + 277 + ], + "lines": [ + { + "bbox": [ + 134, + 224, + 218, + 277 + ], + "spans": [ + { + "bbox": [ + 134, + 224, + 218, + 277 + ], + "type": "image", + "image_path": "189fe77276a8fd53569ca1b701fccaabaf91e8e3d916f3622e9bd57eee51143c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 298, + 482, + 332 + ], + "lines": [ + { + "bbox": [ + 131, + 298, + 482, + 332 + ], + "spans": [ + { + "bbox": [ + 131, + 298, + 482, + 332 + ], + "type": "text", + "content": "Fig. 4: Open-vocabulary exploration. We show the novel materials/objects in blue color. The orientation of the ego vehicle is fixed in the LiDAR point visualization while the reference images come from on of the surrounding cameras of the ego vehicle." 
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 219, + 224, + 304, + 277 + ], + "blocks": [ + { + "bbox": [ + 219, + 224, + 304, + 277 + ], + "lines": [ + { + "bbox": [ + 219, + 224, + 304, + 277 + ], + "spans": [ + { + "bbox": [ + 219, + 224, + 304, + 277 + ], + "type": "image", + "image_path": "2c6392ed18f73f379dd6ae65d57e4e23b6037b79c328fbefda6b84d74c6e4ae6.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 309, + 116, + 394, + 169 + ], + "blocks": [ + { + "bbox": [ + 309, + 116, + 394, + 169 + ], + "lines": [ + { + "bbox": [ + 309, + 116, + 394, + 169 + ], + "spans": [ + { + "bbox": [ + 309, + 116, + 394, + 169 + ], + "type": "image", + "image_path": "65c583fc9c1aecf7263b48fe32370c3921ab2cdbac5ef60aa2aab84d17fbd290.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 394, + 116, + 479, + 170 + ], + "blocks": [ + { + "bbox": [ + 394, + 116, + 479, + 170 + ], + "lines": [ + { + "bbox": [ + 394, + 116, + 479, + 170 + ], + "spans": [ + { + "bbox": [ + 394, + 116, + 479, + 170 + ], + "type": "image", + "image_path": "670ad5ad40c8b587e753530c04d562565b21367f3fc399552966e167b2e48ebb.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 309, + 170, + 394, + 223 + ], + "blocks": [ + { + "bbox": [ + 309, + 170, + 394, + 223 + ], + "lines": [ + { + "bbox": [ + 309, + 170, + 394, + 223 + ], + "spans": [ + { + "bbox": [ + 309, + 170, + 394, + 223 + ], + "type": "image", + "image_path": "957ab75cf34867d2c124ea033aa633e725104e5483e822d50b0b944fa672a87f.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 394, + 170, + 479, + 223 + ], + "blocks": [ + { + "bbox": [ + 394, + 170, + 479, + 223 + ], + "lines": [ + { + "bbox": [ + 394, + 170, + 479, + 223 + ], + "spans": [ + { + "bbox": [ + 394, + 170, + 479, + 223 + ], + "type": "image", + "image_path": "64973897491e7f7083474880ff5c2fc0d4bf2ae35f9cd447c7ce3d2dfbb90046.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 309, + 224, + 394, + 277 + ], + "blocks": [ + { + "bbox": [ + 309, + 224, + 394, + 277 + ], + "lines": [ + { + "bbox": [ + 309, + 224, + 394, + 277 + ], + "spans": [ + { + "bbox": [ + 309, + 224, + 394, + 277 + ], + "type": "image", + "image_path": "9bad14e1b3f7f7c649d6d116003dabc4f014c63f585d6e6c6b356011b9700377.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 394, + 224, + 479, + 277 + ], + "blocks": [ + { + "bbox": [ + 394, + 224, + 479, + 277 + ], + "lines": [ + { + "bbox": [ + 394, + 224, + 479, + 277 + ], + "spans": [ + { + "bbox": [ + 394, + 224, + 479, + 277 + ], + "type": "image", + "image_path": "19a7b86e73f80b45740874c952f9a4126ea7596c98fa72fa60aeecac97d071e8.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 130, + 356, + 482, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 356, + 482, + 415 + ], + "spans": [ + { + "bbox": [ + 130, + 356, + 482, + 415 + ], + "type": "text", + "content": "similarity with CLIP embedding of any text. Fig. 
4 shows that we can detect novel materials/objects that are not in the predefined category list. Note that the concept of open vocabulary is very different from domain adaptation, as open vocabulary refers to the ability to deal with novel inputs in a scene while domain adaptation addresses the difference in data distributions in different scenes." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 130, + 416, + 484, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 416, + 484, + 464 + ], + "spans": [ + { + "bbox": [ + 130, + 416, + 484, + 464 + ], + "type": "text", + "content": "Limitations. Our models are only evaluated on current autonomous driving panoptic segmentation benchmarks, with limited number of category annotations. To further evaluate open-vocabulary performance, a large-scale autonomous driving benchmark with more diverse object categories is greatly desired." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 483, + 220, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 483, + 220, + 496 + ], + "spans": [ + { + "bbox": [ + 132, + 483, + 220, + 496 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 130, + 510, + 484, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 484, + 606 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 484, + 606 + ], + "type": "text", + "content": "In this paper, we present the first approach for the open-vocabulary 3D panoptic segmentation task in autonomous driving by leveraging large vision-language models. We experimentally verified that simply extending the 2D open-vocabulary segmentation method into 3D does not yield good performance, and demonstrated that our proposed model design and loss functions significantly boost performance for this task. Our method significantly outperformed the strong baseline on multiple well-established benchmarks. We hope our work can shed light on the future studies of the 3D open-vocabulary panoptic segmentation." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 131, + 617, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 617, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 131, + 617, + 482, + 666 + ], + "type": "text", + "content": "Acknowledgements. We would like to thank Mahyar Najibi, Chao Jia, Zhenyao Zhu, Yolanda Wang, Charles R. Qi, Dragomir Anguelov, Tom Ouyang, Ruichi Yu, Chris Sweeney, Colin Graber, Yingwei Li, Sangjin Lee, Weilong Yang, and Congcong Li for the help to the project." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "type": "text", + "content": "Z. Xiao et al." 
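The open-vocabulary exploration passage above reduces to a single operation: cosine similarity between the predicted class embeddings $v_{q}$ of Eq. (1) and the CLIP embedding of arbitrary query text. A minimal sketch of that scoring step, assuming the embeddings already live in CLIP's joint space; the backbone choice and function name are illustrative, not the authors' code (OpenCLIP [21] exposes an equivalent interface):

```python
import torch
import torch.nn.functional as F
import clip  # OpenAI CLIP; the paper builds on CLIP/OpenCLIP text encoders

def score_text_query(v_q: torch.Tensor, text: str, device: str = "cpu") -> torch.Tensor:
    """Cosine similarity between class embeddings v_q [Q, D] and free-form text."""
    model, _ = clip.load("ViT-B/32", device=device)  # illustrative backbone choice
    tokens = clip.tokenize([text]).to(device)
    with torch.no_grad():
        t = model.encode_text(tokens).float()        # [1, D] text embedding
    sim = F.normalize(v_q, dim=-1) @ F.normalize(t, dim=-1).T
    return sim.squeeze(-1)                           # [Q] cosine score per query
```

Queries that score higher for a free-form prompt than for every predefined category would be surfaced as novel detections, as in Fig. 4.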
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 114, + 197, + 126 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 114, + 197, + 126 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 197, + 126 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 137, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 138, + 137, + 480, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 137, + 480, + 170 + ], + "spans": [ + { + "bbox": [ + 138, + 137, + 480, + 170 + ], + "type": "text", + "content": "1. Alonso, I., Riazuelo, L., Montesano, L., Murillo, A.C.: 3d-mininet: Learning a 2d representation from point clouds for fast and efficient 3d lidar semantic segmentation. IEEE Robotics and Automation Letters 5(4), 5432-5439 (2020)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 171, + 480, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 171, + 480, + 203 + ], + "spans": [ + { + "bbox": [ + 138, + 171, + 480, + 203 + ], + "type": "text", + "content": "2. Behley, J., Garbade, M., Milioto, A., Quenzel, J., Behnke, S., Stachniss, C., Gall, J.: SemanticKITTI: A Dataset for Semantic Scene Understanding of LiDAR Sequences. In: ICCV (2019)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 203, + 451, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 203, + 451, + 213 + ], + "spans": [ + { + "bbox": [ + 138, + 203, + 451, + 213 + ], + "type": "text", + "content": "3. Bendale, A., Boult, T.: Towards open world recognition. In: CVPR (2015)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 213, + 480, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 213, + 480, + 246 + ], + "spans": [ + { + "bbox": [ + 138, + 213, + 480, + 246 + ], + "type": "text", + "content": "4. Caesar, H., Bankiti, V., Lang, A.H., Vora, S., Liong, V.E., Xu, Q., Krishnan, A., Pan, Y., Baldan, G., Beijbom, O.: nuscenes: A multimodal dataset for autonomous driving. In: CVPR (2020)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 246, + 480, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 246, + 480, + 267 + ], + "spans": [ + { + "bbox": [ + 138, + 246, + 480, + 267 + ], + "type": "text", + "content": "5. Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: ECCV (2020)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 267, + 480, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 267, + 480, + 289 + ], + "spans": [ + { + "bbox": [ + 138, + 267, + 480, + 289 + ], + "type": "text", + "content": "6. Cen, J., Yun, P., Zhang, S., Cai, J., Luan, D., Wang, M.Y., Liu, M., Tang, M.: Open-world semantic segmentation for LIDAR point clouds. In: ECCV (2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 289, + 480, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 289, + 480, + 320 + ], + "spans": [ + { + "bbox": [ + 138, + 289, + 480, + 320 + ], + "type": "text", + "content": "7. Chen, R., Liu, Y., Kong, L., Zhu, X., Ma, Y., Li, Y., Hou, Y., Qiao, Y., Wang, W.: Clip2scene: Towards label-efficient 3d scene understanding by clip. 
In: CVPR (2023)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 321, + 480, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 321, + 480, + 342 + ], + "spans": [ + { + "bbox": [ + 138, + 321, + 480, + 342 + ], + "type": "text", + "content": "8. Chen, Z., Li, B.: Bridging the domain gap: Self-supervised 3d scene understanding with foundation models. arXiv preprint arXiv:2305.08776 (2023)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 342, + 480, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 342, + 480, + 364 + ], + "spans": [ + { + "bbox": [ + 138, + 342, + 480, + 364 + ], + "type": "text", + "content": "9. Cheng, B., Schwing, A., Kirillov, A.: Per-pixel classification is not all you need for semantic segmentation. In: NeurIPS (2021)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 364, + 480, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 364, + 480, + 385 + ], + "spans": [ + { + "bbox": [ + 138, + 364, + 480, + 385 + ], + "type": "text", + "content": "10. Ding, R., Yang, J., Xue, C., Zhang, W., Bai, S., Qi, X.: Pla: Language-driven open-vocabulary 3d scene understanding. In: CVPR (2023)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 386, + 480, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 386, + 480, + 407 + ], + "spans": [ + { + "bbox": [ + 138, + 386, + 480, + 407 + ], + "type": "text", + "content": "1. Ding, Z., Wang, J., Tu, Z.: Open-vocabulary universal image segmentation with maskclip. In: ICML (2023)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 407, + 480, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 407, + 480, + 429 + ], + "spans": [ + { + "bbox": [ + 138, + 407, + 480, + 429 + ], + "type": "text", + "content": "2. Du, Y., Wei, F., Zhang, Z., Shi, M., Gao, Y., Li, G.: Learning to prompt for open-vocabulary object detection with vision-language model. In: CVPR (2022)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 429, + 480, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 429, + 480, + 449 + ], + "spans": [ + { + "bbox": [ + 138, + 429, + 480, + 449 + ], + "type": "text", + "content": "3. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for Autonomous Driving? The KITTI Vision Benchmark Suite. In: CVPR (2012)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 449, + 480, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 449, + 480, + 471 + ], + "spans": [ + { + "bbox": [ + 138, + 449, + 480, + 471 + ], + "type": "text", + "content": "4. Ghiasi, G., Gu, X., Cui, Y., Lin, T.Y.: Scaling open-vocabulary image segmentation with image-level labels. In: ECCV (2022)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 471, + 480, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 471, + 480, + 492 + ], + "spans": [ + { + "bbox": [ + 138, + 471, + 480, + 492 + ], + "type": "text", + "content": "5. Gu, X., Lin, T.Y., Kuo, W., Cui, Y.: Open-vocabulary object detection via vision and language knowledge distillation. 
ICLR (2022)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 138, + 492, + 480, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 492, + 480, + 514 + ], + "spans": [ + { + "bbox": [ + 138, + 492, + 480, + 514 + ], + "type": "text", + "content": "6. Ha, H., Song, S.: Semantic abstraction: Open-world 3d scene understanding from 2d vision-language models. In: CoRL (2022)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 138, + 514, + 480, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 514, + 480, + 535 + ], + "spans": [ + { + "bbox": [ + 138, + 514, + 480, + 535 + ], + "type": "text", + "content": "7. He, W., Jamonnak, S., Gou, L., Ren, L.: Clip-s4: Language-guided self-supervised semantic segmentation. In: CVPR (2023)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 138, + 536, + 480, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 536, + 480, + 557 + ], + "spans": [ + { + "bbox": [ + 138, + 536, + 480, + 557 + ], + "type": "text", + "content": "8. Hegde, D., Valanarasu, J.M.J., Patel, V.M.: Clip goes 3d: Leveraging prompt tuning for language grounded 3d recognition. arXiv preprint arXiv:2303.11313 (2023)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 138, + 557, + 480, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 557, + 480, + 578 + ], + "spans": [ + { + "bbox": [ + 138, + 557, + 480, + 578 + ], + "type": "text", + "content": "9. Hong, F., Zhou, H., Zhu, X., Li, H., Liu, Z.: Lidar-based panoptic segmentation via dynamic shifting network. In: CVPR (2021)" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 138, + 578, + 480, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 578, + 480, + 621 + ], + "spans": [ + { + "bbox": [ + 138, + 578, + 480, + 621 + ], + "type": "text", + "content": "20. Hu, Q., Yang, B., Xie, L., Rosa, S., Guo, Y., Wang, Z., Trigoni, N., Markham, A.: Learning semantic segmentation of large-scale point clouds with random sampling. IEEE Transactions on Pattern Analysis and Machine Intelligence 44(11), 8338-8354 (2021)" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 138, + 621, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 621, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 621, + 480, + 665 + ], + "type": "text", + "content": "21. Ilharco, G., Wortsman, M., Wightman, R., Gordon, C., Carlini, N., Taori, R., Dave, A., Shankar, V., Namkoong, H., Miller, J., Hajishirzi, H., Farhadi, A., Schmidt, L.: Openclip (Jul 2021). 
https://doi.org/10.5281/zenodo.5143773, https://doi.org/10.5281/zenodo.5143773" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Open-Vocabulary Panoptic Segmentation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 666 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 481, + 149 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 481, + 149 + ], + "type": "text", + "content": "22. Jia, C., Yang, Y., Xia, Y., Chen, Y.T., Parekh, Z., Pham, H., Le, Q., Sung, Y.H., Li, Z., Duerig, T.: Scaling up visual and vision-language representation learning with noisy text supervision. In: ICML (2021)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 149, + 481, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 149, + 481, + 171 + ], + "spans": [ + { + "bbox": [ + 132, + 149, + 481, + 171 + ], + "type": "text", + "content": "23. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. In: ICLR (2015)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 171, + 481, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 171, + 481, + 192 + ], + "spans": [ + { + "bbox": [ + 132, + 171, + 481, + 192 + ], + "type": "text", + "content": "24. Kuo, W., Cui, Y., Gu, X., Piergiovanni, A., Angelova, A.: F-vlm: Open-vocabulary object detection upon frozen vision and language models. In: ICLR (2023)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 192, + 481, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 192, + 481, + 214 + ], + "spans": [ + { + "bbox": [ + 132, + 192, + 481, + 214 + ], + "type": "text", + "content": "25. Lambert, J., Liu, Z., Sener, O., Hays, J., Koltun, V.: Mseg: A composite dataset for multi-domain semantic segmentation. In: CVPR (2020)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 214, + 481, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 214, + 481, + 235 + ], + "spans": [ + { + "bbox": [ + 132, + 214, + 481, + 235 + ], + "type": "text", + "content": "26. Li, B., Weinberger, K.Q., Belongie, S., Koltun, V., Ranftl, R.: Language-driven semantic segmentation. In: ICLR (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 235, + 481, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 235, + 481, + 267 + ], + "spans": [ + { + "bbox": [ + 132, + 235, + 481, + 267 + ], + "type": "text", + "content": "27. Li, J., He, X., Wen, Y., Gao, Y., Cheng, X., Zhang, D.: Panoptic-phenet: Towards real-time and high-precision lidar panoptic segmentation via clustering pseudo heatmap. 
In: CVPR (2022)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 267, + 481, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 267, + 481, + 300 + ], + "spans": [ + { + "bbox": [ + 132, + 267, + 481, + 300 + ], + "type": "text", + "content": "28. Li, Z., Wang, W., Xie, E., Yu, Z., Anandkumar, A., Alvarez, J.M., Luo, P., Lu, T.: Panoptic segformer: Delving deeper into panoptic segmentation with transformers. In: CVPR (2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 300, + 481, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 300, + 481, + 332 + ], + "spans": [ + { + "bbox": [ + 132, + 300, + 481, + 332 + ], + "type": "text", + "content": "29. Liang, F., Wu, B., Dai, X., Li, K., Zhao, Y., Zhang, H., Zhang, P., Vajda, P., Marculescu, D.: Open-vocabulary semantic segmentation with mask-adapted clip. In: CVPR (2023)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 332, + 481, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 332, + 481, + 354 + ], + "spans": [ + { + "bbox": [ + 132, + 332, + 481, + 354 + ], + "type": "text", + "content": "30. Lin, T.Y., Goyal, P., Girshick, R., He, K., Dollar, P.: Focal loss for dense object detection. In: ICCV (2017)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 354, + 481, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 354, + 481, + 386 + ], + "spans": [ + { + "bbox": [ + 132, + 354, + 481, + 386 + ], + "type": "text", + "content": "31. Liu, Q., Wen, Y., Han, J., Xu, C., Xu, H., Liang, X.: Open-world semantic segmentation via contrasting and clustering vision-language embedding. In: ECCV (2022)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 386, + 481, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 386, + 481, + 407 + ], + "spans": [ + { + "bbox": [ + 132, + 386, + 481, + 407 + ], + "type": "text", + "content": "32. Liu, Z., Mao, H., Wu, C.Y., Feichtenhofer, C., Darrell, T., Xie, S.: A convnet for the 2020s. In: CVPR (2022)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 407, + 481, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 407, + 481, + 418 + ], + "spans": [ + { + "bbox": [ + 132, + 407, + 481, + 418 + ], + "type": "text", + "content": "33. Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: ICLR (2019)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 418, + 481, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 418, + 481, + 439 + ], + "spans": [ + { + "bbox": [ + 132, + 418, + 481, + 439 + ], + "type": "text", + "content": "34. Ma, C., Yang, Y., Wang, Y., Zhang, Y., Xie, W.: Open-vocabulary semantic segmentation with frozen vision-language models. BMVC (2022)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 439, + 481, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 439, + 481, + 460 + ], + "spans": [ + { + "bbox": [ + 132, + 439, + 481, + 460 + ], + "type": "text", + "content": "35. Peng, S., Genova, K., Jiang, C., Tagliasacchi, A., Pollefeys, M., Funkhouser, T., et al.: Openscene: 3d scene understanding with open vocabularies. 
In: CVPR (2023)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 460, + 481, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 460, + 481, + 482 + ], + "spans": [ + { + "bbox": [ + 132, + 460, + 481, + 482 + ], + "type": "text", + "content": "36. Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: Deep learning on point sets for 3d classification and segmentation. In: CVPR (2017)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 482, + 481, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 482, + 481, + 504 + ], + "spans": [ + { + "bbox": [ + 132, + 482, + 481, + 504 + ], + "type": "text", + "content": "37. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. NeurIPS (2017)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 504, + 481, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 504, + 481, + 536 + ], + "spans": [ + { + "bbox": [ + 132, + 504, + 481, + 536 + ], + "type": "text", + "content": "38. Qin, J., Wu, J., Yan, P., Li, M., Yuxi, R., Xiao, X., Wang, Y., Wang, R., Wen, S., Pan, X., et al.: Freeseg: Unified, universal and open-vocabulary image segmentation. In: CVPR (2023)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 132, + 536, + 481, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 536, + 481, + 568 + ], + "spans": [ + { + "bbox": [ + 132, + 536, + 481, + 568 + ], + "type": "text", + "content": "39. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: ICML (2021)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 132, + 568, + 481, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 568, + 481, + 590 + ], + "spans": [ + { + "bbox": [ + 132, + 568, + 481, + 590 + ], + "type": "text", + "content": "40. Razani, R., Cheng, R., Li, E., Taghavi, E., Ren, Y., Bingbing, L.: Gp-s3net: Graph-based panoptic sparse semantic segmentation network. In: ICCV (2021)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 132, + 590, + 481, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 590, + 481, + 611 + ], + "spans": [ + { + "bbox": [ + 132, + 590, + 481, + 611 + ], + "type": "text", + "content": "41. Rozenberszki, D., Litany, O., Dai, A.: Language-grounded indoor 3d semantic segmentation in the wild. In: ECCV (2022)" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 132, + 611, + 481, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 611, + 481, + 643 + ], + "spans": [ + { + "bbox": [ + 132, + 611, + 481, + 643 + ], + "type": "text", + "content": "42. Sirohi, K., Mohan, R., Buscher, D., Burgard, W., Valada, A.: Efficientlps: Efficient lidar panoptic segmentation. IEEE Transactions on Robotics 38(3), 1894-1914 (2021)" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 132, + 643, + 481, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 643, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 132, + 643, + 481, + 666 + ], + "type": "text", + "content": "43. Takmaz, A., Fedele, E., Sumner, R.W., Pollefeys, M., Tombari, F., Engelmann, F.: Openmask3d: Open-vocabulary 3d instance segmentation. 
In: NeuRIPS (2023)" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "type": "text", + "content": "Z. Xiao et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 489 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 138 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 138 + ], + "type": "text", + "content": "44. Tang, H., Liu, Z., Zhao, S., Lin, Y., Lin, J., Wang, H., Han, S.: Searching efficient 3d architectures with sparse point-voxel convolution. In: ECCV (2020)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 138, + 481, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 138, + 481, + 160 + ], + "spans": [ + { + "bbox": [ + 130, + 138, + 481, + 160 + ], + "type": "text", + "content": "45. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: NeurIPS (2017)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 160, + 481, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 160, + 481, + 182 + ], + "spans": [ + { + "bbox": [ + 130, + 160, + 481, + 182 + ], + "type": "text", + "content": "46. Wu, W., Fuxin, L., Shan, Q.: Pointconvformer: Revenge of the point-based convolution. In: CVPR (2023)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 182, + 481, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 182, + 481, + 205 + ], + "spans": [ + { + "bbox": [ + 130, + 182, + 481, + 205 + ], + "type": "text", + "content": "47. Xiao, Z., Zhang, W., Wang, T., Loy, C.C., Lin, D., Pang, J.: Position-guided point cloud panoptic segmentation transformer. arXiv preprint (2023)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 205, + 481, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 205, + 481, + 237 + ], + "spans": [ + { + "bbox": [ + 130, + 205, + 481, + 237 + ], + "type": "text", + "content": "48. Xu, J., Zhang, R., Dou, J., Zhu, Y., Sun, J., Pu, S.: Rpvnet: A deep and efficient range-point-voxel fusion network for lidar point cloud segmentation. In: ICCV (2021)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 237, + 481, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 237, + 481, + 259 + ], + "spans": [ + { + "bbox": [ + 130, + 237, + 481, + 259 + ], + "type": "text", + "content": "49. Xu, J., De Mello, S., Liu, S., Byeon, W., Breuel, T., Kautz, J., Wang, X.: Groupvit: Semantic segmentation emerges from text supervision. 
In: CVPR (2022)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 259, + 481, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 259, + 481, + 281 + ], + "spans": [ + { + "bbox": [ + 130, + 259, + 481, + 281 + ], + "type": "text", + "content": "50. Xu, J., Liu, S., Vahdat, A., Byeon, W., Wang, X., De Mello, S.: Open-vocabulary panoptic segmentation with text-to-image diffusion models. In: CVPR (2023)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 281, + 481, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 281, + 481, + 314 + ], + "spans": [ + { + "bbox": [ + 130, + 281, + 481, + 314 + ], + "type": "text", + "content": "51. Xu, M., Zhang, Z., Wei, F., Lin, Y., Cao, Y., Hu, H., Bai, X.: A simple baseline for open-vocabulary semantic segmentation with pre-trained vision-language model. In: ECCV (2022)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 314, + 481, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 314, + 481, + 336 + ], + "spans": [ + { + "bbox": [ + 130, + 314, + 481, + 336 + ], + "type": "text", + "content": "52. Xu, S., Wan, R., Ye, M., Zou, X., Cao, T.: Sparse cross-scale attention network for efficient lidar panoptic segmentation. In: AAAI (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 336, + 481, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 336, + 481, + 357 + ], + "spans": [ + { + "bbox": [ + 130, + 336, + 481, + 357 + ], + "type": "text", + "content": "53. Yang, J., Ding, R., Wang, Z., Qi, X.: Regionplc: Regional point-language contrastive learning for open-world 3d scene understanding. In: CVPR (2024)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 357, + 481, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 357, + 481, + 380 + ], + "spans": [ + { + "bbox": [ + 130, + 357, + 481, + 380 + ], + "type": "text", + "content": "54. Yu, Q., He, J., Deng, X., Shen, X., Chen, L.C.: Convolutions die hard: Open-vocabulary segmentation with single frozen convolutional clip. In: NeurIPS (2023)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 380, + 481, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 380, + 481, + 402 + ], + "spans": [ + { + "bbox": [ + 130, + 380, + 481, + 402 + ], + "type": "text", + "content": "55. Zhang, J., Dong, R., Ma, K.: Clip-fo3d: Learning free open-world 3d scene representations from 2d dense clip. In: ICCV (2023)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 130, + 402, + 481, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 402, + 481, + 413 + ], + "spans": [ + { + "bbox": [ + 130, + 402, + 481, + 413 + ], + "type": "text", + "content": "56. Zhou, C., Loy, C.C., Dai, B.: Extract free dense labels from clip. In: ECCV (2022)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 413, + 481, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 413, + 481, + 435 + ], + "spans": [ + { + "bbox": [ + 130, + 413, + 481, + 435 + ], + "type": "text", + "content": "57. Zhou, Z., Lei, Y., Zhang, B., Liu, L., Liu, Y.: Zegclip: Towards adapting clip for zero-shot semantic segmentation. 
In: CVPR (2023)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 130, + 435, + 481, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 435, + 481, + 456 + ], + "spans": [ + { + "bbox": [ + 130, + 435, + 481, + 456 + ], + "type": "text", + "content": "58. Zhou, Z., Zhang, Y., Foroosh, H.: Panoptic-polarnet: Proposal-free lidar point cloud panoptic segmentation. In: CVPR (2021)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 130, + 456, + 481, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 456, + 481, + 489 + ], + "spans": [ + { + "bbox": [ + 130, + 456, + 481, + 489 + ], + "type": "text", + "content": "59. Zou, X., Dou, Z.Y., Yang, J., Gan, Z., Li, L., Li, C., Dai, X., Behl, H., Wang, J., Yuan, L., et al.: Generalized decoding for pixel, image, and language. In: CVPR (2023)" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 263, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Open-Vocabulary Panoptic Segmentation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/1ec33038-4034-4272-be45-88734c621c33_content_list.json b/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/1ec33038-4034-4272-be45-88734c621c33_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..cc90c24ec87137db6ab1ad9e502b2211269b90c8 --- /dev/null +++ b/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/1ec33038-4034-4272-be45-88734c621c33_content_list.json @@ -0,0 +1,1801 @@ +[ + { + "type": "text", + "text": "3D Reconstruction of Objects in Hands without Real World 3D Supervision", + "text_level": 1, + "bbox": [ + 276, + 140, + 727, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Aditya Prakash, Matthew Chang, Matthew Jin, Ruisen Tu, and Saurabh Gupta", + "bbox": [ + 215, + 210, + 785, + 227 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Illinois Urbana-Champaign \n{adityap9,mc48,mjin11,ruisent2,saurabhg}@illinois.edu \nhttps://bit.ly/WildH0I", + "bbox": [ + 292, + 239, + 705, + 281 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. Prior works for reconstructing hand-held objects from a single image train models on images paired with 3D shapes. Such data is challenging to gather in the real world at scale. Consequently, these approaches do not generalize well when presented with novel objects in in-the-wild settings. While 3D supervision is a major bottleneck, there is an abundance of a) in-the-wild raw video data showing hand-object interactions and b) synthetic 3D shape collections. In this paper, we propose modules to leverage 3D supervision from these sources to scale up the learning of models for reconstructing hand-held objects. 
Specifically, we extract multiview 2D mask supervision from videos and 3D shape priors from shape collections. We use these indirect 3D cues to train occupancy networks that predict the 3D shape of objects from a single RGB image. Our experiments in the challenging object generalization setting on in-the-wild MOW dataset show $11.6\\%$ relative improvement over models trained with 3D supervision on existing datasets.", + "bbox": [ + 261, + 311, + 738, + 518 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: hand-held objects $\\cdot$ shape priors $\\cdot$ multiview supervision", + "bbox": [ + 261, + 532, + 723, + 546 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 217, + 569, + 375, + 585 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "While 3D reconstruction of hand-held objects is important for AR/VR [4,20] and robot learning applications [39,40,47,48,68,71], lack of 3D supervision outside of lab settings has made it challenging to produce models that work in the wild. This paper develops techniques to improve the generalization capabilities of single image hand-held object reconstruction methods by extracting supervision from in-the-wild videos & synthetic shape collections showing hand-object interactions.", + "bbox": [ + 212, + 597, + 787, + 686 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Collecting image datasets with ground truth 3D shapes for hand-held objects is hard. Any visual scanning setups (via multiple RGB/RGB-D cameras or motion capture) require full visibility of the object which is not available. Synthesizing realistic hand-object interaction is an open problem in itself [28,31,49,65]. Manual alignment of template shapes [5] is expensive, yet only approximate. Thus, there is very little in-the-wild real-world data with ground truth 3D shapes for hand-held objects. And while many past works have designed expressive models to predict shapes of hand-held objects [22,31,73], they are all held back due to the limited amount of real-world 3D data available for training and suffer from unsatisfactory performance on novel objects encountered in the wild.", + "bbox": [ + 212, + 688, + 787, + 839 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9dc5bf417c7bb6edace9bbb9a7d2dbc8cd263674fe203c5e894bd83df4b18f80.jpg", + "image_caption": [ + "Fig. 1: We propose modules to extract supervision from in-the-wild videos (Sec. 3.2) & learn shape priors from 3D object collections (Sec. 3.3), to train occupancy networks which predict the 3D shapes of hand-held objects from a single image. This circumvents the need for paired real world 3D shape supervision used in existing works [22, 73]." + ], + "image_footnote": [], + "bbox": [ + 218, + 142, + 781, + 383 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "While in-the-wild images with paired 3D shapes are rare, there are a) plenty of in-the-wild videos containing multiple views of hand-held objects [12, 17] (Fig. 1), b) large catalogues of 3D object shapes [6] (Fig. 1). Shape collections provide 3D supervision but lack realistic hand grasps, videos showcase realistic hand-object interaction but don't provide direct 3D supervision. 
Either by itself seems insufficient, but can we combine supervision from these diverse sources to improve generalization of single-image hand-held object reconstruction methods?", + "bbox": [ + 212, + 487, + 787, + 595 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Let's consider each cue one at a time. While videos show multiple views of the object, we unfortunately don't know the relative object pose in the different views. Automatically extracting the object pose using structure from motion techniques, e.g. COLMAP [56], doesn't work due to an insufficient number of feature matches on the object of interaction. We sidestep this problem by using hand pose as a proxy for object pose (Fig. 2). This is based on the observation that humans rarely conduct in-hand manipulation in pick & place tasks involving rigid objects. Thus, if we assume that the hand and the object are rigidly moving together, then the relative 6 DoF pose of the hand between pairs of frames reveals the relative 6 DoF pose of the object. This reduces the SfM problem to an easier setting where the motion is known. Specifically, we use the off-the-shelf FrankMocap system [54] to obtain the 6 DoF pose for the hand and consequently the object's. We then use our proposed 2D mask guided 3D sampling module (Sec. 3.2) to generate 3D supervision for the object shape using object segmentation masks (Fig. 2). This lets us train on objects from 144 different categories, whereas most methods currently train on only a handful of categories $(< 20)$ .", + "bbox": [ + 212, + 598, + 789, + 843 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "A. Prakash et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "While this works well for unoccluded parts of the object, this does not generate reliable supervision for parts of the object that are occluded by the hand (Fig. 1). This brings us to the 3D shape catalogues, which we use to extract shape priors. This enables the model to learn to output contiguous shapes even when the object is interrupted by the hand in the image, e.g. it can hallucinate a handle for a jug even when it is covered by the hand, because jugs typically have one. We adopt an adversarial training framework [16] to train a discriminator to differentiate between real shapes (from ObMan [22]) and shapes predicted from the model (Fig. 3). Unlike prior works [67] which train the discriminator on 3D inputs, we instead propose a 2D slice-based 3D discriminator (Sec. 3.3), which is computationally efficient and learns better fine-grained shape information.", + "bbox": [ + 212, + 146, + 787, + 313 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our overall framework consists of an occupancy network [43] that predicts the 3D shape of hand-held objects from a single image. We train this model on sequences curated from the VISOR dataset [13] and use the ObMan dataset [22] to build the shape prior. Training on diverse real world data outside of lab settings, enabled by our innovations, leads our model (HORSE) to good generalization performance. 
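The hand-as-proxy argument above is plain rigid-transform composition: if the object moves rigidly with the hand, the relative wrist pose between two frames is also the relative object pose. A small sketch under exactly that rigidity assumption, with rotation/translation pairs standing in for FrankMocap's wrist pose output (function and argument names are ours):

```python
import numpy as np

def relative_object_pose(R1, t1, R2, t2):
    """Relative rigid transform taking frame-1 points to frame 2, given per-frame
    hand (wrist) poses T_i = [R_i | t_i] in the camera frame, assuming the object
    moves rigidly with the hand (no in-hand manipulation)."""
    R_rel = R2 @ R1.T            # T_rel = T2 @ inv(T1); inv(R) = R.T for rotations
    t_rel = t2 - R_rel @ t1
    return R_rel, t_rel
```

This reduction is what lets per-frame FrankMocap output stand in for the camera poses that COLMAP fails to recover on the object of interaction.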
HORSE outperforms previous state-of-the-art models by $11.6\\%$ in the challenging object generalization setting on MOW [5].", + "bbox": [ + 212, + 314, + 787, + 421 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 215, + 450, + 387, + 467 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Reconstructing objects in hands: Several works [9, 10, 22, 31, 73, 77] have trained expressive architectures for predicting 3D shape from a single image using paired real world 3D supervision. Fitting object templates [5, 21] or learned 3D shapes [14, 25, 72, 74] to videos using appearance cues [5, 14, 21, 25] or geometric priors [72, 74] have also been explored. The most relevant work to ours is [73], which uses paired 3D supervision from synthetic [22] and small-scale real-world datasets to predict 3D shape from a single image. However, it does not generalize to novel object categories in the wild due to limited 3D supervision. Instead, we train our model on diverse object categories from in-the-wild videos by extracting multiview 2D supervision and learning shape priors from existing datasets, without any real-world 3D supervision. Note that our setting involves a single image input at test time and we use in-the-wild videos for training only.", + "bbox": [ + 212, + 489, + 787, + 672 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Hand-Object datasets with 3D object models: Existing real-world hand-object datasets with 3D annotations are captured in lab settings and contain limited variation in objects, e.g. HO3D [18]:10, H2O [32]:8, FPHA [15]:4, FreiHAND [81]:35, ContactDB [2]:50, ContactPose [3]:25, DexYCB [8]:20, GRAB [58]:51, HOI4D [34]: 16 object categories. Collecting datasets with ground truth 3D shapes is difficult to scale since it often requires visual scanning setups (multiple cameras or motion capture). Synthesising realistic hand-object interaction is an open problem in itself [28, 31, 49, 65]. In this work, we curate sequences from in-the-wild VISOR dataset containing 144 object categories and design modules to extract supervision for training occupancy networks. The closest to ours is MOW with 120 objects that we only use to test models to assess generalization.", + "bbox": [ + 212, + 672, + 787, + 840 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "3D Reconstruction of Objects in Hands without Real World 3D Supervision", + "bbox": [ + 225, + 114, + 730, + 128 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Hand-Object Interactions in the wild: There is a growing interest in understanding hands and how they interact with objects around them. Researchers have collected datasets [8, 18, 19, 22, 32, 34, 58] and trained models for detecting & segmenting hands and associated objects of interaction [13, 57, 62, 63]. Recognizing what hands are doing in images [7, 46, 79] is also relevant: through grasp classification [31], 2D pose estimation [51, 80], and more recently 3D shape and pose estimation [21, 22, 53, 54, 61, 73] for both hands and objects in contact.", + "bbox": [ + 212, + 146, + 787, + 252 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3D from single image without direct 3D supervision. Several works relax the need for direct 3D supervision by incorporating auxiliary shape cues during training, e.g. 
multi-view consistency in masks [64], depth from a single image [26, 37, 78] or stereo [24], appearance [11, 27, 60, 76]. These have been applied to reconstruction of category specific [27, 29, 30, 37] as well as generic objects [11, 75, 76]. However, directly applying these approaches to hand-held objects in the wild poses several challenges, e.g. unknown camera, novel object categories, heavy occlusion, inaccurate depth estimates. In this work, we propose modules to extract supervision from in-the-wild videos using object masks [13] & hand pose [54] and learn priors from synthetic collections of hand-held objects [22].", + "bbox": [ + 212, + 252, + 789, + 402 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Approach", + "text_level": 1, + "bbox": [ + 215, + 424, + 346, + 441 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We propose a novel framework for training 3D shape predictors from a single image without using any real world 3D supervision. Following prior work [73], we use implicit shape representation [43, 45] for 3D objects.", + "bbox": [ + 212, + 453, + 787, + 500 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Preliminaries", + "text_level": 1, + "bbox": [ + 215, + 518, + 372, + 532 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Consider the recent AC-SDF model for this task from Ye et al. [73]. Given an input RGB image, AC-SDF uses a neural network to predict the SDF of 3D points. The prediction is done in the hand coordinate frame obtained using FrankMocap [54], which outputs (a) hand articulation parameters $\\theta^a$ (45 dimensional MANO hand pose [52]), (b) global rotation $\\theta^w$ of the wrist joint w.r.t. camera, (c) weak perspective camera $\\theta^c$ , with scale factor $s$ & 2D translation $(t_x, t_y)$ , which is converted into a full perspective camera $K$ . These can be used to project a 3D point $\\mathbf{x}$ into the image ( $f$ is the focal length) as $\\mathbf{x}_p = K[T_{\\theta^w} \\mathbf{x} + (t_x, t_y, f / s)]$ .", + "bbox": [ + 212, + 540, + 787, + 662 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given a 3D point $\\mathbf{x}$ & image $I$ , AC-SDF conditions the SDF prediction on: (a) global image features from a ResNet-50 [23], (b) pixel-aligned features [55] from intermediate layers of ResNet-50 at the projection $\\mathbf{x}_p$ of $\\mathbf{x}$ in the image, (c) hand articulation features obtained by representing $\\mathbf{x}$ in the coordinate frame of 15 hand joints. This is realized as $\\mathbf{s} = \\mathcal{F}(\\mathbf{x}; I, \\theta, K)$ . Training $\\mathcal{F}$ requires sampling 3D points $x$ around the object and corresponding SDF values $s$ ; $\\theta = (\\theta^a, \\theta^w, \\theta^c, K)$ are estimated from FrankMocap.", + "bbox": [ + 212, + 662, + 787, + 768 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 2D Mask Guided 3D Sampling", + "text_level": 1, + "bbox": [ + 215, + 787, + 516, + 801 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Training models with implicit shape representation requires supervision in the form of occupancy [43] or SDF [45] for 3D points sampled inside and outside", + "bbox": [ + 212, + 809, + 785, + 840 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "A. 
Prakash et al.", + "bbox": [ + 271, + 114, + 385, + 128 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/eeb1bf8a1e9150a7b510268e705094ab2cc3cfc7aeadb42869a95d2716011ca5.jpg", + "image_caption": [ + "a) Unposed video frames" + ], + "image_footnote": [], + "bbox": [ + 218, + 148, + 439, + 218 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1c84f74b3d35fddbd5f39ed95b2a1d8e270b2973065326a8c6afada6e67ca2a1.jpg", + "image_caption": [ + "b) Hand pose as proxy for object pose", + "Fig.2: Registering objects via hand pose and 2D Mask guided 3D sampling. (a) Consider unposed frames from in-the-wild videos. (b) We use hand pose from FrankMocap [54] as a proxy for object pose, thereby registering the different views. (c) We then use 2D object masks for labeling 3D points with occupancy (Sec. 3.2). 3D points that project into the object mask in all views are considered as occupied (green triangles), all other points are considered unoccupied (red crosses). (3D object in the figure is for visualization only, not used for sampling.)" + ], + "image_footnote": [], + "bbox": [ + 217, + 227, + 439, + 285 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/f75f185eb79dea1141bdfcdb7032ffbe35dacf6c0838cc49f55a5dc82e2a8e9d.jpg", + "image_caption": [ + "c) Multi-view supervision from posed images" + ], + "image_footnote": [], + "bbox": [ + 450, + 147, + 787, + 284 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "the object. Note that the balanced sampling of points inside and outside the object is an important consideration for training good predictors. While existing approaches [22, 31, 73] on this task use datasets with paired 3D supervision (3D object shape corresponding to 2D image), we operate in in-the-wild settings which do not contain 3D supervision. Instead, we propose a 2D mask guided 3D sampling strategy to obtain occupancy labels for training.", + "bbox": [ + 212, + 441, + 787, + 532 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Consider multiple views $\\{I_1, \\ldots, I_n\\}$ of a hand-held object (Fig. 2), along with their masks $\\{M_1, \\ldots, M_n\\}$ . We can sample points $\\mathbf{x}$ in 3D space and project them into different views. Any point $x$ which projects into the object mask in all views is considered as occupied whereas if it projects outside the mask in even one of the views, it is considered as unoccupied. Thus, we get occupancy labels for a point $\\mathbf{x}$ as $\\mathbf{s}^{gt} = \\cap_{i=1}^{n} M_i^{\\mathbf{x}_{p_i}}$ . Here, $M_i^{\\mathbf{x}_{p_i}} = 1$ if $x_{p_i}$ lies inside the mask $M_i$ & 0 otherwise. Note that it is not possible to obtain SDF values in this manner, since distance to the object surface cannot be estimated in the absence of 3D objects models. While we can obtain 3D occupancy labels using this strategy, there are two important considerations: camera poses are unknown (required for projection) & how to balance the sampling of points inside & outside the object.", + "bbox": [ + 212, + 535, + 787, + 700 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Camera pose: We assume that the hand is rigidly moving with the object. This is not an unreasonable assumption, as humans rarely do in-hand manipulation in pick & place tasks involving small rigid objects. Thus, the relative pose of hand between different views reveals the relative pose of the object. 
This lets us use the hand pose predicted by FrankMocap $\\{\\theta_1,\\dots ,\\theta_n\\}$ to register the different views.", + "bbox": [ + 212, + 702, + 787, + 777 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Balanced sampling: In the absence of 3D object models, a natural choice is to sample points uniformly in 3D space. However, this leads to most points lying outside the object because the object location is unknown. Instead, we sample points in the hand coordinate frame. Consider the total number of points to be $q$ .", + "bbox": [ + 212, + 779, + 787, + 839 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "3D Reconstruction of Objects in Hands without Real World 3D Supervision", + "bbox": [ + 225, + 114, + 730, + 128 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We adopt several strategies for balanced sampling for points inside $(s^{gt} = 1)$ and outside the object $(s^{gt} = 0)$ . We uniformly sample $q / 2$ 3D points $\\mathbf{x} \\in \\mathbb{R}^3$ in the normalized hand coordinate frame and project these into all the available views. Since all these $q / 2$ points may not be occupied, we use rejection sampling to repeat the procedure, for a maximum of $t = 50$ times or until we get $q / 2$ occupied points. Also, all points projecting into the hand mask in all views and vertices of the MANO [53] hand are labeled as unoccupied.", + "bbox": [ + 212, + 146, + 787, + 252 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Formally, for images $\\{I_1,\\ldots ,I_n\\}$ with object masks $\\{M_1,\\dots ,M_n\\}$ , hand masks $\\{H_{1},\\ldots ,H_{n}\\}$ and MANO vertices $\\{V_{1},\\ldots ,V_{n}\\}$ , $\\mathbf{s}^{gt}$ for $\\mathbf{x}$ is:", + "bbox": [ + 212, + 252, + 785, + 284 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf{s}^{gt} = \\left\\{ \\begin{array}{ll} 1 & \\text{if } \\cap_{i=1}^{n} M_{i}^{\\mathbf{x}_{p_{i}}} \\text{ and } \\cap_{i=1}^{n} \\neg H_{i}^{\\mathbf{x}_{p_{i}}} \\text{ and } \\cup_{i=1}^{n} \\neg V_{i}^{\\mathbf{x}} \\\\ 0 & \\text{otherwise} \\end{array} \\right. \\tag{1}\n$$\n", + "text_format": "latex", + "bbox": [ + 295, + 295, + 787, + 335 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\mathbf{x}_{p_i}$ is the projection of $\\mathbf{x}$ , $M_{i}^{\\mathbf{x}_{p_i}} = 1$ if $x_{p_i}$ lies inside $M_{i}$ , $H_{i}^{\\mathbf{x}_{p_i}} = 1$ if $x_{p_i}$ lies inside $H_{i}$ , $V_{i}^{\\mathbf{x}} = 1$ if $\\mathbf{x}$ belongs to $V_{i}$ and $\\neg$ is the logical negation operator.", + "bbox": [ + 212, + 345, + 784, + 378 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Note that, due to hand occlusions and errors in FrankMocap predictions, it is possible that some 3D points belonging to the object are not projected into the object masks but we do not want to label these points as unoccupied. So we disregard points which project onto the object mask in some views and hand mask in other views as these points could belong to the object due to hand occlusion.", + "bbox": [ + 212, + 378, + 787, + 452 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "This is reminiscent of the visual hull algorithm [33, 42], which generates a 3D reconstruction by carving out space that projects outside the segmentation in any view. Visual hull algorithms need multiple views at test time to generate any output. 
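Eq. (1) and the rejection-sampling procedure above translate almost directly into code. A sketch, treating `project` and the per-view masks as given helpers; the disregarding of points that fall on the object mask in some views and the hand mask in others, as well as image-bounds checks, is omitted for brevity:

```python
import numpy as np

def occupancy_label(x, views):
    """Eq. (1): x is occupied iff it projects inside the object mask in every view,
    inside a hand mask in no view, and is not a MANO vertex in all views.
    Each view supplies (project, obj_mask, hand_mask, mano_vertices)."""
    in_obj, in_hand, is_vert = [], [], []
    for project, M, H, V in views:
        u, v = project(x)                                   # 3D point -> pixel
        in_obj.append(bool(M[int(v), int(u)]))
        in_hand.append(bool(H[int(v), int(u)]))
        is_vert.append(bool(np.isclose(V, x, atol=1e-3).all(axis=1).any()))
    return int(all(in_obj) and not any(in_hand) and not all(is_vert))

def sample_occupied(views, q, t_max=50, seed=0):
    """Rejection sampling for q/2 occupied points in the normalized hand frame."""
    rng, kept = np.random.default_rng(seed), []
    for _ in range(t_max):                                  # at most t = 50 rounds
        if len(kept) >= q // 2:
            break
        pts = rng.uniform(-1.0, 1.0, size=(q // 2, 3))
        kept += [p for p in pts if occupancy_label(p, views)]
    return kept[: q // 2]
```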
In contrast, we are doing this at training time to obtain supervision for $\mathcal{F}(\mathbf{x};I_1,\theta_1,K_1)$, which makes predictions from a single view.", + "bbox": [ + 212, + 453, + 787, + 527 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training: We use cross-entropy loss (CE) to train $\mathcal{F}$ using ground truth $\mathbf{s}^{gt}$:", + "bbox": [ + 212, + 527, + 771, + 545 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\mathcal{L}_{\text{visual-hull}} = \operatorname{CE}(\mathcal{F}(\mathbf{x}), \mathbf{s}^{gt}) \tag{2}\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 556, + 787, + 574 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To further regularize training, we also encourage the occupancy predictions from different views to be consistent with each other. Since our predictions are already in the hand coordinate frame, which is common across all views, this can be done by minimizing $\mathcal{L}_{\mathrm{consistency}}$ for different views $i$ & $j$ of the same object.", + "bbox": [ + 212, + 584, + 787, + 646 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\mathcal{L}_{\text{consistency}} = \sum_{\mathbf{x} \in \mathbb{R}^{3},\, i \neq j} \operatorname{CE}\left(\mathcal{F}(\mathbf{x}; I_{i}, \theta_{i}, K_{i}), \mathcal{F}(\mathbf{x}; I_{j}, \theta_{j}, K_{j})\right) \tag{3}\n$$\n", + "text_format": "latex", + "bbox": [ + 295, + 657, + 787, + 691 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.3 2D Slice based 3D Discriminator as Shape Prior", + "text_level": 1, + "bbox": [ + 214, + 723, + 661, + 739 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We adopt an adversarial training framework [16] to build a prior on shapes of hand-held objects and use it to supervise the training of the occupancy prediction function $\mathcal{F}(\mathbf{x};I_1,\theta_1^a,\theta_1^w,K_1)$. As such a prior can be challenging to hand-craft, we build it in a data-driven way. We use a 3D shape repository from a synthetic dataset [22], which contains more than $2.5\mathrm{K}$ hand-held objects, to learn the prior. Specifically, we train a discriminator $\mathcal{D}$ to differentiate between 3D shapes from", + "bbox": [ + 212, + 750, + 787, + 842 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "A. Prakash et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8654734017afa8718f878ab951a3c1f351b22d038336a8dfdb8e263c60d0a8d1.jpg", + "image_caption": [ + "Fig. 3: 2D slice based 3D discriminator. We learn data-driven 3D shape priors using hand-held objects from the ObMan dataset. We sample planes through the object (shown above in blue), resulting in a 2D cross-section map. We pass occupancy predictions on points from these cross-sections through a discriminator which tries to distinguish cross-sections of predicted 3D shapes from cross-sections of ObMan objects (Sec. 3.3)." + ], + "image_footnote": [], + "bbox": [ + 289, + 143, + 728, + 252 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ObMan [22] and generated shapes as predicted by $\mathcal{F}$. 
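As a rough sketch of how the supervision terms in Eqs. (2)-(3) could be written in PyTorch; treating view $j$'s prediction as a detached soft target, and using binary cross-entropy over sigmoid occupancy probabilities, are our assumptions rather than the paper's released training code.

```python
import torch
import torch.nn.functional as F

def visual_hull_loss(occ_pred, s_gt):
    # Eq. (2): cross-entropy between predicted occupancy and mask-derived labels.
    return F.binary_cross_entropy(occ_pred, s_gt)

def consistency_loss(occ_i, occ_j):
    # Eq. (3): predictions for the same hand-frame points under views i and j
    # should agree; view j's prediction is treated as a (detached) soft target.
    return F.binary_cross_entropy(occ_i, occ_j.detach())

# Toy usage with random stand-ins for F(x; I, theta, K) outputs.
occ_i = torch.sigmoid(torch.randn(1024))
occ_j = torch.sigmoid(torch.randn(1024))
s_gt = torch.randint(0, 2, (1024,)).float()
loss = visual_hull_loss(occ_i, s_gt) + consistency_loss(occ_i, occ_j)
```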
We derive supervision for $\mathcal{F}$ by encouraging it to predict shapes that are real as per $\mathcal{D}$.", + "bbox": [ + 214, + 375, + 785, + 407 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "A natural choice is to train the discriminator with 3D input, e.g. an $N \times N \times N$ cube in 3D voxel space [67]. One way to do this is to sample $N^3$ 3D points in the hand coordinate frame and run a forward pass through $\mathcal{F}$ to get the occupancy for each of these points. However, this is computationally expensive and often leads to a large imbalance as most points lie outside the object (we ablate this in Sec. 4.3). Instead, we propose a novel 2D slice based 3D discriminator which operates on arbitrary 2D slices. These are computed by taking the cross-section of 2D planes with 3D shapes and sampling 3D points that lie on these 2D cross-sections. The key intuition here is that the discriminator sees different randomly sampled 2D slices during the course of training, which helps it to learn fine-grained shape information. E.g., for a sphere, all cross-sections are circular, but for a cylinder, most are oval. This helps distinguish between different 3D shapes.", + "bbox": [ + 212, + 409, + 787, + 592 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Sampling 2D slices: There are several important considerations in sampling 2D slices. First, uniformly sampling 2D planes often leads to most points lying outside the object, which is not useful for training the discriminator. Instead, we sample 2D planes that pass through the origin in the hand coordinate system. Since the objects are in contact with the hand, the sampled points are more likely to encompass the object. Then, we rotate the sampled 2D planes by arbitrary angles so that they are not axis-aligned, to better capture fine-grained shape information. We ablate all these design choices in Sec. 4.3. This sampling function $\mathcal{Z}$ results in a set of 2D planes on which 3D points are uniformly sampled.", + "bbox": [ + 212, + 594, + 787, + 731 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Training: We pass the sampled points from 2D slices of the generated 3D shape through $\mathcal{F}$ to get the corresponding occupancy values $S^{\mathrm{gen}}$. This represents the generated 3D shape. We adopt the same strategy for representing 3D shapes from ObMan (used as real shapes) but use the predictions $S^{\mathrm{real}}$ of the occupancy network overfitted on ObMan. As they come from an overfitted model, they generally match the ground truth slices well, but at the same time are soft and prevent the discriminator from cheating.", + "bbox": [ + 212, + 734, + 787, + 840 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "3D Reconstruction of Objects in Hands without Real World 3D Supervision", + "bbox": [ + 225, + 113, + 732, + 130 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 114, + 784, + 126 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/33c8327e7f8180bd9541b980893b45a724301ee1c484b12fcda5dc9b03c4dad0.jpg", + "image_caption": [ + "Fig. 4: VISOR visualizations. Using existing hand pose estimation techniques [54], we are able to track the objects in relation to hands through time in in-the-wild videos. We visualize these tracks along with object masks from the VISOR dataset [13]. This form of data, where objects move rigidly relative to hands, is used to train our model to learn the 3D shape of hand-held objects."
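A minimal sketch of one way the slice-sampling function $\mathcal{Z}$ could be realized: fixed-resolution grids on randomly rotated planes through the hand-frame origin. The resolution, extent, and QR-based random rotation below are illustrative choices, not the paper's exact procedure.

```python
import numpy as np

def sample_slice(res=32, extent=1.0, rng=None):
    """Return (res*res, 3) points on one randomly rotated plane through the origin."""
    rng = rng or np.random.default_rng()
    # Uniform grid on the canonical plane z = 0, centred at the hand-frame origin.
    lin = np.linspace(-extent, extent, res)
    u, v = np.meshgrid(lin, lin)
    pts = np.stack([u.ravel(), v.ravel(), np.zeros(res * res)], axis=1)
    # Random rotation so slices are not axis-aligned (one of the ablated choices).
    q, _ = np.linalg.qr(rng.normal(size=(3, 3)))
    q *= np.sign(np.linalg.det(q))  # keep a proper rotation, det(q) = +1
    return pts @ q.T
```

Evaluating $\mathcal{F}$ at such points for a generated shape (and the overfitted ObMan network at the same points for a real shape) yields the $S^{\mathrm{gen}}$ / $S^{\mathrm{real}}$ inputs described above.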
+ ], + "image_footnote": [], + "bbox": [ + 217, + 143, + 785, + 219 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We train the discriminator $\\mathcal{D}$ to differentiate between $S^{\\mathrm{gen}}$ & $S^{\\mathrm{real}}$ using the least squares formulation [41] for discriminator loss. We derive supervision for $\\mathcal{F}$ by computing gradients through $\\mathcal{D}$ on the occupancy values at the sampled points to maximize the realism of the generated shapes.", + "bbox": [ + 212, + 327, + 787, + 388 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {a d v}} ^ {\\mathcal {D}} = [ \\mathcal {D} (S ^ {\\mathrm {r e a l}}) - 1 ] ^ {2} + [ \\mathcal {D} (S ^ {\\mathrm {g e n}}) ] ^ {2} \\\\ \\mathcal {L} _ {\\mathrm {a d v}} ^ {\\mathcal {F}} = [ \\mathcal {D} (S ^ {\\mathrm {g e n}}) - 1 ] ^ {2} \\\\ \\mathcal {L} _ {\\text {s h a p e - p r i o r}} = \\lambda_ {f} \\mathcal {L} _ {\\text {a d v}} (\\mathcal {F}) + \\lambda_ {d} \\mathcal {L} _ {\\text {a d v}} (\\mathcal {D}) \\tag {4} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 352, + 396, + 787, + 454 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.4 Training Details", + "text_level": 1, + "bbox": [ + 215, + 473, + 397, + 488 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We train $\\mathcal{F}\\& \\mathcal{D}$ in an alternating manner with 2 iterations of $F$ for every iteration of $D$ . The total loss for training our framework is:", + "bbox": [ + 212, + 497, + 785, + 527 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\mathcal {F}} = \\lambda_ {v} \\mathcal {L} _ {\\text {v i s u a l - h u l l}} + \\lambda_ {c} \\mathcal {L} _ {\\text {c o n s i s t e n c y}} + \\lambda_ {f} \\mathcal {L} _ {\\text {a d v}} ^ {\\mathcal {F}} \\\\ \\mathcal {L} _ {\\mathcal {D}} = \\lambda_ {d} \\mathcal {L} _ {\\mathrm {a d v}} ^ {\\mathcal {D}} \\tag {5} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 536, + 785, + 574 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Following standard practice [73], we pretrain on synthetic ObMan. We train our model jointly on ObMan (3D supervision, shape priors) & VISOR (2D supervision) with a dataset ratio of ObMan:VISOR as 1:2. We use batch size of 64, learning rate of 1e-5 across 4 NVIDIA A40 GPUs & loss weights as $\\lambda_v = 1, \\lambda_c = 1, \\lambda_f = 0.25, \\lambda_d = 0.25$ . Please refer to supplementary for more details.", + "bbox": [ + 212, + 584, + 789, + 660 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.5 Constructing Wild Objects in Hands Dataset", + "text_level": 1, + "bbox": [ + 214, + 680, + 635, + 696 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our framework requires dataset containing multi-view images of rigid hand-object interactions in the wild, with 3D hand pose and 2D object masks. To construct such a dataset, we consider VISOR [13] which provides 2D tracks for hands, objects they are interacting with and their segmentation masks. It contains a rich set of hand-object interactions, e.g. taking out milk from the fridge, pouring oil from bottles, kneading dough, cutting vegetables, and stirring noodles in a wok. Our interest is in the 3D reconstruction of rigid objects which are in-contact with a hand, but there are no 3D object annotations in VISOR. 
Hence, we process it to prepare a dataset for training our model.", + "bbox": [ + 212, + 704, + 787, + 840 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "A. Prakash et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/4ae1d2adea35c968993b3d64e07ffd1c358eeec60cc3584da2887093e3605b11.jpg", + "table_caption": [ + "Table 1: Generalization to novel objects in the wild. We report F-score at $5\\mathrm{mm}$ & $10\\mathrm{mm}$ , Chamfer distance (CD, mm) for object generalization splits on MOW. We compare with AC-OCC & AC-SDF trained on different combinations of datasets with full 3D supervision. Our approach outperforms baselines across all metrics without using real-world 3D supervision (Relative % improvement w.r.t. best baseline in green)." + ], + "table_footnote": [], + "table_body": "
MethodDataset and supervision usedF@5 ↑F@10 ↑CD ↓
AC-OCCObMan (Synthetic 3D)0.0950.1798.69
AC-SDF [73]ObMan (Synthetic 3D)0.1080.1997.82
AC-SDF [73]ObMan (Synthetic 3D) + HO3D (Lab 3D)0.0820.1597.52
AC-SDF [73]ObMan (Synthetic 3D) + HO3D (Lab 3D) + HOI4D (3D)0.0950.1937.43
HORSE (Ours)ObMan (Synthetic 3D) + VISOR (2D Masks) + Shape priors0.121+10.7%0.220+10.6%6.76+13.5%
", + "bbox": [ + 220, + 226, + 785, + 300 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We first sample a subset of VISOR involving hand-object contact, using available contact annotations. We select object tracks where only one hand is in consistent contact with the object. This leaves us with 14768 object tracks from the original VISOR dataset. We then manually filter this subset to select a subset that showcases manipulation of rigid objects with a single hand. This leaves us with 604 video snippets showing hands interacting with different objects.", + "bbox": [ + 212, + 330, + 782, + 419 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Processing hands on VISOR: We rely on the 3D hand poses to set up the output coordinate frame, compute hand articulation features, and more importantly to register the different frames together [38,66]. These hand poses are estimated using FrankMocap, which may not always be accurate. To remove erroneous poses, we employ automated filtering using the uncertainty estimate technique from Bahat & Shakhnarovich [1] following 3D human pose literature [50]. Specifically, we obtain 3D hand pose predictions on five different versions of the image, augmented by different fixed translations. The uncertainty estimate for a given image is computed as the standard deviation of reprojection locations of MANO vertices across these 5 image versions. This sidesteps the need to hand-specify the trade-off between translation, rotation, and articulation parameters that are part of the 3D hand pose output. This leaves us with 473 video snippets consisting of 144 object categories. This object diversity is $4 \\times$ larger than existing datasets [18, 19, 32, 34, 69] used for our task, typically containing 10 to 32 object categories. We refer to this dataset as Wild Objects in Hands, some example object sequences are shown in Fig. 4. Note the *incidental* multiple views and relative consistency in hand and object pose over the course of interaction.", + "bbox": [ + 212, + 421, + 787, + 678 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 215, + 703, + 375, + 720 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.1 Protocols", + "text_level": 1, + "bbox": [ + 215, + 737, + 341, + 750 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We use 4 datasets for training (ObMan [22], VISOR [13], HO3D [18], HOI4D [34]) and 2 datasets (MOW [5], HO3D) for evaluation. Different methods are trained on different datasets, depending on the specific evaluation setting.", + "bbox": [ + 212, + 763, + 785, + 808 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Training datasets: ObMan is a large scale synthetic hand-object dataset with 2.5K objects and 3D supervision. HO3D & HOI4D are real world datasets collected", + "bbox": [ + 212, + 809, + 785, + 839 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "3D Reconstruction of Objects in Hands without Real World 3D Supervision", + "bbox": [ + 225, + 114, + 730, + 128 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/089202a6a85e8a9dbae594868f06d422abbec314b67bbf84abdcb55b413d6720.jpg", + "table_caption": [ + "Table 2: HO3D Object generalization. We outperform AC-OCC & AC-SDF trained on different datasets with 3D supervision." + ], + "table_footnote": [], + "table_body": "
MethodSupervision (ObMan +)F@5F@10CD
AC-OCC-0.180.334.39
AC-SDF-0.170.333.72
AC-SDFMOW (3D)0.170.333.84
AC-SDFMOW (3D) + HOI4D (3D)0.170.333.63
OursVISOR (Multi-view 2D)0.200.353.39
", + "bbox": [ + 217, + 199, + 504, + 276 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/f2aa5932dcaa1aa8ea20e2797bc5bbbf2b249906cd2cc089424c2ba48491d548.jpg", + "table_caption": [ + "Table 3: HO3D View generalization. We outperform HO [22] & GF [31], trained on HO3D with full 3D supervision." + ], + "table_footnote": [], + "table_body": "
MethodSupervision (ObMan +)F@5F@10CD
AC-SDF-0.170.323.72
HO [22]HO3D (3D)0.110.224.19
GF [31]HO3D (3D)0.120.244.96
OursHO3D (Multi-view 2D)0.230.431.41
", + "bbox": [ + 511, + 199, + 787, + 263 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "in lab settings with 3D annotations. HO3D contains 10 YCB [82] objects whereas HOI4D contains 16 object categories, out of which 7 are rigid. VISOR does not contain any 3D supervision. Instead, we use the process described in Sec. 3.5, to extract supervision from VISOR, resulting in 144 object categories.", + "bbox": [ + 212, + 306, + 785, + 367 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The baselines are trained with different combinations of HO3D & HOI4D [34]. As our method does not require 3D ground truth, we do not use these datasets for training. Instead, we use auxiliary supervision from Wild Objects in Hands (Sec. 3.5) & learn shape priors using ObMan. VISOR does not have 3D annotations and can not be used to train the baselines. Note that all models are initialized from the model pretrained on ObMan for fair comparisons, following protocol [73]. Evaluation datasets: We focus on the challenging zero-shot generalization to novel objects in-the-wild setting. We use MOW [5] dataset which contains images from YouTube, spanning 120 object templates. Note that these types of images have not been seen during training. To be consistent with prior work [73], we also use HO3D for evaluation, consisting of 1221 testing images across 10 objects. While [73] operate in view generalization setting, i.e., making predictions on novel views of training objects, we also consider the more challenging object generalization setting. Almost all of our experiments are conducted in the object generalization setting where we assess predictions on novel objects across datasets. Metrics: Following [59, 73], we report Chamfer distance (CD) and F-score at $5\\mathrm{mm}$ & $10\\mathrm{mm}$ thresholds. F-score evaluates the distance between object surfaces as the harmonic mean between precision & recall. Precision measures accuracy of the reconstruction as $\\%$ of reconstructed points that lie within a certain distance to ground truth. Recall measures completeness of the reconstruction as $\\%$ of points, on the ground truth, that lie within a certain distance to the reconstruction. CD computes sum of distances for each pair of nearest neighbors in the two point clouds. We report mean CD & F-score over all test objects.", + "bbox": [ + 212, + 368, + 787, + 717 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Baselines: We compare our model with AC-SDF trained in supervised manner using 3D ground truth on different combination of datasets in different settings: (1) For object generalization on MOW in the wild, AC-SDF is trained on ObMan, ObMan + HO3D, ObMan + HO3D + HOI4D, (2) For object generalization on HO3D, AC-SDF is trained on ObMan, ObMan + MOW, ObMan + MOW + HOI4D, (3) For view generalization on HO3D, AC-SDF is trained on ObMan + HO3D. We also compare with an occupancy variant of AC-SDF (AC-OCC) and recent published methods with different forms of SDF representation, e.g.", + "bbox": [ + 212, + 719, + 789, + 840 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "A. Prakash et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/1b7912bcda3c10953f81c96ac09f552ab653a663e098e895f1615335e4d7c611.jpg", + "table_caption": [ + "Table 4: Comparison with relevant methods. 
Our approach also outperforms gSDF, AlignSDF & DDFHO (trained in the same setting as ours) in zero-shot generalization to MOW across most metrics." + ], + "table_footnote": [], + "table_body": "
MethodF@5 ↑F@10 ↑CD ↓
AC-SDF [73]0.1080.1997.82
AlignSDF [10]0.0990.1828.30
gSDF [9]0.1070.1977.50
DDFHO [77]0.0940.1663.06
HORSE (Ours)0.1210.2206.76
", + "bbox": [ + 225, + 226, + 488, + 321 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/661bee211c1801b3f0a2556bf7052bdff7a452a5bd36e352b1fe24722f3adf16.jpg", + "table_caption": [ + "Table 5: 3D vs. 2D input to discriminator. Training with 3D inputs (at different resolutions) perform worse, likely due to coarse sampling resulting in very few points inside the object." + ], + "table_footnote": [], + "table_body": "
Disc. inputF@5 ↑F@10 ↑CD ↓
No disc.0.1170.2166.93
10 × 10 × 100.1200.2187.29
16 × 16 × 160.1150.2097.79
32 × 32 × 320.1040.1917.83
2D slices0.1210.2206.76
", + "bbox": [ + 519, + 226, + 781, + 321 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "AlignSDF [10], gSDF [9], DDFHO [77]. Note that the VISOR dataset cannot be used for training since it does not have 3D supervision. For the view generalization setting on HO3D, we also compare with HO [22] & GF [31] trained with 3D ground truth on ObMan + HO3D. Recent works [44,70] on unsupervised reconstruction of objects require several views or depth, which are not available in our setting.", + "bbox": [ + 212, + 349, + 787, + 428 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.2 Results", + "text_level": 1, + "bbox": [ + 214, + 450, + 323, + 465 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Object generalization in the wild: We first examine if the auxiliary supervision from visual hull and shape prior is useful for generalization to novel objects in the wild. We evaluate on MOW in Tab. 1 and compare with AC-OCC & AC-SDF trained on different combinations of ObMan, HO3D, HOI4D datasets with 3D supervision. Our approach provides gains of $24.3\\%$ compared to AC-OCC (trained on ObMan) and $11.6\\%$ on AC-SDF (trained on ObMan). This shows the benefits of our supervision cues in the wild over training on just large scale synthetic data with 3D supervision. We also outperform AC-SDF trained on ObMan + HO3D + HOI4D with full 3D supervision by $16.8\\%$ across all metrics. This indicates that our supervision cues from in-the-wild VISOR are better than using 3D supervision on lab datasets with limited diversity in objects. We also outperform relevant methods that use different forms of SDF representations, e.g. AlignSDF, gSDF & DDFHO across most metrics (Tab. 4). Note that our contributions are orthogonal and could be combined with these works.", + "bbox": [ + 212, + 477, + 787, + 686 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Adding 3D supervision to AC-SDF. In Tab. 1, we observe that adding more data from HO3D & HOI4D to AC-SDF training did not help in zero-shot generalization to MOW. Instead, the performance drops compared to AC-SDF trained on ObMan. This is likely due to limited diversity in HO3D: 10 YCB objects, HOI4D: 7 rigid object categories & the model overfitting to these categories.", + "bbox": [ + 212, + 688, + 787, + 763 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Object generalization on HO3D: Our approach is better than AC-OCC & AC-SDF trained on different datasets with 3D supervision (Tab. 2). This further shows the benefits of auxiliary supervision from VISOR for object generalization. Also, AC-SDF does not benefit from MOW & HOI4D. This could because HO3D evaluates on 10 objects only and they may not be present in MOW or HOI4D.", + "bbox": [ + 212, + 763, + 787, + 839 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "3D Reconstruction of Objects in Hands without Real World 3D Supervision", + "bbox": [ + 225, + 113, + 732, + 130 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 114, + 782, + 126 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/73f7d19b0bd2db2728c83e02c1580ccd71dee205db107ee6a23f1a600edc463d.jpg", + "table_caption": [ + "Table 6: Supervision quality on HO3D. Automated filtering to remove incorrect hand poses improves results & using ground truth hand pose differs little compared to predicted pose. $^{1}$" + ], + "table_footnote": [], + "table_body": "
F@5 ↑F@10 ↑CD ↓
HORSE (base setting)0.2340.4341.41
no training on HO3D0.1750.3293.72
w/o filtering0.2130.4051.42
w/ ground truth pose10.2430.4441.39
", + "bbox": [ + 220, + 226, + 480, + 295 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/72745c9e5bce0aaea5b49116274234e7132c4a7efd1fcc08bd14e5427a3bf51d.jpg", + "table_caption": [ + "Table 7: Role of different loss functions. We report F-score at $5\\mathrm{mm}$ & $10\\mathrm{mm}$ , Chamfer distance (CD, mm) for different variants of our model on MOW. All losses are effective & multiview supervision leads to largest gain." + ], + "table_footnote": [], + "table_body": "
\\( \\mathcal{L}_{\\text{ObMan}} \\)\\( \\mathcal{L}_{\\text{visual-hull}} \\)\\( \\mathcal{L}_{\\text{consistency}} \\)\\( \\mathcal{L}_{\\text{shape-prior}} \\)\\( \\mathbf{F@5} \\uparrow \\)\\( \\mathbf{F@10} \\uparrow \\)\\( \\mathbf{CD} \\downarrow \\)
0.0950.1818.69
0.1110.2057.26
0.0730.13212.75
0.0970.17510.29
0.1170.2166.93
0.1210.2206.76
", + "bbox": [ + 496, + 226, + 785, + 295 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Occupancy vs SDF. We see that SDF formulation is better than occupancy when trained with full 3D supervision (AC-OCC vs. AC-SDF). In contrast, we find SDF training to be unstable (does not give meaningful predictions) with auxiliary supervision. This could be because regressing continuous SDF values with weak supervision is harder than binary classification for occupancy values.", + "bbox": [ + 212, + 323, + 787, + 398 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "View generalization results on HO3D. In Tab. 3, we see gains with using supervision cues over just training on synthetic data, consistent with trends in the object generalization setting. We also outperform HO [22] & GF [31], both trained on HO3D using full 3D supervision. We outperform these methods even without any images from HO3D (last row in Tab. 1 vs. GF & HO in Table 3), likely due to use of more expressive pixel-aligned & hand articulation features.", + "bbox": [ + 212, + 398, + 787, + 491 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.3 Ablation Study", + "text_level": 1, + "bbox": [ + 215, + 510, + 388, + 526 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Analysis of supervision quality. We also observe in Tab. 3 that our method is able to bridge more than $40\\%$ of the gap between no training on HO3D to training with full 3D supervision. We further use the view generalization setting to assess the quality of 2D object mask supervision used in our method in Tab. 6. Our automated filtering of frames with inaccurate hand poses (as described in Sec. 3.5) is crucial for good performance. Also, little is lost from using hand pose as a proxy for object pose on the HO3D dataset. $^{1}$", + "bbox": [ + 212, + 534, + 787, + 640 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Role of different loss terms: We experiment with multiple variants of our model to assess the importance of different loss terms. We start with the AC-OCC model trained on ObMan and gradually add $\\mathcal{L}_{\\mathrm{visual - hull}}$ , $\\mathcal{L}_{\\mathrm{consistency}}$ , and $\\mathcal{L}_{\\mathrm{shape - prior}}$ . From the results in Tab. 7, we observe that $\\mathcal{L}_{\\mathrm{visual - hull}}$ is more effective than $\\mathcal{L}_{\\mathrm{consistency}}$ and using them together provides further benefits. Moreover, $\\mathcal{L}_{\\mathrm{shape - prior}}$ improves performance on top of $\\mathcal{L}_{\\mathrm{consistency}}$ and $\\mathcal{L}_{\\mathrm{visual - hull}}$ .", + "bbox": [ + 214, + 640, + 787, + 731 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "3D vs 2D input to discriminator: We also consider 3D volumes as input to the discriminator (instead of 2D cross-sections). For this, we need to sample $64 \\times 64 \\times 64$", + "bbox": [ + 214, + 731, + 785, + 761 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "A. Prakash et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 11 + }, + { + "type": "page_footnote", + "text": "1 While [73] uses similar contrast between predicted vs. ground truth hands to make claims, we note that those claims & this result should be taken with a grain of salt. FrankMocap is trained on HO3D, so its predictions on HO3D are better than they would be on unseen data. 
As most of our models are trained on VISOR (not used for training FrankMocap), our other experiments do not suffer from this issue.", + "bbox": [ + 217, + 768, + 787, + 839 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/c6fa4145aeb7f762a920526b80ea316af83a43807c3817adf4f637e652409420.jpg", + "table_caption": [ + "Table 8: Design choices for mask guided sampling. Uniformly sampling is much worse than the rejection sampling used in our method. Using negative points from hand masks is useful.", + "Table 9: Sampling method for 2D planes. Sampling planes through the origin of the hand coordinate system & rotated randomly performs the best compared to sampling axis-aligned planes either uniformly or through the origin." + ], + "table_footnote": [], + "table_body": "
Sampling methodF@5 ↑ F@10 ↑ CD ↓Sampling methodF@5 ↑ F@10 ↑ CD ↓
Uniform0.0930.16610.29Uniform (axis-aligned)0.1150.2087.01
Ours (no hand points)0.1130.2077.69Origin (axis-aligned)0.0980.1838.52
Ours0.1170.2166.93Origin (random rotation)0.1210.2206.76
", + "bbox": [ + 220, + 226, + 784, + 292 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "$(=262144)$ points & run several forward passes of our model to get occupancies. Since this is computationally expensive, we sample points at coarser resolutions: $32 \\times 32 \\times 32$ , $16 \\times 16 \\times 16$ , $10 \\times 10 \\times 10$ . We use $32 \\times 32$ size 2D slices, so $10 \\times 10 \\times 10$ 3D volume has no. of points & takes similar compute. We see that 2D slices perform better than 3D volumes (Tab. 5). Also, the performance gets worse with increase in the sampled 3D volume, likely due to 3D sampling being so coarse that very few points lie inside the object, thus unable to capture fine-grained shape.", + "bbox": [ + 212, + 321, + 787, + 428 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Sampling 2D slices for discriminator: We ablate different design choices (Sec. 3.3) in Tab. 9. We observe that sampling 2D planes through origin of the hand coordinate system and rotated randomly performs the best compared to sampling axis-aligned frames either uniformly or through origin.", + "bbox": [ + 212, + 429, + 787, + 489 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Design choices for mask guided sampling: We run rejection sampling (with hand & object masks) to sample points in the hand coordinate frame (Sec. 3.2). We compare with 2 variants: uniformly sampling in the hand frame & removing negative points from hand masks. We find our strategy to work the best (Tab. 8).", + "bbox": [ + 212, + 489, + 789, + 551 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.4 Visualizations", + "text_level": 1, + "bbox": [ + 215, + 575, + 377, + 589 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We compare the mesh generated by our model and AC-SDF (trained on ObManbest baseline) on zero-shot generalization to MOW (Fig. 5) and Core50 [35](Fig. 6). For this, we sample points uniformly in a $64 \\times 64 \\times 64$ volume, predict their occupancies or SDF from the network and run marching cubes [36]. We project the mesh into the input image & render it in different views. Our model captures the visual hull of the object, as evidenced by the projection of the mesh onto the image, and generates more coherent shapes than AC-SDF, which often reconstructs disconnected and scattered shapes. More visualizations are in supplementary.", + "bbox": [ + 212, + 602, + 787, + 724 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.5 Limitations", + "text_level": 1, + "bbox": [ + 215, + 748, + 356, + 762 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Inaccurate hand pose. We use predictions from FrankMocap for hand pose & camera parameters. Note that the sampled points do not cover the entire object if the hand pose is not accurate, due to mis-projection into the image plane. This leads to exclusion of points in certain parts of the object (Fig. 7).", + "bbox": [ + 212, + 776, + 787, + 838 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "3D Reconstruction of Objects in Hands without Real World 3D Supervision", + "bbox": [ + 225, + 113, + 730, + 128 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/6f78e95ca1118a8a3c2918210888dd77d68864ee441af6a5fb86944ecb14e04c.jpg", + "image_caption": [ + "Fig. 5: Visualizations on MOw object generalization split. 
We show the object mesh projected onto the image and rendered in different views for our HORSE model and compare with the AC-SDF model trained on ObMan dataset with 3D supervision (best baseline model). We also show the ground truth (GT) object model. We observe that our model is able to predict the object shape more accurately than AC-SDF which often reconstructs smaller and disconnected shapes." + ], + "image_footnote": [], + "bbox": [ + 220, + 141, + 767, + 296 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/5df372738649518855eb9b6a94fe58137623174e0e61ef106576d93a87d714ce.jpg", + "image_caption": [ + "Fig. 6: Visualizations on zero-shot generalization to Core50 [35]. We show the object mesh projected onto the image and rendered in different views on Core50. HORSE predicts better shapes than AC-SDF (best baseline, often leads to artifacts)." + ], + "image_footnote": [], + "bbox": [ + 220, + 405, + 772, + 508 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Limited object views. Videos in the wild often do not capture $360^{\\circ}$ view of the object, e.g. kettle in Fig. 7. This is different than lab settings where the interactions are often constrained & multi-camera setup is used to capture all sides of the object.", + "bbox": [ + 212, + 593, + 517, + 686 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/5b117a7c1b8e95b9cd03dadb80623c76b33c672e70e7b0d8c3ca86cb97068fb7.jpg", + "image_caption": [ + "Fig. 7: Sampled points do not cover the entire object if hand pose is inaccurate." + ], + "image_footnote": [], + "bbox": [ + 527, + 595, + 787, + 662 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 215, + 704, + 359, + 720 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We present an approach for reconstructing hand-held objects in 3D from a single image. We propose modules to extract supervision from in-the-wild videos & learn data-driven 3D shape priors from synthetic ObMan to circumvent the need for direct 3D supervision. Experiments show that our approach generalizes better to novel objects in the wild than baselines trained using 3D supervision. Future directions include jointly optimizing the hand pose with the object shape to deal with inaccurate hand poses or incorporating additional cues, e.g. contact priors.", + "bbox": [ + 212, + 733, + 787, + 842 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "A. Prakash et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgements: We thank Ashish Kumar, Erin Zhang, Arjun Gupta, Shaowei Liu, Anand Bhattachad, Pranay Thangeda & Kashyap Chitta for feedback on the draft. This material is based upon work supported by NSF (IIS2007035), NASA (80NSSC21K1030), DARPA (Machine Common Sense program), an Amazon Research Award, an NVIDIA Academic Hardware Grant, and the NCSA Delta System (supported by NSF OCI 2005572 and the State of Illinois).", + "bbox": [ + 212, + 146, + 787, + 238 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 215, + 262, + 325, + 277 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Bahat, Y., Shakhnarovich, G.: Confidence from invariance to image transformations. arXiv (2018)", + "2. 
Brahmbhatt, S., Ham, C., Kemp, C.C., Hays, J.: Contactdb: Analyzing and predicting grasp contact via thermal imaging. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)", + "3. Brahmbhatt, S., Tang, C., Twigg, C.D., Kemp, C.C., Hays, J.: Contactpose: A dataset of grasps with object contact and hand pose. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)", + "4. Buckingham, G.: Hand tracking for immersive virtual reality: Opportunities and challenges. Frontiers in Virtual Reality (2021)", + "5. Cao, Z., Radosavovic, I., Kanazawa, A., Malik, J.: Reconstructing hand-object interactions in the wild. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)", + "6. Chang, A.X., Funkhouser, T.A., Guibas, L.J., Hanrahan, P., Huang, Q., Li, Z., Savarese, S., Savva, M., Song, S., Su, H., Xiao, J., Yi, L., Yu, F.: Shapenet: An information-rich 3D model repository. ArXiv (2015)", + "7. Chang, M., Prakash, A., Gupta, S.: Look ma, no hands! agent-environment factorization of egocentric videos. In: Advances in Neural Information Processing Systems (NeurIPS) (2023)", + "8. Chao, Y., Yang, W., Xiang, Y., Molchanov, P., Handa, A., Tremblay, J., Narang, Y.S., Wyk, K.V., Iqbal, U., Birchfield, S., Kautz, J., Fox, D.: Dexycb: A benchmark for capturing hand grasping of objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)", + "9. Chen, Z., Chen, S., Schmid, C., Laptev, I.: gsdf: Geometry-driven signed distance functions for 3d hand-object reconstruction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)", + "10. Chen, Z., Hasson, Y., Schmid, C., Laptev, I.: Alignsdf: Pose-aligned signed distance fields for hand-object reconstruction. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)", + "11. Choi, H., Chavan-Dafle, N., Yuan, J., Isler, V., Park, H.: Handnerf: Learning to reconstruct hand-object interaction scene from a single rgb image. In: International Conference on Robotics and Automation (2024)", + "12. Damen, D., Doughty, H., Farinella, G.M., Fidler, S., Furnari, A., Kazakos, E., Moltisanti, D., Munro, J., Perrett, T., Price, W., Wray, M.: Scaling egocentric vision: The epic-kitchens dataset. In: Proceedings of the European Conference on Computer Vision (ECCV) (2018)", + "13. Darkhalil, A., Shan, D., Zhu, B., Ma, J., Kar, A., Higgins, R., Fidler, S., Fouhey, D., Damen, D.: Epic-kitchens visor benchmark: Video segmentations and object relations. In: NeurIPS Track on Datasets and Benchmarks (2022)" + ], + "bbox": [ + 218, + 294, + 785, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "3D Reconstruction of Objects in Hands without Real World 3D Supervision", + "bbox": [ + 225, + 114, + 730, + 128 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "14. Fan, Z., Parelli, M., Kadoglou, M.E., Kocabas, M., Chen, X., Black, M.J., Hilliges, O.: Hold: Category-agnostic 3d reconstruction of interacting hands and objects from video. arXiv preprint arXiv:2311.18448 (2023)", + "15. Garcia-Hernando, G., Yuan, S., Baek, S., Kim, T.: First-person hand action benchmark with RGB-D videos and 3d hand pose annotations. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)", + "16. 
Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial nets. In: Advances in Neural Information Processing Systems (NeurIPS) (2014)", + "17. Grauman, K., Westbury, A., Byrne, E., Chavis, Z., Furnari, A., Girdhar, R., Hamburger, J., Jiang, H., Liu, M., Liu, X., et al.: Ego4d: Around the world in 3,000 hours of egocentric video. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)", + "18. Hampali, S., Rad, M., Oberweger, M., Lepetit, V.: Honnotate: A method for 3d annotation of hand and object poses. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)", + "19. Hampali, S., Sarkar, S.D., Rad, M., Lepetit, V.: Keypoint transformer: Solving joint identification in challenging hands and object interactions for accurate 3d pose estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)", + "20. Han, S., Liu, B., Cabezas, R., Twigg, C.D., Zhang, P., Petkau, J., Yu, T., Tai, C., Akbay, M., Wang, Z., Nitzan, A., Dong, G., Ye, Y., Tao, L., Wan, C., Wang, R.: Megatrack: monochrome egocentric articulated hand-tracking for virtual reality. ACM Transactions on Graphics (TOG) (2020)", + "21. Hasson, Y., Tekin, B., Bogo, F., Laptev, I., Pollefeys, M., Schmid, C.: Leveraging photometric consistency over time for sparsely supervised hand-object reconstruction. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)", + "22. Hasson, Y., Varol, G., Tzionas, D., Kalevatykh, I., Black, M.J., Laptev, I., Schmid, C.: Learning joint reconstruction of hands and manipulated objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)", + "23. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2016)", + "24. Heppert, N., Irshad, M.Z., Zakharov, S., Liu, K., Ambrus, R.A., Bohg, J., Valada, A., Kollar, T.: CARTO: category and joint agnostic reconstruction of articulated objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)", + "25. Huang, D., Ji, X., He, X., Sun, J., He, T., Shuai, Q., Ouyang, W., Zhou, X.: Reconstructing hand-held objects from monocular video. In: ACM Transactions on Graphics (2022)", + "26. Irshad, M.Z., Zakharov, S., Ambrus, R., Kollar, T., Kira, Z., Gaidon, A.: Shapo: Implicit representations for multi-object shape, appearance, and pose optimization. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)", + "27. Irshad, M.Z., Zakharov, S., Liu, K., Guizilini, V., Kollar, T., Gaidon, A., Kira, Z., Ambrus, R.: Neo 360: Neural fields for sparse view synthesis of outdoor scenes. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2023)" + ], + "bbox": [ + 215, + 146, + 785, + 839 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "A. Prakash et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "28. Jiang, H., Liu, S., Wang, J., Wang, X.: Hand-object contact consistency reasoning for human grasps generation. 
In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)", + "29. Kanazawa, A., Tulsiani, S., Efros, A.A., Malik, J.: Learning category-specific mesh reconstruction from image collections. In: Proceedings of the European Conference on Computer Vision (ECCV) (2018)", + "30. Kar, A., Tulsiani, S., Carreira, J., Malik, J.: Category-specific object reconstruction from a single image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2015)", + "31. Karunratanakul, K., Yang, J., Zhang, Y., Black, M.J., Muandet, K., Tang, S.: Grasping field: Learning implicit representations for human grasps. In: Proceedings of the International Conference on 3D Vision (3DV) (2020)", + "32. Kwon, T., Tekin, B., Stühmer, J., Bogo, F., Pollefeys, M.: H2O: two hands manipulating objects for first person interaction recognition. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)", + "33. Laurentini, A.: The visual hull concept for silhouette-based image understanding. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI) 16, 150-162 (1994)", + "34. Liu, Y., Liu, Y., Jiang, C., Lyu, K., Wan, W., Shen, H., Liang, B., Fu, Z., Wang, H., Yi, L.: HOI4D: A 4d egocentric dataset for category-level human-object interaction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)", + "35. Lomonaco, V., Maltoni, D.: Core50: a new dataset and benchmark for continuous object recognition. In: Proceedings of the Conference on Robot Learning (CoRL) (2017)", + "36. Lorensen, W.E., Cline, H.E.: Marching cubes: A high resolution 3D surface construction algorithm. ACM Transactions on Graphics (1987)", + "37. Lunayach, M., Zakharov, S., Chen, D., Ambrus, R., Kira, Z., Irshad, M.Z.: FSD: fast self-supervised single RGB-D to categorical 3d objects. arXiv abs/2310.12974 (2023)", + "38. Ma, W.C., Yang, A.J., Wang, S., Urtasun, R., Torralba, A.: Virtual correspondence: Humans as a cue for extreme-view geometry. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)", + "39. Mandikal, P., Grauman, K.: Dexvip: Learning dexterous grasping with human hand pose priors from video. In: Proceedings of the Conference on Robot Learning (CoRL) (2021)", + "40. Mandikal, P., Grauman, K.: Learning dexterous grasping with object-centric visual affordances. In: Proceedings of the IEEE International Conference on Robotics and Automation (ICRA) (2021)", + "41. Mao, X., Li, Q., Xie, H., Lau, R.Y.K., Wang, Z., Smolley, S.P.: Least squares generative adversarial networks. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2017)", + "42. Matusik, W., Buehler, C., Raskar, R., Gortler, S.J., McMillan, L.: Image-based visual hulls. In: ACM Transactions on Graphics (2000)", + "43. Mescheder, L., Oechsle, M., Niemeyer, M., Nowozin, S., Geiger, A.: Occupancy networks: Learning 3d reconstruction in function space. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)", + "44. Niemeyer, M., Mescheder, L.M., Oechsle, M., Geiger, A.: Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)" + ], + "bbox": [ + 215, + 147, + 785, + 839 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "3D Reconstruction of Objects in Hands without Real World 3D Supervision", + "bbox": [ + 225, + 114, + 730, + 128 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "45. Park, J.J., Florence, P., Straub, J., Newcombe, R., Lovegrove, S.: Deepsdf: Learning continuous signed distance functions for shape representation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)", + "46. Prakash, A., Tu, R., Chang, M., Gupta, S.: 3d hand pose estimation in everyday egocentric images. In: Proceedings of the European Conference on Computer Vision (ECCV) (2024)", + "47. Qin, Y., Su, H., Wang, X.: From one hand to multiple hands: Imitation learning for dexterous manipulation from single-camera teleoperation. Proceedings of the International Conference on Intelligent Robots and Systems (IROS) (2022)", + "48. Qin, Y., Wu, Y., Liu, S., Jiang, H., Yang, R., Fu, Y., Wang, X.: Dexamv: Imitation learning for dexterous manipulation from human videos. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)", + "49. Rijpkema, H., Girard, M.: Computer animation of knowledge-based human grasping. In: Thomas, J.J. (ed.) ACM Transactions on Graphics (1991)", + "50. Rockwell, C., Fouhey, D.F.: Full-body awareness from partial observations. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)", + "51. Rogez, G., Khademi, M., Supancic III, J., Montiel, J.M.M., Ramanan, D.: 3d hand pose detection in egocentric rgb-d images. In: Proceedings of the European Conference on Computer Vision (ECCV) (2014)", + "52. Romero, J., Kjellström, H., Kragic, D.: Hands in action: real-time 3D reconstruction of hands in interaction with objects. In: Proceedings of the IEEE International Conference on Robotics and Automation (ICRA) (2010)", + "53. Romero, J., Tzionas, D., Black, M.J.: Embodied hands: Modeling and capturing hands and bodies together. ACM Transactions on Graphics (ToG) (2017)", + "54. Rong, Y., Shiratori, T., Joo, H.: Frankmocap: Fast monocular 3D hand and body motion capture by regression and integration. Proceedings of the IEEE International Conference on Computer Vision Workshops (ICCV Workshops) (2021)", + "55. Saito, S., Huang, Z., Natsume, R., Morishima, S., Kanazawa, A., Li, H.: Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2019)", + "56. Schonberger, J.L., Frahm, J.M.: Structure-from-motion revisited. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2016)", + "57. Shan, D., Geng, J., Shu, M., Fouhey, D.F.: Understanding human hands in contact at internet scale. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)", + "58. Taheri, O., Ghorbani, N., Black, M.J., Tzionas, D.: GRAB: A dataset of whole-body human grasping of objects. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)", + "59. Tatarchenko, M., Richter, S.R., Ranftl, R., Li, Z., Koltun, V., Brox, T.: What do single-view 3d reconstruction networks learn? 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)", + "60. Truong, P., Rakotosaona, M., Manhardt, F., Tombari, F.: SPARF: neural radiance fields from sparse and noisy poses. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)", + "61. Tschernezki, V., Darkhalil, A., Zhu, Z., Fouhey, D., Laina, I., Larlus, D., Damen, D., Vedaldi, A.: EPIC fields: Marrying 3d geometry and video understanding. In: Advances in Neural Information Processing Systems (NeurIPS) (2023)", + "62. Tschernezki, V., Laina, I., Larlus, D., Vedaldi, A.: Neural feature fusion fields: 3d distillation of self-supervised 2d image representations. In: Proceedings of the International Conference on 3D Vision (3DV) (2022)" + ], + "bbox": [ + 215, + 147, + 785, + 840 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "A. Prakash et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "63. Tschernezki, V., Larlus, D., Vedaldi, A.: Neuraldiff: Segmenting 3d objects that move in egocentric videos. In: Proceedings of the International Conference on 3D Vision (3DV) (2021)", + "64. Tulsiani, S., Efros, A.A., Malik, J.: Multi-view consistency as supervisory signal for learning shape and pose prediction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)", + "65. Turpin, D., Wang, L., Heiden, E., Chen, Y., Macklin, M., Tsogkas, S., Dickinson, S.J., Garg, A.: Grasp'd: Differentiable contact-rich grasp synthesis for multi-fingered hands. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)", + "66. Tzionas, D., Gall, J.: 3d object reconstruction from hand-object interactions. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2015)", + "67. Wu, J., Zhang, C., Xue, T., Freeman, W.T., Tenenbaum, J.B.: Learning a probabilistic latent space of object shapes via 3d generative-adversarial modeling. In: Advances in Neural Information Processing Systems (NeurIPS) (2016)", + "68. Wu, Y., Wang, J., Wang, X.: Learning generalizable dexterous manipulation from human grasp affordance. In: Proceedings of the Conference on Robot Learning (CoRL) (2022)", + "69. Yang, L., Li, K., Zhan, X., Wu, F., Xu, A., Liu, L., Lu, C.: Oakink: A large-scale knowledge repository for understanding hand-object interaction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)", + "70. Yariv, L., Kasten, Y., Moran, D., Galun, M., Atzmon, M., Basri, R., Lipman, Y.: Multiview neural surface reconstruction by disentangling geometry and appearance In: Advances in Neural Information Processing Systems (NeurIPS) (2020)", + "71. Ye, J., Wang, J., Huang, B., Qin, Y., Wang, X.: Learning continuous grasping function with a dexterous hand from human demonstrations. arXiv (2022)", + "72. Ye, Y., Gupta, A., Kitani, K., Tulsiani, S.: G-HOP: generative hand-object prior for interaction reconstruction and grasp synthesis. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2024)", + "73. Ye, Y., Gupta, A., Tulsiani, S.: What's in your hands? 3D reconstruction of generic objects in hands. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)", + "74. 
Ye, Y., Hebbar, P., Gupta, A., Tulsiani, S.: Diffusion-guided reconstruction of everyday hand-object interaction clips. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2023)", + "75. Ye, Y., Tulsiani, S., Gupta, A.: Shelf-supervised mesh prediction in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)", + "76. Yu, A., Ye, V., Tancik, M., Kanazawa, A.: pixelnerf: Neural radiance fields from one or few images. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)", + "77. Zhang, C., Di, Y., Zhang, R., Zhai, G., Manhardt, F., Tombari, F., Ji, X.: DDF-HO: hand-held object reconstruction via conditional directed distance field. In: Advances in Neural Information Processing Systems (NeurIPS) (2023)", + "78. Zhou, T., Brown, M., Snavely, N., Lowe, D.G.: Unsupervised learning of depth and ego-motion from video. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2017)", + "79. Zhu, Z., Damen, D.: Get a grip: Reconstructing hand-object stable grasps in egocentric videos. arXiv preprint arXiv:2312.15719 (2023)" + ], + "bbox": [ + 215, + 146, + 784, + 840 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "3D Reconstruction of Objects in Hands without Real World 3D Supervision", + "bbox": [ + 225, + 114, + 730, + 128 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "80. Zimmermann, C., Brox, T.: Learning to estimate 3d hand pose from single rgb images. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2017)", + "81. Zimmermann, C., Ceylan, D., Yang, J., Russell, B.C., Argus, M.J., Brox, T.: Freihand: A dataset for markerless capture of hand pose and shape from single RGB images. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2019)", + "82. Calli, B., Singh, A., Walsman, A., Srinivasa, S.S., Abbeel, P., Dollar, A.M.: The ycb object and model set: Towards common benchmarks for manipulation research. In: Proceedings of the International Conference on Advanced Robotics (ICAR) (2015)" + ], + "bbox": [ + 215, + 147, + 785, + 285 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "A. 
Prakash et al.", + "bbox": [ + 271, + 114, + 383, + 126 + ], + "page_idx": 19 + } +] \ No newline at end of file diff --git a/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/1ec33038-4034-4272-be45-88734c621c33_model.json b/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/1ec33038-4034-4272-be45-88734c621c33_model.json new file mode 100644 index 0000000000000000000000000000000000000000..e9d7b3e8686e24daa60c56863e3b86603d85ce96 --- /dev/null +++ b/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/1ec33038-4034-4272-be45-88734c621c33_model.json @@ -0,0 +1,2748 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.277, + 0.141, + 0.728, + 0.187 + ], + "angle": 0, + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.212, + 0.786, + 0.228 + ], + "angle": 0, + "content": "Aditya Prakash, Matthew Chang, Matthew Jin, Ruisen Tu, and Saurabh Gupta" + }, + { + "type": "text", + "bbox": [ + 0.294, + 0.24, + 0.706, + 0.282 + ], + "angle": 0, + "content": "University of Illinois Urbana-Champaign \n{adityap9,mc48,mjin11,ruisent2,saurabhg}@illinois.edu \nhttps://bit.ly/WildH0I" + }, + { + "type": "text", + "bbox": [ + 0.263, + 0.312, + 0.74, + 0.52 + ], + "angle": 0, + "content": "Abstract. Prior works for reconstructing hand-held objects from a single image train models on images paired with 3D shapes. Such data is challenging to gather in the real world at scale. Consequently, these approaches do not generalize well when presented with novel objects in in-the-wild settings. While 3D supervision is a major bottleneck, there is an abundance of a) in-the-wild raw video data showing hand-object interactions and b) synthetic 3D shape collections. In this paper, we propose modules to leverage 3D supervision from these sources to scale up the learning of models for reconstructing hand-held objects. Specifically, we extract multiview 2D mask supervision from videos and 3D shape priors from shape collections. We use these indirect 3D cues to train occupancy networks that predict the 3D shape of objects from a single RGB image. Our experiments in the challenging object generalization setting on in-the-wild MOW dataset show \\(11.6\\%\\) relative improvement over models trained with 3D supervision on existing datasets." + }, + { + "type": "text", + "bbox": [ + 0.263, + 0.533, + 0.724, + 0.547 + ], + "angle": 0, + "content": "Keywords: hand-held objects \\(\\cdot\\) shape priors \\(\\cdot\\) multiview supervision" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.57, + 0.377, + 0.586 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.598, + 0.788, + 0.688 + ], + "angle": 0, + "content": "While 3D reconstruction of hand-held objects is important for AR/VR [4,20] and robot learning applications [39,40,47,48,68,71], lack of 3D supervision outside of lab settings has made it challenging to produce models that work in the wild. This paper develops techniques to improve the generalization capabilities of single image hand-held object reconstruction methods by extracting supervision from in-the-wild videos & synthetic shape collections showing hand-object interactions." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.689, + 0.788, + 0.84 + ], + "angle": 0, + "content": "Collecting image datasets with ground truth 3D shapes for hand-held objects is hard. 
Any visual scanning setups (via multiple RGB/RGB-D cameras or motion capture) require full visibility of the object which is not available. Synthesizing realistic hand-object interaction is an open problem in itself [28,31,49,65]. Manual alignment of template shapes [5] is expensive, yet only approximate. Thus, there is very little in-the-wild real-world data with ground truth 3D shapes for hand-held objects. And while many past works have designed expressive models to predict shapes of hand-held objects [22,31,73], they are all held back due to the limited amount of real-world 3D data available for training and suffer from unsatisfactory performance on novel objects encountered in the wild." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "image", + "bbox": [ + 0.219, + 0.143, + 0.782, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.393, + 0.789, + 0.451 + ], + "angle": 0, + "content": "Fig. 1: We propose modules to extract supervision from in-the-wild videos (Sec. 3.2) & learn shape priors from 3D object collections (Sec. 3.3), to train occupancy networks which predict the 3D shapes of hand-held objects from a single image. This circumvents the need for paired real world 3D shape supervision used in existing works [22, 73]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.488, + 0.789, + 0.597 + ], + "angle": 0, + "content": "While in-the-wild images with paired 3D shapes are rare, there are a) plenty of in-the-wild videos containing multiple views of hand-held objects [12, 17] (Fig. 1), b) large catalogues of 3D object shapes [6] (Fig. 1). Shape collections provide 3D supervision but lack realistic hand grasps, videos showcase realistic hand-object interaction but don't provide direct 3D supervision. Either by itself seems insufficient, but can we combine supervision from these diverse sources to improve generalization of single-image hand-held object reconstruction methods?" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.599, + 0.79, + 0.844 + ], + "angle": 0, + "content": "Let's consider each cue one at a time. While videos show multiple views of the object, we unfortunately don't know the relative object pose in the different views. Automatically extracting the object pose using structure from motion techniques, e.g. COLMAP [56] doesn't work due to insufficient number of feature matches on the object of interaction. We sidestep this problem by using hand pose as a proxy for object pose (Fig. 2). This is based on the observation that humans rarely conduct in-hand manipulation in pick & place tasks involving rigid objects. Thus, if we assume that the hand and the object are rigidly moving together, then the relative 6 DoF pose of the hand between pairs of frames reveals the relative 6 DoF pose of the object. This reduces the SfM problem to an easier setting where the motion is known. Specifically, we use off-the-shelf FrankMocap system [54] to obtain 6 DoF pose for the hand and consequently the object's. We then use our proposed 2D mask guided 3D sampling module (Sec. 3.2) to generate 3D supervision for the object shape using object segmentation masks (Fig. 2). 
This lets us train on objects from 144 different categories, where as most methods currently train on only a handful of categories \\((< 20)\\)." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.226, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.314 + ], + "angle": 0, + "content": "While this works well for unoccluded parts of the object, this does not generate reliable supervision for parts of the object that are occluded by the hand (Fig. 1). This brings us to the 3D shape catalogues, which we use to extract shape priors. This enables the model to learn to output contiguous shapes even when the object is interrupted by the hand in the image, e.g. it can hallucinate a handle for a jug even when it is covered by the hand, because jugs typically have one. We adopt an adversarial training framework [16] to train a discriminator to differentiate between real shapes (from ObMan [22]) and shapes predicted from the model (Fig. 3). Unlike prior works [67] which train the discriminator on 3D inputs, we instead propose a 2D slice-based 3D discriminator (Sec. 3.3), which is computationally efficient and learns better fine-grained shape information." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.315, + 0.788, + 0.422 + ], + "angle": 0, + "content": "Our overall framework consists of an occupancy network [43] that predicts the 3D shape of hand-held objects from a single image. We train this model on sequences curated from the VISOR dataset [13] and use the Obman dataset [22] to build the shape prior. Training on diverse real world data outside of lab settings, enabled by our innovations, leads our model (HORSE) to good generalization performance. HORSE outperforms previous state-of-the-art models by \\(11.6\\%\\) in the challenging object generalization setting on MOW [5]." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.451, + 0.388, + 0.468 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.491, + 0.788, + 0.673 + ], + "angle": 0, + "content": "Reconstructing objects in hands: Several works [9, 10, 22, 31, 73, 77] have trained expressive architectures for predicting 3D shape from a single image using paired real world 3D supervision. Fitting object templates [5, 21] or learned 3D shapes [14, 25, 72, 74] to videos using appearance cues [5, 14, 21, 25] or geometric priors [72, 74] have also been explored. The most relevant work to ours is [73], which uses paired 3D supervision from synthetic [22] and small-scale real-world datasets to predict 3D shape from a single image. However, it does not generalize to novel object categories in the wild due to limited 3D supervision. Instead, we train our model on diverse object categories from in-the-wild videos by extracting multiview 2D supervision and learning shape priors from existing datasets, without any real-world 3D supervision. Note that our setting involves a single image input at test time and we use in-the-wild videos for training only." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.674, + 0.789, + 0.841 + ], + "angle": 0, + "content": "Hand-Object datasets with 3D object models: Existing real-world hand-object datasets with 3D annotations are captured in lab settings and contain limited variation in objects, e.g. HO3D [18]:10, H2O [32]:8, FPHA [15]:4, FreiHAND [81]:35, ContactDB [2]:50, ContactPose [3]:25, DexYCB [8]:20, GRAB [58]:51, HOI4D [34]: 16 object categories. Collecting datasets with ground truth 3D shapes is difficult to scale since it often requires visual scanning setups (multiple cameras or motion capture). Synthesising realistic hand-object interaction is an open problem in itself [28, 31, 49, 65]. In this work, we curate sequences from in-the-wild VISOR dataset containing 144 object categories and design modules to extract supervision for training occupancy networks. The closest to ours is MOW with 120 objects that we only use to test models to assess generalization." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.387, + 0.129 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.253 + ], + "angle": 0, + "content": "Hand-Object Interactions in the wild: There is a growing interest in understanding hands and how they interact with objects around them. Researchers have collected datasets [8, 18, 19, 22, 32, 34, 58] and trained models for detecting & segmenting hands and associated objects of interaction [13, 57, 62, 63]. Recognizing what hands are doing in images [7, 46, 79] is also relevant: through grasp classification [31], 2D pose estimation [51, 80], and more recently 3D shape and pose estimation [21, 22, 53, 54, 61, 73] for both hands and objects in contact." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.253, + 0.79, + 0.404 + ], + "angle": 0, + "content": "3D from single image without direct 3D supervision. Several works relax the need for direct 3D supervision by incorporating auxiliary shape cues during training, e.g. multi-view consistency in masks [64], depth from single image [26, 37, 78] or stereo [24], appearance [11, 27, 60, 76]. These have been applied to reconstruction of category specific [27, 29, 30, 37] as well as generic objects [11, 75, 76]. However, directly applying these approaches to hand-held objects in the wild poses several challenges, e.g. unknown camera, novel object categories, heavy occlusion, inaccurate depth estimates. In this work, we propose modules to extract supervision from in-the-wild videos using object masks [13] & hand pose [54] and learn priors from synthetic collections of hand-held objects [22]." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.425, + 0.348, + 0.442 + ], + "angle": 0, + "content": "3 Approach" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.454, + 0.788, + 0.5 + ], + "angle": 0, + "content": "We propose a novel framework for training 3D shape predictors from a single image without using any real world 3D supervision. Following prior work [73], we use implicit shape representation [43, 45] for 3D objects." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.519, + 0.373, + 0.533 + ], + "angle": 0, + "content": "3.1 Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.541, + 0.788, + 0.663 + ], + "angle": 0, + "content": "Consider the recent AC-SDF model for this task from Ye et al. [73]. 
Given an input RGB image, AC-SDF uses a neural network to predict the SDF of 3D points. The prediction is done in the hand coordinate frame obtained using FrankMocap [54], which outputs (a) hand articulation parameters \\(\\theta^a\\) (45 dimensional MANO hand pose [52]), (b) global rotation \\(\\theta^w\\) of the wrist joint w.r.t. camera, (c) weak perspective camera \\(\\theta^c\\), with scale factor \\(s\\) & 2D translation \\((t_x, t_y)\\), which is converted into a full perspective camera \\(K\\). These can be used to project a 3D point \\(\\mathbf{x}\\) into the image (\\(f\\) is the focal length) as \\(\\mathbf{x}_p = K[T_{\\theta^w} \\mathbf{x} + (t_x, t_y, f / s)]\\)" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.663, + 0.788, + 0.769 + ], + "angle": 0, + "content": "Given a 3D point \\(\\mathbf{x}\\) & image \\(I\\), AC-SDF conditions the SDF prediction on: (a) global image features from a ResNet-50 [23], (b) pixel-aligned features [55] from intermediate layers of ResNet-50 at the projection \\(\\mathbf{x}_p\\) of \\(\\mathbf{x}\\) in the image, (c) hand articulation features obtained by representing \\(\\mathbf{x}\\) in the coordinate frame of 15 hand joints. This is realized as, \\(\\mathbf{s} = \\mathcal{F}(\\mathbf{x}; I, \\theta, K)\\). Training \\(\\mathcal{F}\\) requires sampling 3D points \\(x\\) around the object and corresponding SDF values \\(s\\), \\(\\theta = (\\theta^a, \\theta^w, \\theta^c, K)\\) are estimated from FrankMocap." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.788, + 0.517, + 0.803 + ], + "angle": 0, + "content": "3.2 2D Mask Guided 3D Sampling" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.787, + 0.842 + ], + "angle": 0, + "content": "Training models with implicit shape representation require supervision in the form of occupancy [43] or SDF [45] for 3D points sampled inside and outside" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.226, + 0.115, + 0.731, + 0.129 + ], + "angle": 0, + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "image", + "bbox": [ + 0.22, + 0.149, + 0.441, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.269, + 0.219, + 0.391, + 0.228 + ], + "angle": 0, + "content": "a) Unposed video frames" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.228, + 0.44, + 0.286 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.239, + 0.286, + 0.422, + 0.297 + ], + "angle": 0, + "content": "b) Hand pose as proxy for object pose" + }, + { + "type": "image", + "bbox": [ + 0.452, + 0.148, + 0.788, + 0.285 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.501, + 0.286, + 0.72, + 0.298 + ], + "angle": 0, + "content": "c) Multi-view supervision from posed images" + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.314, + 0.789, + 0.412 + ], + "angle": 0, + "content": "Fig.2: Registering objects via hand pose and 2D Mask guided 3D sampling. (a) Consider unposed frames from in-the-wild videos. (b) We use hand pose from FrankMocap [54] as a proxy for object pose, thereby registering the different views. (c) We then use 2D object masks for labeling 3D points with occupancy (Sec. 3.2). 
3D points that project into the object mask in all views are considered as occupied (green triangles), all other points are considered unoccupied (red crosses). (3D object in the figure is for visualization only, not used for sampling.)" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.443, + 0.788, + 0.534 + ], + "angle": 0, + "content": "the object. Note that the balanced sampling of points inside and outside the object is an important consideration for training good predictors. While existing approaches [22, 31, 73] on this task use datasets with paired 3D supervision (3D object shape corresponding to 2D image), we operate in in-the-wild settings which do not contain 3D supervision. Instead, we propose a 2D mask guided 3D sampling strategy to obtain occupancy labels for training." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.536, + 0.789, + 0.702 + ], + "angle": 0, + "content": "Consider multiple views \\(\\{I_1, \\ldots, I_n\\}\\) of a hand-held object (Fig. 2), along with their masks \\(\\{M_1, \\ldots, M_n\\}\\). We can sample points \\(\\mathbf{x}\\) in 3D space and project them into different views. Any point \\(x\\) which projects into the object mask in all views is considered as occupied whereas if it projects outside the mask in even one of the views, it is considered as unoccupied. Thus, we get occupancy labels for a point \\(\\mathbf{x}\\) as \\(\\mathbf{s}^{gt} = \\cap_{i=1}^{n} M_i^{\\mathbf{x}_{p_i}}\\). Here, \\(M_i^{\\mathbf{x}_{p_i}} = 1\\) if \\(x_{p_i}\\) lies inside the mask \\(M_i\\) & 0 otherwise. Note that it is not possible to obtain SDF values in this manner, since distance to the object surface cannot be estimated in the absence of 3D objects models. While we can obtain 3D occupancy labels using this strategy, there are two important considerations: camera poses are unknown (required for projection) & how to balance the sampling of points inside & outside the object." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.703, + 0.789, + 0.779 + ], + "angle": 0, + "content": "Camera pose: We assume that the hand is rigidly moving with the object. This is not an unreasonable assumption, as humans rarely do in-hand manipulation in pick & place tasks involving small rigid objects. Thus, the relative pose of hand between different views reveals the relative pose of the object. This lets use the hand pose predicted by FrankMocap \\(\\{\\theta_1,\\dots ,\\theta_n\\}\\) to register the different views." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.78, + 0.789, + 0.84 + ], + "angle": 0, + "content": "Balanced sampling: In the absence of 3D object models, a natural choice is to sample points uniformly in 3D space. However, this leads to most points lying outside the object because the object location is unknown. Instead, we sample points in the hand coordinate frame. Consider the total number of points to be \\( q \\)." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.253 + ], + "angle": 0, + "content": "We adopt several strategies for balanced sampling for points inside \\( (s^{gt} = 1) \\) and outside the object \\( (s^{gt} = 0) \\). 
We uniformly sample \\( q / 2 \\) 3D points \\( \\mathbf{x} \\in \\mathbb{R}^3 \\) in the normalized hand coordinate frame and project these into all the available views. Since all these \\( q / 2 \\) points may not be occupied, we use rejection sampling to repeat the procedure, for a maximum of \\( t = 50 \\) times or until we get \\( q / 2 \\) occupied points. Also, all points projecting into the hand mask in all views and vertices of the MANO [52] hand are labeled as unoccupied." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.253, + 0.787, + 0.285 + ], + "angle": 0, + "content": "Formally, for images \\(\\{I_1,\\ldots ,I_n\\}\\) with object masks \\(\\{M_1,\\dots ,M_n\\}\\), hand masks \\(\\{H_{1},\\ldots ,H_{n}\\}\\) and MANO vertices \\(\\{V_{1},\\ldots ,V_{n}\\}\\), \\(\\mathbf{s}^{gt}\\) for \\(\\mathbf{x}\\) is:" + }, + { + "type": "equation", + "bbox": [ + 0.297, + 0.296, + 0.788, + 0.337 + ], + "angle": 0, + "content": "\\[\n\\mathbf{s}^{gt} = \\begin{cases} 1 & \\text{if } \\cap_{i=1}^{n} M_{i}^{\\mathbf{x}_{p_{i}}} \\text{ and } \\cap_{i=1}^{n} \\neg H_{i}^{\\mathbf{x}_{p_{i}}} \\text{ and } \\cup_{i=1}^{n} \\neg V_{i}^{\\mathbf{x}} \\\\ 0 & \\text{otherwise} \\end{cases} \\tag{1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.347, + 0.785, + 0.379 + ], + "angle": 0, + "content": "where \\(\\mathbf{x}_{p_i}\\) is the projection of \\(\\mathbf{x}\\), \\(M_{i}^{\\mathbf{x}_{p_i}} = 1\\) if \\(\\mathbf{x}_{p_i}\\) lies inside \\(M_{i}\\), \\(H_{i}^{\\mathbf{x}_{p_i}} = 1\\) if \\(\\mathbf{x}_{p_i}\\) lies inside \\(H_{i}\\), \\(V_{i}^{\\mathbf{x}} = 1\\) if \\(\\mathbf{x}\\) belongs to \\(V_{i}\\) and \\(\\neg\\) is the logical negation operator." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.379, + 0.788, + 0.453 + ], + "angle": 0, + "content": "Note that, due to hand occlusions and errors in FrankMocap predictions, it is possible that some 3D points belonging to the object are not projected into the object masks, but we do not want to label these points as unoccupied. So we disregard points which project onto the object mask in some views and the hand mask in other views, as these points could belong to the object due to hand occlusion." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.454, + 0.788, + 0.529 + ], + "angle": 0, + "content": "This is reminiscent of the visual hull algorithm [33, 42], which generates a 3D reconstruction by carving out space that projects outside the segmentation in any view. Visual hull algorithms need multiple views at test time to generate any output. In contrast, we are doing this at training time to obtain supervision for \\(\\mathcal{F}(\\mathbf{x};I_1,\\theta_1,K_1)\\), which makes predictions from a single view." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.529, + 0.772, + 0.546 + ], + "angle": 0, + "content": "Training: We use the cross-entropy loss (CE) to train \\(\\mathcal{F}\\) using the ground truth \\(\\mathbf{s}^{gt}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.407, + 0.558, + 0.788, + 0.575 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{visual-hull}} = \\operatorname{CE}(\\mathcal{F}(\\mathbf{x}), \\mathbf{s}^{gt}) \\tag{2}\n\\]" + },
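As a concrete illustration of the labeling rule in Eq. (1), the multi-view test can be sketched in a few lines of NumPy. This is a minimal sketch, not the authors' implementation: `project_fns` is a hypothetical list of per-view helpers standing in for the hand-pose-based registration plus camera projection, masks are binary arrays, and the rule above for disregarding points that fall in the object mask in some views but the hand mask in others is omitted for brevity.

```python
import numpy as np

def occupancy_labels(points, obj_masks, hand_masks, project_fns):
    """Visual-hull-style occupancy labels, mirroring Eq. (1).

    points:      (Q, 3) samples in the shared (hand) coordinate frame.
    obj_masks:   per-view (H, W) binary object masks M_i.
    hand_masks:  per-view (H, W) binary hand masks H_i.
    project_fns: hypothetical callables mapping (Q, 3) points to (Q, 2)
                 integer pixel coordinates (x, y) in the i-th view.
    Returns (Q,) labels: 1 iff a point projects inside the object mask and
    outside the hand mask in every view, else 0.
    """
    occupied = np.ones(len(points), dtype=bool)
    for obj, hand, project in zip(obj_masks, hand_masks, project_fns):
        uv = project(points)                           # (Q, 2) pixel coords
        h, w = obj.shape
        x, y = uv[:, 0], uv[:, 1]
        inb = (x >= 0) & (x < w) & (y >= 0) & (y < h)  # inside the image
        in_obj = np.zeros(len(points), dtype=bool)
        in_hand = np.zeros(len(points), dtype=bool)
        in_obj[inb] = obj[y[inb], x[inb]] > 0          # masks indexed (row, col)
        in_hand[inb] = hand[y[inb], x[inb]] > 0
        occupied &= in_obj & ~in_hand                  # one failing view => 0
    return occupied.astype(np.float32)
```

These labels feed the cross-entropy loss in Eq. (2); the rejection sampling described above simply re-draws points until roughly half of the \(q\) samples end up with label 1.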
+ { + "type": "text", + "bbox": [ + 0.214, + 0.585, + 0.788, + 0.647 + ], + "angle": 0, + "content": "To further regularize training, we also encourage the occupancy predictions from different views to be consistent with each other. Since our predictions are already in the hand coordinate frame, which is common across all views, this can be done by minimizing \\(\\mathcal{L}_{\\text{consistency}}\\) for different views \\(i\\) & \\(j\\) of the same object." + }, + { + "type": "equation", + "bbox": [ + 0.297, + 0.659, + 0.788, + 0.693 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{consistency}} = \\sum_{\\mathbf{x} \\in \\mathbb{R}^{3},\\, i \\neq j} \\operatorname{CE}\\left(\\mathcal{F}(\\mathbf{x}; I_{i}, \\theta_{i}, K_{i}), \\mathcal{F}(\\mathbf{x}; I_{j}, \\theta_{j}, K_{j})\\right) \\tag{3}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.724, + 0.663, + 0.74 + ], + "angle": 0, + "content": "3.3 2D Slice based 3D Discriminator as Shape Prior" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.788, + 0.843 + ], + "angle": 0, + "content": "We adopt an adversarial training framework [16] to build a prior on shapes of hand-held objects and use it to supervise the training of the occupancy prediction function \\(\\mathcal{F}(\\mathbf{x};I_1,\\theta_1^a,\\theta_1^w,K_1)\\). As such a prior can be challenging to hand-craft, we build it in a data-driven way. We use the 3D shape repository from the synthetic ObMan dataset [22], which contains more than \\(2.5\\mathrm{K}\\) hand-held objects, to learn the prior. Specifically, we train a discriminator \\(\\mathcal{D}\\) to differentiate between 3D shapes from" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.226, + 0.114, + 0.733, + 0.131 + ], + "angle": 0, + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "image", + "bbox": [ + 0.29, + 0.144, + 0.729, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.269, + 0.788, + 0.34 + ], + "angle": 0, + "content": "Fig. 3: 2D slice based 3D discriminator. We learn data-driven 3D shape priors using hand-held objects from the ObMan dataset. We sample planes through the object (shown above in blue), resulting in a 2D cross-section map. We pass occupancy predictions on points from these cross-sections through a discriminator which tries to distinguish cross-sections of predicted 3D shapes from cross-sections of ObMan objects (Sec. 3.3)." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.376, + 0.787, + 0.408 + ], + "angle": 0, + "content": "ObMan [22] and generated shapes as predicted by \\(\\mathcal{F}\\). We derive supervision for \\(\\mathcal{F}\\) by encouraging it to predict shapes that are real as per \\(\\mathcal{D}\\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.41, + 0.789, + 0.593 + ], + "angle": 0, + "content": "A natural choice is to train the discriminator with 3D input, e.g. an \\( N \\times N \\times N \\) cube in 3D voxel space [67]. One way to do this is to sample \\( N^3 \\) 3D points in the hand coordinate frame and run a forward pass through \\( \\mathcal{F} \\) to get the occupancy for each of these points. However, this is computationally expensive and often leads to a large imbalance, as most points lie outside the object (we ablate this in Sec. 4.3). Instead, we propose a novel 2D slice based 3D discriminator which operates on arbitrary 2D slices. These are computed by taking the cross-section of 2D planes with 3D shapes and sampling 3D points that lie on these 2D cross-sections.
The key intuition here is that the discriminator sees different randomly sampled 2D slices during the course of training, which helps it learn fine-grained shape information. E.g. for a sphere, all cross-sections are circular, but for a cylinder, most are oval. This helps distinguish between different 3D shapes." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.595, + 0.789, + 0.732 + ], + "angle": 0, + "content": "Sampling 2D slices: There are several important considerations in sampling 2D slices. First, uniformly sampling 2D planes often leads to most points lying outside the object, which is not useful for training the discriminator. Instead, we sample 2D planes that pass through the origin in the hand coordinate system. Since the objects are in contact with the hand, the sampled points are more likely to encompass the object. Then, we rotate the sampled 2D planes by arbitrary angles so that they are not axis aligned, to better capture fine-grained shape information. We ablate all these design choices in Sec. 4.3. This sampling function \\(\\mathcal{Z}\\) results in a set of 2D planes on which 3D points are uniformly sampled (see the sketch below)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.735, + 0.789, + 0.841 + ], + "angle": 0, + "content": "Training: We pass the sampled points from 2D slices of the generated 3D shape through \\(\\mathcal{F}\\) to get the corresponding occupancy values \\(S^{\\mathrm{gen}}\\). This represents the generated 3D shape. We adopt the same strategy for representing 3D shapes from ObMan (used as real shapes) but use the predictions \\(S^{\\mathrm{real}}\\) of the occupancy network overfitted on ObMan. As they come from an overfitted model, they generally match the ground truth slices well but at the same time are soft and prevent the discriminator from cheating." + },
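The slice sampling function \(\mathcal{Z}\) described above admits a short sketch, assuming a normalized hand coordinate frame; the grid size and extent are illustrative, and this is not the authors' code.

```python
import numpy as np

def sample_slice(grid=32, extent=1.0, rng=None):
    """Sample one 2D slice through the origin of the hand coordinate frame.

    Returns (grid*grid, 3) points on a randomly rotated plane through the
    origin; querying the occupancy network at these points yields the 2D
    cross-section map that the discriminator sees.
    """
    rng = np.random.default_rng() if rng is None else rng
    # Random rotation: QR decomposition of a Gaussian matrix gives an
    # orthogonal Q; flip one column if needed so det(Q) = +1.
    q, _ = np.linalg.qr(rng.normal(size=(3, 3)))
    if np.linalg.det(q) < 0:
        q[:, 0] = -q[:, 0]
    # Axis-aligned z = 0 plane through the origin ...
    lin = np.linspace(-extent, extent, grid)
    u, v = np.meshgrid(lin, lin)
    plane = np.stack([u.ravel(), v.ravel(), np.zeros(grid * grid)], axis=-1)
    # ... rotated so slices are not axis aligned; rotation preserves the origin.
    return plane @ q.T
```

Rotating the grid rather than the query keeps every slice passing through the origin, which is what makes contact with the hand-held object likely.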
+ { + "type": "equation", + "bbox": [ + 0.354, + 0.397, + 0.788, + 0.455 + ], + "angle": 0, + "content": "\\[\n\\begin{aligned} \\mathcal{L}_{\\mathrm{adv}}^{\\mathcal{D}} &= [\\mathcal{D}(S^{\\mathrm{real}}) - 1]^{2} + [\\mathcal{D}(S^{\\mathrm{gen}})]^{2} \\\\ \\mathcal{L}_{\\mathrm{adv}}^{\\mathcal{F}} &= [\\mathcal{D}(S^{\\mathrm{gen}}) - 1]^{2} \\\\ \\mathcal{L}_{\\text{shape-prior}} &= \\lambda_{f} \\mathcal{L}_{\\mathrm{adv}}^{\\mathcal{F}} + \\lambda_{d} \\mathcal{L}_{\\mathrm{adv}}^{\\mathcal{D}} \\end{aligned} \\tag{4}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.474, + 0.398, + 0.489 + ], + "angle": 0, + "content": "3.4 Training Details" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.498, + 0.787, + 0.528 + ], + "angle": 0, + "content": "We train \\(\\mathcal{F}\\) & \\(\\mathcal{D}\\) in an alternating manner, with 2 iterations of \\(\\mathcal{F}\\) for every iteration of \\(\\mathcal{D}\\). The total loss for training our framework is:" + }, + { + "type": "equation", + "bbox": [ + 0.341, + 0.537, + 0.787, + 0.575 + ], + "angle": 0, + "content": "\\[\n\\begin{aligned} \\mathcal{L}_{\\mathcal{F}} &= \\lambda_{v} \\mathcal{L}_{\\text{visual-hull}} + \\lambda_{c} \\mathcal{L}_{\\text{consistency}} + \\lambda_{f} \\mathcal{L}_{\\mathrm{adv}}^{\\mathcal{F}} \\\\ \\mathcal{L}_{\\mathcal{D}} &= \\lambda_{d} \\mathcal{L}_{\\mathrm{adv}}^{\\mathcal{D}} \\end{aligned} \\tag{5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.585, + 0.79, + 0.661 + ], + "angle": 0, + "content": "Following standard practice [73], we pretrain on synthetic ObMan. We train our model jointly on ObMan (3D supervision, shape priors) & VISOR (2D supervision) with a dataset ratio of ObMan:VISOR of 1:2. We use a batch size of 64 and a learning rate of 1e-5 across 4 NVIDIA A40 GPUs, with loss weights \\(\\lambda_v = 1, \\lambda_c = 1, \\lambda_f = 0.25, \\lambda_d = 0.25\\). Please refer to the supplementary for more details." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.681, + 0.636, + 0.697 + ], + "angle": 0, + "content": "3.5 Constructing Wild Objects in Hands Dataset" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.705, + 0.789, + 0.841 + ], + "angle": 0, + "content": "Our framework requires a dataset containing multi-view images of rigid hand-object interactions in the wild, with 3D hand pose and 2D object masks. To construct such a dataset, we consider VISOR [13], which provides 2D tracks for hands, the objects they are interacting with, and their segmentation masks. It contains a rich set of hand-object interactions, e.g. taking out milk from the fridge, pouring oil from bottles, kneading dough, cutting vegetables, and stirring noodles in a wok. Our interest is in the 3D reconstruction of rigid objects which are in contact with a hand, but there are no 3D object annotations in VISOR. Hence, we process it to prepare a dataset for training our model." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.226, + 0.115, + 0.731, + 0.129 + ], + "angle": 0, + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.145, + 0.788, + 0.215 + ], + "angle": 0, + "content": "Table 1: Generalization to novel objects in the wild. 
We report F-score at \\(5\\mathrm{mm}\\) & \\(10\\mathrm{mm}\\), Chamfer distance (CD, mm) for object generalization splits on MOW. We compare with AC-OCC & AC-SDF trained on different combinations of datasets with full 3D supervision. Our approach outperforms baselines across all metrics without using real-world 3D supervision (Relative % improvement w.r.t. best baseline in green)." + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.227, + 0.787, + 0.301 + ], + "angle": 0, + "content": "
MethodDataset and supervision usedF@5 ↑F@10 ↑CD ↓
AC-OCCObMan (Synthetic 3D)0.0950.1798.69
AC-SDF [73]ObMan (Synthetic 3D)0.1080.1997.82
AC-SDF [73]ObMan (Synthetic 3D) + HO3D (Lab 3D)0.0820.1597.52
AC-SDF [73]ObMan (Synthetic 3D) + HO3D (Lab 3D) + HOI4D (3D)0.0950.1937.43
HORSE (Ours)ObMan (Synthetic 3D) + VISOR (2D Masks) + Shape priors0.121+10.7%0.220+10.6%6.76+13.5%
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.331, + 0.784, + 0.42 + ], + "angle": 0, + "content": "We first sample a subset of VISOR involving hand-object contact, using available contact annotations. We select object tracks where only one hand is in consistent contact with the object. This leaves us with 14768 object tracks from the original VISOR dataset. We then manually filter this subset to select a subset that showcases manipulation of rigid objects with a single hand. This leaves us with 604 video snippets showing hands interacting with different objects." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.422, + 0.789, + 0.679 + ], + "angle": 0, + "content": "Processing hands on VISOR: We rely on the 3D hand poses to set up the output coordinate frame, compute hand articulation features, and more importantly to register the different frames together [38,66]. These hand poses are estimated using FrankMocap, which may not always be accurate. To remove erroneous poses, we employ automated filtering using the uncertainty estimate technique from Bahat & Shakhnarovich [1] following 3D human pose literature [50]. Specifically, we obtain 3D hand pose predictions on five different versions of the image, augmented by different fixed translations. The uncertainty estimate for a given image is computed as the standard deviation of reprojection locations of MANO vertices across these 5 image versions. This sidesteps the need to hand-specify the trade-off between translation, rotation, and articulation parameters that are part of the 3D hand pose output. This leaves us with 473 video snippets consisting of 144 object categories. This object diversity is \\(4 \\times\\) larger than existing datasets [18, 19, 32, 34, 69] used for our task, typically containing 10 to 32 object categories. We refer to this dataset as Wild Objects in Hands, some example object sequences are shown in Fig. 4. Note the *incidental* multiple views and relative consistency in hand and object pose over the course of interaction." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.704, + 0.377, + 0.721 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.738, + 0.342, + 0.751 + ], + "angle": 0, + "content": "4.1 Protocols" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.764, + 0.787, + 0.809 + ], + "angle": 0, + "content": "We use 4 datasets for training (ObMan [22], VISOR [13], HO3D [18], HOI4D [34]) and 2 datasets (MOW [5], HO3D) for evaluation. Different methods are trained on different datasets, depending on the specific evaluation setting." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Training datasets: ObMan is a large scale synthetic hand-object dataset with 2.5K objects and 3D supervision. HO3D & HOI4D are real world datasets collected" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.158, + 0.505, + 0.199 + ], + "angle": 0, + "content": "Table 2: HO3D Object generalization. We outperform AC-OCC & AC-SDF trained on different datasets with 3D supervision." + }, + { + "type": "table", + "bbox": [ + 0.218, + 0.2, + 0.505, + 0.277 + ], + "angle": 0, + "content": "
MethodSupervision (ObMan +)F@5F@10CD
AC-OCC-0.180.334.39
AC-SDF-0.170.333.72
AC-SDFMOW (3D)0.170.333.84
AC-SDFMOW (3D) + HOI4D (3D)0.170.333.63
OursVISOR (Multi-view 2D)0.200.353.39
" + }, + { + "type": "table_caption", + "bbox": [ + 0.51, + 0.158, + 0.789, + 0.199 + ], + "angle": 0, + "content": "Table 3: HO3D View generalization. We outperform HO [22] & GF [31], trained on HO3D with full 3D supervision." + }, + { + "type": "table", + "bbox": [ + 0.512, + 0.2, + 0.789, + 0.264 + ], + "angle": 0, + "content": "
MethodSupervision (ObMan +)F@5F@10CD
AC-SDF-0.170.323.72
HO [22]HO3D (3D)0.110.224.19
GF [31]HO3D (3D)0.120.244.96
OursHO3D (Multi-view 2D)0.230.431.41
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.308, + 0.786, + 0.368 + ], + "angle": 0, + "content": "in lab settings with 3D annotations. HO3D contains 10 YCB [82] objects whereas HOI4D contains 16 object categories, out of which 7 are rigid. VISOR does not contain any 3D supervision. Instead, we use the process described in Sec. 3.5, to extract supervision from VISOR, resulting in 144 object categories." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.369, + 0.789, + 0.718 + ], + "angle": 0, + "content": "The baselines are trained with different combinations of HO3D & HOI4D [34]. As our method does not require 3D ground truth, we do not use these datasets for training. Instead, we use auxiliary supervision from Wild Objects in Hands (Sec. 3.5) & learn shape priors using ObMan. VISOR does not have 3D annotations and can not be used to train the baselines. Note that all models are initialized from the model pretrained on ObMan for fair comparisons, following protocol [73]. Evaluation datasets: We focus on the challenging zero-shot generalization to novel objects in-the-wild setting. We use MOW [5] dataset which contains images from YouTube, spanning 120 object templates. Note that these types of images have not been seen during training. To be consistent with prior work [73], we also use HO3D for evaluation, consisting of 1221 testing images across 10 objects. While [73] operate in view generalization setting, i.e., making predictions on novel views of training objects, we also consider the more challenging object generalization setting. Almost all of our experiments are conducted in the object generalization setting where we assess predictions on novel objects across datasets. Metrics: Following [59, 73], we report Chamfer distance (CD) and F-score at \\(5\\mathrm{mm}\\) & \\(10\\mathrm{mm}\\) thresholds. F-score evaluates the distance between object surfaces as the harmonic mean between precision & recall. Precision measures accuracy of the reconstruction as \\(\\%\\) of reconstructed points that lie within a certain distance to ground truth. Recall measures completeness of the reconstruction as \\(\\%\\) of points, on the ground truth, that lie within a certain distance to the reconstruction. CD computes sum of distances for each pair of nearest neighbors in the two point clouds. We report mean CD & F-score over all test objects." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.72, + 0.79, + 0.841 + ], + "angle": 0, + "content": "Baselines: We compare our model with AC-SDF trained in supervised manner using 3D ground truth on different combination of datasets in different settings: (1) For object generalization on MOW in the wild, AC-SDF is trained on ObMan, ObMan + HO3D, ObMan + HO3D + HOI4D, (2) For object generalization on HO3D, AC-SDF is trained on ObMan, ObMan + MOW, ObMan + MOW + HOI4D, (3) For view generalization on HO3D, AC-SDF is trained on ObMan + HO3D. We also compare with an occupancy variant of AC-SDF (AC-OCC) and recent published methods with different forms of SDF representation, e.g." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.226, + 0.114, + 0.733, + 0.131 + ], + "angle": 0, + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "table_caption", + "bbox": [ + 0.215, + 0.145, + 0.495, + 0.214 + ], + "angle": 0, + "content": "Table 4: Comparison with relevant methods. 
Our approach also outperforms gSDF, AlignSDF & DDFHO (trained in the same setting as ours) in zero-shot generalization to MOW across most metrics." + }, + { + "type": "table", + "bbox": [ + 0.226, + 0.227, + 0.489, + 0.322 + ], + "angle": 0, + "content": "
MethodF@5 ↑F@10 ↑CD ↓
AC-SDF [73]0.1080.1997.82
AlignSDF [10]0.0990.1828.30
gSDF [9]0.1070.1977.50
DDFHO [77]0.0940.1663.06
HORSE (Ours)0.1210.2206.76
" + }, + { + "type": "table_caption", + "bbox": [ + 0.509, + 0.145, + 0.789, + 0.215 + ], + "angle": 0, + "content": "Table 5: 3D vs. 2D input to discriminator. Training with 3D inputs (at different resolutions) perform worse, likely due to coarse sampling resulting in very few points inside the object." + }, + { + "type": "table", + "bbox": [ + 0.521, + 0.227, + 0.782, + 0.323 + ], + "angle": 0, + "content": "
Disc. inputF@5 ↑F@10 ↑CD ↓
No disc.0.1170.2166.93
10 × 10 × 100.1200.2187.29
16 × 16 × 160.1150.2097.79
32 × 32 × 320.1040.1917.83
2D slices0.1210.2206.76
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.351, + 0.788, + 0.429 + ], + "angle": 0, + "content": "AlignSDF [10], gSDF [9], DDFHO [77]. Note that the VISOR dataset cannot be used for training since it does not have 3D supervision. For the view generalization setting on HO3D, we also compare with HO [22] & GF [31] trained with 3D ground truth on ObMan + HO3D. Recent works [44,70] on unsupervised reconstruction of objects require several views or depth, which are not available in our setting." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.451, + 0.325, + 0.466 + ], + "angle": 0, + "content": "4.2 Results" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.478, + 0.789, + 0.688 + ], + "angle": 0, + "content": "Object generalization in the wild: We first examine if the auxiliary supervision from visual hull and shape prior is useful for generalization to novel objects in the wild. We evaluate on MOW in Tab. 1 and compare with AC-OCC & AC-SDF trained on different combinations of ObMan, HO3D, HOI4D datasets with 3D supervision. Our approach provides gains of \\(24.3\\%\\) compared to AC-OCC (trained on ObMan) and \\(11.6\\%\\) on AC-SDF (trained on ObMan). This shows the benefits of our supervision cues in the wild over training on just large scale synthetic data with 3D supervision. We also outperform AC-SDF trained on ObMan + HO3D + HOI4D with full 3D supervision by \\(16.8\\%\\) across all metrics. This indicates that our supervision cues from in-the-wild VISOR are better than using 3D supervision on lab datasets with limited diversity in objects. We also outperform relevant methods that use different forms of SDF representations, e.g. AlignSDF, gSDF & DDFHO across most metrics (Tab. 4). Note that our contributions are orthogonal and could be combined with these works." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.689, + 0.789, + 0.764 + ], + "angle": 0, + "content": "Adding 3D supervision to AC-SDF. In Tab. 1, we observe that adding more data from HO3D & HOI4D to AC-SDF training did not help in zero-shot generalization to MOW. Instead, the performance drops compared to AC-SDF trained on ObMan. This is likely due to limited diversity in HO3D: 10 YCB objects, HOI4D: 7 rigid object categories & the model overfitting to these categories." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.789, + 0.84 + ], + "angle": 0, + "content": "Object generalization on HO3D: Our approach is better than AC-OCC & AC-SDF trained on different datasets with 3D supervision (Tab. 2). This further shows the benefits of auxiliary supervision from VISOR for object generalization. Also, AC-SDF does not benefit from MOW & HOI4D. This could because HO3D evaluates on 10 objects only and they may not be present in MOW or HOI4D." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.482, + 0.215 + ], + "angle": 0, + "content": "Table 6: Supervision quality on HO3D. Automated filtering to remove incorrect hand poses improves results & using ground truth hand pose differs little compared to predicted pose.\\(^{1}\\)" + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.227, + 0.482, + 0.296 + ], + "angle": 0, + "content": "
F@5 ↑F@10 ↑CD ↓
HORSE (base setting)0.2340.4341.41
no training on HO3D0.1750.3293.72
w/o filtering0.2130.4051.42
w/ ground truth pose10.2430.4441.39
" + }, + { + "type": "table_caption", + "bbox": [ + 0.492, + 0.145, + 0.788, + 0.215 + ], + "angle": 0, + "content": "Table 7: Role of different loss functions. We report F-score at \\(5\\mathrm{mm}\\) & \\(10\\mathrm{mm}\\), Chamfer distance (CD, mm) for different variants of our model on MOW. All losses are effective & multiview supervision leads to largest gain." + }, + { + "type": "table", + "bbox": [ + 0.498, + 0.227, + 0.787, + 0.296 + ], + "angle": 0, + "content": "
\\( \\mathcal{L}_{\\text{ObMan}} \\)\\( \\mathcal{L}_{\\text{visual-hull}} \\)\\( \\mathcal{L}_{\\text{consistency}} \\)\\( \\mathcal{L}_{\\text{shape-prior}} \\)\\( \\mathbf{F@5} \\uparrow \\)\\( \\mathbf{F@10} \\uparrow \\)\\( \\mathbf{CD} \\downarrow \\)
0.0950.1818.69
0.1110.2057.26
0.0730.13212.75
0.0970.17510.29
0.1170.2166.93
0.1210.2206.76
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.324, + 0.788, + 0.4 + ], + "angle": 0, + "content": "Occupancy vs SDF. We see that SDF formulation is better than occupancy when trained with full 3D supervision (AC-OCC vs. AC-SDF). In contrast, we find SDF training to be unstable (does not give meaningful predictions) with auxiliary supervision. This could be because regressing continuous SDF values with weak supervision is harder than binary classification for occupancy values." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.4, + 0.788, + 0.492 + ], + "angle": 0, + "content": "View generalization results on HO3D. In Tab. 3, we see gains with using supervision cues over just training on synthetic data, consistent with trends in the object generalization setting. We also outperform HO [22] & GF [31], both trained on HO3D using full 3D supervision. We outperform these methods even without any images from HO3D (last row in Tab. 1 vs. GF & HO in Table 3), likely due to use of more expressive pixel-aligned & hand articulation features." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.511, + 0.39, + 0.527 + ], + "angle": 0, + "content": "4.3 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.535, + 0.788, + 0.641 + ], + "angle": 0, + "content": "Analysis of supervision quality. We also observe in Tab. 3 that our method is able to bridge more than \\(40\\%\\) of the gap between no training on HO3D to training with full 3D supervision. We further use the view generalization setting to assess the quality of 2D object mask supervision used in our method in Tab. 6. Our automated filtering of frames with inaccurate hand poses (as described in Sec. 3.5) is crucial for good performance. Also, little is lost from using hand pose as a proxy for object pose on the HO3D dataset.\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.641, + 0.788, + 0.732 + ], + "angle": 0, + "content": "Role of different loss terms: We experiment with multiple variants of our model to assess the importance of different loss terms. We start with the AC-OCC model trained on ObMan and gradually add \\(\\mathcal{L}_{\\mathrm{visual - hull}}\\), \\(\\mathcal{L}_{\\mathrm{consistency}}\\), and \\(\\mathcal{L}_{\\mathrm{shape - prior}}\\). From the results in Tab. 7, we observe that \\(\\mathcal{L}_{\\mathrm{visual - hull}}\\) is more effective than \\(\\mathcal{L}_{\\mathrm{consistency}}\\) and using them together provides further benefits. Moreover, \\(\\mathcal{L}_{\\mathrm{shape - prior}}\\) improves performance on top of \\(\\mathcal{L}_{\\mathrm{consistency}}\\) and \\(\\mathcal{L}_{\\mathrm{visual - hull}}\\)." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.732, + 0.787, + 0.762 + ], + "angle": 0, + "content": "3D vs 2D input to discriminator: We also consider 3D volumes as input to the discriminator (instead of 2D cross-sections). For this, we need to sample \\(64 \\times 64 \\times 64\\)" + }, + { + "type": "page_footnote", + "bbox": [ + 0.218, + 0.77, + 0.788, + 0.84 + ], + "angle": 0, + "content": "1 While [73] uses similar contrast between predicted vs. ground truth hands to make claims, we note that those claims & this result should be taken with a grain of salt. FrankMocap is trained on HO3D, so its predictions on HO3D are better than they would be on unseen data. As most of our models are trained on VISOR (not used for training FrankMocap), our other experiments do not suffer from this issue." 
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.226, + 0.114, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "table_caption", + "bbox": [ + 0.215, + 0.145, + 0.788, + 0.214 + ], + "angle": 0, + "content": "Table 8: Design choices for mask guided sampling. Uniformly sampling coordinates is much worse than the rejection sampling used in our method. Using negative points from hand masks is useful. Table 9: Sampling method for 2D planes. Sampling planes through the origin of the hand coordinate system & rotated randomly performs the best compared to sampling axis-aligned planes either uniformly or through the origin." + }, + { + "type": "table", + "bbox": [ + 0.222, + 0.227, + 0.785, + 0.294 + ], + "angle": 0, + "content": "<table><thead>
Sampling methodF@5 ↑ F@10 ↑ CD ↓Sampling methodF@5 ↑ F@10 ↑ CD ↓
Uniform0.0930.16610.29Uniform (axis-aligned)0.1150.2087.01
Ours (no hand points)0.1130.2077.69Origin (axis-aligned)0.0980.1838.52
Ours0.1170.2166.93Origin (random rotation)0.1210.2206.76
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.323, + 0.788, + 0.429 + ], + "angle": 0, + "content": "\\((=262144)\\) points & run several forward passes of our model to get occupancies. Since this is computationally expensive, we sample points at coarser resolutions: \\(32 \\times 32 \\times 32\\), \\(16 \\times 16 \\times 16\\), \\(10 \\times 10 \\times 10\\). We use \\(32 \\times 32\\) size 2D slices, so \\(10 \\times 10 \\times 10\\) 3D volume has no. of points & takes similar compute. We see that 2D slices perform better than 3D volumes (Tab. 5). Also, the performance gets worse with increase in the sampled 3D volume, likely due to 3D sampling being so coarse that very few points lie inside the object, thus unable to capture fine-grained shape." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.43, + 0.788, + 0.49 + ], + "angle": 0, + "content": "Sampling 2D slices for discriminator: We ablate different design choices (Sec. 3.3) in Tab. 9. We observe that sampling 2D planes through origin of the hand coordinate system and rotated randomly performs the best compared to sampling axis-aligned frames either uniformly or through origin." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.49, + 0.79, + 0.552 + ], + "angle": 0, + "content": "Design choices for mask guided sampling: We run rejection sampling (with hand & object masks) to sample points in the hand coordinate frame (Sec. 3.2). We compare with 2 variants: uniformly sampling in the hand frame & removing negative points from hand masks. We find our strategy to work the best (Tab. 8)." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.576, + 0.378, + 0.59 + ], + "angle": 0, + "content": "4.4 Visualizations" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.603, + 0.788, + 0.725 + ], + "angle": 0, + "content": "We compare the mesh generated by our model and AC-SDF (trained on ObManbest baseline) on zero-shot generalization to MOW (Fig. 5) and Core50 [35](Fig. 6). For this, we sample points uniformly in a \\(64 \\times 64 \\times 64\\) volume, predict their occupancies or SDF from the network and run marching cubes [36]. We project the mesh into the input image & render it in different views. Our model captures the visual hull of the object, as evidenced by the projection of the mesh onto the image, and generates more coherent shapes than AC-SDF, which often reconstructs disconnected and scattered shapes. More visualizations are in supplementary." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.749, + 0.357, + 0.763 + ], + "angle": 0, + "content": "4.5 Limitations" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.777, + 0.788, + 0.839 + ], + "angle": 0, + "content": "Inaccurate hand pose. We use predictions from FrankMocap for hand pose & camera parameters. Note that the sampled points do not cover the entire object if the hand pose is not accurate, due to mis-projection into the image plane. This leads to exclusion of points in certain parts of the object (Fig. 7)." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "image", + "bbox": [ + 0.222, + 0.142, + 0.768, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.312, + 0.789, + 0.397 + ], + "angle": 0, + "content": "Fig. 5: Visualizations on MOw object generalization split. 
We show the object mesh projected onto the image and rendered in different views for our HORSE model and compare with the AC-SDF model trained on ObMan dataset with 3D supervision (best baseline model). We also show the ground truth (GT) object model. We observe that our model is able to predict the object shape more accurately than AC-SDF which often reconstructs smaller and disconnected shapes." + }, + { + "type": "image", + "bbox": [ + 0.222, + 0.406, + 0.773, + 0.51 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.523, + 0.79, + 0.568 + ], + "angle": 0, + "content": "Fig. 6: Visualizations on zero-shot generalization to Core50 [35]. We show the object mesh projected onto the image and rendered in different views on Core50. HORSE predicts better shapes than AC-SDF (best baseline, often leads to artifacts)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.594, + 0.518, + 0.687 + ], + "angle": 0, + "content": "Limited object views. Videos in the wild often do not capture \\(360^{\\circ}\\) view of the object, e.g. kettle in Fig. 7. This is different than lab settings where the interactions are often constrained & multi-camera setup is used to capture all sides of the object." + }, + { + "type": "image", + "bbox": [ + 0.528, + 0.596, + 0.789, + 0.664 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.527, + 0.672, + 0.788, + 0.702 + ], + "angle": 0, + "content": "Fig. 7: Sampled points do not cover the entire object if hand pose is inaccurate." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.705, + 0.36, + 0.722 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.734, + 0.789, + 0.843 + ], + "angle": 0, + "content": "We present an approach for reconstructing hand-held objects in 3D from a single image. We propose modules to extract supervision from in-the-wild videos & learn data-driven 3D shape priors from synthetic ObMan to circumvent the need for direct 3D supervision. Experiments show that our approach generalizes better to novel objects in the wild than baselines trained using 3D supervision. Future directions include jointly optimizing the hand pose with the object shape to deal with inaccurate hand poses or incorporating additional cues, e.g. contact priors." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.226, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.239 + ], + "angle": 0, + "content": "Acknowledgements: We thank Ashish Kumar, Erin Zhang, Arjun Gupta, Shaowei Liu, Anand Bhattachad, Pranay Thangeda & Kashyap Chitta for feedback on the draft. This material is based upon work supported by NSF (IIS2007035), NASA (80NSSC21K1030), DARPA (Machine Common Sense program), an Amazon Research Award, an NVIDIA Academic Hardware Grant, and the NCSA Delta System (supported by NSF OCI 2005572 and the State of Illinois)." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.263, + 0.326, + 0.279 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.295, + 0.787, + 0.325 + ], + "angle": 0, + "content": "1. Bahat, Y., Shakhnarovich, G.: Confidence from invariance to image transformations. 
arXiv (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.325, + 0.787, + 0.367 + ], + "angle": 0, + "content": "2. Brahmbhatt, S., Ham, C., Kemp, C.C., Hays, J.: Contactdb: Analyzing and predicting grasp contact via thermal imaging. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.367, + 0.787, + 0.409 + ], + "angle": 0, + "content": "3. Brahmbhatt, S., Tang, C., Twigg, C.D., Kemp, C.C., Hays, J.: Contactpose: A dataset of grasps with object contact and hand pose. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.409, + 0.787, + 0.437 + ], + "angle": 0, + "content": "4. Buckingham, G.: Hand tracking for immersive virtual reality: Opportunities and challenges. Frontiers in Virtual Reality (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.437, + 0.787, + 0.478 + ], + "angle": 0, + "content": "5. Cao, Z., Radosavovic, I., Kanazawa, A., Malik, J.: Reconstructing hand-object interactions in the wild. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.479, + 0.787, + 0.52 + ], + "angle": 0, + "content": "6. Chang, A.X., Funkhouser, T.A., Guibas, L.J., Hanrahan, P., Huang, Q., Li, Z., Savarese, S., Savva, M., Song, S., Su, H., Xiao, J., Yi, L., Yu, F.: Shapenet: An information-rich 3D model repository. ArXiv (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.521, + 0.787, + 0.562 + ], + "angle": 0, + "content": "7. Chang, M., Prakash, A., Gupta, S.: Look ma, no hands! agent-environment factorization of egocentric videos. In: Advances in Neural Information Processing Systems (NeurIPS) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.562, + 0.787, + 0.618 + ], + "angle": 0, + "content": "8. Chao, Y., Yang, W., Xiang, Y., Molchanov, P., Handa, A., Tremblay, J., Narang, Y.S., Wyk, K.V., Iqbal, U., Birchfield, S., Kautz, J., Fox, D.: Dexycb: A benchmark for capturing hand grasping of objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.618, + 0.787, + 0.659 + ], + "angle": 0, + "content": "9. Chen, Z., Chen, S., Schmid, C., Laptev, I.: gsdf: Geometry-driven signed distance functions for 3d hand-object reconstruction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.66, + 0.787, + 0.702 + ], + "angle": 0, + "content": "0. Chen, Z., Hasson, Y., Schmid, C., Laptev, I.: Alignsdf: Pose-aligned signed distance fields for hand-object reconstruction. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.702, + 0.787, + 0.743 + ], + "angle": 0, + "content": "1. Choi, H., Chavan-Dafle, N., Yuan, J., Isler, V., Park, H.: Handnerf: Learning to reconstruct hand-object interaction scene from a single rgb image. In: International Conference on Robotics and Automation (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.22, + 0.743, + 0.787, + 0.799 + ], + "angle": 0, + "content": "12. Damen, D., Doughty, H., Farinella, G.M., Fidler, S., Furnari, A., Kazakos, E., Moltisanti, D., Munro, J., Perrett, T., Price, W., Wray, M.: Scaling egocentric vision: The epic-kitchens dataset. 
Proceedings of the European Conference on Computer Vision (ECCV) (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.22, + 0.799, + 0.787, + 0.84 + ], + "angle": 0, + "content": "13. Darkhalil, A., Shan, D., Zhu, B., Ma, J., Kar, A., Higgins, R., Fidler, S., Fouhey, D., Damen, D.: Epic-kitchens visor benchmark: Video segmentations and object relations. In: NeurIPS Track on Datasets and Benchmarks (2022)" + }, + { + "type": "list", + "bbox": [ + 0.22, + 0.295, + 0.787, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.147, + 0.787, + 0.189 + ], + "angle": 0, + "content": "14. Fan, Z., Parelli, M., Kadoglou, M.E., Kocabas, M., Chen, X., Black, M.J., Hilliges, O.: Hold: Category-agnostic 3d reconstruction of interacting hands and objects from video. arXiv preprint arXiv:2311.18448 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.19, + 0.787, + 0.233 + ], + "angle": 0, + "content": "15. Garcia-Hernando, G., Yuan, S., Baek, S., Kim, T.: First-person hand action benchmark with RGB-D videos and 3d hand pose annotations. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.234, + 0.787, + 0.275 + ], + "angle": 0, + "content": "16. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial nets. In: Advances in Neural Information Processing Systems (NeurIPS) (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.276, + 0.787, + 0.332 + ], + "angle": 0, + "content": "17. Grauman, K., Westbury, A., Byrne, E., Chavis, Z., Furnari, A., Girdhar, R., Hamburger, J., Jiang, H., Liu, M., Liu, X., et al.: Ego4d: Around the world in 3,000 hours of egocentric video. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.333, + 0.787, + 0.375 + ], + "angle": 0, + "content": "18. Hampali, S., Rad, M., Oberweger, M., Lepetit, V.: Honnotate: A method for 3d annotation of hand and object poses. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.375, + 0.787, + 0.431 + ], + "angle": 0, + "content": "19. Hampali, S., Sarkar, S.D., Rad, M., Lepetit, V.: Keypoint transformer: Solving joint identification in challenging hands and object interactions for accurate 3d pose estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.431, + 0.787, + 0.487 + ], + "angle": 0, + "content": "20. Han, S., Liu, B., Cabezas, R., Twigg, C.D., Zhang, P., Petkau, J., Yu, T., Tai, C., Akbay, M., Wang, Z., Nitzan, A., Dong, G., Ye, Y., Tao, L., Wan, C., Wang, R.: Megatrack: monochrome egocentric articulated hand-tracking for virtual reality. ACM Transactions on Graphics (TOG) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.488, + 0.787, + 0.543 + ], + "angle": 0, + "content": "21. 
Hasson, Y., Tekin, B., Bogo, F., Laptev, I., Pollefeys, M., Schmid, C.: Leveraging photometric consistency over time for sparsely supervised hand-object reconstruction. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.545, + 0.787, + 0.599 + ], + "angle": 0, + "content": "22. Hasson, Y., Varol, G., Tzionas, D., Kalevatykh, I., Black, M.J., Laptev, I., Schmid, C.: Learning joint reconstruction of hands and manipulated objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.601, + 0.787, + 0.642 + ], + "angle": 0, + "content": "23. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.643, + 0.787, + 0.699 + ], + "angle": 0, + "content": "24. Heppert, N., Irshad, M.Z., Zakharov, S., Liu, K., Ambrus, R.A., Bohg, J., Valada, A., Kollar, T.: CARTO: category and joint agnostic reconstruction of articulated objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.7, + 0.787, + 0.741 + ], + "angle": 0, + "content": "25. Huang, D., Ji, X., He, X., Sun, J., He, T., Shuai, Q., Ouyang, W., Zhou, X.: Reconstructing hand-held objects from monocular video. In: ACM Transactions on Graphics (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.742, + 0.787, + 0.784 + ], + "angle": 0, + "content": "26. Irshad, M.Z., Zakharov, S., Ambrus, R., Kollar, T., Kira, Z., Gaidon, A.: Shapo: Implicit representations for multi-object shape, appearance, and pose optimization. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.785, + 0.787, + 0.84 + ], + "angle": 0, + "content": "27. Irshad, M.Z., Zakharov, S., Liu, K., Guizilini, V., Kollar, T., Gaidon, A., Kira, Z., Ambrus, R.: Neo 360: Neural fields for sparse view synthesis of outdoor scenes. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2023)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.227, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.148, + 0.785, + 0.189 + ], + "angle": 0, + "content": "28. Jiang, H., Liu, S., Wang, J., Wang, X.: Hand-object contact consistency reasoning for human grasps generation. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.19, + 0.785, + 0.23 + ], + "angle": 0, + "content": "29. Kanazawa, A., Tulsiani, S., Efros, A.A., Malik, J.: Learning category-specific mesh reconstruction from image collections. In: Proceedings of the European Conference on Computer Vision (ECCV) (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.23, + 0.785, + 0.271 + ], + "angle": 0, + "content": "30. 
Kar, A., Tulsiani, S., Carreira, J., Malik, J.: Category-specific object reconstruction from a single image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.271, + 0.785, + 0.312 + ], + "angle": 0, + "content": "31. Karunratanakul, K., Yang, J., Zhang, Y., Black, M.J., Muandet, K., Tang, S.: Grasping field: Learning implicit representations for human grasps. In: Proceedings of the International Conference on 3D Vision (3DV) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.312, + 0.785, + 0.353 + ], + "angle": 0, + "content": "32. Kwon, T., Tekin, B., Stühmer, J., Bogo, F., Pollefeys, M.: H2O: two hands manipulating objects for first person interaction recognition. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.353, + 0.787, + 0.393 + ], + "angle": 0, + "content": "33. Laurentini, A.: The visual hull concept for silhouette-based image understanding. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI) 16, 150-162 (1994)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.393, + 0.785, + 0.447 + ], + "angle": 0, + "content": "34. Liu, Y., Liu, Y., Jiang, C., Lyu, K., Wan, W., Shen, H., Liang, B., Fu, Z., Wang, H., Yi, L.: HOI4D: A 4d egocentric dataset for category-level human-object interaction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.447, + 0.785, + 0.488 + ], + "angle": 0, + "content": "35. Lomonaco, V., Maltoni, D.: Core50: a new dataset and benchmark for continuous object recognition. In: Proceedings of the Conference on Robot Learning (CoRL) (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.488, + 0.785, + 0.515 + ], + "angle": 0, + "content": "36. Lorensen, W.E., Cline, H.E.: Marching cubes: A high resolution 3D surface construction algorithm. ACM Transactions on Graphics (1987)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.515, + 0.785, + 0.555 + ], + "angle": 0, + "content": "37. Lunayach, M., Zakharov, S., Chen, D., Ambrus, R., Kira, Z., Irshad, M.Z.: FSD: fast self-supervised single RGB-D to categorical 3d objects. arXiv abs/2310.12974 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.555, + 0.785, + 0.597 + ], + "angle": 0, + "content": "38. Ma, W.C., Yang, A.J., Wang, S., Urtasun, R., Torralba, A.: Virtual correspondence: Humans as a cue for extreme-view geometry. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.597, + 0.785, + 0.637 + ], + "angle": 0, + "content": "39. Mandikal, P., Grauman, K.: Dexvip: Learning dexterous grasping with human hand pose priors from video. In: Proceedings of the Conference on Robot Learning (CoRL) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.637, + 0.785, + 0.678 + ], + "angle": 0, + "content": "40. Mandikal, P., Grauman, K.: Learning dexterous grasping with object-centric visual affordances. In: Proceedings of the IEEE International Conference on Robotics and Automation (ICRA) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.678, + 0.785, + 0.718 + ], + "angle": 0, + "content": "41. Mao, X., Li, Q., Xie, H., Lau, R.Y.K., Wang, Z., Smolley, S.P.: Least squares generative adversarial networks. 
In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.718, + 0.785, + 0.745 + ], + "angle": 0, + "content": "42. Matusik, W., Buehler, C., Raskar, R., Gortler, S.J., McMillan, L.: Image-based visual hulls. In: ACM Transactions on Graphics (2000)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.745, + 0.785, + 0.786 + ], + "angle": 0, + "content": "43. Mescheder, L., Oechsle, M., Niemeyer, M., Nowozin, S., Geiger, A.: Occupancy networks: Learning 3d reconstruction in function space. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.786, + 0.785, + 0.84 + ], + "angle": 0, + "content": "44. Niemeyer, M., Mescheder, L.M., Oechsle, M., Geiger, A.: Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.148, + 0.787, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "18" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.148, + 0.787, + 0.189 + ], + "angle": 0, + "content": "45. Park, J.J., Florence, P., Straub, J., Newcombe, R., Lovegrove, S.: Deepsdf: Learning continuous signed distance functions for shape representation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.19, + 0.787, + 0.232 + ], + "angle": 0, + "content": "46. Prakash, A., Tu, R., Chang, M., Gupta, S.: 3d hand pose estimation in everyday egocentric images. In: Proceedings of the European Conference on Computer Vision (ECCV) (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.232, + 0.787, + 0.273 + ], + "angle": 0, + "content": "47. Qin, Y., Su, H., Wang, X.: From one hand to multiple hands: Imitation learning for dexterous manipulation from single-camera teleoperation. Proceedings of the International Conference on Intelligent Robots and Systems (IROS) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.273, + 0.787, + 0.314 + ], + "angle": 0, + "content": "48. Qin, Y., Wu, Y., Liu, S., Jiang, H., Yang, R., Fu, Y., Wang, X.: Dexamv: Imitation learning for dexterous manipulation from human videos. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.315, + 0.787, + 0.342 + ], + "angle": 0, + "content": "49. Rijpkema, H., Girard, M.: Computer animation of knowledge-based human grasping. In: Thomas, J.J. (ed.) ACM Transactions on Graphics (1991)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.343, + 0.787, + 0.37 + ], + "angle": 0, + "content": "50. Rockwell, C., Fouhey, D.F.: Full-body awareness from partial observations. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.371, + 0.787, + 0.412 + ], + "angle": 0, + "content": "51. Rogez, G., Khademi, M., Supancic III, J., Montiel, J.M.M., Ramanan, D.: 3d hand pose detection in egocentric rgb-d images. 
In: Proceedings of the European Conference on Computer Vision (ECCV) (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.412, + 0.787, + 0.453 + ], + "angle": 0, + "content": "52. Romero, J., Kjellström, H., Kragic, D.: Hands in action: real-time 3D reconstruction of hands in interaction with objects. In: Proceedings of the IEEE International Conference on Robotics and Automation (ICRA) (2010)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.453, + 0.787, + 0.48 + ], + "angle": 0, + "content": "53. Romero, J., Tzionas, D., Black, M.J.: Embodied hands: Modeling and capturing hands and bodies together. ACM Transactions on Graphics (ToG) (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.481, + 0.787, + 0.522 + ], + "angle": 0, + "content": "54. Rong, Y., Shiratori, T., Joo, H.: Frankmocap: Fast monocular 3D hand and body motion capture by regression and integration. Proceedings of the IEEE International Conference on Computer Vision Workshops (ICCV Workshops) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.522, + 0.787, + 0.564 + ], + "angle": 0, + "content": "55. Saito, S., Huang, Z., Natsume, R., Morishima, S., Kanazawa, A., Li, H.: Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.565, + 0.787, + 0.592 + ], + "angle": 0, + "content": "56. Schonberger, J.L., Frahm, J.M.: Structure-from-motion revisited. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.592, + 0.787, + 0.633 + ], + "angle": 0, + "content": "57. Shan, D., Geng, J., Shu, M., Fouhey, D.F.: Understanding human hands in contact at internet scale. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.633, + 0.787, + 0.675 + ], + "angle": 0, + "content": "58. Taheri, O., Ghorbani, N., Black, M.J., Tzionas, D.: GRAB: A dataset of whole-body human grasping of objects. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.675, + 0.787, + 0.716 + ], + "angle": 0, + "content": "59. Tatarchenko, M., Richter, S.R., Ranftl, R., Li, Z., Koltun, V., Brox, T.: What do single-view 3d reconstruction networks learn? In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.717, + 0.787, + 0.758 + ], + "angle": 0, + "content": "60. Truong, P., Rakotosaona, M., Manhardt, F., Tombari, F.: SPARF: neural radiance fields from sparse and noisy poses. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.759, + 0.787, + 0.799 + ], + "angle": 0, + "content": "61. Tschernezki, V., Darkhalil, A., Zhu, Z., Fouhey, D., Laina, I., Larlus, D., Damen, D., Vedaldi, A.: EPIC fields: Marrying 3d geometry and video understanding. In: Advances in Neural Information Processing Systems (NeurIPS) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.799, + 0.787, + 0.841 + ], + "angle": 0, + "content": "62. Tschernezki, V., Laina, I., Larlus, D., Vedaldi, A.: Neural feature fusion fields: 3d distillation of self-supervised 2d image representations. 
In: Proceedings of the International Conference on 3D Vision (3DV) (2022)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.148, + 0.787, + 0.841 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.227, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "19" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.147, + 0.785, + 0.189 + ], + "angle": 0, + "content": "63. Tschernezki, V., Larlus, D., Vedaldi, A.: Neuraldiff: Segmenting 3d objects that move in egocentric videos. In: Proceedings of the International Conference on 3D Vision (3DV) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.19, + 0.785, + 0.232 + ], + "angle": 0, + "content": "64. Tulsiani, S., Efros, A.A., Malik, J.: Multi-view consistency as supervisory signal for learning shape and pose prediction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.232, + 0.785, + 0.286 + ], + "angle": 0, + "content": "65. Turpin, D., Wang, L., Heiden, E., Chen, Y., Macklin, M., Tsogkas, S., Dickinson, S.J., Garg, A.: Grasp'd: Differentiable contact-rich grasp synthesis for multi-fingered hands. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.287, + 0.785, + 0.328 + ], + "angle": 0, + "content": "66. Tzionas, D., Gall, J.: 3d object reconstruction from hand-object interactions. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.328, + 0.785, + 0.37 + ], + "angle": 0, + "content": "67. Wu, J., Zhang, C., Xue, T., Freeman, W.T., Tenenbaum, J.B.: Learning a probabilistic latent space of object shapes via 3d generative-adversarial modeling. In: Advances in Neural Information Processing Systems (NeurIPS) (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.371, + 0.785, + 0.411 + ], + "angle": 0, + "content": "68. Wu, Y., Wang, J., Wang, X.: Learning generalizable dexterous manipulation from human grasp affordance. In: Proceedings of the Conference on Robot Learning (CoRL) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.411, + 0.785, + 0.453 + ], + "angle": 0, + "content": "69. Yang, L., Li, K., Zhan, X., Wu, F., Xu, A., Liu, L., Lu, C.: Oakink: A large-scale knowledge repository for understanding hand-object interaction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.453, + 0.785, + 0.495 + ], + "angle": 0, + "content": "70. Yariv, L., Kasten, Y., Moran, D., Galun, M., Atzmon, M., Basri, R., Lipman, Y.: Multiview neural surface reconstruction by disentangling geometry and appearance In: Advances in Neural Information Processing Systems (NeurIPS) (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.495, + 0.785, + 0.522 + ], + "angle": 0, + "content": "71. Ye, J., Wang, J., Huang, B., Qin, Y., Wang, X.: Learning continuous grasping function with a dexterous hand from human demonstrations. arXiv (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.522, + 0.785, + 0.564 + ], + "angle": 0, + "content": "72. 
Ye, Y., Gupta, A., Kitani, K., Tulsiani, S.: G-HOP: generative hand-object prior for interaction reconstruction and grasp synthesis. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.564, + 0.785, + 0.605 + ], + "angle": 0, + "content": "73. Ye, Y., Gupta, A., Tulsiani, S.: What's in your hands? 3D reconstruction of generic objects in hands. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.606, + 0.785, + 0.647 + ], + "angle": 0, + "content": "74. Ye, Y., Hebbar, P., Gupta, A., Tulsiani, S.: Diffusion-guided reconstruction of everyday hand-object interaction clips. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.647, + 0.785, + 0.688 + ], + "angle": 0, + "content": "75. Ye, Y., Tulsiani, S., Gupta, A.: Shelf-supervised mesh prediction in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.689, + 0.785, + 0.73 + ], + "angle": 0, + "content": "76. Yu, A., Ye, V., Tancik, M., Kanazawa, A.: pixelnerf: Neural radiance fields from one or few images. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.73, + 0.785, + 0.772 + ], + "angle": 0, + "content": "77. Zhang, C., Di, Y., Zhang, R., Zhai, G., Manhardt, F., Tombari, F., Ji, X.: DDF-HO: hand-held object reconstruction via conditional directed distance field. In: Advances in Neural Information Processing Systems (NeurIPS) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.772, + 0.785, + 0.813 + ], + "angle": 0, + "content": "78. Zhou, T., Brown, M., Snavely, N., Lowe, D.G.: Unsupervised learning of depth and ego-motion from video. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.813, + 0.785, + 0.841 + ], + "angle": 0, + "content": "79. Zhu, Z., Damen, D.: Get a grip: Reconstructing hand-object stable grasps in egocentric videos. arXiv preprint arXiv:2312.15719 (2023)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.785, + 0.841 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "20" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.116, + 0.385, + 0.127 + ], + "angle": 0, + "content": "A. Prakash et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.148, + 0.785, + 0.189 + ], + "angle": 0, + "content": "80. Zimmermann, C., Brox, T.: Learning to estimate 3d hand pose from single rgb images. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.19, + 0.787, + 0.245 + ], + "angle": 0, + "content": "81. Zimmermann, C., Ceylan, D., Yang, J., Russell, B.C., Argus, M.J., Brox, T.: Freihand: A dataset for markerless capture of hand pose and shape from single RGB images. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.246, + 0.787, + 0.286 + ], + "angle": 0, + "content": "82. 
Qalli, B., Singh, A., Walsman, A., Srinivasa, S.S., Abbeel, P., Dollar, A.M.: The ycb object and model set: Towards common benchmarks for manipulation research. In: Proceedings of the International Conference on Advanced Robotics (ICAR) (2015)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.148, + 0.787, + 0.286 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/1ec33038-4034-4272-be45-88734c621c33_origin.pdf b/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/1ec33038-4034-4272-be45-88734c621c33_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e532505ef525255e0af0d16a7e4bbf99238f66da --- /dev/null +++ b/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/1ec33038-4034-4272-be45-88734c621c33_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e16aacd305fb9cf077760bc627f37c9f3a4775f66afe74a2c3481bac7967fc8 +size 5345765 diff --git a/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/full.md b/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/full.md new file mode 100644 index 0000000000000000000000000000000000000000..5c1e0ff56cacfdad4ac857de928f87b5056183ab --- /dev/null +++ b/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/full.md @@ -0,0 +1,327 @@ +# 3D Reconstruction of Objects in Hands without Real World 3D Supervision + +Aditya Prakash, Matthew Chang, Matthew Jin, Ruisen Tu, and Saurabh Gupta + +University of Illinois Urbana-Champaign +{adityap9,mc48,mjin11,ruisent2,saurabhg}@illinois.edu +https://bit.ly/WildH0I + +Abstract. Prior works for reconstructing hand-held objects from a single image train models on images paired with 3D shapes. Such data is challenging to gather in the real world at scale. Consequently, these approaches do not generalize well when presented with novel objects in in-the-wild settings. While 3D supervision is a major bottleneck, there is an abundance of a) in-the-wild raw video data showing hand-object interactions and b) synthetic 3D shape collections. In this paper, we propose modules to leverage 3D supervision from these sources to scale up the learning of models for reconstructing hand-held objects. Specifically, we extract multiview 2D mask supervision from videos and 3D shape priors from shape collections. We use these indirect 3D cues to train occupancy networks that predict the 3D shape of objects from a single RGB image. Our experiments in the challenging object generalization setting on in-the-wild MOW dataset show $11.6\%$ relative improvement over models trained with 3D supervision on existing datasets. + +Keywords: hand-held objects $\cdot$ shape priors $\cdot$ multiview supervision + +# 1 Introduction + +While 3D reconstruction of hand-held objects is important for AR/VR [4,20] and robot learning applications [39,40,47,48,68,71], lack of 3D supervision outside of lab settings has made it challenging to produce models that work in the wild. This paper develops techniques to improve the generalization capabilities of single image hand-held object reconstruction methods by extracting supervision from in-the-wild videos & synthetic shape collections showing hand-object interactions. + +Collecting image datasets with ground truth 3D shapes for hand-held objects is hard. 
Any visual scanning setups (via multiple RGB/RGB-D cameras or motion capture) require full visibility of the object which is not available. Synthesizing realistic hand-object interaction is an open problem in itself [28,31,49,65]. Manual alignment of template shapes [5] is expensive, yet only approximate. Thus, there is very little in-the-wild real-world data with ground truth 3D shapes for hand-held objects. And while many past works have designed expressive models to predict shapes of hand-held objects [22,31,73], they are all held back due to the limited amount of real-world 3D data available for training and suffer from unsatisfactory performance on novel objects encountered in the wild. + +![](images/9dc5bf417c7bb6edace9bbb9a7d2dbc8cd263674fe203c5e894bd83df4b18f80.jpg) +Fig. 1: We propose modules to extract supervision from in-the-wild videos (Sec. 3.2) & learn shape priors from 3D object collections (Sec. 3.3), to train occupancy networks which predict the 3D shapes of hand-held objects from a single image. This circumvents the need for paired real world 3D shape supervision used in existing works [22, 73]. + +While in-the-wild images with paired 3D shapes are rare, there are a) plenty of in-the-wild videos containing multiple views of hand-held objects [12, 17] (Fig. 1), b) large catalogues of 3D object shapes [6] (Fig. 1). Shape collections provide 3D supervision but lack realistic hand grasps, videos showcase realistic hand-object interaction but don't provide direct 3D supervision. Either by itself seems insufficient, but can we combine supervision from these diverse sources to improve generalization of single-image hand-held object reconstruction methods? + +Let's consider each cue one at a time. While videos show multiple views of the object, we unfortunately don't know the relative object pose in the different views. Automatically extracting the object pose using structure from motion techniques, e.g. COLMAP [56] doesn't work due to insufficient number of feature matches on the object of interaction. We sidestep this problem by using hand pose as a proxy for object pose (Fig. 2). This is based on the observation that humans rarely conduct in-hand manipulation in pick & place tasks involving rigid objects. Thus, if we assume that the hand and the object are rigidly moving together, then the relative 6 DoF pose of the hand between pairs of frames reveals the relative 6 DoF pose of the object. This reduces the SfM problem to an easier setting where the motion is known. Specifically, we use off-the-shelf FrankMocap system [54] to obtain 6 DoF pose for the hand and consequently the object's. We then use our proposed 2D mask guided 3D sampling module (Sec. 3.2) to generate 3D supervision for the object shape using object segmentation masks (Fig. 2). This lets us train on objects from 144 different categories, where as most methods currently train on only a handful of categories $(< 20)$ . + +While this works well for unoccluded parts of the object, this does not generate reliable supervision for parts of the object that are occluded by the hand (Fig. 1). This brings us to the 3D shape catalogues, which we use to extract shape priors. This enables the model to learn to output contiguous shapes even when the object is interrupted by the hand in the image, e.g. it can hallucinate a handle for a jug even when it is covered by the hand, because jugs typically have one. 
We adopt an adversarial training framework [16] to train a discriminator to differentiate between real shapes (from ObMan [22]) and shapes predicted from the model (Fig. 3). Unlike prior works [67] which train the discriminator on 3D inputs, we instead propose a 2D slice-based 3D discriminator (Sec. 3.3), which is computationally efficient and learns better fine-grained shape information. + +Our overall framework consists of an occupancy network [43] that predicts the 3D shape of hand-held objects from a single image. We train this model on sequences curated from the VISOR dataset [13] and use the Obman dataset [22] to build the shape prior. Training on diverse real world data outside of lab settings, enabled by our innovations, leads our model (HORSE) to good generalization performance. HORSE outperforms previous state-of-the-art models by $11.6\%$ in the challenging object generalization setting on MOW [5]. + +# 2 Related Work + +Reconstructing objects in hands: Several works [9, 10, 22, 31, 73, 77] have trained expressive architectures for predicting 3D shape from a single image using paired real world 3D supervision. Fitting object templates [5, 21] or learned 3D shapes [14, 25, 72, 74] to videos using appearance cues [5, 14, 21, 25] or geometric priors [72, 74] have also been explored. The most relevant work to ours is [73], which uses paired 3D supervision from synthetic [22] and small-scale real-world datasets to predict 3D shape from a single image. However, it does not generalize to novel object categories in the wild due to limited 3D supervision. Instead, we train our model on diverse object categories from in-the-wild videos by extracting multiview 2D supervision and learning shape priors from existing datasets, without any real-world 3D supervision. Note that our setting involves a single image input at test time and we use in-the-wild videos for training only. + +Hand-Object datasets with 3D object models: Existing real-world hand-object datasets with 3D annotations are captured in lab settings and contain limited variation in objects, e.g. HO3D [18]:10, H2O [32]:8, FPHA [15]:4, FreiHAND [81]:35, ContactDB [2]:50, ContactPose [3]:25, DexYCB [8]:20, GRAB [58]:51, HOI4D [34]: 16 object categories. Collecting datasets with ground truth 3D shapes is difficult to scale since it often requires visual scanning setups (multiple cameras or motion capture). Synthesising realistic hand-object interaction is an open problem in itself [28, 31, 49, 65]. In this work, we curate sequences from in-the-wild VISOR dataset containing 144 object categories and design modules to extract supervision for training occupancy networks. The closest to ours is MOW with 120 objects that we only use to test models to assess generalization. + +Hand-Object Interactions in the wild: There is a growing interest in understanding hands and how they interact with objects around them. Researchers have collected datasets [8, 18, 19, 22, 32, 34, 58] and trained models for detecting & segmenting hands and associated objects of interaction [13, 57, 62, 63]. Recognizing what hands are doing in images [7, 46, 79] is also relevant: through grasp classification [31], 2D pose estimation [51, 80], and more recently 3D shape and pose estimation [21, 22, 53, 54, 61, 73] for both hands and objects in contact. + +3D from single image without direct 3D supervision. Several works relax the need for direct 3D supervision by incorporating auxiliary shape cues during training, e.g. 
multi-view consistency in masks [64], depth from single image [26, 37, 78] or stereo [24], appearance [11, 27, 60, 76]. These have been applied to reconstruction of category specific [27, 29, 30, 37] as well as generic objects [11, 75, 76]. However, directly applying these approaches to hand-held objects in the wild poses several challenges, e.g. unknown camera, novel object categories, heavy occlusion, inaccurate depth estimates. In this work, we propose modules to extract supervision from in-the-wild videos using object masks [13] & hand pose [54] and learn priors from synthetic collections of hand-held objects [22]. + +# 3 Approach + +We propose a novel framework for training 3D shape predictors from a single image without using any real world 3D supervision. Following prior work [73], we use implicit shape representation [43, 45] for 3D objects. + +# 3.1 Preliminaries + +Consider the recent AC-SDF model for this task from Ye et al. [73]. Given an input RGB image, AC-SDF uses a neural network to predict the SDF of 3D points. The prediction is done in the hand coordinate frame obtained using FrankMocap [54], which outputs (a) hand articulation parameters $\theta^a$ (45 dimensional MANO hand pose [52]), (b) global rotation $\theta^w$ of the wrist joint w.r.t. camera, (c) weak perspective camera $\theta^c$ , with scale factor $s$ & 2D translation $(t_x, t_y)$ , which is converted into a full perspective camera $K$ . These can be used to project a 3D point $\mathbf{x}$ into the image ( $f$ is the focal length) as $\mathbf{x}_p = K[T_{\theta^w} \mathbf{x} + (t_x, t_y, f / s)]$ + +Given a 3D point $\mathbf{x}$ & image $I$ , AC-SDF conditions the SDF prediction on: (a) global image features from a ResNet-50 [23], (b) pixel-aligned features [55] from intermediate layers of ResNet-50 at the projection $\mathbf{x}_p$ of $\mathbf{x}$ in the image, (c) hand articulation features obtained by representing $\mathbf{x}$ in the coordinate frame of 15 hand joints. This is realized as, $\mathbf{s} = \mathcal{F}(\mathbf{x}; I, \theta, K)$ . Training $\mathcal{F}$ requires sampling 3D points $x$ around the object and corresponding SDF values $s$ , $\theta = (\theta^a, \theta^w, \theta^c, K)$ are estimated from FrankMocap. + +# 3.2 2D Mask Guided 3D Sampling + +Training models with implicit shape representation require supervision in the form of occupancy [43] or SDF [45] for 3D points sampled inside and outside + +![](images/eeb1bf8a1e9150a7b510268e705094ab2cc3cfc7aeadb42869a95d2716011ca5.jpg) +a) Unposed video frames + +![](images/1c84f74b3d35fddbd5f39ed95b2a1d8e270b2973065326a8c6afada6e67ca2a1.jpg) +b) Hand pose as proxy for object pose +Fig.2: Registering objects via hand pose and 2D Mask guided 3D sampling. (a) Consider unposed frames from in-the-wild videos. (b) We use hand pose from FrankMocap [54] as a proxy for object pose, thereby registering the different views. (c) We then use 2D object masks for labeling 3D points with occupancy (Sec. 3.2). 3D points that project into the object mask in all views are considered as occupied (green triangles), all other points are considered unoccupied (red crosses). (3D object in the figure is for visualization only, not used for sampling.) + +![](images/f75f185eb79dea1141bdfcdb7032ffbe35dacf6c0838cc49f55a5dc82e2a8e9d.jpg) +c) Multi-view supervision from posed images + +the object. Note that the balanced sampling of points inside and outside the object is an important consideration for training good predictors. 
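The sampling strategy described next repeatedly projects hand-frame points into each view using the camera model of Sec. 3.1, $\mathbf{x}_p = K[T_{\theta^w} \mathbf{x} + (t_x, t_y, f/s)]$. As an illustration, this projection can be sketched as below (a minimal NumPy sketch; the names are ours, `R_w` treats the wrist transform $T_{\theta^w}$ as a rotation matrix, and $s, t_x, t_y, f$ are the FrankMocap weak-perspective outputs and focal length):

```python
import numpy as np

def project(x, K, R_w, tx, ty, s, f):
    """Map (n, 3) hand-frame points to (n, 2) pixel coordinates:
    x_p = K [ T_{theta^w} x + (t_x, t_y, f/s) ]."""
    cam = x @ R_w.T + np.array([tx, ty, f / s])  # points in the camera frame
    uvw = cam @ K.T                              # apply intrinsics K
    return uvw[:, :2] / uvw[:, 2:3]              # perspective divide
```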
While existing approaches [22, 31, 73] on this task use datasets with paired 3D supervision (3D object shape corresponding to 2D image), we operate in in-the-wild settings which do not contain 3D supervision. Instead, we propose a 2D mask guided 3D sampling strategy to obtain occupancy labels for training.

Consider multiple views $\{I_1, \ldots, I_n\}$ of a hand-held object (Fig. 2), along with their masks $\{M_1, \ldots, M_n\}$. We can sample points $\mathbf{x}$ in 3D space and project them into different views. Any point $\mathbf{x}$ which projects into the object mask in all views is considered occupied, whereas if it projects outside the mask in even one of the views, it is considered unoccupied. Thus, we get occupancy labels for a point $\mathbf{x}$ as $\mathbf{s}^{gt} = \cap_{i=1}^{n} M_i^{\mathbf{x}_{p_i}}$. Here, $M_i^{\mathbf{x}_{p_i}} = 1$ if $\mathbf{x}_{p_i}$ lies inside the mask $M_i$ & 0 otherwise. Note that it is not possible to obtain SDF values in this manner, since distance to the object surface cannot be estimated in the absence of 3D object models. While we can obtain 3D occupancy labels using this strategy, there are two important considerations: camera poses are unknown (required for projection) & how to balance the sampling of points inside & outside the object.

Camera pose: We assume that the hand is rigidly moving with the object. This is not an unreasonable assumption, as humans rarely do in-hand manipulation in pick & place tasks involving small rigid objects. Thus, the relative pose of the hand between different views reveals the relative pose of the object. This lets us use the hand pose predicted by FrankMocap $\{\theta_1, \dots, \theta_n\}$ to register the different views.

Balanced sampling: In the absence of 3D object models, a natural choice is to sample points uniformly in 3D space. However, this leads to most points lying outside the object because the object location is unknown. Instead, we sample points in the hand coordinate frame. Consider the total number of points to be $q$.

We adopt several strategies for balanced sampling of points inside $(s^{gt} = 1)$ and outside the object $(s^{gt} = 0)$. We uniformly sample $q/2$ 3D points $\mathbf{x} \in \mathbb{R}^3$ in the normalized hand coordinate frame and project these into all the available views. Since all these $q/2$ points may not be occupied, we use rejection sampling to repeat the procedure, for a maximum of $t = 50$ times or until we get $q/2$ occupied points. Also, all points projecting into the hand mask in all views and vertices of the MANO [53] hand are labeled as unoccupied.

Formally, for images $\{I_1, \ldots, I_n\}$ with object masks $\{M_1, \dots, M_n\}$, hand masks $\{H_1, \ldots, H_n\}$ and MANO vertices $\{V_1, \ldots, V_n\}$, $\mathbf{s}^{gt}$ for $\mathbf{x}$ is:

$$
\mathbf{s}^{gt} = \begin{cases} 1 & \text{if } \cap_{i=1}^{n} M_i^{\mathbf{x}_{p_i}} \ \text{and} \ \cap_{i=1}^{n} \neg H_i^{\mathbf{x}_{p_i}} \ \text{and} \ \cup_{i=1}^{n} \neg V_i^{\mathbf{x}} \\ 0 & \text{otherwise} \end{cases} \tag{1}
$$

where $\mathbf{x}_{p_i}$ is the projection of $\mathbf{x}$, $M_i^{\mathbf{x}_{p_i}} = 1$ if $\mathbf{x}_{p_i}$ lies inside $M_i$, $H_i^{\mathbf{x}_{p_i}} = 1$ if $\mathbf{x}_{p_i}$ lies inside $H_i$, $V_i^{\mathbf{x}} = 1$ if $\mathbf{x}$ belongs to $V_i$, and $\neg$ is the logical negation operator.
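For concreteness, the labeling of Eq. (1) and the rejection sampling described above can be sketched as follows. This is a minimal NumPy illustration rather than the released implementation: `project_fns` stands for per-view projections (e.g. the `project` sketch above with each view's FrankMocap pose $\theta_i$), and the tolerance `tol` for matching MANO vertices is an assumed detail.

```python
import numpy as np

def occupancy_labels(x, project_fns, obj_masks, hand_masks, mano_verts, tol=1e-3):
    """Eq. (1): a point is occupied iff it projects inside the object mask in
    every view, outside the hand mask in every view, and is not a MANO vertex."""
    q = x.shape[0]
    in_obj = np.ones(q, dtype=bool)   # running intersection over all views
    in_hand = np.ones(q, dtype=bool)
    for proj, M, H in zip(project_fns, obj_masks, hand_masks):
        uv = np.round(proj(x)).astype(int)       # (q, 2) pixel coordinates
        inb = (uv[:, 0] >= 0) & (uv[:, 0] < M.shape[1]) & \
              (uv[:, 1] >= 0) & (uv[:, 1] < M.shape[0])
        m = np.zeros(q, dtype=bool)              # out-of-frame counts as outside
        h = np.zeros(q, dtype=bool)
        m[inb] = M[uv[inb, 1], uv[inb, 0]].astype(bool)
        h[inb] = H[uv[inb, 1], uv[inb, 0]].astype(bool)
        in_obj &= m
        in_hand &= h
    near_hand = np.linalg.norm(x[:, None] - mano_verts[None], axis=-1).min(1) < tol
    return in_obj & ~in_hand & ~near_hand

def balanced_samples(project_fns, obj_masks, hand_masks, mano_verts, q, t=50):
    """Rejection sampling: retry up to t rounds to collect q/2 occupied points."""
    pos, neg = [], []
    for _ in range(t):
        pts = np.random.uniform(-1.0, 1.0, (q // 2, 3))  # normalized hand frame
        occ = occupancy_labels(pts, project_fns, obj_masks, hand_masks, mano_verts)
        pos.append(pts[occ])
        neg.append(pts[~occ])
        if sum(len(p) for p in pos) >= q // 2:
            break
    return np.concatenate(pos)[:q // 2], np.concatenate(neg)[:q // 2]
```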
+ +Note that, due to hand occlusions and errors in FrankMocap predictions, it is possible that some 3D points belonging to the object are not projected into the object masks but we do not want to label these points as unoccupied. So we disregard points which project onto the object mask in some views and hand mask in other views as these points could belong to object due to hand occlusion. + +This is reminiscent of the visual hull algorithm [33, 42], which generates 3D reconstruction by carving out space that projects outside the segmentation in any view. Visual hull algorithms need multiple views at test time to generate any output. In contrast, we are doing this at training time to obtain supervision for $\mathcal{F}(\mathbf{x};I_1,\theta_1,K_1)$ , which makes predictions from a single view. + +Training: We use cross-entropy loss (CE) to train $\mathcal{F}$ using ground truth $\mathbf{s}^{gt}$ : + +$$ +\mathcal {L} _ {\mathrm {v i s u a l - h u l l}} = \operatorname {C E} (\mathcal {F} (\mathbf {x}), \mathbf {s} ^ {g t}) \tag {2} +$$ + +To further regularize training, we also encourage the occupancy prediction from different views to be consistent with each other. Since our predictions are already in the hand coordinate frame, which is common across all views, this can be done by minimizing $\mathcal{L}_{\mathrm{consistency}}$ for different views $i\& j$ of the same object. + +$$ +\mathcal {L} _ {\mathrm {c o n s i s t e n c y}} = \sum_ {\mathbf {x} \in \mathbb {R} ^ {3}, i \neq j} \operatorname {C E} \left(\mathcal {F} (\mathbf {x}; I _ {i}, \theta_ {i}, K _ {i}), \mathcal {F} (\mathbf {x}; I _ {j}, \theta_ {j}, K _ {j})\right) \tag {3} +$$ + +# 3.3 2D Slice based 3D Discriminator as Shape Prior + +We adopt an adversarial training framework [16] to build a prior on shapes of hand-held objects and use it to supervise the training of the occupancy prediction function $\mathcal{F}(\mathbf{x};I_1,\theta_1^a,\theta_1^w,K_1)$ . As such a prior can be challenging to hand-craft, we build it in a data-driven way. We use 3D shape repository from synthetic datasets [22], which contain more than $2.5\mathrm{K}$ hand-held objects, to learn the prior. Specifically, we train a discriminator $\mathcal{D}$ to differentiate between 3D shapes from + +![](images/8654734017afa8718f878ab951a3c1f351b22d038336a8dfdb8e263c60d0a8d1.jpg) +Fig. 3: 2D slice based 3D discriminator. We learn data-driven 3D shape priors using hand-held objects from ObMan dataset. We sample planes through the object (shown above in blue), resulting in a 2D cross-section map. We pass occupancy predictions on points from these cross-sections through a discriminator which tries to distinguish cross-sections of predicted 3D shapes from cross-sections of ObMan objects (Sec. 3.3). + +ObMan [22] and generated shapes as predicted by $\mathcal{F}$ . We derive supervision for $\mathcal{F}$ by encouraging it to predict shapes that are real as per $\mathcal{D}$ . + +A natural choice is to train the discriminator with 3D input, e.g. $N \times N \times N$ cube in 3D voxel space [67]. One way to do this is to sample $N^3$ 3D points in the hand coordinate frame and run a forward pass through $\mathcal{F}$ to get the occupancy for each of these points. However this is computationally expensive and often leads to large imbalance as most points lie outside the object (we ablate this in Sec. 4.3). Instead, we propose a novel 2D slice based 3D discriminator which operates on arbitrary 2D slices. 
These are computed by taking the cross-section of 2D planes with 3D shapes and sampling 3D points that lie on these 2D cross-sections. The key intuition here is that the discriminator sees different randomly sampled 2D slices during the course of training, which helps it learn fine-grained shape information. E.g. for a sphere, all cross-sections are circular, but for a cylinder, most are oval. This helps distinguish between different 3D shapes.

Sampling 2D slices: There are several important considerations in sampling 2D slices. First, uniformly sampling 2D planes often leads to most points lying outside the object, which is not useful for training the discriminator. Instead, we sample 2D planes that pass through the origin in the hand coordinate system. Since the objects are in contact with the hand, the sampled points are more likely to encompass the object. Then, we rotate the sampled 2D planes by arbitrary angles so that they are not axis aligned, to better capture fine-grained shape information. We ablate all these design choices in Sec. 4.3. This sampling function $\mathcal{Z}$ results in a set of 2D planes on which 3D points are uniformly sampled.

Training: We pass the sampled points from 2D slices of the generated 3D shape through $\mathcal{F}$ to get the corresponding occupancy values $S^{\mathrm{gen}}$. This represents the generated 3D shape. We adopt the same strategy for representing 3D shapes from ObMan (used as real shapes) but use the predictions $S^{\mathrm{real}}$ of the occupancy network overfitted on ObMan. As they come from an overfitted model, they generally match the ground truth slices well, but at the same time are soft and prevent the discriminator from cheating.

![](images/33c8327e7f8180bd9541b980893b45a724301ee1c484b12fcda5dc9b03c4dad0.jpg)
Fig. 4: VISOR visualizations. Using existing hand pose estimation techniques [54], we are able to track the objects in relation to hands through time in in-the-wild videos. We visualize these tracks along with object masks from the VISOR dataset [13]. This form of data, where objects move rigidly relative to hands, is used to train our model to learn 3D shape of hand-held objects.

We train the discriminator $\mathcal{D}$ to differentiate between $S^{\mathrm{gen}}$ & $S^{\mathrm{real}}$ using the least squares formulation [41] for the discriminator loss. We derive supervision for $\mathcal{F}$ by computing gradients through $\mathcal{D}$ on the occupancy values at the sampled points to maximize the realism of the generated shapes.

$$
\begin{aligned}
\mathcal{L}_{\mathrm{adv}}^{\mathcal{D}} &= [\mathcal{D}(S^{\mathrm{real}}) - 1]^2 + [\mathcal{D}(S^{\mathrm{gen}})]^2 \\
\mathcal{L}_{\mathrm{adv}}^{\mathcal{F}} &= [\mathcal{D}(S^{\mathrm{gen}}) - 1]^2 \\
\mathcal{L}_{\text{shape-prior}} &= \lambda_f \mathcal{L}_{\mathrm{adv}}^{\mathcal{F}} + \lambda_d \mathcal{L}_{\mathrm{adv}}^{\mathcal{D}}
\end{aligned} \tag{4}
$$

# 3.4 Training Details

We train $\mathcal{F}$ & $\mathcal{D}$ in an alternating manner, with 2 iterations of $\mathcal{F}$ for every iteration of $\mathcal{D}$.
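As an illustration of the slices these updates are computed on, the plane-sampling function $\mathcal{Z}$ from Sec. 3.3 reduces to drawing a random orthonormal basis of a plane through the hand-frame origin (a minimal sketch under that assumption; `extent` is an assumed bound on the normalized hand frame):

```python
import numpy as np

def sample_slice(n=2048, extent=1.0):
    """Sample n 3D points on a randomly rotated plane through the origin."""
    Q, _ = np.linalg.qr(np.random.randn(3, 3))   # random orthonormal basis
    u, v = Q[:, 0], Q[:, 1]                      # two vectors spanning the plane
    ab = np.random.uniform(-extent, extent, (n, 2))
    return ab[:, :1] * u + ab[:, 1:] * v         # x = a*u + b*v lies on the plane
```

Occupancy values predicted by $\mathcal{F}$ at such points form the 2D cross-section maps that $\mathcal{D}$ classifies as real or generated.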
The total loss for training our framework is:

$$
\begin{aligned}
\mathcal{L}_{\mathcal{F}} &= \lambda_v \mathcal{L}_{\text{visual-hull}} + \lambda_c \mathcal{L}_{\text{consistency}} + \lambda_f \mathcal{L}_{\mathrm{adv}}^{\mathcal{F}} \\
\mathcal{L}_{\mathcal{D}} &= \lambda_d \mathcal{L}_{\mathrm{adv}}^{\mathcal{D}}
\end{aligned} \tag{5}
$$

Following standard practice [73], we pretrain on synthetic ObMan. We train our model jointly on ObMan (3D supervision, shape priors) & VISOR (2D supervision) with a dataset ratio of ObMan:VISOR as 1:2. We use a batch size of 64, a learning rate of 1e-5 across 4 NVIDIA A40 GPUs & loss weights $\lambda_v = 1, \lambda_c = 1, \lambda_f = 0.25, \lambda_d = 0.25$. Please refer to the supplementary for more details.

# 3.5 Constructing Wild Objects in Hands Dataset

Our framework requires a dataset containing multi-view images of rigid hand-object interactions in the wild, with 3D hand pose and 2D object masks. To construct such a dataset, we consider VISOR [13], which provides 2D tracks for hands, the objects they are interacting with, and their segmentation masks. It contains a rich set of hand-object interactions, e.g. taking out milk from the fridge, pouring oil from bottles, kneading dough, cutting vegetables, and stirring noodles in a wok. Our interest is in the 3D reconstruction of rigid objects which are in contact with a hand, but there are no 3D object annotations in VISOR. Hence, we process it to prepare a dataset for training our model.

Table 1: Generalization to novel objects in the wild. We report F-score at $5\mathrm{mm}$ & $10\mathrm{mm}$, Chamfer distance (CD, mm) for object generalization splits on MOW. We compare with AC-OCC & AC-SDF trained on different combinations of datasets with full 3D supervision. Our approach outperforms baselines across all metrics without using real-world 3D supervision (relative % improvement w.r.t. the best baseline in parentheses).
| Method | Dataset and supervision used | F@5 ↑ | F@10 ↑ | CD ↓ |
| --- | --- | --- | --- | --- |
| AC-OCC | ObMan (Synthetic 3D) | 0.095 | 0.179 | 8.69 |
| AC-SDF [73] | ObMan (Synthetic 3D) | 0.108 | 0.199 | 7.82 |
| AC-SDF [73] | ObMan (Synthetic 3D) + HO3D (Lab 3D) | 0.082 | 0.159 | 7.52 |
| AC-SDF [73] | ObMan (Synthetic 3D) + HO3D (Lab 3D) + HOI4D (3D) | 0.095 | 0.193 | 7.43 |
| HORSE (Ours) | ObMan (Synthetic 3D) + VISOR (2D Masks) + Shape priors | 0.121 (+10.7%) | 0.220 (+10.6%) | 6.76 (+13.5%) |
+ +We first sample a subset of VISOR involving hand-object contact, using available contact annotations. We select object tracks where only one hand is in consistent contact with the object. This leaves us with 14768 object tracks from the original VISOR dataset. We then manually filter this subset to select a subset that showcases manipulation of rigid objects with a single hand. This leaves us with 604 video snippets showing hands interacting with different objects. + +Processing hands on VISOR: We rely on the 3D hand poses to set up the output coordinate frame, compute hand articulation features, and more importantly to register the different frames together [38,66]. These hand poses are estimated using FrankMocap, which may not always be accurate. To remove erroneous poses, we employ automated filtering using the uncertainty estimate technique from Bahat & Shakhnarovich [1] following 3D human pose literature [50]. Specifically, we obtain 3D hand pose predictions on five different versions of the image, augmented by different fixed translations. The uncertainty estimate for a given image is computed as the standard deviation of reprojection locations of MANO vertices across these 5 image versions. This sidesteps the need to hand-specify the trade-off between translation, rotation, and articulation parameters that are part of the 3D hand pose output. This leaves us with 473 video snippets consisting of 144 object categories. This object diversity is $4 \times$ larger than existing datasets [18, 19, 32, 34, 69] used for our task, typically containing 10 to 32 object categories. We refer to this dataset as Wild Objects in Hands, some example object sequences are shown in Fig. 4. Note the *incidental* multiple views and relative consistency in hand and object pose over the course of interaction. + +# 4 Experiments + +# 4.1 Protocols + +We use 4 datasets for training (ObMan [22], VISOR [13], HO3D [18], HOI4D [34]) and 2 datasets (MOW [5], HO3D) for evaluation. Different methods are trained on different datasets, depending on the specific evaluation setting. + +Training datasets: ObMan is a large scale synthetic hand-object dataset with 2.5K objects and 3D supervision. HO3D & HOI4D are real world datasets collected + +Table 2: HO3D Object generalization. We outperform AC-OCC & AC-SDF trained on different datasets with 3D supervision. + +
| Method | Supervision (ObMan +) | F@5 | F@10 | CD |
| --- | --- | --- | --- | --- |
| AC-OCC | - | 0.18 | 0.33 | 4.39 |
| AC-SDF | - | 0.17 | 0.33 | 3.72 |
| AC-SDF | MOW (3D) | 0.17 | 0.33 | 3.84 |
| AC-SDF | MOW (3D) + HOI4D (3D) | 0.17 | 0.33 | 3.63 |
| Ours | VISOR (Multi-view 2D) | 0.20 | 0.35 | 3.39 |
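The hand-pose filtering behind this training data (Sec. 3.5) admits a compact sketch. Here `predict_mano_verts_2d` is a hypothetical stand-in for FrankMocap inference followed by reprojection of the MANO vertices, and the five fixed translations are realized as pixel shifts purely for illustration:

```python
import numpy as np

def pose_uncertainty(image, predict_mano_verts_2d,
                     shifts=((0, 0), (6, 0), (-6, 0), (0, 6), (0, -6))):
    """Std. dev. of reprojected MANO vertices across translated copies [1]."""
    tracks = []
    for dx, dy in shifts:
        shifted = np.roll(image, shift=(dy, dx), axis=(0, 1))  # translate input
        verts = predict_mano_verts_2d(shifted)                 # (778, 2) pixels
        tracks.append(verts - np.array([dx, dy]))              # undo the shift
    return np.stack(tracks).std(axis=0).mean()  # large value => unreliable pose
```

Frames whose uncertainty exceeds a threshold are discarded.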
+ +Table 3: HO3D View generalization. We outperform HO [22] & GF [31], trained on HO3D with full 3D supervision. + +
| Method | Supervision (ObMan +) | F@5 | F@10 | CD |
| --- | --- | --- | --- | --- |
| AC-SDF | - | 0.17 | 0.32 | 3.72 |
| HO [22] | HO3D (3D) | 0.11 | 0.22 | 4.19 |
| GF [31] | HO3D (3D) | 0.12 | 0.24 | 4.96 |
| Ours | HO3D (Multi-view 2D) | 0.23 | 0.43 | 1.41 |
+ +in lab settings with 3D annotations. HO3D contains 10 YCB [82] objects whereas HOI4D contains 16 object categories, out of which 7 are rigid. VISOR does not contain any 3D supervision. Instead, we use the process described in Sec. 3.5, to extract supervision from VISOR, resulting in 144 object categories. + +The baselines are trained with different combinations of HO3D & HOI4D [34]. As our method does not require 3D ground truth, we do not use these datasets for training. Instead, we use auxiliary supervision from Wild Objects in Hands (Sec. 3.5) & learn shape priors using ObMan. VISOR does not have 3D annotations and can not be used to train the baselines. Note that all models are initialized from the model pretrained on ObMan for fair comparisons, following protocol [73]. Evaluation datasets: We focus on the challenging zero-shot generalization to novel objects in-the-wild setting. We use MOW [5] dataset which contains images from YouTube, spanning 120 object templates. Note that these types of images have not been seen during training. To be consistent with prior work [73], we also use HO3D for evaluation, consisting of 1221 testing images across 10 objects. While [73] operate in view generalization setting, i.e., making predictions on novel views of training objects, we also consider the more challenging object generalization setting. Almost all of our experiments are conducted in the object generalization setting where we assess predictions on novel objects across datasets. Metrics: Following [59, 73], we report Chamfer distance (CD) and F-score at $5\mathrm{mm}$ & $10\mathrm{mm}$ thresholds. F-score evaluates the distance between object surfaces as the harmonic mean between precision & recall. Precision measures accuracy of the reconstruction as $\%$ of reconstructed points that lie within a certain distance to ground truth. Recall measures completeness of the reconstruction as $\%$ of points, on the ground truth, that lie within a certain distance to the reconstruction. CD computes sum of distances for each pair of nearest neighbors in the two point clouds. We report mean CD & F-score over all test objects. + +Baselines: We compare our model with AC-SDF trained in supervised manner using 3D ground truth on different combination of datasets in different settings: (1) For object generalization on MOW in the wild, AC-SDF is trained on ObMan, ObMan + HO3D, ObMan + HO3D + HOI4D, (2) For object generalization on HO3D, AC-SDF is trained on ObMan, ObMan + MOW, ObMan + MOW + HOI4D, (3) For view generalization on HO3D, AC-SDF is trained on ObMan + HO3D. We also compare with an occupancy variant of AC-SDF (AC-OCC) and recent published methods with different forms of SDF representation, e.g. + +Table 4: Comparison with relevant methods. Our approach also outperforms gSDF, AlignSDF & DDFHO (trained in the same setting as ours) in zero-shot generalization to MOW across most metrics. + +
| Method | F@5 ↑ | F@10 ↑ | CD ↓ |
| --- | --- | --- | --- |
| AC-SDF [73] | 0.108 | 0.199 | 7.82 |
| AlignSDF [10] | 0.099 | 0.182 | 8.30 |
| gSDF [9] | 0.107 | 0.197 | 7.50 |
| DDFHO [77] | 0.094 | 0.166 | 3.06 |
| HORSE (Ours) | 0.121 | 0.220 | 6.76 |
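For reference, the F-score and Chamfer distance reported in these tables can be computed from surface point samples as in the sketch below (simplified; the exact evaluation protocol, point counts, and scaling may differ from ours):

```python
import numpy as np

def fscore_chamfer(pred, gt, tau=0.005):
    """F-score at threshold tau (e.g. 5 mm) and Chamfer distance between
    point clouds pred (N, 3) and gt (M, 3) sampled from the two surfaces."""
    d = np.linalg.norm(pred[:, None] - gt[None], axis=-1)  # (N, M) pairwise
    d_p = d.min(axis=1)             # each predicted point -> nearest GT point
    d_g = d.min(axis=0)             # each GT point -> nearest predicted point
    precision = (d_p < tau).mean()  # accuracy of the reconstruction
    recall = (d_g < tau).mean()     # completeness of the reconstruction
    f = 2 * precision * recall / max(precision + recall, 1e-8)
    cd = d_p.sum() + d_g.sum()      # sum over nearest-neighbor pairs (Sec. 4.1)
    return f, cd
```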
Table 5: 3D vs. 2D input to discriminator. Training with 3D inputs (at different resolutions) performs worse, likely because coarse sampling results in very few points inside the object.
| Disc. input | F@5 ↑ | F@10 ↑ | CD ↓ |
|---|---|---|---|
| No disc. | 0.117 | 0.216 | 6.93 |
| 10 × 10 × 10 | 0.120 | 0.218 | 7.29 |
| 16 × 16 × 16 | 0.115 | 0.209 | 7.79 |
| 32 × 32 × 32 | 0.104 | 0.191 | 7.83 |
| 2D slices | 0.121 | 0.220 | 6.76 |
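As an illustration of the best-performing "2D slices" input in Tab. 5, here is a hedged sketch of forming one such slice: a plane through the hand-frame origin with a random rotation (the choice ablated later in Tab. 9). `occupancy_fn`, the slice half-width `extent`, and the QR-based random rotation are illustrative assumptions, not the paper's exact procedure.

```python
import numpy as np

def sample_occupancy_slice(occupancy_fn, res=32, extent=1.0, rng=np.random):
    # occupancy_fn: assumed callable mapping (N, 3) hand-frame points to
    # occupancy probabilities; extent is a hypothetical half-width of the slice.
    # Random rotation via QR decomposition of a Gaussian matrix.
    q, _ = np.linalg.qr(rng.standard_normal((3, 3)))
    # 32x32 grid on the z=0 plane through the origin, then rotated into 3D.
    lin = np.linspace(-extent, extent, res)
    u, v = np.meshgrid(lin, lin)
    pts = np.stack([u, v, np.zeros_like(u)], axis=-1).reshape(-1, 3) @ q.T
    # Query the occupancy network on the slice; reshape into a 2D "image"
    # that a standard 2D CNN discriminator can consume.
    return occupancy_fn(pts).reshape(res, res)
```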
+ +We also compare with an occupancy variant of AC-SDF (AC-OCC) and recent published methods that use different forms of SDF representation, e.g. AlignSDF [10], gSDF [9] & DDFHO [77]. Note that the VISOR dataset cannot be used for training the baselines since it does not have 3D supervision. For the view generalization setting on HO3D, we also compare with HO [22] & GF [31], trained with 3D ground truth on ObMan + HO3D. Recent works [44, 70] on unsupervised reconstruction of objects require several views or depth, which are not available in our setting. + +# 4.2 Results + +Object generalization in the wild: We first examine whether the auxiliary supervision from the visual hull and shape prior is useful for generalization to novel objects in the wild. We evaluate on MOW in Tab. 1 and compare with AC-OCC & AC-SDF trained on different combinations of the ObMan, HO3D & HOI4D datasets with 3D supervision. Our approach provides relative gains of $24.3\%$ over AC-OCC (trained on ObMan) and $11.6\%$ over AC-SDF (trained on ObMan). This shows the benefit of our supervision cues in the wild over training on just large-scale synthetic data with 3D supervision. We also outperform AC-SDF trained on ObMan + HO3D + HOI4D with full 3D supervision by $16.8\%$ across all metrics. This indicates that our supervision cues from in-the-wild VISOR are better than 3D supervision on lab datasets with limited diversity in objects. We also outperform relevant methods that use different forms of SDF representation, e.g. AlignSDF, gSDF & DDFHO, across most metrics (Tab. 4). Note that our contributions are orthogonal and could be combined with these works. + +Adding 3D supervision to AC-SDF: In Tab. 1, we observe that adding more data from HO3D & HOI4D to AC-SDF training does not help zero-shot generalization to MOW. Instead, performance drops compared to AC-SDF trained on ObMan. This is likely due to the limited diversity in HO3D (10 YCB objects) and HOI4D (7 rigid object categories) & the model overfitting to these categories. + +Object generalization on HO3D: Our approach is better than AC-OCC & AC-SDF trained on different datasets with 3D supervision (Tab. 2). This further shows the benefit of auxiliary supervision from VISOR for object generalization. Also, AC-SDF does not benefit from MOW & HOI4D. This could be because HO3D evaluates on only 10 objects, which may not be present in MOW or HOI4D. + +Table 6: Supervision quality on HO3D. Automated filtering to remove incorrect hand poses improves results, & using the ground truth hand pose differs little from using the predicted pose. $^{1}$ + +
| | F@5 ↑ | F@10 ↑ | CD ↓ |
|---|---|---|---|
| HORSE (base setting) | 0.234 | 0.434 | 1.41 |
| no training on HO3D | 0.175 | 0.329 | 3.72 |
| w/o filtering | 0.213 | 0.405 | 1.42 |
| w/ ground truth pose$^{1}$ | 0.243 | 0.444 | 1.39 |
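The last row of Tab. 6 relies on hand pose standing in for object pose. Under the rigid-grasp assumption, the relative hand pose between two frames is also the relative object pose. A minimal sketch of this registration step, assuming 4 × 4 camera-from-hand transforms assembled from a per-frame wrist rotation & translation (the helper names and transform convention are assumptions, not the paper's code):

```python
import numpy as np

def camera_from_hand(R, t):
    # Assemble a 4x4 rigid transform from a 3x3 wrist rotation & a 3-vector
    # translation (e.g., as predicted by FrankMocap for each frame).
    T = np.eye(4)
    T[:3, :3], T[:3, 3] = R, t
    return T

def relative_object_pose(T_i, T_j):
    # Rigid-grasp assumption: the object moves with the hand, so the relative
    # hand pose between frames i and j is also the relative object pose.
    # The result maps points from frame i's camera coordinates to frame j's.
    return T_j @ np.linalg.inv(T_i)
```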
+ +Table 7: Role of different loss functions. We report F-score at $5\mathrm{mm}$ & $10\mathrm{mm}$ and Chamfer distance (CD, mm) for different variants of our model on MOW. All losses are effective, & multiview supervision leads to the largest gain. + +
| $\mathcal{L}_{\text{ObMan}}$ | $\mathcal{L}_{\text{visual-hull}}$ | $\mathcal{L}_{\text{consistency}}$ | $\mathcal{L}_{\text{shape-prior}}$ | F@5 ↑ | F@10 ↑ | CD ↓ |
|---|---|---|---|---|---|---|
| | | | | 0.095 | 0.181 | 8.69 |
| | | | | 0.111 | 0.205 | 7.26 |
| | | | | 0.073 | 0.132 | 12.75 |
| | | | | 0.097 | 0.175 | 10.29 |
| | | | | 0.117 | 0.216 | 6.93 |
| | | | | 0.121 | 0.220 | 6.76 |
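The variants in Tab. 7 can be read as switching terms of a combined objective on & off. Below is a minimal schematic sketch: the weights `lam_*` are hypothetical placeholders, the consistency and shape-prior terms are assumed precomputed, and the exact loss definitions are those of Sec. 3, not reproduced here.

```python
import torch.nn.functional as F

def occupancy_bce(logits, labels):
    # Occupancy is supervised as binary classification
    # (cf. the "Occupancy vs. SDF" discussion below).
    return F.binary_cross_entropy_with_logits(logits, labels)

def total_loss(obman_logits, obman_occ,        # synthetic 3D supervision (ObMan)
               vh_logits, vh_occ,              # visual-hull labels from 2D masks
               l_consistency, l_shape_prior,   # assumed precomputed scalar terms
               lam_vh=1.0, lam_con=1.0, lam_sp=1.0):
    l_obman = occupancy_bce(obman_logits, obman_occ)
    l_vh = occupancy_bce(vh_logits, vh_occ)
    return l_obman + lam_vh * l_vh + lam_con * l_consistency + lam_sp * l_shape_prior
```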
+ +Occupancy vs. SDF: We see that the SDF formulation is better than occupancy when trained with full 3D supervision (AC-OCC vs. AC-SDF). In contrast, we find SDF training to be unstable with auxiliary supervision (it does not give meaningful predictions). This could be because regressing continuous SDF values with weak supervision is harder than binary classification of occupancy values. + +View generalization results on HO3D: In Tab. 3, we see gains from using our supervision cues over just training on synthetic data, consistent with the trends in the object generalization setting. We also outperform HO [22] & GF [31], both trained on HO3D using full 3D supervision. We outperform these methods even without any images from HO3D (last row in Tab. 1 vs. GF & HO in Tab. 3), likely due to the use of more expressive pixel-aligned & hand articulation features. + +# 4.3 Ablation Study + +Analysis of supervision quality: We also observe in Tab. 3 that our method is able to bridge more than $40\%$ of the gap between no training on HO3D and training with full 3D supervision. We further use the view generalization setting to assess the quality of the 2D object mask supervision used in our method in Tab. 6. Our automated filtering of frames with inaccurate hand poses (as described in Sec. 3.5) is crucial for good performance. Also, little is lost by using hand pose as a proxy for object pose on the HO3D dataset. $^{1}$ + +Role of different loss terms: We experiment with multiple variants of our model to assess the importance of different loss terms. We start with the AC-OCC model trained on ObMan and gradually add $\mathcal{L}_{\mathrm{visual-hull}}$, $\mathcal{L}_{\mathrm{consistency}}$, and $\mathcal{L}_{\mathrm{shape-prior}}$. From the results in Tab. 7, we observe that $\mathcal{L}_{\mathrm{visual-hull}}$ is more effective than $\mathcal{L}_{\mathrm{consistency}}$, and using them together provides further benefits. Moreover, $\mathcal{L}_{\mathrm{shape-prior}}$ improves performance on top of $\mathcal{L}_{\mathrm{consistency}}$ and $\mathcal{L}_{\mathrm{visual-hull}}$. + +Table 8: Design choices for mask guided sampling. Uniformly sampling in the hand frame is much worse than the rejection sampling used in our method. Using negative points from hand masks is useful. + +
| Sampling method | F@5 ↑ | F@10 ↑ | CD ↓ |
|---|---|---|---|
| Uniform | 0.093 | 0.166 | 10.29 |
| Ours (no hand points) | 0.113 | 0.207 | 7.69 |
| Ours | 0.117 | 0.216 | 6.93 |

Table 9: Sampling method for 2D planes. Sampling planes through the origin of the hand coordinate system & rotated randomly performs the best, compared to sampling axis-aligned planes either uniformly or through the origin.

| Sampling method | F@5 ↑ | F@10 ↑ | CD ↓ |
|---|---|---|---|
| Uniform (axis-aligned) | 0.115 | 0.208 | 7.01 |
| Origin (axis-aligned) | 0.098 | 0.183 | 8.52 |
| Origin (random rotation) | 0.121 | 0.220 | 6.76 |
+ +$(=262144)$ points & run several forward passes of our model to get occupancies. Since this is computationally expensive, we sample points at coarser resolutions: $32 \times 32 \times 32$ , $16 \times 16 \times 16$ , $10 \times 10 \times 10$ . We use $32 \times 32$ size 2D slices, so $10 \times 10 \times 10$ 3D volume has no. of points & takes similar compute. We see that 2D slices perform better than 3D volumes (Tab. 5). Also, the performance gets worse with increase in the sampled 3D volume, likely due to 3D sampling being so coarse that very few points lie inside the object, thus unable to capture fine-grained shape. + +Sampling 2D slices for discriminator: We ablate different design choices (Sec. 3.3) in Tab. 9. We observe that sampling 2D planes through origin of the hand coordinate system and rotated randomly performs the best compared to sampling axis-aligned frames either uniformly or through origin. + +Design choices for mask guided sampling: We run rejection sampling (with hand & object masks) to sample points in the hand coordinate frame (Sec. 3.2). We compare with 2 variants: uniformly sampling in the hand frame & removing negative points from hand masks. We find our strategy to work the best (Tab. 8). + +# 4.4 Visualizations + +We compare the mesh generated by our model and AC-SDF (trained on ObManbest baseline) on zero-shot generalization to MOW (Fig. 5) and Core50 [35](Fig. 6). For this, we sample points uniformly in a $64 \times 64 \times 64$ volume, predict their occupancies or SDF from the network and run marching cubes [36]. We project the mesh into the input image & render it in different views. Our model captures the visual hull of the object, as evidenced by the projection of the mesh onto the image, and generates more coherent shapes than AC-SDF, which often reconstructs disconnected and scattered shapes. More visualizations are in supplementary. + +# 4.5 Limitations + +Inaccurate hand pose. We use predictions from FrankMocap for hand pose & camera parameters. Note that the sampled points do not cover the entire object if the hand pose is not accurate, due to mis-projection into the image plane. This leads to exclusion of points in certain parts of the object (Fig. 7). + +![](images/6f78e95ca1118a8a3c2918210888dd77d68864ee441af6a5fb86944ecb14e04c.jpg) +Fig. 5: Visualizations on MOw object generalization split. We show the object mesh projected onto the image and rendered in different views for our HORSE model and compare with the AC-SDF model trained on ObMan dataset with 3D supervision (best baseline model). We also show the ground truth (GT) object model. We observe that our model is able to predict the object shape more accurately than AC-SDF which often reconstructs smaller and disconnected shapes. + +![](images/5df372738649518855eb9b6a94fe58137623174e0e61ef106576d93a87d714ce.jpg) +Fig. 6: Visualizations on zero-shot generalization to Core50 [35]. We show the object mesh projected onto the image and rendered in different views on Core50. HORSE predicts better shapes than AC-SDF (best baseline, often leads to artifacts). + +Limited object views. Videos in the wild often do not capture $360^{\circ}$ view of the object, e.g. kettle in Fig. 7. This is different than lab settings where the interactions are often constrained & multi-camera setup is used to capture all sides of the object. + +![](images/5b117a7c1b8e95b9cd03dadb80623c76b33c672e70e7b0d8c3ca86cb97068fb7.jpg) +Fig. 7: Sampled points do not cover the entire object if hand pose is inaccurate. 
+ +# 5 Conclusion + +We present an approach for reconstructing hand-held objects in 3D from a single image. We propose modules to extract supervision from in-the-wild videos & learn data-driven 3D shape priors from synthetic ObMan to circumvent the need for direct 3D supervision. Experiments show that our approach generalizes better to novel objects in the wild than baselines trained using 3D supervision. Future directions include jointly optimizing the hand pose with the object shape to deal with inaccurate hand poses, or incorporating additional cues, e.g. contact priors. + +Acknowledgements: We thank Ashish Kumar, Erin Zhang, Arjun Gupta, Shaowei Liu, Anand Bhattad, Pranay Thangeda & Kashyap Chitta for feedback on the draft. This material is based upon work supported by NSF (IIS2007035), NASA (80NSSC21K1030), DARPA (Machine Common Sense program), an Amazon Research Award, an NVIDIA Academic Hardware Grant, and the NCSA Delta System (supported by NSF OCI 2005572 and the State of Illinois). + +# References + +1. Bahat, Y., Shakhnarovich, G.: Confidence from invariance to image transformations. arXiv (2018) +2. Brahmbhatt, S., Ham, C., Kemp, C.C., Hays, J.: Contactdb: Analyzing and predicting grasp contact via thermal imaging. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019) +3. Brahmbhatt, S., Tang, C., Twigg, C.D., Kemp, C.C., Hays, J.: Contactpose: A dataset of grasps with object contact and hand pose. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020) +4. Buckingham, G.: Hand tracking for immersive virtual reality: Opportunities and challenges. Frontiers in Virtual Reality (2021) +5. Cao, Z., Radosavovic, I., Kanazawa, A., Malik, J.: Reconstructing hand-object interactions in the wild. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021) +6. Chang, A.X., Funkhouser, T.A., Guibas, L.J., Hanrahan, P., Huang, Q., Li, Z., Savarese, S., Savva, M., Song, S., Su, H., Xiao, J., Yi, L., Yu, F.: Shapenet: An information-rich 3D model repository. arXiv (2015) +7. Chang, M., Prakash, A., Gupta, S.: Look ma, no hands! agent-environment factorization of egocentric videos. In: Advances in Neural Information Processing Systems (NeurIPS) (2023) +8. Chao, Y., Yang, W., Xiang, Y., Molchanov, P., Handa, A., Tremblay, J., Narang, Y.S., Wyk, K.V., Iqbal, U., Birchfield, S., Kautz, J., Fox, D.: Dexycb: A benchmark for capturing hand grasping of objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021) +9. Chen, Z., Chen, S., Schmid, C., Laptev, I.: gsdf: Geometry-driven signed distance functions for 3d hand-object reconstruction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023) +10. Chen, Z., Hasson, Y., Schmid, C., Laptev, I.: Alignsdf: Pose-aligned signed distance fields for hand-object reconstruction. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022) +11. Choi, H., Chavan-Dafle, N., Yuan, J., Isler, V., Park, H.: Handnerf: Learning to reconstruct hand-object interaction scene from a single rgb image. In: International Conference on Robotics and Automation (2024) +12. Damen, D., Doughty, H., Farinella, G.M., Fidler, S., Furnari, A., Kazakos, E., Moltisanti, D., Munro, J., Perrett, T., Price, W., Wray, M.: Scaling egocentric vision: The epic-kitchens dataset. Proceedings of the European Conference on Computer Vision (ECCV) (2018) +13.
Darkhalil, A., Shan, D., Zhu, B., Ma, J., Kar, A., Higgins, R., Fidler, S., Fouhey, D., Damen, D.: Epic-kitchens visor benchmark: Video segmentations and object relations. In: NeurIPS Track on Datasets and Benchmarks (2022) + +14. Fan, Z., Parelli, M., Kadoglou, M.E., Kocabas, M., Chen, X., Black, M.J., Hilliges, O.: Hold: Category-agnostic 3d reconstruction of interacting hands and objects from video. arXiv preprint arXiv:2311.18448 (2023) +15. Garcia-Hernando, G., Yuan, S., Baek, S., Kim, T.: First-person hand action benchmark with RGB-D videos and 3d hand pose annotations. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018) +16. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial nets. In: Advances in Neural Information Processing Systems (NeurIPS) (2014) +17. Grauman, K., Westbury, A., Byrne, E., Chavis, Z., Furnari, A., Girdhar, R., Hamburger, J., Jiang, H., Liu, M., Liu, X., et al.: Ego4d: Around the world in 3,000 hours of egocentric video. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022) +18. Hampali, S., Rad, M., Oberweger, M., Lepetit, V.: Honnotate: A method for 3d annotation of hand and object poses. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020) +19. Hampali, S., Sarkar, S.D., Rad, M., Lepetit, V.: Keypoint transformer: Solving joint identification in challenging hands and object interactions for accurate 3d pose estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022) +20. Han, S., Liu, B., Cabezas, R., Twigg, C.D., Zhang, P., Petkau, J., Yu, T., Tai, C., Akbay, M., Wang, Z., Nitzan, A., Dong, G., Ye, Y., Tao, L., Wan, C., Wang, R.: Megatrack: monochrome egocentric articulated hand-tracking for virtual reality. ACM Transactions on Graphics (TOG) (2020) +21. Hasson, Y., Tekin, B., Bogo, F., Laptev, I., Pollefeys, M., Schmid, C.: Leveraging photometric consistency over time for sparsely supervised hand-object reconstruction. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020) +22. Hasson, Y., Varol, G., Tzionas, D., Kalevatykh, I., Black, M.J., Laptev, I., Schmid, C.: Learning joint reconstruction of hands and manipulated objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019) +23. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2016) +24. Heppert, N., Irshad, M.Z., Zakharov, S., Liu, K., Ambrus, R.A., Bohg, J., Valada, A., Kollar, T.: CARTO: category and joint agnostic reconstruction of articulated objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023) +25. Huang, D., Ji, X., He, X., Sun, J., He, T., Shuai, Q., Ouyang, W., Zhou, X.: Reconstructing hand-held objects from monocular video. In: ACM Transactions on Graphics (2022) +26. Irshad, M.Z., Zakharov, S., Ambrus, R., Kollar, T., Kira, Z., Gaidon, A.: Shapo: Implicit representations for multi-object shape, appearance, and pose optimization. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022) +27. Irshad, M.Z., Zakharov, S., Liu, K., Guizilini, V., Kollar, T., Gaidon, A., Kira, Z., Ambrus, R.: Neo 360: Neural fields for sparse view synthesis of outdoor scenes. 
In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2023) + +28. Jiang, H., Liu, S., Wang, J., Wang, X.: Hand-object contact consistency reasoning for human grasps generation. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021) +29. Kanazawa, A., Tulsiani, S., Efros, A.A., Malik, J.: Learning category-specific mesh reconstruction from image collections. In: Proceedings of the European Conference on Computer Vision (ECCV) (2018) +30. Kar, A., Tulsiani, S., Carreira, J., Malik, J.: Category-specific object reconstruction from a single image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2015) +31. Karunratanakul, K., Yang, J., Zhang, Y., Black, M.J., Muandet, K., Tang, S.: Grasping field: Learning implicit representations for human grasps. In: Proceedings of the International Conference on 3D Vision (3DV) (2020) +32. Kwon, T., Tekin, B., Stühmer, J., Bogo, F., Pollefeys, M.: H2O: two hands manipulating objects for first person interaction recognition. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021) +33. Laurentini, A.: The visual hull concept for silhouette-based image understanding. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI) 16, 150-162 (1994) +34. Liu, Y., Liu, Y., Jiang, C., Lyu, K., Wan, W., Shen, H., Liang, B., Fu, Z., Wang, H., Yi, L.: HOI4D: A 4d egocentric dataset for category-level human-object interaction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022) +35. Lomonaco, V., Maltoni, D.: Core50: a new dataset and benchmark for continuous object recognition. In: Proceedings of the Conference on Robot Learning (CoRL) (2017) +36. Lorensen, W.E., Cline, H.E.: Marching cubes: A high resolution 3D surface construction algorithm. ACM Transactions on Graphics (1987) +37. Lunayach, M., Zakharov, S., Chen, D., Ambrus, R., Kira, Z., Irshad, M.Z.: FSD: fast self-supervised single RGB-D to categorical 3d objects. arXiv abs/2310.12974 (2023) +38. Ma, W.C., Yang, A.J., Wang, S., Urtasun, R., Torralba, A.: Virtual correspondence: Humans as a cue for extreme-view geometry. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022) +39. Mandikal, P., Grauman, K.: Dexvip: Learning dexterous grasping with human hand pose priors from video. In: Proceedings of the Conference on Robot Learning (CoRL) (2021) +40. Mandikal, P., Grauman, K.: Learning dexterous grasping with object-centric visual affordances. In: Proceedings of the IEEE International Conference on Robotics and Automation (ICRA) (2021) +41. Mao, X., Li, Q., Xie, H., Lau, R.Y.K., Wang, Z., Smolley, S.P.: Least squares generative adversarial networks. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2017) +42. Matusik, W., Buehler, C., Raskar, R., Gortler, S.J., McMillan, L.: Image-based visual hulls. In: ACM Transactions on Graphics (2000) +43. Mescheder, L., Oechsle, M., Niemeyer, M., Nowozin, S., Geiger, A.: Occupancy networks: Learning 3d reconstruction in function space. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019) +44. Niemeyer, M., Mescheder, L.M., Oechsle, M., Geiger, A.: Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020) + +45. 
Park, J.J., Florence, P., Straub, J., Newcombe, R., Lovegrove, S.: Deepsdf: Learning continuous signed distance functions for shape representation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019) +46. Prakash, A., Tu, R., Chang, M., Gupta, S.: 3d hand pose estimation in everyday egocentric images. In: Proceedings of the European Conference on Computer Vision (ECCV) (2024) +47. Qin, Y., Su, H., Wang, X.: From one hand to multiple hands: Imitation learning for dexterous manipulation from single-camera teleoperation. Proceedings of the International Conference on Intelligent Robots and Systems (IROS) (2022) +48. Qin, Y., Wu, Y., Liu, S., Jiang, H., Yang, R., Fu, Y., Wang, X.: Dexmv: Imitation learning for dexterous manipulation from human videos. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022) +49. Rijpkema, H., Girard, M.: Computer animation of knowledge-based human grasping. In: Thomas, J.J. (ed.) ACM Transactions on Graphics (1991) +50. Rockwell, C., Fouhey, D.F.: Full-body awareness from partial observations. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020) +51. Rogez, G., Khademi, M., Supancic III, J., Montiel, J.M.M., Ramanan, D.: 3d hand pose detection in egocentric rgb-d images. In: Proceedings of the European Conference on Computer Vision (ECCV) (2014) +52. Romero, J., Kjellström, H., Kragic, D.: Hands in action: real-time 3D reconstruction of hands in interaction with objects. In: Proceedings of the IEEE International Conference on Robotics and Automation (ICRA) (2010) +53. Romero, J., Tzionas, D., Black, M.J.: Embodied hands: Modeling and capturing hands and bodies together. ACM Transactions on Graphics (ToG) (2017) +54. Rong, Y., Shiratori, T., Joo, H.: Frankmocap: Fast monocular 3D hand and body motion capture by regression and integration. Proceedings of the IEEE International Conference on Computer Vision Workshops (ICCV Workshops) (2021) +55. Saito, S., Huang, Z., Natsume, R., Morishima, S., Kanazawa, A., Li, H.: Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2019) +56. Schonberger, J.L., Frahm, J.M.: Structure-from-motion revisited. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2016) +57. Shan, D., Geng, J., Shu, M., Fouhey, D.F.: Understanding human hands in contact at internet scale. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020) +58. Taheri, O., Ghorbani, N., Black, M.J., Tzionas, D.: GRAB: A dataset of whole-body human grasping of objects. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020) +59. Tatarchenko, M., Richter, S.R., Ranftl, R., Li, Z., Koltun, V., Brox, T.: What do single-view 3d reconstruction networks learn? In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019) +60. Truong, P., Rakotosaona, M., Manhardt, F., Tombari, F.: SPARF: neural radiance fields from sparse and noisy poses. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023) +61. Tschernezki, V., Darkhalil, A., Zhu, Z., Fouhey, D., Laina, I., Larlus, D., Damen, D.: EPIC fields: Marrying 3d geometry and video understanding. In: Advances in Neural Information Processing Systems (NeurIPS) (2023) +62.
Tschernezki, V., Laina, I., Larlus, D., Vedaldi, A.: Neural feature fusion fields: 3d distillation of self-supervised 2d image representations. In: Proceedings of the International Conference on 3D Vision (3DV) (2022) + +63. Tschernezki, V., Larlus, D., Vedaldi, A.: Neuraldiff: Segmenting 3d objects that move in egocentric videos. In: Proceedings of the International Conference on 3D Vision (3DV) (2021) +64. Tulsiani, S., Efros, A.A., Malik, J.: Multi-view consistency as supervisory signal for learning shape and pose prediction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018) +65. Turpin, D., Wang, L., Heiden, E., Chen, Y., Macklin, M., Tsogkas, S., Dickinson, S.J., Garg, A.: Grasp'd: Differentiable contact-rich grasp synthesis for multi-fingered hands. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022) +66. Tzionas, D., Gall, J.: 3d object reconstruction from hand-object interactions. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2015) +67. Wu, J., Zhang, C., Xue, T., Freeman, W.T., Tenenbaum, J.B.: Learning a probabilistic latent space of object shapes via 3d generative-adversarial modeling. In: Advances in Neural Information Processing Systems (NeurIPS) (2016) +68. Wu, Y., Wang, J., Wang, X.: Learning generalizable dexterous manipulation from human grasp affordance. In: Proceedings of the Conference on Robot Learning (CoRL) (2022) +69. Yang, L., Li, K., Zhan, X., Wu, F., Xu, A., Liu, L., Lu, C.: Oakink: A large-scale knowledge repository for understanding hand-object interaction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022) +70. Yariv, L., Kasten, Y., Moran, D., Galun, M., Atzmon, M., Basri, R., Lipman, Y.: Multiview neural surface reconstruction by disentangling geometry and appearance. In: Advances in Neural Information Processing Systems (NeurIPS) (2020) +71. Ye, J., Wang, J., Huang, B., Qin, Y., Wang, X.: Learning continuous grasping function with a dexterous hand from human demonstrations. arXiv (2022) +72. Ye, Y., Gupta, A., Kitani, K., Tulsiani, S.: G-HOP: generative hand-object prior for interaction reconstruction and grasp synthesis. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2024) +73. Ye, Y., Gupta, A., Tulsiani, S.: What's in your hands? 3D reconstruction of generic objects in hands. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022) +74. Ye, Y., Hebbar, P., Gupta, A., Tulsiani, S.: Diffusion-guided reconstruction of everyday hand-object interaction clips. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2023) +75. Ye, Y., Tulsiani, S., Gupta, A.: Shelf-supervised mesh prediction in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021) +76. Yu, A., Ye, V., Tancik, M., Kanazawa, A.: pixelnerf: Neural radiance fields from one or few images. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021) +77. Zhang, C., Di, Y., Zhang, R., Zhai, G., Manhardt, F., Tombari, F.: DDF-HO: hand-held object reconstruction via conditional directed distance field. In: Advances in Neural Information Processing Systems (NeurIPS) (2023) +78. Zhou, T., Brown, M., Snavely, N., Lowe, D.G.: Unsupervised learning of depth and ego-motion from video.
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2017) +79. Zhu, Z., Damen, D.: Get a grip: Reconstructing hand-object stable grasps in egocentric videos. arXiv preprint arXiv:2312.15719 (2023) + +80. Zimmermann, C., Brox, T.: Learning to estimate 3d hand pose from single rgb images. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2017) +81. Zimmermann, C., Ceylan, D., Yang, J., Russell, B.C., Argus, M.J., Brox, T.: Freihand: A dataset for markerless capture of hand pose and shape from single RGB images. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2019) +82. Qalli, B., Singh, A., Walsman, A., Srinivasa, S.S., Abbeel, P., Dollar, A.M.: The ycb object and model set: Towards common benchmarks for manipulation research. In: Proceedings of the International Conference on Advanced Robotics (ICAR) (2015) \ No newline at end of file diff --git a/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/images.zip b/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..a4985768ed5c659a533dfc88a7cffd5d3e68fd78 --- /dev/null +++ b/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdee36b5a7a40a2233cfbd38543e3b01e48145843037923127ace7e77ea83ff6 +size 447096 diff --git a/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/layout.json b/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..0a9f956c87634a202e26a2517dff735f47dae118 --- /dev/null +++ b/2024/3D Reconstruction of Objects in Hands without Real World 3D Supervision/layout.json @@ -0,0 +1,10776 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 169, + 111, + 445, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 111, + 445, + 148 + ], + "spans": [ + { + "bbox": [ + 169, + 111, + 445, + 148 + ], + "type": "text", + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 132, + 167, + 481, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 167, + 481, + 180 + ], + "spans": [ + { + "bbox": [ + 132, + 167, + 481, + 180 + ], + "type": "text", + "content": "Aditya Prakash, Matthew Chang, Matthew Jin, Ruisen Tu, and Saurabh Gupta" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 179, + 190, + 432, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 190, + 432, + 223 + ], + "spans": [ + { + "bbox": [ + 179, + 190, + 432, + 223 + ], + "type": "text", + "content": "University of Illinois Urbana-Champaign \n{adityap9,mc48,mjin11,ruisent2,saurabhg}@illinois.edu \nhttps://bit.ly/WildH0I" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 160, + 247, + 452, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 247, + 452, + 411 + ], + "spans": [ + { + "bbox": [ + 160, + 247, + 452, + 411 + ], + "type": "text", + "content": "Abstract. Prior works for reconstructing hand-held objects from a single image train models on images paired with 3D shapes. Such data is challenging to gather in the real world at scale. 
Consequently, these approaches do not generalize well when presented with novel objects in in-the-wild settings. While 3D supervision is a major bottleneck, there is an abundance of a) in-the-wild raw video data showing hand-object interactions and b) synthetic 3D shape collections. In this paper, we propose modules to leverage 3D supervision from these sources to scale up the learning of models for reconstructing hand-held objects. Specifically, we extract multiview 2D mask supervision from videos and 3D shape priors from shape collections. We use these indirect 3D cues to train occupancy networks that predict the 3D shape of objects from a single RGB image. Our experiments in the challenging object generalization setting on in-the-wild MOW dataset show " + }, + { + "bbox": [ + 160, + 247, + 452, + 411 + ], + "type": "inline_equation", + "content": "11.6\\%" + }, + { + "bbox": [ + 160, + 247, + 452, + 411 + ], + "type": "text", + "content": " relative improvement over models trained with 3D supervision on existing datasets." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 160, + 422, + 443, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 422, + 443, + 433 + ], + "spans": [ + { + "bbox": [ + 160, + 422, + 443, + 433 + ], + "type": "text", + "content": "Keywords: hand-held objects " + }, + { + "bbox": [ + 160, + 422, + 443, + 433 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 422, + 443, + 433 + ], + "type": "text", + "content": " shape priors " + }, + { + "bbox": [ + 160, + 422, + 443, + 433 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 422, + 443, + 433 + ], + "type": "text", + "content": " multiview supervision" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 133, + 451, + 230, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 451, + 230, + 464 + ], + "spans": [ + { + "bbox": [ + 133, + 451, + 230, + 464 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 473, + 482, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 473, + 482, + 544 + ], + "spans": [ + { + "bbox": [ + 130, + 473, + 482, + 544 + ], + "type": "text", + "content": "While 3D reconstruction of hand-held objects is important for AR/VR [4,20] and robot learning applications [39,40,47,48,68,71], lack of 3D supervision outside of lab settings has made it challenging to produce models that work in the wild. This paper develops techniques to improve the generalization capabilities of single image hand-held object reconstruction methods by extracting supervision from in-the-wild videos & synthetic shape collections showing hand-object interactions." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 545, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 545, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 545, + 482, + 665 + ], + "type": "text", + "content": "Collecting image datasets with ground truth 3D shapes for hand-held objects is hard. Any visual scanning setups (via multiple RGB/RGB-D cameras or motion capture) require full visibility of the object which is not available. Synthesizing realistic hand-object interaction is an open problem in itself [28,31,49,65]. Manual alignment of template shapes [5] is expensive, yet only approximate. 
Thus, there is very little in-the-wild real-world data with ground truth 3D shapes for hand-held objects. And while many past works have designed expressive models to predict shapes of hand-held objects [22,31,73], they are all held back due to the limited amount of real-world 3D data available for training and suffer from unsatisfactory performance on novel objects encountered in the wild." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 134, + 113, + 478, + 304 + ], + "blocks": [ + { + "bbox": [ + 134, + 113, + 478, + 304 + ], + "lines": [ + { + "bbox": [ + 134, + 113, + 478, + 304 + ], + "spans": [ + { + "bbox": [ + 134, + 113, + 478, + 304 + ], + "type": "image", + "image_path": "9dc5bf417c7bb6edace9bbb9a7d2dbc8cd263674fe203c5e894bd83df4b18f80.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 311, + 482, + 357 + ], + "lines": [ + { + "bbox": [ + 130, + 311, + 482, + 357 + ], + "spans": [ + { + "bbox": [ + 130, + 311, + 482, + 357 + ], + "type": "text", + "content": "Fig. 1: We propose modules to extract supervision from in-the-wild videos (Sec. 3.2) & learn shape priors from 3D object collections (Sec. 3.3), to train occupancy networks which predict the 3D shapes of hand-held objects from a single image. This circumvents the need for paired real world 3D shape supervision used in existing works [22, 73]." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 386, + 482, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 386, + 482, + 472 + ], + "spans": [ + { + "bbox": [ + 130, + 386, + 482, + 472 + ], + "type": "text", + "content": "While in-the-wild images with paired 3D shapes are rare, there are a) plenty of in-the-wild videos containing multiple views of hand-held objects [12, 17] (Fig. 1), b) large catalogues of 3D object shapes [6] (Fig. 1). Shape collections provide 3D supervision but lack realistic hand grasps, videos showcase realistic hand-object interaction but don't provide direct 3D supervision. Either by itself seems insufficient, but can we combine supervision from these diverse sources to improve generalization of single-image hand-held object reconstruction methods?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 474, + 483, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 474, + 483, + 668 + ], + "spans": [ + { + "bbox": [ + 130, + 474, + 483, + 668 + ], + "type": "text", + "content": "Let's consider each cue one at a time. While videos show multiple views of the object, we unfortunately don't know the relative object pose in the different views. Automatically extracting the object pose using structure from motion techniques, e.g. COLMAP [56] doesn't work due to insufficient number of feature matches on the object of interaction. We sidestep this problem by using hand pose as a proxy for object pose (Fig. 2). This is based on the observation that humans rarely conduct in-hand manipulation in pick & place tasks involving rigid objects. Thus, if we assume that the hand and the object are rigidly moving together, then the relative 6 DoF pose of the hand between pairs of frames reveals the relative 6 DoF pose of the object. This reduces the SfM problem to an easier setting where the motion is known. 
Specifically, we use off-the-shelf FrankMocap system [54] to obtain 6 DoF pose for the hand and consequently the object's. We then use our proposed 2D mask guided 3D sampling module (Sec. 3.2) to generate 3D supervision for the object shape using object segmentation masks (Fig. 2). This lets us train on objects from 144 different categories, where as most methods currently train on only a handful of categories " + }, + { + "bbox": [ + 130, + 474, + 483, + 668 + ], + "type": "inline_equation", + "content": "(< 20)" + }, + { + "bbox": [ + 130, + 474, + 483, + 668 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "A. Prakash et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "text", + "content": "While this works well for unoccluded parts of the object, this does not generate reliable supervision for parts of the object that are occluded by the hand (Fig. 1). This brings us to the 3D shape catalogues, which we use to extract shape priors. This enables the model to learn to output contiguous shapes even when the object is interrupted by the hand in the image, e.g. it can hallucinate a handle for a jug even when it is covered by the hand, because jugs typically have one. We adopt an adversarial training framework [16] to train a discriminator to differentiate between real shapes (from ObMan [22]) and shapes predicted from the model (Fig. 3). Unlike prior works [67] which train the discriminator on 3D inputs, we instead propose a 2D slice-based 3D discriminator (Sec. 3.3), which is computationally efficient and learns better fine-grained shape information." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 249, + 482, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 249, + 482, + 334 + ], + "spans": [ + { + "bbox": [ + 130, + 249, + 482, + 334 + ], + "type": "text", + "content": "Our overall framework consists of an occupancy network [43] that predicts the 3D shape of hand-held objects from a single image. We train this model on sequences curated from the VISOR dataset [13] and use the Obman dataset [22] to build the shape prior. Training on diverse real world data outside of lab settings, enabled by our innovations, leads our model (HORSE) to good generalization performance. HORSE outperforms previous state-of-the-art models by " + }, + { + "bbox": [ + 130, + 249, + 482, + 334 + ], + "type": "inline_equation", + "content": "11.6\\%" + }, + { + "bbox": [ + 130, + 249, + 482, + 334 + ], + "type": "text", + "content": " in the challenging object generalization setting on MOW [5]." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 357, + 237, + 370 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 357, + 237, + 370 + ], + "spans": [ + { + "bbox": [ + 132, + 357, + 237, + 370 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 388, + 482, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 388, + 482, + 533 + ], + "spans": [ + { + "bbox": [ + 130, + 388, + 482, + 533 + ], + "type": "text", + "content": "Reconstructing objects in hands: Several works [9, 10, 22, 31, 73, 77] have trained expressive architectures for predicting 3D shape from a single image using paired real world 3D supervision. Fitting object templates [5, 21] or learned 3D shapes [14, 25, 72, 74] to videos using appearance cues [5, 14, 21, 25] or geometric priors [72, 74] have also been explored. The most relevant work to ours is [73], which uses paired 3D supervision from synthetic [22] and small-scale real-world datasets to predict 3D shape from a single image. However, it does not generalize to novel object categories in the wild due to limited 3D supervision. Instead, we train our model on diverse object categories from in-the-wild videos by extracting multiview 2D supervision and learning shape priors from existing datasets, without any real-world 3D supervision. Note that our setting involves a single image input at test time and we use in-the-wild videos for training only." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "type": "text", + "content": "Hand-Object datasets with 3D object models: Existing real-world hand-object datasets with 3D annotations are captured in lab settings and contain limited variation in objects, e.g. HO3D [18]:10, H2O [32]:8, FPHA [15]:4, FreiHAND [81]:35, ContactDB [2]:50, ContactPose [3]:25, DexYCB [8]:20, GRAB [58]:51, HOI4D [34]: 16 object categories. Collecting datasets with ground truth 3D shapes is difficult to scale since it often requires visual scanning setups (multiple cameras or motion capture). Synthesising realistic hand-object interaction is an open problem in itself [28, 31, 49, 65]. In this work, we curate sequences from in-the-wild VISOR dataset containing 144 object categories and design modules to extract supervision for training occupancy networks. The closest to ours is MOW with 120 objects that we only use to test models to assess generalization." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "text", + "content": "Hand-Object Interactions in the wild: There is a growing interest in understanding hands and how they interact with objects around them. Researchers have collected datasets [8, 18, 19, 22, 32, 34, 58] and trained models for detecting & segmenting hands and associated objects of interaction [13, 57, 62, 63]. Recognizing what hands are doing in images [7, 46, 79] is also relevant: through grasp classification [31], 2D pose estimation [51, 80], and more recently 3D shape and pose estimation [21, 22, 53, 54, 61, 73] for both hands and objects in contact." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 200, + 483, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 200, + 483, + 319 + ], + "spans": [ + { + "bbox": [ + 130, + 200, + 483, + 319 + ], + "type": "text", + "content": "3D from single image without direct 3D supervision. Several works relax the need for direct 3D supervision by incorporating auxiliary shape cues during training, e.g. multi-view consistency in masks [64], depth from single image [26, 37, 78] or stereo [24], appearance [11, 27, 60, 76]. These have been applied to reconstruction of category specific [27, 29, 30, 37] as well as generic objects [11, 75, 76]. However, directly applying these approaches to hand-held objects in the wild poses several challenges, e.g. unknown camera, novel object categories, heavy occlusion, inaccurate depth estimates. In this work, we propose modules to extract supervision from in-the-wild videos using object masks [13] & hand pose [54] and learn priors from synthetic collections of hand-held objects [22]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 336, + 212, + 350 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 336, + 212, + 350 + ], + "spans": [ + { + "bbox": [ + 132, + 336, + 212, + 350 + ], + "type": "text", + "content": "3 Approach" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 359, + 482, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 359, + 482, + 396 + ], + "spans": [ + { + "bbox": [ + 130, + 359, + 482, + 396 + ], + "type": "text", + "content": "We propose a novel framework for training 3D shape predictors from a single image without using any real world 3D supervision. Following prior work [73], we use implicit shape representation [43, 45] for 3D objects." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 411, + 228, + 422 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 411, + 228, + 422 + ], + "spans": [ + { + "bbox": [ + 132, + 411, + 228, + 422 + ], + "type": "text", + "content": "3.1 Preliminaries" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "spans": [ + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "text", + "content": "Consider the recent AC-SDF model for this task from Ye et al. [73]. Given an input RGB image, AC-SDF uses a neural network to predict the SDF of 3D points. The prediction is done in the hand coordinate frame obtained using FrankMocap [54], which outputs (a) hand articulation parameters " + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "inline_equation", + "content": "\\theta^a" + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "text", + "content": " (45 dimensional MANO hand pose [52]), (b) global rotation " + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "inline_equation", + "content": "\\theta^w" + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "text", + "content": " of the wrist joint w.r.t. camera, (c) weak perspective camera " + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "inline_equation", + "content": "\\theta^c" + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "text", + "content": ", with scale factor " + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "text", + "content": " & 2D translation " + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "inline_equation", + "content": "(t_x, t_y)" + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "text", + "content": ", which is converted into a full perspective camera " + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "text", + "content": ". 
These can be used to project a 3D point " + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "text", + "content": " into the image (" + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "text", + "content": " is the focal length) as " + }, + { + "bbox": [ + 130, + 428, + 482, + 525 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_p = K[T_{\\theta^w} \\mathbf{x} + (t_x, t_y, f / s)]" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "spans": [ + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "text", + "content": "Given a 3D point " + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "text", + "content": " & image " + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "text", + "content": ", AC-SDF conditions the SDF prediction on: (a) global image features from a ResNet-50 [23], (b) pixel-aligned features [55] from intermediate layers of ResNet-50 at the projection " + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_p" + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "text", + "content": " in the image, (c) hand articulation features obtained by representing " + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "text", + "content": " in the coordinate frame of 15 hand joints. This is realized as, " + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{s} = \\mathcal{F}(\\mathbf{x}; I, \\theta, K)" + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "text", + "content": ". Training " + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "text", + "content": " requires sampling 3D points " + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "text", + "content": " around the object and corresponding SDF values " + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "inline_equation", + "content": "\\theta = (\\theta^a, \\theta^w, \\theta^c, K)" + }, + { + "bbox": [ + 130, + 525, + 482, + 609 + ], + "type": "text", + "content": " are estimated from FrankMocap." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 624, + 316, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 624, + 316, + 635 + ], + "spans": [ + { + "bbox": [ + 132, + 624, + 316, + 635 + ], + "type": "text", + "content": "3.2 2D Mask Guided 3D Sampling" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 641, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 481, + 666 + ], + "type": "text", + "content": "Training models with implicit shape representation require supervision in the form of occupancy [43] or SDF [45] for 3D points sampled inside and outside" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 236, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 236, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 236, + 102 + ], + "type": "text", + "content": "A. Prakash et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 134, + 118, + 269, + 173 + ], + "blocks": [ + { + "bbox": [ + 134, + 118, + 269, + 173 + ], + "lines": [ + { + "bbox": [ + 134, + 118, + 269, + 173 + ], + "spans": [ + { + "bbox": [ + 134, + 118, + 269, + 173 + ], + "type": "image", + "image_path": "eeb1bf8a1e9150a7b510268e705094ab2cc3cfc7aeadb42869a95d2716011ca5.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 164, + 173, + 239, + 180 + ], + "lines": [ + { + "bbox": [ + 164, + 173, + 239, + 180 + ], + "spans": [ + { + "bbox": [ + 164, + 173, + 239, + 180 + ], + "type": "text", + "content": "a) Unposed video frames" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 133, + 180, + 269, + 226 + ], + "blocks": [ + { + "bbox": [ + 133, + 180, + 269, + 226 + ], + "lines": [ + { + "bbox": [ + 133, + 180, + 269, + 226 + ], + "spans": [ + { + "bbox": [ + 133, + 180, + 269, + 226 + ], + "type": "image", + "image_path": "1c84f74b3d35fddbd5f39ed95b2a1d8e270b2973065326a8c6afada6e67ca2a1.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 146, + 226, + 258, + 235 + ], + "lines": [ + { + "bbox": [ + 146, + 226, + 258, + 235 + ], + "spans": [ + { + "bbox": [ + 146, + 226, + 258, + 235 + ], + "type": "text", + "content": "b) Hand pose as proxy for object pose" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 130, + 248, + 482, + 326 + ], + "lines": [ + { + "bbox": [ + 130, + 248, + 482, + 326 + ], + "spans": [ + { + "bbox": [ + 130, + 248, + 482, + 326 + ], + "type": "text", + "content": "Fig.2: Registering objects via hand pose and 2D Mask guided 3D sampling. (a) Consider unposed frames from in-the-wild videos. (b) We use hand pose from FrankMocap [54] as a proxy for object pose, thereby registering the different views. (c) We then use 2D object masks for labeling 3D points with occupancy (Sec. 3.2). 
3D points that project into the object mask in all views are considered as occupied (green triangles), all other points are considered unoccupied (red crosses). (3D object in the figure is for visualization only, not used for sampling.)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 276, + 117, + 482, + 225 + ], + "blocks": [ + { + "bbox": [ + 276, + 117, + 482, + 225 + ], + "lines": [ + { + "bbox": [ + 276, + 117, + 482, + 225 + ], + "spans": [ + { + "bbox": [ + 276, + 117, + 482, + 225 + ], + "type": "image", + "image_path": "f75f185eb79dea1141bdfcdb7032ffbe35dacf6c0838cc49f55a5dc82e2a8e9d.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 226, + 440, + 236 + ], + "lines": [ + { + "bbox": [ + 306, + 226, + 440, + 236 + ], + "spans": [ + { + "bbox": [ + 306, + 226, + 440, + 236 + ], + "type": "text", + "content": "c) Multi-view supervision from posed images" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 350, + 482, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 350, + 482, + 422 + ], + "spans": [ + { + "bbox": [ + 130, + 350, + 482, + 422 + ], + "type": "text", + "content": "the object. Note that the balanced sampling of points inside and outside the object is an important consideration for training good predictors. While existing approaches [22, 31, 73] on this task use datasets with paired 3D supervision (3D object shape corresponding to 2D image), we operate in in-the-wild settings which do not contain 3D supervision. Instead, we propose a 2D mask guided 3D sampling strategy to obtain occupancy labels for training." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "spans": [ + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "text", + "content": "Consider multiple views " + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "inline_equation", + "content": "\\{I_1, \\ldots, I_n\\}" + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "text", + "content": " of a hand-held object (Fig. 2), along with their masks " + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "inline_equation", + "content": "\\{M_1, \\ldots, M_n\\}" + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "text", + "content": ". We can sample points " + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "text", + "content": " in 3D space and project them into different views. Any point " + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "text", + "content": " which projects into the object mask in all views is considered as occupied whereas if it projects outside the mask in even one of the views, it is considered as unoccupied. 
Thus, we get occupancy labels for a point " + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "inline_equation", + "content": "\\mathbf{s}^{gt} = \\cap_{i=1}^{n} M_i^{\\mathbf{x}_{p_i}}" + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "text", + "content": ". Here, " + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "inline_equation", + "content": "M_i^{\\mathbf{x}_{p_i}} = 1" + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "inline_equation", + "content": "x_{p_i}" + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "text", + "content": " lies inside the mask " + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "inline_equation", + "content": "M_i" + }, + { + "bbox": [ + 130, + 424, + 482, + 555 + ], + "type": "text", + "content": " & 0 otherwise. Note that it is not possible to obtain SDF values in this manner, since the distance to the object surface cannot be estimated in the absence of 3D object models. While we can obtain 3D occupancy labels using this strategy, there are two important considerations: camera poses are unknown (required for projection) & how to balance the sampling of points inside & outside the object." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 556, + 482, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 556, + 482, + 616 + ], + "spans": [ + { + "bbox": [ + 130, + 556, + 482, + 616 + ], + "type": "text", + "content": "Camera pose: We assume that the hand is rigidly moving with the object. This is not an unreasonable assumption, as humans rarely do in-hand manipulation in pick & place tasks involving small rigid objects. Thus, the relative pose of the hand between different views reveals the relative pose of the object. This lets us use the hand poses predicted by FrankMocap " + }, + { + "bbox": [ + 130, + 556, + 482, + 616 + ], + "type": "inline_equation", + "content": "\\{\\theta_1,\\dots ,\\theta_n\\}" + }, + { + "bbox": [ + 130, + 556, + 482, + 616 + ], + "type": "text", + "content": " to register the different views." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 617, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 617, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 617, + 482, + 665 + ], + "type": "text", + "content": "Balanced sampling: In the absence of 3D object models, a natural choice is to sample points uniformly in 3D space. However, this leads to most points lying outside the object because the object location is unknown. Instead, we sample points in the hand coordinate frame. Consider the total number of points to be " + }, + { + "bbox": [ + 130, + 617, + 482, + 665 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 130, + 617, + 482, + 665 + ], + "type": "text", + "content": "."
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "text", + "content": "We adopt several strategies for balanced sampling for points inside " + }, + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "inline_equation", + "content": "(s^{gt} = 1)" + }, + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "text", + "content": " and outside the object " + }, + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "inline_equation", + "content": "(s^{gt} = 0)" + }, + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "text", + "content": ". We uniformly sample " + }, + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "inline_equation", + "content": "q / 2" + }, + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "text", + "content": " 3D points " + }, + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "inline_equation", + "content": "\\mathbf{x} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "text", + "content": " in the normalized hand coordinate frame and project these into all the available views. Since all these " + }, + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "inline_equation", + "content": "q / 2" + }, + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "text", + "content": " points may not be occupied, we use rejection sampling to repeat the procedure, for maximum of " + }, + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "inline_equation", + "content": "t = 50" + }, + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "text", + "content": " times or until we get " + }, + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "inline_equation", + "content": "q / 2" + }, + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "text", + "content": " occupied points. Also, all points projecting into the hand mask in all views and vertices of the MANO [53] hand are labeled as unoccupied." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 200, + 481, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 200, + 481, + 225 + ], + "spans": [ + { + "bbox": [ + 130, + 200, + 481, + 225 + ], + "type": "text", + "content": "Formally, for images " + }, + { + "bbox": [ + 130, + 200, + 481, + 225 + ], + "type": "inline_equation", + "content": "\\{I_1,\\ldots ,I_n\\}" + }, + { + "bbox": [ + 130, + 200, + 481, + 225 + ], + "type": "text", + "content": " with object masks " + }, + { + "bbox": [ + 130, + 200, + 481, + 225 + ], + "type": "inline_equation", + "content": "\\{M_1,\\dots ,M_n\\}" + }, + { + "bbox": [ + 130, + 200, + 481, + 225 + ], + "type": "text", + "content": ", hand masks " + }, + { + "bbox": [ + 130, + 200, + 481, + 225 + ], + "type": "inline_equation", + "content": "\\{H_{1},\\ldots ,H_{n}\\}" + }, + { + "bbox": [ + 130, + 200, + 481, + 225 + ], + "type": "text", + "content": " and MANO vertices " + }, + { + "bbox": [ + 130, + 200, + 481, + 225 + ], + "type": "inline_equation", + "content": "\\{V_{1},\\ldots ,V_{n}\\}" + }, + { + "bbox": [ + 130, + 200, + 481, + 225 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 200, + 481, + 225 + ], + "type": "inline_equation", + "content": "\\mathbf{s}^{gt}" + }, + { + "bbox": [ + 130, + 200, + 481, + 225 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 130, + 200, + 481, + 225 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 200, + 481, + 225 + ], + "type": "text", + "content": " is:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 181, + 234, + 482, + 266 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 234, + 482, + 266 + ], + "spans": [ + { + "bbox": [ + 181, + 234, + 482, + 266 + ], + "type": "interline_equation", + "content": "\\mathbf {s} ^ {g t} = \\left\\{ \\begin{array}{l l} 1 & \\text {i f} \\cap_ {i = 1} ^ {n} M _ {i} ^ {\\mathbf {x} _ {p _ {i}}} \\text {a n d} \\cap_ {i = 1} ^ {n} \\neg H _ {i} ^ {\\mathbf {x} _ {p _ {i}}} \\text {a n d} \\cup_ {i = 1} ^ {n} \\neg V _ {i} ^ {\\mathbf {x}} \\\\ 0 & \\text {o t h e r w i s e} \\end{array} \\right. 
\\tag {1}", + "image_path": "f507fe2be272f9ebe557d111d145b243dae1b0f02ccd4a5fdec4c5c9b49f39dc.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "spans": [ + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_{p_i}" + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "text", + "content": " is the projection of " + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "inline_equation", + "content": "M_{i}^{\\mathbf{x}_{p_i}} = 1" + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "inline_equation", + "content": "x_{p_i}" + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "text", + "content": " lies inside " + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "inline_equation", + "content": "M_{i}" + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "inline_equation", + "content": "H_{i}^{\\mathbf{x}_{p_i}} = 1" + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "inline_equation", + "content": "x_{p_i}" + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "text", + "content": " lies inside " + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "inline_equation", + "content": "H_{i}" + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "inline_equation", + "content": "V_{i}^{\\mathbf{x}} = 1" + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "text", + "content": " belongs to " + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "inline_equation", + "content": "V_{i}" + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "inline_equation", + "content": "\\neg" + }, + { + "bbox": [ + 130, + 274, + 480, + 300 + ], + "type": "text", + "content": " is the logical negation operator." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 300, + 482, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 300, + 482, + 358 + ], + "spans": [ + { + "bbox": [ + 130, + 300, + 482, + 358 + ], + "type": "text", + "content": "Note that, due to hand occlusions and errors in FrankMocap predictions, it is possible that some 3D points belonging to the object are not projected into the object masks but we do not want to label these points as unoccupied. So we disregard points which project onto the object mask in some views and hand mask in other views as these points could belong to object due to hand occlusion." 
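Read literally, Eq. (1) is a per-point test across all n views. Below is a minimal sketch, assuming the point's projections x_{p_i} are precomputed integer pixel coordinates within image bounds and that the V_i membership test is given as booleans; both are simplifications for illustration.

```python
import numpy as np

def occupancy_label(x_uv, obj_masks, hand_masks, is_mano_vertex):
    # x_uv:           (n, 2) integer pixel coords of one 3D point in n views
    # obj_masks:      list of n boolean object masks M_i
    # hand_masks:     list of n boolean hand masks H_i
    # is_mano_vertex: list of n bools, a stand-in for the V_i^x test
    in_all_obj = all(M[v, u] for (u, v), M in zip(x_uv, obj_masks))         # cap M_i
    out_all_hand = all(not H[v, u] for (u, v), H in zip(x_uv, hand_masks))  # cap not-H_i
    not_vertex = any(not vx for vx in is_mano_vertex)                       # cup not-V_i
    return 1 if (in_all_obj and out_all_hand and not_vertex) else 0
```

The disregard rule from the paragraph above (points that fall in the object mask in some views and the hand mask in others) would be applied before this test; it is omitted here for brevity.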
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 359, + 482, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 359, + 482, + 418 + ], + "spans": [ + { + "bbox": [ + 130, + 359, + 482, + 418 + ], + "type": "text", + "content": "This is reminiscent of the visual hull algorithm [33, 42], which generates 3D reconstruction by carving out space that projects outside the segmentation in any view. Visual hull algorithms need multiple views at test time to generate any output. In contrast, we are doing this at training time to obtain supervision for " + }, + { + "bbox": [ + 130, + 359, + 482, + 418 + ], + "type": "inline_equation", + "content": "\\mathcal{F}(\\mathbf{x};I_1,\\theta_1,K_1)" + }, + { + "bbox": [ + 130, + 359, + 482, + 418 + ], + "type": "text", + "content": ", which makes predictions from a single view." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 418, + 472, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 418, + 472, + 432 + ], + "spans": [ + { + "bbox": [ + 130, + 418, + 472, + 432 + ], + "type": "text", + "content": "Training: We use cross-entropy loss (CE) to train " + }, + { + "bbox": [ + 130, + 418, + 472, + 432 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 130, + 418, + 472, + 432 + ], + "type": "text", + "content": " using ground truth " + }, + { + "bbox": [ + 130, + 418, + 472, + 432 + ], + "type": "inline_equation", + "content": "\\mathbf{s}^{gt}" + }, + { + "bbox": [ + 130, + 418, + 472, + 432 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 249, + 441, + 482, + 455 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 441, + 482, + 455 + ], + "spans": [ + { + "bbox": [ + 249, + 441, + 482, + 455 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {v i s u a l - h u l l}} = \\operatorname {C E} (\\mathcal {F} (\\mathbf {x}), \\mathbf {s} ^ {g t}) \\tag {2}", + "image_path": "beeaa49d848fbb9eeda51b8ec136c0d3a769b0c64cc643e76724b32e4ce70b8e.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 463, + 482, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 463, + 482, + 512 + ], + "spans": [ + { + "bbox": [ + 130, + 463, + 482, + 512 + ], + "type": "text", + "content": "To further regularize training, we also encourage the occupancy prediction from different views to be consistent with each other. Since our predictions are already in the hand coordinate frame, which is common across all views, this can be done by minimizing " + }, + { + "bbox": [ + 130, + 463, + 482, + 512 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{consistency}}" + }, + { + "bbox": [ + 130, + 463, + 482, + 512 + ], + "type": "text", + "content": " for different views " + }, + { + "bbox": [ + 130, + 463, + 482, + 512 + ], + "type": "inline_equation", + "content": "i\\& j" + }, + { + "bbox": [ + 130, + 463, + 482, + 512 + ], + "type": "text", + "content": " of the same object." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 181, + 521, + 482, + 548 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 521, + 482, + 548 + ], + "spans": [ + { + "bbox": [ + 181, + 521, + 482, + 548 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {c o n s i s t e n c y}} = \\sum_ {\\mathbf {x} \\in \\mathbb {R} ^ {3}, i \\neq j} \\operatorname {C E} \\left(\\mathcal {F} (\\mathbf {x}; I _ {i}, \\theta_ {i}, K _ {i}), \\mathcal {F} (\\mathbf {x}; I _ {j}, \\theta_ {j}, K _ {j})\\right) \\tag {3}", + "image_path": "e3f7bb92abf6a3af275aa947134ebc0b3a68b48c26095083ec4e103999038c11.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 131, + 573, + 405, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 573, + 405, + 586 + ], + "spans": [ + { + "bbox": [ + 131, + 573, + 405, + 586 + ], + "type": "text", + "content": "3.3 2D Slice based 3D Discriminator as Shape Prior" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "text", + "content": "We adopt an adversarial training framework [16] to build a prior on shapes of hand-held objects and use it to supervise the training of the occupancy prediction function " + }, + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "inline_equation", + "content": "\\mathcal{F}(\\mathbf{x};I_1,\\theta_1^a,\\theta_1^w,K_1)" + }, + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "text", + "content": ". As such a prior can be challenging to hand-craft, we build it in a data-driven way. We use 3D shape repository from synthetic datasets [22], which contain more than " + }, + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "inline_equation", + "content": "2.5\\mathrm{K}" + }, + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "text", + "content": " hand-held objects, to learn the prior. Specifically, we train a discriminator " + }, + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "text", + "content": " to differentiate between 3D shapes from" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "A. Prakash et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 177, + 114, + 446, + 200 + ], + "blocks": [ + { + "bbox": [ + 177, + 114, + 446, + 200 + ], + "lines": [ + { + "bbox": [ + 177, + 114, + 446, + 200 + ], + "spans": [ + { + "bbox": [ + 177, + 114, + 446, + 200 + ], + "type": "image", + "image_path": "8654734017afa8718f878ab951a3c1f351b22d038336a8dfdb8e263c60d0a8d1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 213, + 482, + 269 + ], + "lines": [ + { + "bbox": [ + 131, + 213, + 482, + 269 + ], + "spans": [ + { + "bbox": [ + 131, + 213, + 482, + 269 + ], + "type": "text", + "content": "Fig. 3: 2D slice based 3D discriminator. We learn data-driven 3D shape priors using hand-held objects from ObMan dataset. We sample planes through the object (shown above in blue), resulting in a 2D cross-section map. We pass occupancy predictions on points from these cross-sections through a discriminator which tries to distinguish cross-sections of predicted 3D shapes from cross-sections of ObMan objects (Sec. 3.3)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 131, + 297, + 481, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 297, + 481, + 323 + ], + "spans": [ + { + "bbox": [ + 131, + 297, + 481, + 323 + ], + "type": "text", + "content": "ObMan [22] and generated shapes as predicted by " + }, + { + "bbox": [ + 131, + 297, + 481, + 323 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 131, + 297, + 481, + 323 + ], + "type": "text", + "content": ". We derive supervision for " + }, + { + "bbox": [ + 131, + 297, + 481, + 323 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 131, + 297, + 481, + 323 + ], + "type": "text", + "content": " by encouraging it to predict shapes that are real as per " + }, + { + "bbox": [ + 131, + 297, + 481, + 323 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 131, + 297, + 481, + 323 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 324, + 482, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 324, + 482, + 469 + ], + "spans": [ + { + "bbox": [ + 130, + 324, + 482, + 469 + ], + "type": "text", + "content": "A natural choice is to train the discriminator with 3D input, e.g. " + }, + { + "bbox": [ + 130, + 324, + 482, + 469 + ], + "type": "inline_equation", + "content": "N \\times N \\times N" + }, + { + "bbox": [ + 130, + 324, + 482, + 469 + ], + "type": "text", + "content": " cube in 3D voxel space [67]. One way to do this is to sample " + }, + { + "bbox": [ + 130, + 324, + 482, + 469 + ], + "type": "inline_equation", + "content": "N^3" + }, + { + "bbox": [ + 130, + 324, + 482, + 469 + ], + "type": "text", + "content": " 3D points in the hand coordinate frame and run a forward pass through " + }, + { + "bbox": [ + 130, + 324, + 482, + 469 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 130, + 324, + 482, + 469 + ], + "type": "text", + "content": " to get the occupancy for each of these points. However this is computationally expensive and often leads to large imbalance as most points lie outside the object (we ablate this in Sec. 4.3). 
Instead, we propose a novel 2D slice based 3D discriminator which operates on arbitrary 2D slices. These are computed by taking the cross-section of 2D planes with 3D shapes and sampling 3D points that lie on these 2D cross-sections. The key intuition here is that the discriminator sees different randomly sampled 2D slices during the course of training, which helps it learn fine-grained shape information. E.g., for a sphere all cross-sections are circular, but for a cylinder most are oval. This helps distinguish between different 3D shapes." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 471, + 482, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 471, + 482, + 579 + ], + "spans": [ + { + "bbox": [ + 130, + 471, + 482, + 579 + ], + "type": "text", + "content": "Sampling 2D slices: There are several important considerations in sampling 2D slices. First, uniformly sampling 2D planes often leads to most points lying outside the object, which is not useful for training the discriminator. Instead, we sample 2D planes that pass through the origin in the hand coordinate system. Since the objects are in contact with the hand, the sampled points are more likely to encompass the object. Then, we rotate the sampled 2D planes by arbitrary angles so that they are not axis-aligned, to better capture fine-grained shape information. We ablate all these design choices in Sec. 4.3. This sampling function " + }, + { + "bbox": [ + 130, + 471, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{Z}" + }, + { + "bbox": [ + 130, + 471, + 482, + 579 + ], + "type": "text", + "content": " results in a set of 2D planes on which 3D points are uniformly sampled." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "type": "text", + "content": "Training: We pass the sampled points from 2D slices of the generated 3D shape through " + }, + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "type": "text", + "content": " to get the corresponding occupancy values " + }, + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "type": "inline_equation", + "content": "S^{\\mathrm{gen}}" + }, + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "type": "text", + "content": ". This represents the generated 3D shape. We adopt the same strategy for representing 3D shapes from ObMan (used as real shapes) but use the predictions " + }, + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "type": "inline_equation", + "content": "S^{\\mathrm{real}}" + }, + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "type": "text", + "content": " of the occupancy network overfitted on ObMan. As they come from an overfitted model, they generally match the ground truth slices well but at the same time are soft and prevent the discriminator from cheating."
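A minimal sketch of one such slice sampler, under illustrative assumptions: a random orthonormal basis (obtained here via QR decomposition of a Gaussian matrix) spans a plane through the origin of the hand coordinate frame, and a regular lattice on that plane supplies the 3D query points. The grid resolution and extent are our choices, not the paper's.

```python
import numpy as np

def sample_slice(rng, grid=64, extent=1.0):
    # random plane orientation: Q from QR of a Gaussian matrix is orthonormal
    basis, _ = np.linalg.qr(rng.normal(size=(3, 3)))
    lin = np.linspace(-extent, extent, grid)
    u, v = np.meshgrid(lin, lin)                  # lattice in plane coords
    coords = np.stack([u.ravel(), v.ravel()], 1)  # (grid*grid, 2)
    # embed into 3D; the plane passes through the origin of the hand frame
    return coords @ basis[:, :2].T                # (grid*grid, 3)

# usage: pts = sample_slice(np.random.default_rng(0))
```

Passing these points through the occupancy predictor (or through the reference occupancy network for ObMan shapes) yields the cross-section maps S^gen and S^real that the discriminator compares.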
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 138, + 90, + 448, + 103 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 90, + 448, + 103 + ], + "spans": [ + { + "bbox": [ + 138, + 90, + 448, + 103 + ], + "type": "text", + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 114, + 481, + 174 + ], + "blocks": [ + { + "bbox": [ + 133, + 114, + 481, + 174 + ], + "lines": [ + { + "bbox": [ + 133, + 114, + 481, + 174 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 481, + 174 + ], + "type": "image", + "image_path": "33c8327e7f8180bd9541b980893b45a724301ee1c484b12fcda5dc9b03c4dad0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 181, + 482, + 237 + ], + "lines": [ + { + "bbox": [ + 130, + 181, + 482, + 237 + ], + "spans": [ + { + "bbox": [ + 130, + 181, + 482, + 237 + ], + "type": "text", + "content": "Fig. 4: VISOR visualizations. Using existing hand pose estimation techniques [54], we are able to track the objects in relation to hands through time in in-the-wild videos. We visualize these tracks along with object masks from the VISOR dataset [13]. This form of data, where objects move rigidly relative to hands, is used to train our model to learn 3D shape of hand-held objects." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 259, + 482, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 259, + 482, + 308 + ], + "spans": [ + { + "bbox": [ + 130, + 259, + 482, + 308 + ], + "type": "text", + "content": "We train the discriminator " + }, + { + "bbox": [ + 130, + 259, + 482, + 308 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 130, + 259, + 482, + 308 + ], + "type": "text", + "content": " to differentiate between " + }, + { + "bbox": [ + 130, + 259, + 482, + 308 + ], + "type": "inline_equation", + "content": "S^{\\mathrm{gen}}" + }, + { + "bbox": [ + 130, + 259, + 482, + 308 + ], + "type": "text", + "content": " & " + }, + { + "bbox": [ + 130, + 259, + 482, + 308 + ], + "type": "inline_equation", + "content": "S^{\\mathrm{real}}" + }, + { + "bbox": [ + 130, + 259, + 482, + 308 + ], + "type": "text", + "content": " using the least squares formulation [41] for discriminator loss. We derive supervision for " + }, + { + "bbox": [ + 130, + 259, + 482, + 308 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 130, + 259, + 482, + 308 + ], + "type": "text", + "content": " by computing gradients through " + }, + { + "bbox": [ + 130, + 259, + 482, + 308 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 130, + 259, + 482, + 308 + ], + "type": "text", + "content": " on the occupancy values at the sampled points to maximize the realism of the generated shapes." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 216, + 314, + 482, + 360 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 314, + 482, + 360 + ], + "spans": [ + { + "bbox": [ + 216, + 314, + 482, + 360 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\mathrm {a d v}} ^ {\\mathcal {D}} = [ \\mathcal {D} (S ^ {\\mathrm {r e a l}}) - 1 ] ^ {2} + [ \\mathcal {D} (S ^ {\\mathrm {g e n}}) ] ^ {2} \\\\ \\mathcal {L} _ {\\mathrm {a d v}} ^ {\\mathcal {F}} = [ \\mathcal {D} (S ^ {\\mathrm {g e n}}) - 1 ] ^ {2} \\\\ \\mathcal {L} _ {\\text {s h a p e - p r i o r}} = \\lambda_ {f} \\mathcal {L} _ {\\text {a d v}} (\\mathcal {F}) + \\lambda_ {d} \\mathcal {L} _ {\\text {a d v}} (\\mathcal {D}) \\tag {4} \\\\ \\end{array}", + "image_path": "08e3d82f60b3233e02eae1fc6887ce756b3c66e82c5eaf2d59f64bfde09faf0f.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 375, + 243, + 387 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 375, + 243, + 387 + ], + "spans": [ + { + "bbox": [ + 132, + 375, + 243, + 387 + ], + "type": "text", + "content": "3.4 Training Details" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 394, + 481, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 394, + 481, + 418 + ], + "spans": [ + { + "bbox": [ + 130, + 394, + 481, + 418 + ], + "type": "text", + "content": "We train " + }, + { + "bbox": [ + 130, + 394, + 481, + 418 + ], + "type": "inline_equation", + "content": "\\mathcal{F}\\& \\mathcal{D}" + }, + { + "bbox": [ + 130, + 394, + 481, + 418 + ], + "type": "text", + "content": " in an alternating manner with 2 iterations of " + }, + { + "bbox": [ + 130, + 394, + 481, + 418 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 130, + 394, + 481, + 418 + ], + "type": "text", + "content": " for every iteration of " + }, + { + "bbox": [ + 130, + 394, + 481, + 418 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 130, + 394, + 481, + 418 + ], + "type": "text", + "content": ". The total loss for training our framework is:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 208, + 425, + 481, + 455 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 208, + 425, + 481, + 455 + ], + "spans": [ + { + "bbox": [ + 208, + 425, + 481, + 455 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\mathcal {F}} = \\lambda_ {v} \\mathcal {L} _ {\\text {v i s u a l - h u l l}} + \\lambda_ {c} \\mathcal {L} _ {\\text {c o n s i s t e n c y}} + \\lambda_ {f} \\mathcal {L} _ {\\text {a d v}} ^ {\\mathcal {F}} \\\\ \\mathcal {L} _ {\\mathcal {D}} = \\lambda_ {d} \\mathcal {L} _ {\\mathrm {a d v}} ^ {\\mathcal {D}} \\tag {5} \\\\ \\end{array}", + "image_path": "b27968a8480adf4eeeed45297e4a287a727a9eace2d08c29b75963fc33f35cff.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 463, + 483, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 463, + 483, + 523 + ], + "spans": [ + { + "bbox": [ + 130, + 463, + 483, + 523 + ], + "type": "text", + "content": "Following standard practice [73], we pretrain on synthetic ObMan. We train our model jointly on ObMan (3D supervision, shape priors) & VISOR (2D supervision) with a dataset ratio of ObMan:VISOR as 1:2. 
We use batch size of 64, learning rate of 1e-5 across 4 NVIDIA A40 GPUs & loss weights as " + }, + { + "bbox": [ + 130, + 463, + 483, + 523 + ], + "type": "inline_equation", + "content": "\\lambda_v = 1, \\lambda_c = 1, \\lambda_f = 0.25, \\lambda_d = 0.25" + }, + { + "bbox": [ + 130, + 463, + 483, + 523 + ], + "type": "text", + "content": ". Please refer to supplementary for more details." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 539, + 389, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 539, + 389, + 552 + ], + "spans": [ + { + "bbox": [ + 131, + 539, + 389, + 552 + ], + "type": "text", + "content": "3.5 Constructing Wild Objects in Hands Dataset" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 558, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 558, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 558, + 482, + 666 + ], + "type": "text", + "content": "Our framework requires dataset containing multi-view images of rigid hand-object interactions in the wild, with 3D hand pose and 2D object masks. To construct such a dataset, we consider VISOR [13] which provides 2D tracks for hands, objects they are interacting with and their segmentation masks. It contains a rich set of hand-object interactions, e.g. taking out milk from the fridge, pouring oil from bottles, kneading dough, cutting vegetables, and stirring noodles in a wok. Our interest is in the 3D reconstruction of rigid objects which are in-contact with a hand, but there are no 3D object annotations in VISOR. Hence, we process it to prepare a dataset for training our model." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "A. Prakash et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 179, + 481, + 238 + ], + "blocks": [ + { + "bbox": [ + 130, + 114, + 482, + 170 + ], + "lines": [ + { + "bbox": [ + 130, + 114, + 482, + 170 + ], + "spans": [ + { + "bbox": [ + 130, + 114, + 482, + 170 + ], + "type": "text", + "content": "Table 1: Generalization to novel objects in the wild. We report F-score at " + }, + { + "bbox": [ + 130, + 114, + 482, + 170 + ], + "type": "inline_equation", + "content": "5\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 114, + 482, + 170 + ], + "type": "text", + "content": " & " + }, + { + "bbox": [ + 130, + 114, + 482, + 170 + ], + "type": "inline_equation", + "content": "10\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 114, + 482, + 170 + ], + "type": "text", + "content": ", Chamfer distance (CD, mm) for object generalization splits on MOW. We compare with AC-OCC & AC-SDF trained on different combinations of datasets with full 3D supervision. Our approach outperforms baselines across all metrics without using real-world 3D supervision (Relative % improvement w.r.t. best baseline in green)." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 179, + 481, + 238 + ], + "lines": [ + { + "bbox": [ + 135, + 179, + 481, + 238 + ], + "spans": [ + { + "bbox": [ + 135, + 179, + 481, + 238 + ], + "type": "table", + "html": "
<table><tr><th>Method</th><th>Dataset and supervision used</th><th>F@5 ↑</th><th>F@10 ↑</th><th>CD ↓</th></tr>
<tr><td>AC-OCC</td><td>ObMan (Synthetic 3D)</td><td>0.095</td><td>0.179</td><td>8.69</td></tr>
<tr><td>AC-SDF [73]</td><td>ObMan (Synthetic 3D)</td><td>0.108</td><td>0.199</td><td>7.82</td></tr>
<tr><td>AC-SDF [73]</td><td>ObMan (Synthetic 3D) + HO3D (Lab 3D)</td><td>0.082</td><td>0.159</td><td>7.52</td></tr>
<tr><td>AC-SDF [73]</td><td>ObMan (Synthetic 3D) + HO3D (Lab 3D) + HOI4D (3D)</td><td>0.095</td><td>0.193</td><td>7.43</td></tr>
<tr><td>HORSE (Ours)</td><td>ObMan (Synthetic 3D) + VISOR (2D Masks) + Shape priors</td><td>0.121 (+10.7%)</td><td>0.220 (+10.6%)</td><td>6.76 (+13.5%)</td></tr></table>
", + "image_path": "4ae1d2adea35c968993b3d64e07ffd1c358eeec60cc3584da2887093e3605b11.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 262, + 479, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 262, + 479, + 332 + ], + "spans": [ + { + "bbox": [ + 130, + 262, + 479, + 332 + ], + "type": "text", + "content": "We first sample a subset of VISOR involving hand-object contact, using available contact annotations. We select object tracks where only one hand is in consistent contact with the object. This leaves us with 14768 object tracks from the original VISOR dataset. We then manually filter this subset to select a subset that showcases manipulation of rigid objects with a single hand. This leaves us with 604 video snippets showing hands interacting with different objects." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 334, + 482, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 334, + 482, + 537 + ], + "spans": [ + { + "bbox": [ + 130, + 334, + 482, + 537 + ], + "type": "text", + "content": "Processing hands on VISOR: We rely on the 3D hand poses to set up the output coordinate frame, compute hand articulation features, and more importantly to register the different frames together [38,66]. These hand poses are estimated using FrankMocap, which may not always be accurate. To remove erroneous poses, we employ automated filtering using the uncertainty estimate technique from Bahat & Shakhnarovich [1] following 3D human pose literature [50]. Specifically, we obtain 3D hand pose predictions on five different versions of the image, augmented by different fixed translations. The uncertainty estimate for a given image is computed as the standard deviation of reprojection locations of MANO vertices across these 5 image versions. This sidesteps the need to hand-specify the trade-off between translation, rotation, and articulation parameters that are part of the 3D hand pose output. This leaves us with 473 video snippets consisting of 144 object categories. This object diversity is " + }, + { + "bbox": [ + 130, + 334, + 482, + 537 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 130, + 334, + 482, + 537 + ], + "type": "text", + "content": " larger than existing datasets [18, 19, 32, 34, 69] used for our task, typically containing 10 to 32 object categories. We refer to this dataset as Wild Objects in Hands, some example object sequences are shown in Fig. 4. Note the *incidental* multiple views and relative consistency in hand and object pose over the course of interaction." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 557, + 230, + 571 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 557, + 230, + 571 + ], + "spans": [ + { + "bbox": [ + 132, + 557, + 230, + 571 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 584, + 209, + 594 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 584, + 209, + 594 + ], + "spans": [ + { + "bbox": [ + 132, + 584, + 209, + 594 + ], + "type": "text", + "content": "4.1 Protocols" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 605, + 481, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 481, + 640 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 481, + 640 + ], + "type": "text", + "content": "We use 4 datasets for training (ObMan [22], VISOR [13], HO3D [18], HOI4D [34]) and 2 datasets (MOW [5], HO3D) for evaluation. Different methods are trained on different datasets, depending on the specific evaluation setting." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "content": "Training datasets: ObMan is a large scale synthetic hand-object dataset with 2.5K objects and 3D supervision. HO3D & HOI4D are real world datasets collected" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 133, + 158, + 309, + 219 + ], + "blocks": [ + { + "bbox": [ + 132, + 125, + 309, + 157 + ], + "lines": [ + { + "bbox": [ + 132, + 125, + 309, + 157 + ], + "spans": [ + { + "bbox": [ + 132, + 125, + 309, + 157 + ], + "type": "text", + "content": "Table 2: HO3D Object generalization. We outperform AC-OCC & AC-SDF trained on different datasets with 3D supervision." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 133, + 158, + 309, + 219 + ], + "lines": [ + { + "bbox": [ + 133, + 158, + 309, + 219 + ], + "spans": [ + { + "bbox": [ + 133, + 158, + 309, + 219 + ], + "type": "table", + "html": "
<table><tr><th>Method</th><th>Supervision (ObMan +)</th><th>F@5</th><th>F@10</th><th>CD</th></tr>
<tr><td>AC-OCC</td><td>-</td><td>0.18</td><td>0.33</td><td>4.39</td></tr>
<tr><td>AC-SDF</td><td>-</td><td>0.17</td><td>0.33</td><td>3.72</td></tr>
<tr><td>AC-SDF</td><td>MOW (3D)</td><td>0.17</td><td>0.33</td><td>3.84</td></tr>
<tr><td>AC-SDF</td><td>MOW (3D) + HOI4D (3D)</td><td>0.17</td><td>0.33</td><td>3.63</td></tr>
<tr><td>Ours</td><td>VISOR (Multi-view 2D)</td><td>0.20</td><td>0.35</td><td>3.39</td></tr></table>
", + "image_path": "089202a6a85e8a9dbae594868f06d422abbec314b67bbf84abdcb55b413d6720.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 313, + 158, + 482, + 209 + ], + "blocks": [ + { + "bbox": [ + 312, + 125, + 482, + 157 + ], + "lines": [ + { + "bbox": [ + 312, + 125, + 482, + 157 + ], + "spans": [ + { + "bbox": [ + 312, + 125, + 482, + 157 + ], + "type": "text", + "content": "Table 3: HO3D View generalization. We outperform HO [22] & GF [31], trained on HO3D with full 3D supervision." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 313, + 158, + 482, + 209 + ], + "lines": [ + { + "bbox": [ + 313, + 158, + 482, + 209 + ], + "spans": [ + { + "bbox": [ + 313, + 158, + 482, + 209 + ], + "type": "table", + "html": "
<table><tr><th>Method</th><th>Supervision (ObMan +)</th><th>F@5</th><th>F@10</th><th>CD</th></tr>
<tr><td>AC-SDF</td><td>-</td><td>0.17</td><td>0.32</td><td>3.72</td></tr>
<tr><td>HO [22]</td><td>HO3D (3D)</td><td>0.11</td><td>0.22</td><td>4.19</td></tr>
<tr><td>GF [31]</td><td>HO3D (3D)</td><td>0.12</td><td>0.24</td><td>4.96</td></tr>
<tr><td>Ours</td><td>HO3D (Multi-view 2D)</td><td>0.23</td><td>0.43</td><td>1.41</td></tr></table>
", + "image_path": "f2aa5932dcaa1aa8ea20e2797bc5bbbf2b249906cd2cc089424c2ba48491d548.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 243, + 481, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 243, + 481, + 291 + ], + "spans": [ + { + "bbox": [ + 130, + 243, + 481, + 291 + ], + "type": "text", + "content": "in lab settings with 3D annotations. HO3D contains 10 YCB [82] objects whereas HOI4D contains 16 object categories, out of which 7 are rigid. VISOR does not contain any 3D supervision. Instead, we use the process described in Sec. 3.5, to extract supervision from VISOR, resulting in 144 object categories." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 292, + 482, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 292, + 482, + 568 + ], + "spans": [ + { + "bbox": [ + 130, + 292, + 482, + 568 + ], + "type": "text", + "content": "The baselines are trained with different combinations of HO3D & HOI4D [34]. As our method does not require 3D ground truth, we do not use these datasets for training. Instead, we use auxiliary supervision from Wild Objects in Hands (Sec. 3.5) & learn shape priors using ObMan. VISOR does not have 3D annotations and can not be used to train the baselines. Note that all models are initialized from the model pretrained on ObMan for fair comparisons, following protocol [73]. Evaluation datasets: We focus on the challenging zero-shot generalization to novel objects in-the-wild setting. We use MOW [5] dataset which contains images from YouTube, spanning 120 object templates. Note that these types of images have not been seen during training. To be consistent with prior work [73], we also use HO3D for evaluation, consisting of 1221 testing images across 10 objects. While [73] operate in view generalization setting, i.e., making predictions on novel views of training objects, we also consider the more challenging object generalization setting. Almost all of our experiments are conducted in the object generalization setting where we assess predictions on novel objects across datasets. Metrics: Following [59, 73], we report Chamfer distance (CD) and F-score at " + }, + { + "bbox": [ + 130, + 292, + 482, + 568 + ], + "type": "inline_equation", + "content": "5\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 292, + 482, + 568 + ], + "type": "text", + "content": " & " + }, + { + "bbox": [ + 130, + 292, + 482, + 568 + ], + "type": "inline_equation", + "content": "10\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 292, + 482, + 568 + ], + "type": "text", + "content": " thresholds. F-score evaluates the distance between object surfaces as the harmonic mean between precision & recall. Precision measures accuracy of the reconstruction as " + }, + { + "bbox": [ + 130, + 292, + 482, + 568 + ], + "type": "inline_equation", + "content": "\\%" + }, + { + "bbox": [ + 130, + 292, + 482, + 568 + ], + "type": "text", + "content": " of reconstructed points that lie within a certain distance to ground truth. Recall measures completeness of the reconstruction as " + }, + { + "bbox": [ + 130, + 292, + 482, + 568 + ], + "type": "inline_equation", + "content": "\\%" + }, + { + "bbox": [ + 130, + 292, + 482, + 568 + ], + "type": "text", + "content": " of points, on the ground truth, that lie within a certain distance to the reconstruction. CD computes sum of distances for each pair of nearest neighbors in the two point clouds. 
We report mean CD & F-score over all test objects." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 570, + 483, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 570, + 483, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 570, + 483, + 666 + ], + "type": "text", + "content": "Baselines: We compare our model with AC-SDF trained in supervised manner using 3D ground truth on different combination of datasets in different settings: (1) For object generalization on MOW in the wild, AC-SDF is trained on ObMan, ObMan + HO3D, ObMan + HO3D + HOI4D, (2) For object generalization on HO3D, AC-SDF is trained on ObMan, ObMan + MOW, ObMan + MOW + HOI4D, (3) For view generalization on HO3D, AC-SDF is trained on ObMan + HO3D. We also compare with an occupancy variant of AC-SDF (AC-OCC) and recent published methods with different forms of SDF representation, e.g." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "A. Prakash et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 138, + 179, + 299, + 255 + ], + "blocks": [ + { + "bbox": [ + 131, + 114, + 302, + 169 + ], + "lines": [ + { + "bbox": [ + 131, + 114, + 302, + 169 + ], + "spans": [ + { + "bbox": [ + 131, + 114, + 302, + 169 + ], + "type": "text", + "content": "Table 4: Comparison with relevant methods. Our approach also outperforms gSDF, AlignSDF & DDFHO (trained in the same setting as ours) in zero-shot generalization to MOW across most metrics." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 138, + 179, + 299, + 255 + ], + "lines": [ + { + "bbox": [ + 138, + 179, + 299, + 255 + ], + "spans": [ + { + "bbox": [ + 138, + 179, + 299, + 255 + ], + "type": "table", + "html": "
<table><tr><th>Method</th><th>F@5 ↑</th><th>F@10 ↑</th><th>CD ↓</th></tr>
<tr><td>AC-SDF [73]</td><td>0.108</td><td>0.199</td><td>7.82</td></tr>
<tr><td>AlignSDF [10]</td><td>0.099</td><td>0.182</td><td>8.30</td></tr>
<tr><td>gSDF [9]</td><td>0.107</td><td>0.197</td><td>7.50</td></tr>
<tr><td>DDFHO [77]</td><td>0.094</td><td>0.166</td><td>3.06</td></tr>
<tr><td>HORSE (Ours)</td><td>0.121</td><td>0.220</td><td>6.76</td></tr></table>
", + "image_path": "1b7912bcda3c10953f81c96ac09f552ab653a663e098e895f1615335e4d7c611.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 318, + 179, + 478, + 255 + ], + "blocks": [ + { + "bbox": [ + 311, + 114, + 482, + 170 + ], + "lines": [ + { + "bbox": [ + 311, + 114, + 482, + 170 + ], + "spans": [ + { + "bbox": [ + 311, + 114, + 482, + 170 + ], + "type": "text", + "content": "Table 5: 3D vs. 2D input to discriminator. Training with 3D inputs (at different resolutions) perform worse, likely due to coarse sampling resulting in very few points inside the object." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 179, + 478, + 255 + ], + "lines": [ + { + "bbox": [ + 318, + 179, + 478, + 255 + ], + "spans": [ + { + "bbox": [ + 318, + 179, + 478, + 255 + ], + "type": "table", + "html": "
<table><tr><th>Disc. input</th><th>F@5 ↑</th><th>F@10 ↑</th><th>CD ↓</th></tr>
<tr><td>No disc.</td><td>0.117</td><td>0.216</td><td>6.93</td></tr>
<tr><td>10 × 10 × 10</td><td>0.120</td><td>0.218</td><td>7.29</td></tr>
<tr><td>16 × 16 × 16</td><td>0.115</td><td>0.209</td><td>7.79</td></tr>
<tr><td>32 × 32 × 32</td><td>0.104</td><td>0.191</td><td>7.83</td></tr>
<tr><td>2D slices</td><td>0.121</td><td>0.220</td><td>6.76</td></tr></table>
", + "image_path": "661bee211c1801b3f0a2556bf7052bdff7a452a5bd36e352b1fe24722f3adf16.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 277, + 482, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 277, + 482, + 339 + ], + "spans": [ + { + "bbox": [ + 130, + 277, + 482, + 339 + ], + "type": "text", + "content": "AlignSDF [10], gSDF [9], DDFHO [77]. Note that the VISOR dataset cannot be used for training since it does not have 3D supervision. For the view generalization setting on HO3D, we also compare with HO [22] & GF [31] trained with 3D ground truth on ObMan + HO3D. Recent works [44,70] on unsupervised reconstruction of objects require several views or depth, which are not available in our setting." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 131, + 357, + 198, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 357, + 198, + 369 + ], + "spans": [ + { + "bbox": [ + 131, + 357, + 198, + 369 + ], + "type": "text", + "content": "4.2 Results" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 378, + 482, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 378, + 482, + 544 + ], + "spans": [ + { + "bbox": [ + 130, + 378, + 482, + 544 + ], + "type": "text", + "content": "Object generalization in the wild: We first examine if the auxiliary supervision from visual hull and shape prior is useful for generalization to novel objects in the wild. We evaluate on MOW in Tab. 1 and compare with AC-OCC & AC-SDF trained on different combinations of ObMan, HO3D, HOI4D datasets with 3D supervision. Our approach provides gains of " + }, + { + "bbox": [ + 130, + 378, + 482, + 544 + ], + "type": "inline_equation", + "content": "24.3\\%" + }, + { + "bbox": [ + 130, + 378, + 482, + 544 + ], + "type": "text", + "content": " compared to AC-OCC (trained on ObMan) and " + }, + { + "bbox": [ + 130, + 378, + 482, + 544 + ], + "type": "inline_equation", + "content": "11.6\\%" + }, + { + "bbox": [ + 130, + 378, + 482, + 544 + ], + "type": "text", + "content": " on AC-SDF (trained on ObMan). This shows the benefits of our supervision cues in the wild over training on just large scale synthetic data with 3D supervision. We also outperform AC-SDF trained on ObMan + HO3D + HOI4D with full 3D supervision by " + }, + { + "bbox": [ + 130, + 378, + 482, + 544 + ], + "type": "inline_equation", + "content": "16.8\\%" + }, + { + "bbox": [ + 130, + 378, + 482, + 544 + ], + "type": "text", + "content": " across all metrics. This indicates that our supervision cues from in-the-wild VISOR are better than using 3D supervision on lab datasets with limited diversity in objects. We also outperform relevant methods that use different forms of SDF representations, e.g. AlignSDF, gSDF & DDFHO across most metrics (Tab. 4). Note that our contributions are orthogonal and could be combined with these works." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 545, + 482, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 545, + 482, + 605 + ], + "spans": [ + { + "bbox": [ + 130, + 545, + 482, + 605 + ], + "type": "text", + "content": "Adding 3D supervision to AC-SDF. In Tab. 1, we observe that adding more data from HO3D & HOI4D to AC-SDF training did not help in zero-shot generalization to MOW. Instead, the performance drops compared to AC-SDF trained on ObMan. 
This is likely due to the limited diversity in HO3D (10 YCB objects) & HOI4D (7 rigid object categories) and the model overfitting to these categories." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 605, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 482, + 665 + ], + "type": "text", + "content": "Object generalization on HO3D: Our approach is better than AC-OCC & AC-SDF trained on different datasets with 3D supervision (Tab. 2). This further shows the benefits of auxiliary supervision from VISOR for object generalization. Also, AC-SDF does not benefit from MOW & HOI4D. This could be because HO3D evaluates on 10 objects only and they may not be present in MOW or HOI4D." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 138, + 90, + 448, + 103 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 90, + 448, + 103 + ], + "spans": [ + { + "bbox": [ + 138, + 90, + 448, + 103 + ], + "type": "text", + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 179, + 294, + 234 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 294, + 170 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 294, + 170 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 294, + 170 + ], + "type": "text", + "content": "Table 6: Supervision quality on HO3D. Automated filtering to remove incorrect hand poses improves results, & using ground-truth hand pose differs little from using predicted pose." + }, + { + "bbox": [ + 132, + 114, + 294, + 170 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 179, + 294, + 234 + ], + "lines": [ + { + "bbox": [ + 135, + 179, + 294, + 234 + ], + "spans": [ + { + "bbox": [ + 135, + 179, + 294, + 234 + ], + "type": "table", + "html": "
<table><tr><th></th><th>F@5 ↑</th><th>F@10 ↑</th><th>CD ↓</th></tr>
<tr><td>HORSE (base setting)</td><td>0.234</td><td>0.434</td><td>1.41</td></tr>
<tr><td>no training on HO3D</td><td>0.175</td><td>0.329</td><td>3.72</td></tr>
<tr><td>w/o filtering</td><td>0.213</td><td>0.405</td><td>1.42</td></tr>
<tr><td>w/ ground truth pose<sup>1</sup></td><td>0.243</td><td>0.444</td><td>1.39</td></tr></table>
", + "image_path": "73f7d19b0bd2db2728c83e02c1580ccd71dee205db107ee6a23f1a600edc463d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 304, + 179, + 481, + 234 + ], + "blocks": [ + { + "bbox": [ + 301, + 114, + 482, + 170 + ], + "lines": [ + { + "bbox": [ + 301, + 114, + 482, + 170 + ], + "spans": [ + { + "bbox": [ + 301, + 114, + 482, + 170 + ], + "type": "text", + "content": "Table 7: Role of different loss functions. We report F-score at " + }, + { + "bbox": [ + 301, + 114, + 482, + 170 + ], + "type": "inline_equation", + "content": "5\\mathrm{mm}" + }, + { + "bbox": [ + 301, + 114, + 482, + 170 + ], + "type": "text", + "content": " & " + }, + { + "bbox": [ + 301, + 114, + 482, + 170 + ], + "type": "inline_equation", + "content": "10\\mathrm{mm}" + }, + { + "bbox": [ + 301, + 114, + 482, + 170 + ], + "type": "text", + "content": ", Chamfer distance (CD, mm) for different variants of our model on MOW. All losses are effective & multiview supervision leads to largest gain." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 304, + 179, + 481, + 234 + ], + "lines": [ + { + "bbox": [ + 304, + 179, + 481, + 234 + ], + "spans": [ + { + "bbox": [ + 304, + 179, + 481, + 234 + ], + "type": "table", + "html": "
<table><tr><th>\( \mathcal{L}_{\text{ObMan}} \)</th><th>\( \mathcal{L}_{\text{visual-hull}} \)</th><th>\( \mathcal{L}_{\text{consistency}} \)</th><th>\( \mathcal{L}_{\text{shape-prior}} \)</th><th>\( \mathbf{F@5} \uparrow \)</th><th>\( \mathbf{F@10} \uparrow \)</th><th>\( \mathbf{CD} \downarrow \)</th></tr>
<tr><td></td><td></td><td></td><td></td><td>0.095</td><td>0.181</td><td>8.69</td></tr>
<tr><td></td><td></td><td></td><td></td><td>0.111</td><td>0.205</td><td>7.26</td></tr>
<tr><td></td><td></td><td></td><td></td><td>0.073</td><td>0.132</td><td>12.75</td></tr>
<tr><td></td><td></td><td></td><td></td><td>0.097</td><td>0.175</td><td>10.29</td></tr>
<tr><td></td><td></td><td></td><td></td><td>0.117</td><td>0.216</td><td>6.93</td></tr>
<tr><td></td><td></td><td></td><td></td><td>0.121</td><td>0.220</td><td>6.76</td></tr></table>
", + "image_path": "72745c9e5bce0aaea5b49116274234e7132c4a7efd1fcc08bd14e5427a3bf51d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 256, + 482, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 256, + 482, + 316 + ], + "spans": [ + { + "bbox": [ + 130, + 256, + 482, + 316 + ], + "type": "text", + "content": "Occupancy vs SDF. We see that SDF formulation is better than occupancy when trained with full 3D supervision (AC-OCC vs. AC-SDF). In contrast, we find SDF training to be unstable (does not give meaningful predictions) with auxiliary supervision. This could be because regressing continuous SDF values with weak supervision is harder than binary classification for occupancy values." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 316, + 482, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 316, + 482, + 389 + ], + "spans": [ + { + "bbox": [ + 130, + 316, + 482, + 389 + ], + "type": "text", + "content": "View generalization results on HO3D. In Tab. 3, we see gains with using supervision cues over just training on synthetic data, consistent with trends in the object generalization setting. We also outperform HO [22] & GF [31], both trained on HO3D using full 3D supervision. We outperform these methods even without any images from HO3D (last row in Tab. 1 vs. GF & HO in Table 3), likely due to use of more expressive pixel-aligned & hand articulation features." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 404, + 238, + 417 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 404, + 238, + 417 + ], + "spans": [ + { + "bbox": [ + 132, + 404, + 238, + 417 + ], + "type": "text", + "content": "4.3 Ablation Study" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 423, + 482, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 423, + 482, + 507 + ], + "spans": [ + { + "bbox": [ + 130, + 423, + 482, + 507 + ], + "type": "text", + "content": "Analysis of supervision quality. We also observe in Tab. 3 that our method is able to bridge more than " + }, + { + "bbox": [ + 130, + 423, + 482, + 507 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 130, + 423, + 482, + 507 + ], + "type": "text", + "content": " of the gap between no training on HO3D to training with full 3D supervision. We further use the view generalization setting to assess the quality of 2D object mask supervision used in our method in Tab. 6. Our automated filtering of frames with inaccurate hand poses (as described in Sec. 3.5) is crucial for good performance. Also, little is lost from using hand pose as a proxy for object pose on the HO3D dataset." + }, + { + "bbox": [ + 130, + 423, + 482, + 507 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "spans": [ + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "text", + "content": "Role of different loss terms: We experiment with multiple variants of our model to assess the importance of different loss terms. 
We start with the AC-OCC model trained on ObMan and gradually add " + }, + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{visual - hull}}" + }, + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{consistency}}" + }, + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{shape - prior}}" + }, + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "text", + "content": ". From the results in Tab. 7, we observe that " + }, + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{visual - hull}}" + }, + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "text", + "content": " is more effective than " + }, + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{consistency}}" + }, + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "text", + "content": " and using them together provides further benefits. Moreover, " + }, + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{shape - prior}}" + }, + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "text", + "content": " improves performance on top of " + }, + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{consistency}}" + }, + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{visual - hull}}" + }, + { + "bbox": [ + 131, + 507, + 482, + 579 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 131, + 579, + 481, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 579, + 481, + 603 + ], + "spans": [ + { + "bbox": [ + 131, + 579, + 481, + 603 + ], + "type": "text", + "content": "3D vs 2D input to discriminator: We also consider 3D volumes as input to the discriminator (instead of 2D cross-sections). For this, we need to sample " + }, + { + "bbox": [ + 131, + 579, + 481, + 603 + ], + "type": "inline_equation", + "content": "64 \\times 64 \\times 64" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "A. Prakash et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 133, + 609, + 482, + 665 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 609, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 133, + 609, + 482, + 665 + ], + "type": "text", + "content": "1 While [73] uses similar contrast between predicted vs. 
ground truth hands to make claims, we note that those claims & this result should be taken with a grain of salt. FrankMocap is trained on HO3D, so its predictions on HO3D are better than they would be on unseen data. As most of our models are trained on VISOR (not used for training FrankMocap), our other experiments do not suffer from this issue." + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 179, + 480, + 232 + ], + "blocks": [ + { + "bbox": [ + 131, + 114, + 482, + 169 + ], + "lines": [ + { + "bbox": [ + 131, + 114, + 482, + 169 + ], + "spans": [ + { + "bbox": [ + 131, + 114, + 482, + 169 + ], + "type": "text", + "content": "Table 8: Design choices for mask guided sampling. Uniformly sampling coordinates is much worse than the rejection sampling used in our method. Using negative points from hand masks is useful. Table 9: Sampling method for 2D planes. Sampling planes through the origin of the hand coordinate system & rotated randomly performs the best compared to sampling axis-aligned planes either uniformly or through the origin." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 179, + 480, + 232 + ], + "lines": [ + { + "bbox": [ + 135, + 179, + 480, + 232 + ], + "spans": [ + { + "bbox": [ + 135, + 179, + 480, + 232 + ], + "type": "table", + "html": "
| Sampling method | F@5 ↑ | F@10 ↑ | CD ↓ | Sampling method | F@5 ↑ | F@10 ↑ | CD ↓ |
| Uniform | 0.093 | 0.166 | 10.29 | Uniform (axis-aligned) | 0.115 | 0.208 | 7.01 |
| Ours (no hand points) | 0.113 | 0.207 | 7.69 | Origin (axis-aligned) | 0.098 | 0.183 | 8.52 |
| Ours | 0.117 | 0.216 | 6.93 | Origin (random rotation) | 0.121 | 0.220 | 6.76 |
", + "image_path": "c6fa4145aeb7f762a920526b80ea316af83a43807c3817adf4f637e652409420.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 255, + 482, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 255, + 482, + 339 + ], + "spans": [ + { + "bbox": [ + 130, + 255, + 482, + 339 + ], + "type": "inline_equation", + "content": "(=262144)" + }, + { + "bbox": [ + 130, + 255, + 482, + 339 + ], + "type": "text", + "content": " points & run several forward passes of our model to get occupancies. Since this is computationally expensive, we sample points at coarser resolutions: " + }, + { + "bbox": [ + 130, + 255, + 482, + 339 + ], + "type": "inline_equation", + "content": "32 \\times 32 \\times 32" + }, + { + "bbox": [ + 130, + 255, + 482, + 339 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 255, + 482, + 339 + ], + "type": "inline_equation", + "content": "16 \\times 16 \\times 16" + }, + { + "bbox": [ + 130, + 255, + 482, + 339 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 255, + 482, + 339 + ], + "type": "inline_equation", + "content": "10 \\times 10 \\times 10" + }, + { + "bbox": [ + 130, + 255, + 482, + 339 + ], + "type": "text", + "content": ". We use " + }, + { + "bbox": [ + 130, + 255, + 482, + 339 + ], + "type": "inline_equation", + "content": "32 \\times 32" + }, + { + "bbox": [ + 130, + 255, + 482, + 339 + ], + "type": "text", + "content": " size 2D slices, so " + }, + { + "bbox": [ + 130, + 255, + 482, + 339 + ], + "type": "inline_equation", + "content": "10 \\times 10 \\times 10" + }, + { + "bbox": [ + 130, + 255, + 482, + 339 + ], + "type": "text", + "content": " 3D volume has no. of points & takes similar compute. We see that 2D slices perform better than 3D volumes (Tab. 5). Also, the performance gets worse with increase in the sampled 3D volume, likely due to 3D sampling being so coarse that very few points lie inside the object, thus unable to capture fine-grained shape." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 340, + 482, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 340, + 482, + 388 + ], + "spans": [ + { + "bbox": [ + 130, + 340, + 482, + 388 + ], + "type": "text", + "content": "Sampling 2D slices for discriminator: We ablate different design choices (Sec. 3.3) in Tab. 9. We observe that sampling 2D planes through origin of the hand coordinate system and rotated randomly performs the best compared to sampling axis-aligned frames either uniformly or through origin." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 388, + 483, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 388, + 483, + 437 + ], + "spans": [ + { + "bbox": [ + 130, + 388, + 483, + 437 + ], + "type": "text", + "content": "Design choices for mask guided sampling: We run rejection sampling (with hand & object masks) to sample points in the hand coordinate frame (Sec. 3.2). We compare with 2 variants: uniformly sampling in the hand frame & removing negative points from hand masks. We find our strategy to work the best (Tab. 8)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 456, + 231, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 456, + 231, + 467 + ], + "spans": [ + { + "bbox": [ + 132, + 456, + 231, + 467 + ], + "type": "text", + "content": "4.4 Visualizations" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 477, + 482, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 477, + 482, + 574 + ], + "spans": [ + { + "bbox": [ + 130, + 477, + 482, + 574 + ], + "type": "text", + "content": "We compare the mesh generated by our model and AC-SDF (trained on ObManbest baseline) on zero-shot generalization to MOW (Fig. 5) and Core50 [35](Fig. 6). For this, we sample points uniformly in a " + }, + { + "bbox": [ + 130, + 477, + 482, + 574 + ], + "type": "inline_equation", + "content": "64 \\times 64 \\times 64" + }, + { + "bbox": [ + 130, + 477, + 482, + 574 + ], + "type": "text", + "content": " volume, predict their occupancies or SDF from the network and run marching cubes [36]. We project the mesh into the input image & render it in different views. Our model captures the visual hull of the object, as evidenced by the projection of the mesh onto the image, and generates more coherent shapes than AC-SDF, which often reconstructs disconnected and scattered shapes. More visualizations are in supplementary." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 593, + 218, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 593, + 218, + 604 + ], + "spans": [ + { + "bbox": [ + 132, + 593, + 218, + 604 + ], + "type": "text", + "content": "4.5 Limitations" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 615, + 482, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 615, + 482, + 664 + ], + "spans": [ + { + "bbox": [ + 130, + 615, + 482, + 664 + ], + "type": "text", + "content": "Inaccurate hand pose. We use predictions from FrankMocap for hand pose & camera parameters. Note that the sampled points do not cover the entire object if the hand pose is not accurate, due to mis-projection into the image plane. This leads to exclusion of points in certain parts of the object (Fig. 7)." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 138, + 90, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 90, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 138, + 90, + 447, + 102 + ], + "type": "text", + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 135, + 112, + 470, + 235 + ], + "blocks": [ + { + "bbox": [ + 135, + 112, + 470, + 235 + ], + "lines": [ + { + "bbox": [ + 135, + 112, + 470, + 235 + ], + "spans": [ + { + "bbox": [ + 135, + 112, + 470, + 235 + ], + "type": "image", + "image_path": "6f78e95ca1118a8a3c2918210888dd77d68864ee441af6a5fb86944ecb14e04c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 247, + 482, + 314 + ], + "lines": [ + { + "bbox": [ + 130, + 247, + 482, + 314 + ], + "spans": [ + { + "bbox": [ + 130, + 247, + 482, + 314 + ], + "type": "text", + "content": "Fig. 5: Visualizations on MOw object generalization split. We show the object mesh projected onto the image and rendered in different views for our HORSE model and compare with the AC-SDF model trained on ObMan dataset with 3D supervision (best baseline model). We also show the ground truth (GT) object model. We observe that our model is able to predict the object shape more accurately than AC-SDF which often reconstructs smaller and disconnected shapes." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 135, + 321, + 473, + 403 + ], + "blocks": [ + { + "bbox": [ + 135, + 321, + 473, + 403 + ], + "lines": [ + { + "bbox": [ + 135, + 321, + 473, + 403 + ], + "spans": [ + { + "bbox": [ + 135, + 321, + 473, + 403 + ], + "type": "image", + "image_path": "5df372738649518855eb9b6a94fe58137623174e0e61ef106576d93a87d714ce.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 414, + 483, + 449 + ], + "lines": [ + { + "bbox": [ + 130, + 414, + 483, + 449 + ], + "spans": [ + { + "bbox": [ + 130, + 414, + 483, + 449 + ], + "type": "text", + "content": "Fig. 6: Visualizations on zero-shot generalization to Core50 [35]. We show the object mesh projected onto the image and rendered in different views on Core50. HORSE predicts better shapes than AC-SDF (best baseline, often leads to artifacts)." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 470, + 317, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 470, + 317, + 544 + ], + "spans": [ + { + "bbox": [ + 130, + 470, + 317, + 544 + ], + "type": "text", + "content": "Limited object views. Videos in the wild often do not capture " + }, + { + "bbox": [ + 130, + 470, + 317, + 544 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 130, + 470, + 317, + 544 + ], + "type": "text", + "content": " view of the object, e.g. kettle in Fig. 7. 
This is different from lab settings, where the interactions are often constrained & a multi-camera setup is used to capture all sides of the object." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 323, + 472, + 482, + 525 + ], + "blocks": [ + { + "bbox": [ + 323, + 472, + 482, + 525 + ], + "lines": [ + { + "bbox": [ + 323, + 472, + 482, + 525 + ], + "spans": [ + { + "bbox": [ + 323, + 472, + 482, + 525 + ], + "type": "image", + "image_path": "5b117a7c1b8e95b9cd03dadb80623c76b33c672e70e7b0d8c3ca86cb97068fb7.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 322, + 532, + 482, + 555 + ], + "lines": [ + { + "bbox": [ + 322, + 532, + 482, + 555 + ], + "spans": [ + { + "bbox": [ + 322, + 532, + 482, + 555 + ], + "type": "text", + "content": "Fig. 7: Sampled points do not cover the entire object if the hand pose is inaccurate." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 558, + 220, + 571 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 558, + 220, + 571 + ], + "spans": [ + { + "bbox": [ + 132, + 558, + 220, + 571 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 581, + 482, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 581, + 482, + 667 + ], + "spans": [ + { + "bbox": [ + 130, + 581, + 482, + 667 + ], + "type": "text", + "content": "We present an approach for reconstructing hand-held objects in 3D from a single image. We propose modules to extract supervision from in-the-wild videos & learn data-driven 3D shape priors from synthetic ObMan to circumvent the need for direct 3D supervision. Experiments show that our approach generalizes better to novel objects in the wild than baselines trained using 3D supervision. Future directions include jointly optimizing the hand pose with the object shape to deal with inaccurate hand poses, or incorporating additional cues, e.g. contact priors." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "A. Prakash et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 189 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 189 + ], + "type": "text", + "content": "Acknowledgements: We thank Ashish Kumar, Erin Zhang, Arjun Gupta, Shaowei Liu, Anand Bhattachad, Pranay Thangeda & Kashyap Chitta for feedback on the draft. This material is based upon work supported by NSF (IIS2007035), NASA (80NSSC21K1030), DARPA (Machine Common Sense program), an Amazon Research Award, an NVIDIA Academic Hardware Grant, and the NCSA Delta System (supported by NSF OCI 2005572 and the State of Illinois)."
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 208, + 199, + 220 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 208, + 199, + 220 + ], + "spans": [ + { + "bbox": [ + 132, + 208, + 199, + 220 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 134, + 233, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 138, + 233, + 481, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 233, + 481, + 257 + ], + "spans": [ + { + "bbox": [ + 138, + 233, + 481, + 257 + ], + "type": "text", + "content": "1. Bahat, Y., Shakhnarovich, G.: Confidence from invariance to image transformations. arXiv (2018)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 257, + 481, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 257, + 481, + 290 + ], + "spans": [ + { + "bbox": [ + 138, + 257, + 481, + 290 + ], + "type": "text", + "content": "2. Brahmbhatt, S., Ham, C., Kemp, C.C., Hays, J.: Contactdb: Analyzing and predicting grasp contact via thermal imaging. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 290, + 481, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 290, + 481, + 323 + ], + "spans": [ + { + "bbox": [ + 138, + 290, + 481, + 323 + ], + "type": "text", + "content": "3. Brahmbhatt, S., Tang, C., Twigg, C.D., Kemp, C.C., Hays, J.: Contactpose: A dataset of grasps with object contact and hand pose. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 323, + 481, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 323, + 481, + 346 + ], + "spans": [ + { + "bbox": [ + 138, + 323, + 481, + 346 + ], + "type": "text", + "content": "4. Buckingham, G.: Hand tracking for immersive virtual reality: Opportunities and challenges. Frontiers in Virtual Reality (2021)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 346, + 481, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 346, + 481, + 378 + ], + "spans": [ + { + "bbox": [ + 138, + 346, + 481, + 378 + ], + "type": "text", + "content": "5. Cao, Z., Radosavovic, I., Kanazawa, A., Malik, J.: Reconstructing hand-object interactions in the wild. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 379, + 481, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 379, + 481, + 411 + ], + "spans": [ + { + "bbox": [ + 138, + 379, + 481, + 411 + ], + "type": "text", + "content": "6. Chang, A.X., Funkhouser, T.A., Guibas, L.J., Hanrahan, P., Huang, Q., Li, Z., Savarese, S., Savva, M., Song, S., Su, H., Xiao, J., Yi, L., Yu, F.: Shapenet: An information-rich 3D model repository. ArXiv (2015)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 412, + 481, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 412, + 481, + 445 + ], + "spans": [ + { + "bbox": [ + 138, + 412, + 481, + 445 + ], + "type": "text", + "content": "7. Chang, M., Prakash, A., Gupta, S.: Look ma, no hands! agent-environment factorization of egocentric videos. 
In: Advances in Neural Information Processing Systems (NeurIPS) (2023)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 445, + 481, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 445, + 481, + 489 + ], + "spans": [ + { + "bbox": [ + 138, + 445, + 481, + 489 + ], + "type": "text", + "content": "8. Chao, Y., Yang, W., Xiang, Y., Molchanov, P., Handa, A., Tremblay, J., Narang, Y.S., Wyk, K.V., Iqbal, U., Birchfield, S., Kautz, J., Fox, D.: DexYCB: A benchmark for capturing hand grasping of objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 489, + 481, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 489, + 481, + 521 + ], + "spans": [ + { + "bbox": [ + 138, + 489, + 481, + 521 + ], + "type": "text", + "content": "9. Chen, Z., Chen, S., Schmid, C., Laptev, I.: gSDF: Geometry-driven signed distance functions for 3d hand-object reconstruction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 522, + 481, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 522, + 481, + 555 + ], + "spans": [ + { + "bbox": [ + 138, + 522, + 481, + 555 + ], + "type": "text", + "content": "10. Chen, Z., Hasson, Y., Schmid, C., Laptev, I.: AlignSDF: Pose-aligned signed distance fields for hand-object reconstruction. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 555, + 481, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 555, + 481, + 588 + ], + "spans": [ + { + "bbox": [ + 138, + 555, + 481, + 588 + ], + "type": "text", + "content": "11. Choi, H., Chavan-Dafle, N., Yuan, J., Isler, V., Park, H.: HandNeRF: Learning to reconstruct hand-object interaction scene from a single rgb image. In: International Conference on Robotics and Automation (2024)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 134, + 588, + 481, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 588, + 481, + 632 + ], + "spans": [ + { + "bbox": [ + 134, + 588, + 481, + 632 + ], + "type": "text", + "content": "12. Damen, D., Doughty, H., Farinella, G.M., Fidler, S., Furnari, A., Kazakos, E., Moltisanti, D., Munro, J., Perrett, T., Price, W., Wray, M.: Scaling egocentric vision: The epic-kitchens dataset. Proceedings of the European Conference on Computer Vision (ECCV) (2018)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 134, + 632, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 632, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 134, + 632, + 481, + 665 + ], + "type": "text", + "content": "13. Darkhalil, A., Shan, D., Zhu, B., Ma, J., Kar, A., Higgins, R., Fidler, S., Fouhey, D., Damen, D.: Epic-kitchens visor benchmark: Video segmentations and object relations.
In: NeurIPS Track on Datasets and Benchmarks (2022)" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 133, + 116, + 481, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 116, + 481, + 149 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 481, + 149 + ], + "type": "text", + "content": "14. Fan, Z., Parelli, M., Kadoglou, M.E., Kocabas, M., Chen, X., Black, M.J., Hilliges, O.: Hold: Category-agnostic 3d reconstruction of interacting hands and objects from video. arXiv preprint arXiv:2311.18448 (2023)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 150, + 481, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 150, + 481, + 184 + ], + "spans": [ + { + "bbox": [ + 132, + 150, + 481, + 184 + ], + "type": "text", + "content": "15. Garcia-Hernando, G., Yuan, S., Baek, S., Kim, T.: First-person hand action benchmark with RGB-D videos and 3d hand pose annotations. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 185, + 481, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 185, + 481, + 217 + ], + "spans": [ + { + "bbox": [ + 132, + 185, + 481, + 217 + ], + "type": "text", + "content": "16. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial nets. In: Advances in Neural Information Processing Systems (NeurIPS) (2014)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 218, + 481, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 218, + 481, + 262 + ], + "spans": [ + { + "bbox": [ + 132, + 218, + 481, + 262 + ], + "type": "text", + "content": "17. Grauman, K., Westbury, A., Byrne, E., Chavis, Z., Furnari, A., Girdhar, R., Hamburger, J., Jiang, H., Liu, M., Liu, X., et al.: Ego4d: Around the world in 3,000 hours of egocentric video. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 263, + 481, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 263, + 481, + 297 + ], + "spans": [ + { + "bbox": [ + 132, + 263, + 481, + 297 + ], + "type": "text", + "content": "18. Hampali, S., Rad, M., Oberweger, M., Lepetit, V.: Honnotate: A method for 3d annotation of hand and object poses. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 297, + 481, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 297, + 481, + 341 + ], + "spans": [ + { + "bbox": [ + 132, + 297, + 481, + 341 + ], + "type": "text", + "content": "19. Hampali, S., Sarkar, S.D., Rad, M., Lepetit, V.: Keypoint transformer: Solving joint identification in challenging hands and object interactions for accurate 3d pose estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 341, + 481, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 341, + 481, + 385 + ], + "spans": [ + { + "bbox": [ + 132, + 341, + 481, + 385 + ], + "type": "text", + "content": "20. Han, S., Liu, B., Cabezas, R., Twigg, C.D., Zhang, P., Petkau, J., Yu, T., Tai, C., Akbay, M., Wang, Z., Nitzan, A., Dong, G., Ye, Y., Tao, L., Wan, C., Wang, R.: Megatrack: monochrome egocentric articulated hand-tracking for virtual reality. ACM Transactions on Graphics (TOG) (2020)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 386, + 481, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 386, + 481, + 430 + ], + "spans": [ + { + "bbox": [ + 132, + 386, + 481, + 430 + ], + "type": "text", + "content": "21. Hasson, Y., Tekin, B., Bogo, F., Laptev, I., Pollefeys, M., Schmid, C.: Leveraging photometric consistency over time for sparsely supervised hand-object reconstruction. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 431, + 481, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 431, + 481, + 474 + ], + "spans": [ + { + "bbox": [ + 132, + 431, + 481, + 474 + ], + "type": "text", + "content": "22. Hasson, Y., Varol, G., Tzionas, D., Kalevatykh, I., Black, M.J., Laptev, I., Schmid, C.: Learning joint reconstruction of hands and manipulated objects. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 475, + 481, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 475, + 481, + 508 + ], + "spans": [ + { + "bbox": [ + 132, + 475, + 481, + 508 + ], + "type": "text", + "content": "23. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2016)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 509, + 481, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 509, + 481, + 553 + ], + "spans": [ + { + "bbox": [ + 132, + 509, + 481, + 553 + ], + "type": "text", + "content": "24. Heppert, N., Irshad, M.Z., Zakharov, S., Liu, K., Ambrus, R.A., Bohg, J., Valada, A., Kollar, T.: CARTO: category and joint agnostic reconstruction of articulated objects. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 554, + 481, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 554, + 481, + 586 + ], + "spans": [ + { + "bbox": [ + 132, + 554, + 481, + 586 + ], + "type": "text", + "content": "25. Huang, D., Ji, X., He, X., Sun, J., He, T., Shuai, Q., Ouyang, W., Zhou, X.: Reconstructing hand-held objects from monocular video. In: ACM Transactions on Graphics (2022)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 587, + 481, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 587, + 481, + 620 + ], + "spans": [ + { + "bbox": [ + 132, + 587, + 481, + 620 + ], + "type": "text", + "content": "26. Irshad, M.Z., Zakharov, S., Ambrus, R., Kollar, T., Kira, Z., Gaidon, A.: Shapo: Implicit representations for multi-object shape, appearance, and pose optimization. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 621, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 621, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 621, + 481, + 665 + ], + "type": "text", + "content": "27. Irshad, M.Z., Zakharov, S., Liu, K., Guizilini, V., Kollar, T., Gaidon, A., Kira, Z., Ambrus, R.: Neo 360: Neural fields for sparse view synthesis of outdoor scenes. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2023)" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "A. Prakash et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 117, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 132, + 117, + 480, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 117, + 480, + 149 + ], + "spans": [ + { + "bbox": [ + 132, + 117, + 480, + 149 + ], + "type": "text", + "content": "28. Jiang, H., Liu, S., Wang, J., Wang, X.: Hand-object contact consistency reasoning for human grasps generation. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 150, + 480, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 150, + 480, + 182 + ], + "spans": [ + { + "bbox": [ + 132, + 150, + 480, + 182 + ], + "type": "text", + "content": "29. Kanazawa, A., Tulsiani, S., Efros, A.A., Malik, J.: Learning category-specific mesh reconstruction from image collections. 
In: Proceedings of the European Conference on Computer Vision (ECCV) (2018)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 182, + 480, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 182, + 480, + 214 + ], + "spans": [ + { + "bbox": [ + 132, + 182, + 480, + 214 + ], + "type": "text", + "content": "30. Kar, A., Tulsiani, S., Carreira, J., Malik, J.: Category-specific object reconstruction from a single image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2015)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 214, + 480, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 214, + 480, + 247 + ], + "spans": [ + { + "bbox": [ + 132, + 214, + 480, + 247 + ], + "type": "text", + "content": "31. Karunratanakul, K., Yang, J., Zhang, Y., Black, M.J., Muandet, K., Tang, S.: Grasping field: Learning implicit representations for human grasps. In: Proceedings of the International Conference on 3D Vision (3DV) (2020)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 247, + 480, + 279 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 247, + 480, + 279 + ], + "spans": [ + { + "bbox": [ + 132, + 247, + 480, + 279 + ], + "type": "text", + "content": "32. Kwon, T., Tekin, B., Stühmer, J., Bogo, F., Pollefeys, M.: H2O: two hands manipulating objects for first person interaction recognition. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 279, + 481, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 279, + 481, + 311 + ], + "spans": [ + { + "bbox": [ + 132, + 279, + 481, + 311 + ], + "type": "text", + "content": "33. Laurentini, A.: The visual hull concept for silhouette-based image understanding. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI) 16, 150-162 (1994)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 311, + 480, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 311, + 480, + 354 + ], + "spans": [ + { + "bbox": [ + 132, + 311, + 480, + 354 + ], + "type": "text", + "content": "34. Liu, Y., Liu, Y., Jiang, C., Lyu, K., Wan, W., Shen, H., Liang, B., Fu, Z., Wang, H., Yi, L.: HOI4D: A 4d egocentric dataset for category-level human-object interaction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 354, + 480, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 354, + 480, + 386 + ], + "spans": [ + { + "bbox": [ + 132, + 354, + 480, + 386 + ], + "type": "text", + "content": "35. Lomonaco, V., Maltoni, D.: Core50: a new dataset and benchmark for continuous object recognition. In: Proceedings of the Conference on Robot Learning (CoRL) (2017)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 386, + 480, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 386, + 480, + 407 + ], + "spans": [ + { + "bbox": [ + 132, + 386, + 480, + 407 + ], + "type": "text", + "content": "36. Lorensen, W.E., Cline, H.E.: Marching cubes: A high resolution 3D surface construction algorithm. 
ACM Transactions on Graphics (1987)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 407, + 480, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 407, + 480, + 439 + ], + "spans": [ + { + "bbox": [ + 132, + 407, + 480, + 439 + ], + "type": "text", + "content": "37. Lunayach, M., Zakharov, S., Chen, D., Ambrus, R., Kira, Z., Irshad, M.Z.: FSD: fast self-supervised single RGB-D to categorical 3d objects. arXiv abs/2310.12974 (2023)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 439, + 480, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 439, + 480, + 472 + ], + "spans": [ + { + "bbox": [ + 132, + 439, + 480, + 472 + ], + "type": "text", + "content": "38. Ma, W.C., Yang, A.J., Wang, S., Urtasun, R., Torralba, A.: Virtual correspondence: Humans as a cue for extreme-view geometry. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 472, + 480, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 472, + 480, + 504 + ], + "spans": [ + { + "bbox": [ + 132, + 472, + 480, + 504 + ], + "type": "text", + "content": "39. Mandikal, P., Grauman, K.: Dexvip: Learning dexterous grasping with human hand pose priors from video. In: Proceedings of the Conference on Robot Learning (CoRL) (2021)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 504, + 480, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 504, + 480, + 536 + ], + "spans": [ + { + "bbox": [ + 132, + 504, + 480, + 536 + ], + "type": "text", + "content": "40. Mandikal, P., Grauman, K.: Learning dexterous grasping with object-centric visual affordances. In: Proceedings of the IEEE International Conference on Robotics and Automation (ICRA) (2021)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 536, + 480, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 536, + 480, + 568 + ], + "spans": [ + { + "bbox": [ + 132, + 536, + 480, + 568 + ], + "type": "text", + "content": "41. Mao, X., Li, Q., Xie, H., Lau, R.Y.K., Wang, Z., Smolley, S.P.: Least squares generative adversarial networks. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2017)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 568, + 480, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 568, + 480, + 590 + ], + "spans": [ + { + "bbox": [ + 132, + 568, + 480, + 590 + ], + "type": "text", + "content": "42. Matusik, W., Buehler, C., Raskar, R., Gortler, S.J., McMillan, L.: Image-based visual hulls. In: ACM Transactions on Graphics (2000)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 590, + 480, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 590, + 480, + 622 + ], + "spans": [ + { + "bbox": [ + 132, + 590, + 480, + 622 + ], + "type": "text", + "content": "43. Mescheder, L., Oechsle, M., Niemeyer, M., Nowozin, S., Geiger, A.: Occupancy networks: Learning 3d reconstruction in function space. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 622, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 622, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 622, + 480, + 665 + ], + "type": "text", + "content": "44. Niemeyer, M., Mescheder, L.M., Oechsle, M., Geiger, A.: Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 117, + 481, + 666 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 133, + 117, + 481, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 117, + 481, + 149 + ], + "spans": [ + { + "bbox": [ + 133, + 117, + 481, + 149 + ], + "type": "text", + "content": "45. Park, J.J., Florence, P., Straub, J., Newcombe, R., Lovegrove, S.: DeepSDF: Learning continuous signed distance functions for shape representation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 150, + 481, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 150, + 481, + 183 + ], + "spans": [ + { + "bbox": [ + 133, + 150, + 481, + 183 + ], + "type": "text", + "content": "46. Prakash, A., Tu, R., Chang, M., Gupta, S.: 3d hand pose estimation in everyday egocentric images. In: Proceedings of the European Conference on Computer Vision (ECCV) (2024)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 183, + 481, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 183, + 481, + 216 + ], + "spans": [ + { + "bbox": [ + 132, + 183, + 481, + 216 + ], + "type": "text", + "content": "47. Qin, Y., Su, H., Wang, X.: From one hand to multiple hands: Imitation learning for dexterous manipulation from single-camera teleoperation. Proceedings of the International Conference on Intelligent Robots and Systems (IROS) (2022)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 216, + 481, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 216, + 481, + 248 + ], + "spans": [ + { + "bbox": [ + 132, + 216, + 481, + 248 + ], + "type": "text", + "content": "48. Qin, Y., Wu, Y., Liu, S., Jiang, H., Yang, R., Fu, Y., Wang, X.: DexMV: Imitation learning for dexterous manipulation from human videos.
In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 249, + 481, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 249, + 481, + 270 + ], + "spans": [ + { + "bbox": [ + 132, + 249, + 481, + 270 + ], + "type": "text", + "content": "49. Rijpkema, H., Girard, M.: Computer animation of knowledge-based human grasping. In: Thomas, J.J. (ed.) ACM Transactions on Graphics (1991)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 271, + 481, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 271, + 481, + 293 + ], + "spans": [ + { + "bbox": [ + 132, + 271, + 481, + 293 + ], + "type": "text", + "content": "50. Rockwell, C., Fouhey, D.F.: Full-body awareness from partial observations. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 293, + 481, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 293, + 481, + 326 + ], + "spans": [ + { + "bbox": [ + 132, + 293, + 481, + 326 + ], + "type": "text", + "content": "51. Rogez, G., Khademi, M., Supancic III, J., Montiel, J.M.M., Ramanan, D.: 3d hand pose detection in egocentric rgb-d images. In: Proceedings of the European Conference on Computer Vision (ECCV) (2014)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 326, + 481, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 326, + 481, + 358 + ], + "spans": [ + { + "bbox": [ + 132, + 326, + 481, + 358 + ], + "type": "text", + "content": "52. Romero, J., Kjellström, H., Kragic, D.: Hands in action: real-time 3D reconstruction of hands in interaction with objects. In: Proceedings of the IEEE International Conference on Robotics and Automation (ICRA) (2010)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 358, + 481, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 358, + 481, + 380 + ], + "spans": [ + { + "bbox": [ + 132, + 358, + 481, + 380 + ], + "type": "text", + "content": "53. Romero, J., Tzionas, D., Black, M.J.: Embodied hands: Modeling and capturing hands and bodies together. ACM Transactions on Graphics (ToG) (2017)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 380, + 481, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 380, + 481, + 413 + ], + "spans": [ + { + "bbox": [ + 132, + 380, + 481, + 413 + ], + "type": "text", + "content": "54. Rong, Y., Shiratori, T., Joo, H.: Frankmocap: Fast monocular 3D hand and body motion capture by regression and integration. Proceedings of the IEEE International Conference on Computer Vision Workshops (ICCV Workshops) (2021)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 413, + 481, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 413, + 481, + 446 + ], + "spans": [ + { + "bbox": [ + 132, + 413, + 481, + 446 + ], + "type": "text", + "content": "55. Saito, S., Huang, Z., Natsume, R., Morishima, S., Kanazawa, A., Li, H.: Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. 
Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2019)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 447, + 481, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 447, + 481, + 468 + ], + "spans": [ + { + "bbox": [ + 132, + 447, + 481, + 468 + ], + "type": "text", + "content": "56. Schonberger, J.L., Frahm, J.M.: Structure-from-motion revisited. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2016)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 468, + 481, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 468, + 481, + 501 + ], + "spans": [ + { + "bbox": [ + 132, + 468, + 481, + 501 + ], + "type": "text", + "content": "57. Shan, D., Geng, J., Shu, M., Fouhey, D.F.: Understanding human hands in contact at internet scale. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 501, + 481, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 501, + 481, + 534 + ], + "spans": [ + { + "bbox": [ + 132, + 501, + 481, + 534 + ], + "type": "text", + "content": "58. Taheri, O., Ghorbani, N., Black, M.J., Tzionas, D.: GRAB: A dataset of whole-body human grasping of objects. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 534, + 481, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 534, + 481, + 567 + ], + "spans": [ + { + "bbox": [ + 132, + 534, + 481, + 567 + ], + "type": "text", + "content": "59. Tatarchenko, M., Richter, S.R., Ranftl, R., Li, Z., Koltun, V., Brox, T.: What do single-view 3d reconstruction networks learn? In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 567, + 481, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 567, + 481, + 600 + ], + "spans": [ + { + "bbox": [ + 132, + 567, + 481, + 600 + ], + "type": "text", + "content": "60. Truong, P., Rakotosaona, M., Manhardt, F., Tombari, F.: SPARF: neural radiance fields from sparse and noisy poses. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 601, + 481, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 601, + 481, + 632 + ], + "spans": [ + { + "bbox": [ + 132, + 601, + 481, + 632 + ], + "type": "text", + "content": "61. Tschernezki, V., Darkhalil, A., Zhu, Z., Fouhey, D., Laina, I., Larlus, D., Damen, D., Vedaldi, A.: EPIC fields: Marrying 3d geometry and video understanding. In: Advances in Neural Information Processing Systems (NeurIPS) (2023)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 132, + 632, + 481, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 632, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 132, + 632, + 481, + 666 + ], + "type": "text", + "content": "62. Tschernezki, V., Laina, I., Larlus, D., Vedaldi, A.: Neural feature fusion fields: 3d distillation of self-supervised 2d image representations. 
In: Proceedings of the International Conference on 3D Vision (3DV) (2022)" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "A. Prakash et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 480, + 666 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 132, + 116, + 480, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 480, + 149 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 480, + 149 + ], + "type": "text", + "content": "63. Tschernezki, V., Larlus, D., Vedaldi, A.: Neuraldiff: Segmenting 3d objects that move in egocentric videos. In: Proceedings of the International Conference on 3D Vision (3DV) (2021)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 150, + 480, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 150, + 480, + 183 + ], + "spans": [ + { + "bbox": [ + 132, + 150, + 480, + 183 + ], + "type": "text", + "content": "64. Tulsiani, S., Efros, A.A., Malik, J.: Multi-view consistency as supervisory signal for learning shape and pose prediction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 183, + 480, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 183, + 480, + 226 + ], + "spans": [ + { + "bbox": [ + 132, + 183, + 480, + 226 + ], + "type": "text", + "content": "65. Turpin, D., Wang, L., Heiden, E., Chen, Y., Macklin, M., Tsogkas, S., Dickinson, S.J., Garg, A.: Grasp'd: Differentiable contact-rich grasp synthesis for multi-fingered hands. In: Proceedings of the European Conference on Computer Vision (ECCV) (2022)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 227, + 480, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 227, + 480, + 259 + ], + "spans": [ + { + "bbox": [ + 132, + 227, + 480, + 259 + ], + "type": "text", + "content": "66. Tzionas, D., Gall, J.: 3d object reconstruction from hand-object interactions. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2015)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 259, + 480, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 259, + 480, + 293 + ], + "spans": [ + { + "bbox": [ + 132, + 259, + 480, + 293 + ], + "type": "text", + "content": "67. Wu, J., Zhang, C., Xue, T., Freeman, W.T., Tenenbaum, J.B.: Learning a probabilistic latent space of object shapes via 3d generative-adversarial modeling. 
In: Advances in Neural Information Processing Systems (NeurIPS) (2016)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 293, + 480, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 293, + 480, + 325 + ], + "spans": [ + { + "bbox": [ + 132, + 293, + 480, + 325 + ], + "type": "text", + "content": "68. Wu, Y., Wang, J., Wang, X.: Learning generalizable dexterous manipulation from human grasp affordance. In: Proceedings of the Conference on Robot Learning (CoRL) (2022)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 325, + 480, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 325, + 480, + 358 + ], + "spans": [ + { + "bbox": [ + 132, + 325, + 480, + 358 + ], + "type": "text", + "content": "69. Yang, L., Li, K., Zhan, X., Wu, F., Xu, A., Liu, L., Lu, C.: Oakink: A large-scale knowledge repository for understanding hand-object interaction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 358, + 480, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 358, + 480, + 392 + ], + "spans": [ + { + "bbox": [ + 132, + 358, + 480, + 392 + ], + "type": "text", + "content": "70. Yariv, L., Kasten, Y., Moran, D., Galun, M., Atzmon, M., Basri, R., Lipman, Y.: Multiview neural surface reconstruction by disentangling geometry and appearance In: Advances in Neural Information Processing Systems (NeurIPS) (2020)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 392, + 480, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 392, + 480, + 413 + ], + "spans": [ + { + "bbox": [ + 132, + 392, + 480, + 413 + ], + "type": "text", + "content": "71. Ye, J., Wang, J., Huang, B., Qin, Y., Wang, X.: Learning continuous grasping function with a dexterous hand from human demonstrations. arXiv (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 413, + 480, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 413, + 480, + 446 + ], + "spans": [ + { + "bbox": [ + 132, + 413, + 480, + 446 + ], + "type": "text", + "content": "72. Ye, Y., Gupta, A., Kitani, K., Tulsiani, S.: G-HOP: generative hand-object prior for interaction reconstruction and grasp synthesis. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2024)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 446, + 480, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 446, + 480, + 479 + ], + "spans": [ + { + "bbox": [ + 132, + 446, + 480, + 479 + ], + "type": "text", + "content": "73. Ye, Y., Gupta, A., Tulsiani, S.: What's in your hands? 3D reconstruction of generic objects in hands. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 479, + 480, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 479, + 480, + 512 + ], + "spans": [ + { + "bbox": [ + 132, + 479, + 480, + 512 + ], + "type": "text", + "content": "74. Ye, Y., Hebbar, P., Gupta, A., Tulsiani, S.: Diffusion-guided reconstruction of everyday hand-object interaction clips. 
In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2023)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 512, + 480, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 512, + 480, + 544 + ], + "spans": [ + { + "bbox": [ + 132, + 512, + 480, + 544 + ], + "type": "text", + "content": "75. Ye, Y., Tulsiani, S., Gupta, A.: Shelf-supervised mesh prediction in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 545, + 480, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 545, + 480, + 578 + ], + "spans": [ + { + "bbox": [ + 132, + 545, + 480, + 578 + ], + "type": "text", + "content": "76. Yu, A., Ye, V., Tancik, M., Kanazawa, A.: pixelnerf: Neural radiance fields from one or few images. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 578, + 480, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 578, + 480, + 611 + ], + "spans": [ + { + "bbox": [ + 132, + 578, + 480, + 611 + ], + "type": "text", + "content": "77. Zhang, C., Di, Y., Zhang, R., Zhai, G., Manhardt, F., Tombari, F., Ji, X.: DDF-HO: hand-held object reconstruction via conditional directed distance field. In: Advances in Neural Information Processing Systems (NeurIPS) (2023)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 611, + 480, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 611, + 480, + 643 + ], + "spans": [ + { + "bbox": [ + 132, + 611, + 480, + 643 + ], + "type": "text", + "content": "78. Zhou, T., Brown, M., Snavely, N., Lowe, D.G.: Unsupervised learning of depth and ego-motion from video. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2017)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 643, + 480, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 643, + 480, + 666 + ], + "spans": [ + { + "bbox": [ + 132, + 643, + 480, + 666 + ], + "type": "text", + "content": "79. Zhu, Z., Damen, D.: Get a grip: Reconstructing hand-object stable grasps in egocentric videos. 
arXiv preprint arXiv:2312.15719 (2023)" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 138, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D Reconstruction of Objects in Hands without Real World 3D Supervision" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 117, + 481, + 226 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 132, + 117, + 480, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 117, + 480, + 149 + ], + "spans": [ + { + "bbox": [ + 132, + 117, + 480, + 149 + ], + "type": "text", + "content": "80. Zimmermann, C., Brox, T.: Learning to estimate 3d hand pose from single rgb images. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2017)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 150, + 481, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 150, + 481, + 194 + ], + "spans": [ + { + "bbox": [ + 132, + 150, + 481, + 194 + ], + "type": "text", + "content": "81. Zimmermann, C., Ceylan, D., Yang, J., Russell, B.C., Argus, M.J., Brox, T.: Freihand: A dataset for markerless capture of hand pose and shape from single RGB images. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2019)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 194, + 481, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 194, + 481, + 226 + ], + "spans": [ + { + "bbox": [ + 132, + 194, + 481, + 226 + ], + "type": "text", + "content": "82. Qalli, B., Singh, A., Walsman, A., Srinivasa, S.S., Abbeel, P., Dollar, A.M.: The ycb object and model set: Towards common benchmarks for manipulation research. In: Proceedings of the International Conference on Advanced Robotics (ICAR) (2015)" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 235, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 235, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 235, + 100 + ], + "type": "text", + "content": "A. Prakash et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Single-object Tracking in Point Clouds with High Temporal Variation/c2204d64-6706-4e48-94d5-09db9f8770f0_content_list.json b/2024/3D Single-object Tracking in Point Clouds with High Temporal Variation/c2204d64-6706-4e48-94d5-09db9f8770f0_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..df15c3b46f261461b9c3864f132c5af8afd6f3ab --- /dev/null +++ b/2024/3D Single-object Tracking in Point Clouds with High Temporal Variation/c2204d64-6706-4e48-94d5-09db9f8770f0_content_list.json @@ -0,0 +1,1676 @@ +[ + { + "type": "text", + "text": "3D Single-object Tracking in Point Clouds with High Temporal Variation", + "text_level": 1, + "bbox": [ + 230, + 140, + 772, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Qiao Wu $^{1}$ , Kun Sun $^{2}$ , Pei An $^{3}$ , Mathieu Salzmann $^{4}$ , Yanning Zhang $^{1}$ , and Jiaqi Yang $^{1\\star}$", + "bbox": [ + 233, + 210, + 769, + 244 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Northwestern Polytechnical University", + "bbox": [ + 362, + 253, + 638, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ China University of Geosciences, Wuhan", + "bbox": [ + 357, + 268, + 643, + 282 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3 HuaZhong University of Science and Technology", + "bbox": [ + 331, + 282, + 669, + 296 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "4 École Polytechnique Fédérale de Lausanne qiaowu@mail.nwu.edu.cn, jqyang@nwpu.edu.cn", + "bbox": [ + 331, + 296, + 666, + 324 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. The high temporal variation of the point clouds is the key challenge of 3D single-object tracking (3D SOT). Existing approaches rely on the assumption that the shape variation of the point clouds and the motion of the objects across neighboring frames are smooth, failing to cope with high temporal variation data. In this paper, we present a novel framework for 3D SOT in point clouds with high temporal variation, called HVTrack. HVTrack proposes three novel components to tackle the challenges in the high temporal variation scenario: 1) A Relative-Pose-Aware Memory module to handle temporal point cloud shape variations; 2) a Base-Expansion Feature Cross-Attention module to deal with similar object distractions in expanded search areas; 3) a Contextual Point Guided Self-Attention module for suppressing heavy background noise. We construct a dataset with high temporal variation (KITTI-HV) by setting different frame intervals for sampling in the KITTI dataset. On the KITTI-HV with 5 frame intervals, our HVTrack surpasses the state-of-the-art tracker CXTracker by $11.3\\% / 15.7\\%$ in Success/Precision.", + "bbox": [ + 259, + 359, + 743, + 583 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: 3D single-object tracking $\\cdot$ High temporal variation $\\cdot$ Point cloud", + "bbox": [ + 259, + 595, + 740, + 625 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 648, + 375, + 666 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D single-object tracking (3D SOT) is pivotal for autonomous driving [2,40] and robotics [16, 19, 25, 43]. 
Given the target point cloud and 3D bounding box as template, the goal of 3D SOT is to regress the target 3D poses in the tracking point cloud sequence. Existing approaches [5,7,9-12,24,28,34,39,44-46] rely on the assumption that the point cloud variations and motion of the object across neighboring frames are relatively smooth. They crop out a small search area around the last proposal for tracking, thus dramatically reducing the complexity of the problem. The template and search area features are then typically correlated as shown in Fig. 1a, and used to regress the 3D bounding box.", + "bbox": [ + 212, + 680, + 787, + 816 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Corresponding author.", + "bbox": [ + 217, + 824, + 385, + 840 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b61358caaf88a58a50d563d5ad68178071fa021fe560a3ab69e67133729ccba2.jpg", + "image_caption": [ + "Fig. 1: Feature correlation in 3D SOT. (a) Feature correlation in the smooth case (1 frame interval). Correlating the features is relatively trivial as the target undergoes only small shape variations, and the observation angles are consistent in the three frames. (b-c) Feature correlation in high temporal variation cases (10 frames interval). The pose relative to the camera changes rapidly. Correlating the features using historical information is highly challenging (b). We encode the historical observation angles $\\alpha$ into the features to guide the variation of relative pose to the camera (c)." + ], + "image_footnote": [], + "bbox": [ + 215, + 143, + 785, + 228 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In practice, these approaches are challenged by the presence of large point cloud variations due to the limited sensor temporal resolution and the moving speed of objects as shown in Fig. 1b. We refer to this significant variation in point cloud and object position between two frames as the high temporal variation (HV). The high temporal variation challenge is non-negligible in existing benchmarks, and exists in other scenarios not yet covered by them, such as:", + "bbox": [ + 212, + 368, + 787, + 459 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Skipped-tracking, which can greatly reduce computational consumption in tracking and serve a wide range of other tasks such as detection [20] and segmentation [41].", + "- Tracking in edge devices, which is essential for deploying trackers on common devices with limited frame rate, resolution, computation, and power etc.", + "- Tracking in highly dynamic scenarios [15], which is common in life. For example, tracking in sports events, highway, and UAV scenarios." + ], + "bbox": [ + 223, + 468, + 784, + 571 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "There are three challenges for 3D SOT in HV point clouds, and existing approaches are not sufficient to address these challenges. 1) Strong shape variations of the point clouds: Point cloud shape variations are usually caused by the occlusion and relative pose transformation between the object and the sensor. As illustrated in Fig. 1b, feature correlation in existing approaches fails because of the dramatic change in the density and distribution of points. 2) Distractions due to similar objects: When objects suffer from a significant motion, the search area needs to be enlarged to incorporate the target, thus introducing more distractions from similar objects. 
Most of the existing trackers focus on local scale features, which discards environmental spatial contextual information to handle distractions. 3) Heavy background noise: The expansion of the search area further reduces the proportion of target information in the scene. While aiming to find the high template-response features in the feature correlation stage, existing methods then neglect to suppress the noise interference and reduce the impact of noise features. We evaluate state-of-the-art (SOTA) trackers [24,39,44,45] in the high temporal variation scenario as shown in Fig. 2. Their performance drops dramatically as the temporal variation of scene point clouds enlarges.", + "bbox": [ + 212, + 583, + 787, + 840 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Q. Wu et al.", + "bbox": [ + 271, + 114, + 357, + 128 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/0a79de94d6bee311fd49a27bd2643270542e0f6a519c9ac96df629629bc43d48.jpg", + "image_caption": [ + "Fig. 2: Comparison of HVTrack with the SOTAs [24,39,44,45] on 'Car' from KITTI-HV (KITTI [8] with different frame intervals, see Sec. 4)." + ], + "image_footnote": [], + "bbox": [ + 331, + 143, + 504, + 300 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/979aa47cf04a5ba1c9a7068f41de170d603ca2467e05c7be82fb26d172676d5a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 143, + 671, + 300 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To address the above challenges, we propose a novel framework for 3D SOT in point clouds with High temporal Variation, which we call HVTrack. Specifically, we propose three novel modules to address each of the three above-mentioned challenges. 1) A Relative-Pose-Aware Memory (RPM) module to handle the strong shape variations of the point clouds. Different from [17], we integrate the foreground masks and observation angles into the memory bank. Therefore, the model can implicitly learn the distribution variation of point clouds from the relative pose in time. The information arising from observation angles has been overlooked by all existing trackers. 2) A Base-Expansion Feature Cross-Attention (BEA) module to deal with the problem of similar object distractions occurring in large scenes. We synchronize the correlation of the hybrid scales features (base and expansion scales, Sec. 3.4) in the cross-attention, and efficiently utilize spatial contextual information. 3) A Contextual Point Guided Self-Attention (CPA) module to suppress the background noise introduced by the expanded search area. It aggregates the features of points into contextual points according to their importance. Less important points share fewer contextual points and vice versa, thus suppressing most of the background noise. BEA and CPA are inspired by the SGFormer [26], which utilizes hybrid scale significance maps to assign more tokens to salient regions of 2D images. Our experiments clearly demonstrate the remarkable performance of HVTrack in high temporal variation scenarios, as illustrated in Fig. 2. 
Our contributions can be summarized as follows:", + "bbox": [ + 212, + 372, + 787, + 690 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- For the first time, to the best of our knowledge, we explore the new 3D SOT task for high temporal variation scenarios, and propose a novel framework called HVTrack for the task.", + "- We propose three novel modules, RPM, BEA, and CPA, to address three challenges for 3D SOT in HV point clouds: strong point cloud variations, similar object distractions, and heavy background noise.", + "- HVTrack yields state-of-the-art results on KITTI-HV and Waymo, and ranks second on KITTI. Our experimental results demonstrate the robustness of HVTrack in both smooth and high temporal variation cases." + ], + "bbox": [ + 225, + 702, + 784, + 838 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "3D SOT in Point Clouds with High Temporal Variation", + "bbox": [ + 359, + 114, + 730, + 128 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 215, + 143, + 388, + 160 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.1 3D Single-object Tracking", + "text_level": 1, + "bbox": [ + 215, + 180, + 478, + 196 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Most of the 3D SOT approaches are based on a Siamese framework, because the appearance variations of the target between neighboring frames are not significant. The work of Giancola et al. [9] constitutes the pioneering method in 3D SOT. However, it only solved the discriminative feature learning problem, and used a time-consuming and inaccurate heuristic matching to locate the target. Zarzar et al. [42] utilized a 2D RPN in bird's eyes view to build an end-to-end tracker. The P2B network [24] employs VoteNet [22] as RPN and constructs the first point-based tracker. The following works [7, 11, 12, 28, 34, 44] develop different architectures of trackers based on P2B [24]. V2B [11] leverages the target completion model to generate the dense and complete targets and proposes a simple yet effective voxel-to-BEV target localization network. BAT [44] utilizes the relationship between points and the bounding box, integrating the box information into the point clouds. With the development of transformer networks, a number of works [5, 10, 12, 28, 39, 46] have proposed to exploit various attention mechanisms. STNet [12] forms an iterative coarse-to-fine cross-and self-attention to correlate the target and search area. CXTrack [39] employs a target-centric transformer to integrate targetness information and contextual information. TAT [17] leverages the temporal information to integrate target cues by applying an RNN-based [4] correlation module. Zheng et al. [45] presented a motion-centric method M2-Track, which is appearance matching-free and has made great progress in dealing with the sparse point cloud tracking problem. Wu et al. [37] proposed the first semi-supervised framework in 3D SOT.", + "bbox": [ + 212, + 209, + 787, + 541 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "While effective in their context, the above methods are designed based on the assumption that the point cloud variation and motion of the objects across neighboring frames are not significant. 
In high temporal variation scenarios, this assumption will lead to performance degradation because of the point cloud variations and interference naturally occurring in large scenes. Here, we introduce HVTrack to tackle the challenges of 3D SOT in high temporal variation scenarios.", + "bbox": [ + 212, + 542, + 789, + 633 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 3D Multi-object Tracking", + "text_level": 1, + "bbox": [ + 215, + 659, + 473, + 676 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3D multi-object tracking (MOT) in point clouds follows two main streams: Tracking-by-detection, and learning-based methods. Tracking-by-detection [1, 3, 32, 35] usually exploits methods such as Kalman filtering to correlate the detection results and track the targets. CenterTrack [47], CenterPoint [40], and SimTrack [18] replace the filter by leveraging deep networks to predict the velocity and motion of the objects. The learning-based methods [6, 27, 36] typically apply a Graph Neural Network to tackle the association challenge in MOT. GNN3DMOT [36] leverages both 2D images and 3D point clouds to obtain a robust association. 3DMOTFormer [6] constructs a graph transformer framework and achieves a good performance using only 3D point clouds.", + "bbox": [ + 212, + 688, + 787, + 840 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 230, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Q. Wu et al.", + "bbox": [ + 271, + 114, + 357, + 128 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3D MOT and 3D SOT have different purposes and their own challenges [13]. 3D MOT is object-level and focuses on correlating detected objects, whereas 3D SOT is intra-object-level [14] and aims to track a single object given a template. 3D SOT methods usually come with much lower computational consumption and higher throughput [46]. Also, 3D MOT is free from the challenges posed by the dynamic change in the search area size, as MOT is not required to adopt the search area cropping strategy in SOT.", + "bbox": [ + 212, + 146, + 787, + 252 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 215, + 276, + 330, + 292 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 Problem Definition", + "text_level": 1, + "bbox": [ + 215, + 308, + 421, + 323 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Given the template of the target, the goal of 3D SOT is to continually locate the poses of the target in the search area point cloud sequence $\\mathbf{P}^s = \\{P_0^s,\\dots ,P_t^s,\\dots ,P_n^s |P_t^s\\in \\mathbb{R}^{N_s\\times 3}\\}$ . Usually, the target point cloud with labels in the first frame is regarded as the template. Former trackers [5,7,9-12,24,28, 34,39,44-46] leverage a 3D bounding box label $B_{0} = (x,y,z,w,l,h,\\theta)\\in \\mathbb{R}^{7}$ to generate the template in the input. Here, $(x,y,z)$ , $(w,l,h)$ and $\\theta$ are the center location, bounding box size (width, length, and height), and rotation angle of the target, respectively. 
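A minimal sketch of this parameterization, assuming the extent (w, l, h) stays fixed at its first-frame value (type and helper names are hypothetical, not from any released HVTrack code):

```python
from dataclasses import dataclass, replace

# B = (x, y, z, w, l, h, theta): center, extent, and heading (Sec. 3.1).
@dataclass(frozen=True)
class Box7DoF:
    x: float
    y: float
    z: float
    w: float
    l: float
    h: float
    theta: float

def update_box(template: Box7DoF, x: float, y: float, z: float, theta: float) -> Box7DoF:
    """For a rigid target, keep (w, l, h) and regress only center and heading."""
    return replace(template, x=x, y=y, z=z, theta=theta)
```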
As objects can be assumed to be rigid, the trackers only need to regress the center and rotation angle of the target.", + "bbox": [ + 212, + 334, + 787, + 470 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 Overview", + "text_level": 1, + "bbox": [ + 215, + 493, + 339, + 507 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We propose HVTrack to exploit both temporal and spatial information and achieve robust tracking in high temporal variation scenarios. As shown in Fig. 3, we take the point cloud $P_{t}^{s}$ at time $t$ as the search area, and leverage memory banks as the template. We first employ a backbone to extract the local spatial features $\\mathcal{X}_0 \\in \\mathbb{R}^{N \\times C}$ of $P_{t}^{s}$ , with $N$ and $C$ the point number and feature channel, respectively. Then, $L$ transformer layers are employed to extract spatio-temporal information. For each layer $l$ , (i) we capture the template information $Mem_{l} \\in \\mathbb{R}^{KN \\times C}$ from the Relative-Pose-Aware Memory module, with $K$ the memory bank size (Sec. 3.3); (ii) the memory features and search area features $\\mathcal{X}_{l-1}$ are correlated in the Base-Expansion Features Cross-Attention (Sec. 3.4); (iii) the Contextual Point Guided Self-Attention (Sec. 3.5) leverages the attention map in the Base-Expansion Features Cross-Attention to suppress the noise features; (iv) we update the Layer Features memory bank using $\\mathcal{X}_{l-1}$ . After the transformer layers, an RPN is applied to regress the location $(x_{t}, y_{t}, z_{t}, \\theta_{t})$ , the mask $\\mathcal{M}_{t} \\in \\mathbb{R}^{N \\times 1}$ , and the observation angle $\\alpha \\in \\mathbb{R}^2$ . Finally, the mask and observation angle memory banks are updated using the predicted results.", + "bbox": [ + 212, + 518, + 787, + 762 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 Relative-Pose-Aware Memory Module", + "text_level": 1, + "bbox": [ + 215, + 784, + 576, + 799 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Fig. 1(b), rapid changes in relative pose lead to large variations in the shape of the object point cloud across the frames. Correlating the object", + "bbox": [ + 212, + 809, + 785, + 840 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "3D SOT in Point Clouds with High Temporal Variation", + "bbox": [ + 359, + 114, + 730, + 128 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/c0cc4a36f7d581f333a86b58541c438110d97eeed1b64756128fa6dd0d4642a2.jpg", + "image_caption": [ + "Fig. 3: HVTrack framework. We first utilize a backbone to extract the local embedding features of the search area. Then, we construct $L$ transformer layers to fuse spatio-temporal information. For each transformer layer, (i) we apply three memory bank features in the Relative-Pose-Aware Memory module to generate temporal template information; (ii) we employ the Base-Expansion Feature Cross-Attention to correlate the template and search area by leveraging hybrid scale spatial context-aware features; (iii) we introduce a Contextual Point Guided Self-Attention to suppress unimportant noise. After each layer, we update the layer features memory bank using the layer input. Finally, we apply an RPN to regress the 3D bounding box, and update the mask and observation angle memory banks." 
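Read as code, the per-frame flow of Sec. 3.2 / Fig. 3 looks roughly as follows. This is a schematic sketch only: the module names, signatures, and memory-bank interfaces are our assumptions, with all internals stubbed out.

```python
def hvtrack_step(points, backbone, layers, rpn, LM, MM, OM):
    """One tracking step over the search area at time t (schematic).

    points : (N, 3) search-area point cloud
    LM[l]  : layer-features memory bank feeding transformer layer l
    MM, OM : mask and observation-angle memory banks (size K each)
    """
    X = backbone(points)                    # local embedding X_0, shape (N, C)
    for l, layer in enumerate(layers):      # L = 2 layers in Sec. 3.6
        Mem = layer.rpm(LM[l], MM, OM)      # (i) temporal template, (K*N, C)
        X_corr, attn = layer.bea(X, Mem)    # (ii) base/expansion cross-attention
        LM[l].append(X)                     # (iv) store this layer's input features
        X = layer.cpa(X_corr, attn)         # (iii) contextual-point self-attention
    location, mask, alpha = rpn(X)          # (x, y, z, theta), (N, 1) mask, angle
    MM.append(mask)                         # update mask memory bank
    OM.append(alpha)                        # update observation-angle memory bank
    return location
```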
+ ], + "image_footnote": [], + "bbox": [ + 230, + 143, + 769, + 280 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "features in $(t - 2, t - 1, t)$ then becomes difficult, as they have a low overlap with each other. To address this, we introduce the observation angle into the memory bank. The observation angle gives us knowledge of the coarse distribution of an object's point cloud. Thus, the model can learn the variations in point cloud distribution from the historical changes of observation angle.", + "bbox": [ + 212, + 460, + 787, + 537 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To exploit the temporal information as the template, we propose a Relative-Pose-Aware Memory (RPM) module. RPM contains 3 memory banks. 1) A layer features memory bank (LM) $\\in \\mathbb{R}^{L\\times K\\times N\\times C}$ : We leverage the historical transformer layer features as the template features to reduce the template inference time in former trackers [5,9-12,24,28,34,44,46]. 2) A mask memory bank (MM) $\\in \\mathbb{R}^{K\\times N\\times 1}$ : Inspired by the mask-based trackers [39,45], we utilize the mask as the foreground representation. 3) An observation angle memory bank (OM) $\\in \\mathbb{R}^{K\\times 2}$ . For each transformer layer $l$ , we process the memory features as", + "bbox": [ + 212, + 539, + 787, + 660 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nT _ {l} = \\operatorname {L i n e a r} ([ \\mathrm {L M} _ {1}, \\mathrm {M M}, \\operatorname {R e p e a t} (\\mathrm {O M}) ]), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 675, + 785, + 691 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $T_{l} \\in \\mathbb{R}^{KN \\times C}$ denotes the template features, Linear(\\cdot) is a linear layer that projects the features from $\\mathbb{R}^{KN \\times (C + 3)}$ to $\\mathbb{R}^{KN \\times C}$ , [.] is the concatenation operation, and Repeat(\\cdot) stacks the OM to $\\mathbb{R}^{K \\times N \\times 2}$ . Then, we project $T_{l}$ into Query (Q), Key (K), and Value (V) using the learnable parameter matrices as", + "bbox": [ + 212, + 705, + 787, + 768 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nQ _ {l} ^ {T} = \\mathrm {L N} (\\mathrm {L N} (T _ {l}) W _ {l} ^ {T Q} + \\mathrm {P E} ^ {T}),\n$$\n", + "text_format": "latex", + "bbox": [ + 385, + 781, + 617, + 801 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nK _ {l} ^ {T} = \\operatorname {L N} \\left(T _ {l}\\right) W _ {l} ^ {T K}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 385, + 803, + 784, + 821 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nV _ {l} ^ {T} = \\mathrm {L N} (T _ {l}) W _ {l} ^ {T V},\n$$\n", + "text_format": "latex", + "bbox": [ + 387, + 823, + 532, + 842 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Q. Wu et al.", + "bbox": [ + 271, + 114, + 357, + 128 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/e0a53b32019808bd9d52aad04d8ccbb081dd8e0591cafb5da842715f08728267.jpg", + "image_caption": [ + "(a) BEA." + ], + "image_footnote": [], + "bbox": [ + 215, + 143, + 480, + 314 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/dc2e9b1c243ffa8aaaef51d094460cf0a5d9216ebef62d9a3834fefd7ed2d73b.jpg", + "image_caption": [ + "(b) CPA.", + "Fig. 4: (a) Base-Expansion Feature Cross-Attention (BEA). 
The $H$ heads in the multi-head attention (MHA) are split to process hybrid scale features. For the base scale branch, we directly put the local features into the MHA. For the expansion scale branch, we apply an EdgeConv [33] to expand the receptive field of each point and extract more abstract features before MHA. BEA captures the spatial context-aware information with a humble extra computational cost. (b) Contextual Point Guided Self-Attention (CPA). We determine the importance of each point by both base and expansion scale attention maps. Then, we aggregate all the points into $U$ clusters (contextual points) according to their importance and project the clusters to $K$ and $V$ . We assign fewer contextual points for low-importance points, and vice versa. CPA not only suppresses the noise but also reduces the computational cost of the attention." + ], + "image_footnote": [], + "bbox": [ + 526, + 143, + 782, + 314 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\mathrm{LN}(\\cdot)$ is the layer norm, and $\\mathrm{PE}^T \\in \\mathbb{R}^{KN \\times C}$ is the positional embedding of the historical point cloud coordinates. We utilize a linear layer to project the point cloud coordinates to their positional embedding. Finally, a self-attention is applied for internal interactions between temporal information as", + "bbox": [ + 212, + 521, + 787, + 583 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nM e m _ {l} ^ {*} = T _ {l} + \\operatorname {D r o p o u t} \\left(\\mathrm {M H A} \\left(Q _ {l} ^ {T}, K _ {l} ^ {T}, V _ {l} ^ {T}\\right)\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 334, + 593, + 784, + 612 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where MHA is the multi-head attention in [31], and Dropout is the random dropping operation in [29]. Following CXTrack [39], we apply dropout and feedforward network (FFN) after self-attention, i.e.,", + "bbox": [ + 212, + 622, + 787, + 669 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nM e m _ {l} = M e m _ {l} ^ {*} + \\operatorname {D r o p o u t} (\\operatorname {F F N} (\\operatorname {L N} (M e m _ {l} ^ {*}))), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 326, + 680, + 784, + 698 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {F F N} (x) = \\max \\left(0, x W _ {1} + b _ {1}\\right) W _ {2} + b _ {2}. \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 364, + 723, + 784, + 739 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.4 Base-Expansion Feature Cross-Attention", + "text_level": 1, + "bbox": [ + 214, + 768, + 598, + 784 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Most of the existing trackers [11,24,28,34,39,44,46] employ a point based backbone [23,33] and focus on local region features, which we call base scale features. Using only base scale features in the whole pipeline is quite efficient and effective", + "bbox": [ + 212, + 794, + 787, + 840 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "3D SOT in Point Clouds with High Temporal Variation", + "bbox": [ + 359, + 114, + 732, + 128 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "in small scenes. However, the base scale features are limited in representing the neighboring environment features around the object in large search areas. 
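Looking back at the Relative-Pose-Aware Memory (Sec. 3.3), the step worth making concrete is Eq. (1): the K stored observation angles are broadcast to all N points of their frame before being concatenated with the layer features and masks. A small PyTorch sketch with the Sec. 3.6 sizes (variable names are ours; LayerNorm, positional embeddings, and the Eq. (4) FFN are omitted for brevity):

```python
import torch
import torch.nn as nn

K, N, C = 6, 128, 128                     # memory size, points, channels (Sec. 3.6)
lm = torch.randn(K, N, C)                 # layer-features memory bank (one layer)
mm = torch.randn(K, N, 1)                 # mask memory bank
om = torch.randn(K, 2)                    # observation-angle memory bank

linear = nn.Linear(C + 3, C)              # the Linear(.) of Eq. (1): C+3 -> C

# Repeat(OM): every point of frame k carries that frame's observation angle.
om_rep = om[:, None, :].expand(K, N, 2)                            # (K, N, 2)
T = linear(torch.cat([lm, mm, om_rep], dim=-1)).reshape(K * N, C)  # T_l

# Eq. (3): residual self-attention over the K*N temporal template tokens.
mha = nn.MultiheadAttention(embed_dim=C, num_heads=4, batch_first=True)
attn_out, _ = mha(T[None], T[None], T[None])
mem = T[None] + nn.Dropout(0.1)(attn_out)  # Mem*_l of Eq. (3)
```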
To tackle the challenge of similar object distractions, spatial context information across consecutive frames is crucial for effective object tracking [39]. Expanding the receptive field of features can help capture spatial contextual information, and such features are called expansion scale features. Inspired by [26], we propose Base-Expansion Feature Cross-Attention (BEA) to capture both local and more abstract features, and exploit spatial context-aware information.", + "bbox": [ + 212, + 146, + 787, + 267 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As shown in Fig. 4a, the input features $X_{l-1}$ are projected into $\\mathbf{Q}$ . Usually, the memory features Meml would be projected into $\\mathbf{K}$ and $\\mathbf{V}$ . Then, multi-head cross-attention adopts $H$ independent heads, and processes them using the same base scale features. By contrast, we split the $H$ heads into 2 groups. $H/2$ heads exploit local spatial context information. We directly process the base scale features with normal cross-attention, and output base scale features $\\hat{X}_{l-1}^{base} \\in \\mathbb{R}^{N \\times C/2}$ and attention map Attn $^{base} \\in \\mathbb{R}^{N \\times KN}$ . The other $H/2$ heads capture environment context features. We first apply an EdgeConv [33] to extract more abstract features Meml $^{expan} \\in \\mathbb{R}^{KN/8 \\times C}$ , which are expansion scale features, i.e.,", + "bbox": [ + 212, + 267, + 787, + 417 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nM e m _ {l} ^ {e x p a n} = \\operatorname {E d g e C o n v} \\left(M e m _ {l}\\right). \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 385, + 419, + 784, + 436 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Then, we project the expansion features into K and V, and perform multi-head cross-attention with Q. Specifically, for the $i$ -th head belonging to the expansion scale branch, we generate Q, K, and V as", + "bbox": [ + 214, + 441, + 787, + 488 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} Q _ {i} = L N (L N (X _ {l - 1}) W _ {i} ^ {Q} + \\mathrm {P E} _ {i} ^ {S}), \\\\ K _ {i} = L N \\left(M e m _ {l} ^ {e x p a n}\\right) W _ {i} ^ {K}, \\tag {7} \\\\ V _ {i} = L N (M e m _ {l} ^ {e x p a n}) W _ {i} ^ {V}, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 496, + 784, + 554 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\mathrm{PE}_i^S$ is the positional embedding of search area point cloud coordinates. Then, cross-attention is performed as", + "bbox": [ + 214, + 565, + 782, + 597 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nA t t n _ {i} ^ {e x p a n} = \\operatorname {S o f t m a x} \\left(\\frac {Q _ {i} K _ {i}}{\\sqrt {d _ {h}}}\\right), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 604, + 784, + 638 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nh _ {i} ^ {e x p a n} = A t t n _ {i} ^ {e x p a n} V _ {i}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 421, + 651, + 784, + 670 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $d_h$ is the feature dimension of the heads, and $h_i^{expan}$ is the output features of the $i$ -th head. 
After that, we concatenate the output features and attention map of each head as", + "bbox": [ + 212, + 672, + 787, + 719 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\hat {X} _ {l - 1} ^ {\\text {e x p a n}} = \\left[ h _ {1}, \\dots , h _ {H / 2} \\right], \\\\ A t t n ^ {e x p a n} = \\left[ A t t n _ {1} ^ {e x p a n}, \\dots , A t t n _ {H / 2} ^ {e x p a n} \\right], \\tag {10} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 728, + 784, + 768 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\hat{X}_{l-1}^{expan} \\in \\mathbb{R}^{N \\times C/2}$ , and $Attn^{expan} \\in \\mathbb{R}^{N \\times KN/8}$ . Finally, we concatenate the base scale and expansion scale outputs as the resulting correlation feature $\\hat{X}_{l-1} \\in \\mathbb{R}^{N \\times C}$ . Thus, BEA provides rich hybrid scale spatial contextual information for each point, with a very humble extra computational cost.", + "bbox": [ + 214, + 777, + 787, + 840 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Q. Wu et al.", + "bbox": [ + 271, + 114, + 356, + 128 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.5 Contextual Point Guided Self-Attention", + "text_level": 1, + "bbox": [ + 215, + 146, + 591, + 161 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Most of the information in the search area will be regarded as noise, because we are only interested in one single object to be tracked. Existing trackers [11,24,28, 34,44] aim to find the features with high template-response in the search area, but neglect the suppress to the noise. Zhou et al. [46] proposed a Relation-Aware Sampling for preserving more template-relevant points in the search area before inputting it to the backbone. By contrast, we focus on suppressing the noise after feature correlation via a Contextual Point Guided Self-Attention (CPA).", + "bbox": [ + 212, + 167, + 787, + 272 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "As shown in Fig. 4b, we leverage the base and expansion scale attention maps to generate the importance map $I \\in \\mathbb{R}^{N \\times 1}$ as", + "bbox": [ + 212, + 273, + 787, + 303 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\nI = \\operatorname {M e a n} \\left(A t t n ^ {\\text {b a s e}}\\right) + \\operatorname {M e a n} \\left(A t t n ^ {\\text {e x p a n}}\\right). \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 356, + 310, + 785, + 325 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The higher the importance of the point, the more spatial context-aware information related to the target it contains. We sort the points according to the magnitude of their importance values. Then, all the points will be separated into $G$ groups according to their importance. For each group with points $P_{i}^{G} \\in \\mathbb{R}^{G_{i} \\times C}$ , we aggregate the points into $U_{i}$ clusters, which we call contextual points. Specifically, we first reshape the points as $P_{i}^{G} \\in \\mathbb{R}^{U_{i} \\times C \\times G_{i} / U_{i}}$ . Second, a linear layer is employed to project the group to the contextual points $P_{i}^{U} \\in \\mathbb{R}^{U_{i} \\times C}$ . We assign fewer contextual points for the groups with lower importance, and suppress the noise feature expression. 
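Stepping back to tie Eqs. (6)-(10) together: BEA splits the H heads in half, the base half attending over all KN memory tokens and the expansion half over the KN/8 EdgeConv-pooled tokens, then concatenates the per-head outputs back to C channels. A compact sketch, with a stride-8 subsampling standing in for EdgeConv and all names our own:

```python
import torch
import torch.nn.functional as F

def bea_cross_attention(X, Mem, Wq, Wk, Wv, Wk_e, Wv_e, H=4):
    """Base-Expansion cross-attention sketch (Sec. 3.4). X: (N, C), Mem: (KN, C)."""
    N, C = X.shape
    dh = C // H
    Mem_e = Mem[::8]                        # stand-in for EdgeConv(Mem): (KN/8, C)
    Q = (X @ Wq).view(N, H, dh)
    outs, attns = [], []
    for i in range(H):
        base = i < H // 2                   # first H/2 heads: base scale
        src = Mem if base else Mem_e
        Kh = (src @ (Wk if base else Wk_e)).view(-1, H, dh)[:, i]  # (M, dh)
        Vh = (src @ (Wv if base else Wv_e)).view(-1, H, dh)[:, i]
        A = F.softmax(Q[:, i] @ Kh.T / dh ** 0.5, dim=-1)          # Eq. (8)
        outs.append(A @ Vh)                                        # Eq. (9)
        attns.append(A)                     # (N, KN) base or (N, KN/8) expansion
    return torch.cat(outs, dim=-1), attns   # Eq. (10): concat back to (N, C)
```

Only the key/value side changes scale; queries stay per-point, so every search point receives C/2 base-scale and C/2 expansion-scale channels.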
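And for the grouping that the importance map of Eq. (11) drives: points are sorted by importance, split into groups, and each group is linearly pooled into its budget of contextual points, so the least important (mostly background) points are compressed hardest. A sketch using the Sec. 3.6 budgets of [32, 64, 32] points and U = [4, 32, 16] contextual points (assuming N = 128; in practice the per-group linear layers would be learned modules rather than created on the fly):

```python
import torch
import torch.nn as nn

def contextual_points(X, attn_base, attn_expan,
                      group_sizes=(32, 64, 32), num_ctx=(4, 32, 16)):
    """CPA aggregation sketch (Sec. 3.5). X: (N, C) correlated features."""
    N, C = X.shape                          # assumes N == sum(group_sizes) == 128
    # Eq. (11): importance as the mean base- plus expansion-scale attention.
    I = attn_base.mean(dim=-1) + attn_expan.mean(dim=-1)           # (N,)
    X_sorted = X[I.argsort(descending=True)]                       # high -> low
    ctx, start = [], 0
    for g, u in zip(group_sizes, num_ctx):
        grp = X_sorted[start:start + g].view(u, g // u, C)         # (u, g/u, C)
        pool = nn.Linear(g // u, 1)         # pools g/u points into 1 contextual point
        ctx.append(pool(grp.transpose(1, 2)).squeeze(-1))          # (u, C)
        start += g
    return torch.cat(ctx, dim=0)            # (U, C), projected to Key and Value
```

Since Key and Value shrink from N to U = 52 tokens, the subsequent self-attention is both cheaper and biased away from background clutter, matching the cost argument at the end of Sec. 3.5.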
Finally, all the contextual points are concatenated and projected into Key $K^{U} \\in \\mathbb{R}^{U \\times C}$ and Value $V^{U} \\in \\mathbb{R}^{U \\times C}$ . We project $\\hat{X}_{l-1}$ to Q and perform a multi-head attention with $K^{U}$ and $V^{U}$ , and an FFN is applied after attention. CPA shrinks the length of K and V, and leads to a computational cost decrease in self-attention.", + "bbox": [ + 212, + 333, + 787, + 529 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "3.6 Implementation Details", + "text_level": 1, + "bbox": [ + 215, + 547, + 455, + 561 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Backbone & Loss Functions. Following CXTrack [39], we adopt DGCNN [33] as our backbone, and apply X-RPN [39] as the RPN of our framework. We add two Shared MLP layers to X-RPN for predicting the observation angles $(\\alpha)$ and the masks. Therefore, the overall loss is expressed as", + "bbox": [ + 212, + 568, + 787, + 630 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\gamma_ {1} \\mathcal {L} _ {c c} + \\gamma_ {2} \\mathcal {L} _ {\\text {m a s k}} + \\gamma_ {3} \\mathcal {L} _ {\\text {a l p h a}} + \\gamma_ {4} \\mathcal {L} _ {\\text {r m}} + \\gamma_ {5} \\mathcal {L} _ {\\text {b o x}}, \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 316, + 638, + 784, + 652 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "where $\\mathcal{L}_{cc}$ , $\\mathcal{L}_{mask}$ , $\\mathcal{L}_{alpha}$ , $\\mathcal{L}_{box}$ , and $\\mathcal{L}_{box}$ are the loss for the coarse center, foreground mask, observation angle, targetness mask, and bounding box, respectively. We apply the $L_{2}$ loss for $\\mathcal{L}_{cc}$ , the standard cross entropy loss for $\\mathcal{L}_{mask}$ and $\\mathcal{L}_{rm}$ , and the Huber loss for $\\mathcal{L}_{alpha}$ and $\\mathcal{L}_{box}$ . $\\gamma_{1}, \\gamma_{2}, \\gamma_{3}, \\gamma_{4}$ , and $\\gamma_{5}$ are empirically set as 10.0, 0.2, 1.0, 1.0, and 1.0.", + "bbox": [ + 212, + 657, + 787, + 734 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Training & Testing. We train our model on NVIDIA RTX-3090 GPUs with the Adam optimizer and an initial learning rate of 0.001. Due to GPU memory limitation, we construct point cloud sequences with 8 frames for training, and set $K = 2$ in training, and $K = 6$ in testing. Following existing methods [39,45], we set $N$ and $C$ to 128. We stack $L = 2$ transformer layers and apply $H = 4$ heads in BEA and CPA. We adopt $G = 3$ groups in CPA, and assign [32,64,32] points and $U = [4,32,16]$ contextual points for the groups, respectively.", + "bbox": [ + 212, + 734, + 787, + 842 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "3D SOT in Point Clouds with High Temporal Variation", + "bbox": [ + 359, + 114, + 730, + 128 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 217, + 143, + 374, + 162 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We leverage two famous 3D tracking benchmarks of KITTI [8] and Waymo [30] to evaluate the general performance of our approach in regular 3D SOT. In addition, we establish a new KITTI-HV dataset to test our performance in high temporal variation scenarios.", + "bbox": [ + 218, + 174, + 784, + 234 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Regular Datasets. 
The KITTI tracking dataset comprises 21 training sequences and 29 test sequences, encompassing eight object types. Following prior studies [9,24,34,39,44,45], we use the sequences 0-16 as training data, 17-18 for validation, and 19-20 for testing. The Waymo dataset is large-scale. We adopt the approach outlined in LiDAR-SOT [21] to utilize 1121 tracklets, which are subsequently categorized into easy, medium, and hard subsets based on the number of points in the first frame of each tracklet.", + "bbox": [ + 218, + 234, + 784, + 340 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "HV Dataset. We build a dataset with high temporal variation for 3D SOT based on KITTI, called KITTI-HV. Although high temporal variation scenarios are present in the existing benchmarks, there is no exact threshold to determine whether the scenario is a high temporal variation scenario or not. Large point cloud variations and significant object motions are two major challenges in high temporal variation scenarios. Sampling at frame intervals is a good way to simulate these two challenges. Also, the constructed KITTI-HV can provide a preliminary platform for exploring tracking in scenarios such as skipped-tracking, edge devices, and high dynamics. For a fairer comparison with existing methods, we set the frame interval to 2, 3, 5, and 10. We set up more dense testings at low frame intervals to exploit the performance of the existing methods in point cloud variations close to smooth scenarios. We train and test all methods from scratch individually on each frame interval.", + "bbox": [ + 218, + 340, + 784, + 536 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Evaluation Metrics. We employ One Pass Evaluation [38] to evaluate the different methods in terms of Success and Precision. Success is determined by measuring the Intersection Over Union between the proposed bounding box and the ground-truth (GT) bounding box. Precision is evaluated by computing the Area Under the Curve of the distance error between the centers of the two bounding boxes, ranging from 0 to 2 meters.", + "bbox": [ + 218, + 537, + 784, + 627 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.1 Comparison with the State of the Art", + "text_level": 1, + "bbox": [ + 218, + 648, + 571, + 665 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Results on HV tracking. We evaluate our HVTrack in 4 categories ('Car', 'Pedestrian', 'Van', and 'Cyclist') following existing methods [24, 39, 44, 45] in the KITTI-HV dataset. The methods we choose to compare with HVTrack are the most representative SOT methods from 2020 to 2023 (Most cited methods published in each year according to Google Scholar). As illustrated in Tab. 1, our approach consistently outperforms the state-of-the-art methods [24, 39, 44, 45] across all frame intervals, confirming the effectiveness of the proposed tracking framework for high temporal variation scenarios. Notably, the performance gap between our HVTrack and existing trackers widens as variations are exacerbated. In the particularly challenging scenario of 10 frame intervals, we achieve a substantial $9.1\\%$ ↑ improvement in success and a remarkable $10.4\\%$ ↑ enhancement", + "bbox": [ + 218, + 672, + 784, + 839 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Q. 
Wu et al.", + "bbox": [ + 271, + 114, + 354, + 127 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/7bb53e197a6e703f83209d54c3107b45446a524259e5c0386dd6b4fa8efd9891.jpg", + "table_caption": [ + "Table 1: Comparison of HVTrack with the state-of-the-art methods on each category of the KITTI-HV dataset. We construct the HV dataset KITTI-HV for training and testing by setting different frame intervals for sampling in the KITTI dataset. Bold and underline denote the best and second-best performance, respectively. Success/Precision are used for evaluation. Improvement and deterioration are shown in green and red, respectively." + ], + "table_footnote": [], + "table_body": "
Frame Intervals2 Intervals3 Intervals
Category Frame NumberCar 6424Pedestrian 6088Van 1248Cyclist 308Mean 14068Car 6424Pedestrian 6088Van 1248Cyclist 308Mean 14068
P2B [24]56.3/71.030.8/53.033.4/38.441.8/61.442.9/60.143.4/51.827.9/46.827.9/31.844.8/64.435.4/48.1
BAT [44]61.8/74.236.5/61.126.8/30.454.1/78.747.6/64.751.7/61.931.8/53.524.0/28.250.5/72.640.6/55.5
M2-Track [45]63.0/76.654.6/81.752.8/66.568.3/89.358.6/78.262.1/72.751.8/74.333.6/41.664.7/82.055.1/70.8
CXTrack [39]61.4/70.962.6/86.356.0/69.159.2/76.961.4/77.547.4/53.157.9/79.348.5/58.840.7/58.451.9/65.1
HVTrack Improvement67.1/77.560.0/84.050.6/61.773.9/93.662.7/79.366.8/76.551.1/71.938.7/46.966.5/89.757.5/72.2
4.1↑/0.9↑2.6↓/2.3↓6.0↓/7.4↓5.6↑/4.3↑1.3↑/1.1↑4.7↑/3.8↑6.8↓/7.4↓9.8↓/11.9↓1.8↑/7.7↑2.4↑/1.4↑
Frame Intervals5 Intervals10 Intervals
Category Frame NumberCar 6424Pedestrian 6088Van 1248Cyclist 308Mean 14068Car 6424Pedestrian 6088Van 1248Cyclist 308Mean 14068
P2B [24]39.3/46.127.4/43.527.2/30.435.0/44.433.0/43.528.6/29.223.1/31.125.9/27.329.1/28.326.0/29.8
BAT [44]44.1/51.121.1/32.826.1/29.535.7/46.332.4/41.130.6/33.121.7/29.220.8/20.729.3/29.125.9/30.2
M2-Track [45]50.9/58.631.6/45.430.0/36.547.4/61.040.6/51.033.0/35.117.5/24.120.7/20.827.7/26.625.0/28.9
CXTrack [39]38.6/42.235.0/47.821.6/24.325.7/33.335.3/42.830.2/32.418.2/21.417.5/17.927.7/26.523.8/26.2
HVTrack Improvement60.3/68.935.1/52.128.7/32.458.2/71.746.6/58.549.4/54.722.5/29.122.2/23.439.5/45.435.1/40.6
9.4↑/10.3↑0.1↑/4.3↑1.3↓/4.1↓10.8↑/10.7↑6.0↑/7.5↑16.4↑/19.6↑0.6↓/0.1↓3.7↓/3.9↓10.2↑/16.3↑9.1↑/10.4↑
", + "bbox": [ + 218, + 241, + 787, + 441 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "in precision. This showcases the robustness of our method in accommodating various levels of point cloud variation. Our method delivers outstanding performance on 'Car' and 'Cyclist', in which we gain a great improvement in 5 frame intervals (9.4%↑/10.3%↑ for 'Car' and 10.8%↑/10.7%↑ for 'Cyclist') and 10 frame intervals (16.4%↑/19.6%↑ for 'Car' and 10.2%↑/16.3%↑ for 'Cyclist'). However, the challenge of tracking large objects persists in high temporal variation cases for our method. Note that the performance of CXTrack drops dramatically after 3 frame intervals. In particular, in the medium variation case of 5 frame intervals, we achieve 11.3%↑/15.7%↑ improvement in overall success/precision compared to CXTrack, despite the fact that our HVTrack shares the same backbone and RPN with CXTrack [39]. Furthermore, HVTrack surpasses CXTrack on 'Car' and 'Cyclist' by a very large margin (21.7%↑/26.7%↑ for 'Car' and 32.5%↑/38.4%↑ for 'Cyclist'). The distinct performance gap between HVTrack and CXTrack in HV tracking showcases the effectiveness of our feature correlation module design.", + "bbox": [ + 217, + 474, + 787, + 700 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Results on regular tracking. For the KITTI dataset, we compare HVTrack with 12 top-performing trackers [7,9,11,12,17,24,28,34,39,44-46]. As shown in Tab. 2, our overall performance is close to the SOTA tracker CXTrack [39], and achieves the second best result on the average in success (2.0%↓ w.r.t. CXTrack). Note that HVTrack outperforms TAT [17] on average (0.8%↑/0.3%↑), which utilizes temporal information by concatenating historical template features. This demonstrates our better design for leveraging the spatio-temporal context information. However, the performance of HVTrack drops when dealing with large objects ('Van'). We conjecture this performance drop to be caused by CPA,", + "bbox": [ + 217, + 704, + 787, + 840 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "3D SOT in Point Clouds with High Temporal Variation", + "bbox": [ + 359, + 114, + 730, + 128 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 116, + 782, + 126 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/7e5b04df2a48a564d827f1eda8c66fffb8c9c3332b3ae69ac37f56cf48b6cd3a.jpg", + "table_caption": [ + "Table 2: Comparison of HVTrack with the SOTA methods on each category of the KITTI dataset." + ], + "table_footnote": [], + "table_body": "
Category Frame NumberCar 6424Pedestrian 6088Van 1248Cyclist 308Mean 14068
SC3D [9]41.3/57.918.2/37.840.4/47.041.5/70.431.2/48.5
P2B [24]56.2/72.828.7/49.640.8/48.432.1/44.742.4/60.0
3DSiamRPN [7]58.2/76.235.2/56.245.7/52.936.2/49.046.7/64.9
MLVSNet [34]56.0/74.034.1/61.152.0/61.434.3/44.545.7/66.7
BAT [44]60.5/77.742.1/70.152.4/67.033.7/45.451.2/72.8
PTT [28]67.8/81.844.9/72.043.6/52.537.2/47.355.1/74.2
V2B [11]70.5/81.348.3/73.550.1/58.040.8/49.758.4/75.2
PTTR [46]65.2/77.450.9/81.652.5/61.865.1/90.557.9/78.1
STNet [12]72.1/84.049.9/77.258.0/70.673.5/93.761.3/80.1
TAT [17]72.2/83.357.4/84.458.9/69.274.2/93.964.7/82.8
M2-Track [45]65.5/80.861.5/88.253.8/70.773.2/93.562.9/83.4
CXTrack [39]69.1/81.667.0/91.560.0/71.874.2/94.367.5/85.3
HVTrack68.2/79.264.6/90.654.8/63.872.4/93.765.5/83.1
", + "bbox": [ + 346, + 184, + 658, + 334 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/98d53ff1a89c76f4f47177c984addfe0dc695f3960e5b93a255404995f394da2.jpg", + "table_caption": [ + "Table 3: Comparison of HVTrack with the SOTA methods on the Waymo dataset." + ], + "table_footnote": [], + "table_body": "
MethodVehicle (185632)Pedestrian (241168)Mean
EasyMediumHardMeanEasyMediumHardMeanMean (426800)
P2B [24]57.1/65.452.0/60.747.9/58.552.6/61.718.1/30.817.8/30.017.7/29.317.9/30.133.0/43.8
BAT [44]61.0/68.353.3/60.948.9/57.854.7/62.719.3/32.617.8/29.817.2/28.318.2/30.334.1/44.4
V2B [11]64.5/71.555.1/63.252.0/62.057.6/65.927.9/43.922.5/36.220.1/33.123.7/37.938.4/50.1
STNet [12]65.9/72.757.5/66.054.6/64.759.7/68.029.2/45.324.7/38.222.2/35.825.5/39.940.4/52.1
CXTrack [39]63.9/71.154.2/62.752.1/63.757.1/66.135.4/55.329.7/47.926.3/44.430.7/49.442.2/56.7
HVTrack(Ours)66.2/75.257.0/66.055.3/67.159.8/69.734.2/53.528.7/47.926.7/45.230.0/49.143.0/58.1
", + "bbox": [ + 230, + 375, + 769, + 465 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "which will be further explored in Sec. 4.2. For the Waymo dataset, following the benchmark setting in LiDAR-SOT [21] and STNet [12], we test our HVTrack in 2 categories ('Vehicle', 'Pedestrian') with 3 difficulty levels. All the methods are pre-trained on KITTI. The results of P2B [24], BAT [44], and V2B [11] on Waymo are provided by STNet [12]. As shown in Tab. 3, our method achieves the best performance in success $(0.8\\% \\uparrow)$ and precision $(1.4\\% \\uparrow)$ . Notably, HVTrack does not surpass CXTrack and reach SOTA on the KTTTI benchmark, while the opposite situation occurs in the larger dataset of Waymo. The improvement on Waymo clearly demonstrates the robustness of our method in the large-scale dataset. Also, HVTrack surpasses other SOTA methods on all categories of 'Hard' difficulty, revealing our excellent ability to handle sparse cases. The experimental results show that our method can generally solve the problem of 3D SOT under various levels of point cloud variations, and achieve outstanding performance.", + "bbox": [ + 212, + 492, + 787, + 705 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.2 Analysis Experiments", + "text_level": 1, + "bbox": [ + 215, + 724, + 441, + 739 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In this section, we extensively analyze HVTrack via a series of experiments. All the experiments are conducted on KITTI-HV with 5 frame intervals unless otherwise stated.", + "bbox": [ + 212, + 750, + 782, + 792 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Ablation Study. We conduct experiments to analyze the effectiveness of different modules in HVTrack. As shown in Tab. 4, we respectively ablate OM, BEA, and CPA from HVTrack. We only ablate OM in RPM because LM and MM", + "bbox": [ + 212, + 794, + 784, + 839 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Q. Wu et al.", + "bbox": [ + 271, + 114, + 354, + 128 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/52bb8e7385d65b382c934da9bd9ee66461f0bf42328c5f158edad885b8e3a7cc.jpg", + "table_caption": [ + "Table 4: Ablation analysis of HVTrack." + ], + "table_footnote": [], + "table_body": "
OMBEACPACarPedestrianVanCyclistMean
60.0/69.033.9/50.028.4/32.254.2/67.145.8/57.5
60.3/69.435.0/50.226.7/30.743.9/61.546.0/57.5
58.2/66.934.7/49.828.1/33.547.7/63.945.1/56.5
60.3/68.935.1/52.128.7/32.458.2/71.746.6/58.5
", + "bbox": [ + 316, + 170, + 686, + 241 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/5d03b1bff40f46684a7e6e107572079fe47b658f0387601eb4a910735ae9748d.jpg", + "table_caption": [ + "Table 5: Ablation experiment of BEA. 'Base'/'Expansion' denotes only using the base/expansion branch in BEA." + ], + "table_footnote": [], + "table_body": "
CategoryCarPedestrianVanCyclistMean
Base60.3/69.435.0/50.226.7/30.743.9/61.546.0/57.5
Expansion60.0/68.634.7/50.531.4/36.854.5/67.546.4/57.9
", + "bbox": [ + 330, + 297, + 671, + 349 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "serve as the template and are the indivisible parts of HVTrack. BEA and CPA are replaced by vanilla cross-attention and self-attention. In general, all components have been proven to be effective; removing an arbitrary module degrades the 'mean' performance.", + "bbox": [ + 212, + 381, + 784, + 441 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Analysis Experiment of BEA. The performance slightly drops on the 'Car' when we apply BEA on HVTrack as shown in Tab. 4. We conjecture this to be caused by the side effect of aggregating larger scale features in BEA, which will involve more background noise at each point. Further, 'Car' has a medium size and does not have the distraction of crowded similar objects like small objects ('Pedestrian' and 'Cyclist'), nor does it require a larger receptive field like large objects ('Van'). To verify this issue, we further analyze each branch of BEA as shown in Tab. 5. 'Pedestrian', 'Van', and 'Cyclist' benefit from the expansion branch and achieve a better performance compared to using only the base branch in BEA. On the other hand, the performance in the 'Car' category has the opposite behavior to the other categories. The experimental results validate our hypothesis that BEA is beneficial to small and large objects, while negatively affecting medium-sized objects.", + "bbox": [ + 212, + 444, + 787, + 641 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Analysis Experiment of CPA. Our method yields better results on 'Van' after we remove CPA as shown in Tab. 4, which reveals the relation between CPA and the large object tracking challenge. We believe that this is caused by the suppressing strategy in CPA. Large objects usually have more points, and under the same probability of misclassification of importance, they will have more foreground points assigned as low importance in the attention map, resulting in a part of useful information being suppressed in CPA. As shown in Fig. 5b, the importance conflict in the object leads to tracking failure. That part of the information will be further suppressed when stacking multiple transformer layers. However, the performance drops in other categories, without CPA to suppress the background noise for medium and small objects. As shown in Fig. 5a, most of the background points are assigned with low importance and suppressed in the success case, which proves our idea of CPA.", + "bbox": [ + 212, + 643, + 787, + 840 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "3D SOT in Point Clouds with High Temporal Variation", + "bbox": [ + 359, + 114, + 730, + 128 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 784, + 126 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/f392ef54e3800668ca3216913b5128bcabd2bf09928622bc88a2f19690d53e23.jpg", + "image_caption": [ + "Fig. 5: The attention maps of 'Van' in CPA." 
+ ], + "image_footnote": [], + "bbox": [ + 313, + 146, + 509, + 228 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/a4c7ba14639332f576747313a68f34f186a9f622a44f1775cfb2fb46fa6bf851.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 146, + 663, + 229 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/6e12dc51e1aefee041843a0adbe73eafe29db5f056fd2aad348590f15ab93a9b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 145, + 683, + 229 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/cc47e9e88f0ca5e74568cf84967b47c44992a7b3b980ab006a56a5b5abbb5e4b.jpg", + "table_caption": [ + "Table 6: Results of HVTrack when using different memory sizes. We train HVTrack with a memory size of 2, and evaluate it with memory sizes ranging from 1 to 8." + ], + "table_footnote": [], + "table_body": "
Memory SizeCarPedestrianVanCyclistMean
158.3/66.530.9/46.226.8/29.857.1/70.543.6/54.6
258.6/67.031.7/47.927.1/30.657.6/70.944.1/55.6
359.2/67.633.8/49.927.7/3155.8/67.745.3/56.7
460.0/68.533.7/50.629.5/33.657.9/71.345.9/57.7
560.0/68.533.8/51.228.7/32.657.8/70.845.8/57.9
660.3/68.935.1/52.128.7/32.458.2/71.746.6/58.5
759.7/68.235.6/52.928.0/31.558.1/71.446.4/58.4
859.8/68.335.1/52.428.2/32.058.1/71.446.3/58.3
", + "bbox": [ + 316, + 327, + 687, + 441 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Memory Size. Intuitively, trackers will achieve better performance when leveraging more temporal information. However, the performance of the trackers cannot continuously improve with the accumulation of historical information, due to inaccuracies in the historical tracklets. As shown in Tab. 6, we train HVTrack with a memory size of 2 due to the GPU memory limitation, and evaluate it with memory sizes from 1 to 8. The performance peaks for a memory size of 6, which is consistent with our assumption. Thus, we set 6 as our memory size and achieve a tracking speed of 31 FPS.", + "bbox": [ + 212, + 472, + 787, + 592 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 215, + 619, + 359, + 636 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this paper, we have explored a new task in 3D SOT, and presented the first 3D SOT framework for high temporal variation scenarios, HVTrack. Its three main components, RPM, BEA, and CPA, allow HVTrack to achieve robustness to point cloud variations, similar object distractions, and background noise. Our experiments have demonstrated that HVTrack significantly outperforms the state of the art in high temporal variation scenarios, and achieves remarkable performance in regular tracking.", + "bbox": [ + 212, + 657, + 787, + 763 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "**Limitation.** Our CPA relies on fixed manual hyperparameters to suppress noise. This makes it difficult to balance the performance in different object and search area sizes, leading to a performance drop in tracking large objects. In the future, we will therefore explore the use of a learnable function to replace the manual hyperparameters in CPA and overcome the large object tracking challenge.", + "bbox": [ + 212, + 763, + 787, + 839 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Q. Wu et al.", + "bbox": [ + 271, + 114, + 354, + 128 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 217, + 143, + 401, + 162 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This work is supported in part by the National Natural Science Foundation of China (NFSC) under Grants 62372377 and 62176242.", + "bbox": [ + 215, + 180, + 787, + 210 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 217, + 239, + 321, + 253 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Chen, X., Shi, S., Zhang, C., Zhu, B., Wang, Q., Cheung, K.C., See, S., Li, H.: Trajectoryformer: 3d object tracking transformer with predictive trajectory hypotheses. arXiv preprint arXiv:2306.05888 (2023)", + "2. Cheng, R., Wang, X., Sohel, F., Lei, H.: Topology-aware universal adversarial attack on 3d object tracking. Visual Intelligence 1(1), 31 (2023)", + "3. Chiu, H.k., Prioletti, A., Li, J., Bohg, J.: Probabilistic 3d multi-object tracking for autonomous driving. arXiv preprint arXiv:2001.05673 (2020)", + "4. Chung, J., Gulcehre, C., Cho, K., Bengio, Y.: Empirical evaluation of gated recurrent neural networks on sequence modeling. arXiv preprint arXiv:1412.3555 (2014)", + "5. Cui, Y., Fang, Z., Shan, J., Gu, Z., Zhou, S.: 3d object tracking with transformer. 
arXiv preprint arXiv:2110.14921 (2021)", + "6. Ding, S., Rehder, E., Schneider, L., Cordts, M., Gall, J.: 3dmotformer: Graph transformer for online 3d multi-object tracking. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9784-9794 (2023)", + "7. Fang, Z., Zhou, S., Cui, Y., Scherer, S.: 3d-siamrpn: An end-to-end learning method for real-time 3d single object tracking using raw point cloud. IEEE Sensors Journal 21(4), 4995-5011 (2020)", + "8. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? the KITTI vision benchmark suite. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 3354-3361 (2012)", + "9. Giancola, S., Zarzar, J., Ghanem, B.: Leveraging shape completion for 3d siamese tracking. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1359-1368 (2019)", + "10. Guo, Z., Mao, Y., Zhou, W., Wang, M., Li, H.: Cmt: Context-matching-guided transformer for 3d tracking in point clouds. In: European Conference on Computer Vision. pp. 95-111. Springer (2022)", + "11. Hui, L., Wang, L., Cheng, M., Xie, J., Yang, J.: 3d siamese voxel-to-bev tracker for sparse point clouds. Advances in Neural Information Processing Systems 34, 28714-28727 (2021)", + "12. Hui, L., Wang, L., Tang, L., Lan, K., Xie, J., Yang, J.: 3d siamese transformer network for single object tracking on point clouds. arXiv preprint arXiv:2207.11995 (2022)", + "13. Jiao, L., Wang, D., Bai, Y., Chen, P., Liu, F.: Deep learning in visual tracking: A review. IEEE transactions on neural networks and learning systems (2021)", + "14. Jiayao, S., Zhou, S., Cui, Y., Fang, Z.: Real-time 3d single object tracking with transformer. IEEE Transactions on Multimedia (2022)", + "15. Kapania, S., Saini, D., Goyal, S., Thakur, N., Jain, R., Nagrath, P.: Multi object tracking with uavs using deep sort and yolov3 retina detection framework. In: Proceedings of the 1st ACM Workshop on Autonomous and Intelligent Mobile Systems. pp. 1-6 (2020)" + ], + "bbox": [ + 225, + 273, + 784, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "3D SOT in Point Clouds with High Temporal Variation", + "bbox": [ + 359, + 114, + 730, + 128 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "16. Kart, U., Lukezic, A., Kristan, M., Kamarainen, J.K., Matas, J.: Object tracking by reconstruction with view-specific discriminative correlation filters. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 1339-1348 (2019)", + "17. Lan, K., Jiang, H., Xie, J.: Temporal-aware siamese tracker: Integrate temporal context for 3d object tracking. In: Proceedings of the Asian Conference on Computer Vision. pp. 399-414 (2022)", + "18. Luo, C., Yang, X., Yuille, A.: Exploring simple 3d multi-object tracking for autonomous driving. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10488-10497 (2021)", + "19. Machida, E., Cao, M., Murao, T., Hashimoto, H.: Human motion tracking of mobile robot with Kinect 3d sensor. In: Proceedings of SICE Annual Conference (SICE). pp. 2207-2211. IEEE (2012)", + "20. Nishimura, H., Komorita, S., Kawanishi, Y., Murase, H.: Sdof-tracker: Fast and accurate multiple human tracking by skipped-detection and optical-flow. IEICE TRANSACTIONS on Information and Systems 105(11), 1938-1946 (2022)", + "21. 
Pang, Z., Li, Z., Wang, N.: Model-free vehicle tracking and state estimation in point cloud sequences. In: 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 8075-8082. IEEE (2021)", + "22. Qi, C.R., Litany, O., He, K., Guibas, L.J.: Deep hough voting for 3d object detection in point clouds. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9277-9286 (2019)", + "23. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems 30 (2017)", + "24. Qi, H., Feng, C., Cao, Z., Zhao, F., Xiao, Y.: P2b: Point-to-box network for 3d object tracking in point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 6329-6338 (2020)", + "25. Ren, C., Xu, Q., Zhang, S., Yang, J.: Hierarchical prior mining for non-local multiview stereo. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 3611-3620 (2023)", + "26. Ren, S., Yang, X., Liu, S., Wang, X.: Sg-former: Self-guided transformer with evolving token reallocation. arXiv preprint arXiv:2308.12216 (2023)", + "27. Sadjadpour, T., Li, J., Ambrus, R., Bohg, J.: Shasta: Modeling shape and spatiotemporal affinities for 3d multi-object tracking. IEEE Robotics and Automation Letters (2023)", + "28. Shan, J., Zhou, S., Fang, Z., Cui, Y.: Ptt: Point-track-transformer module for 3d single object tracking in point clouds. In: Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 1310-1316 (2021)", + "29. Srivastava, N., Hinton, G., Krizhevsky, A., Sutskever, I., Salakhutdinov, R.: Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research 15(1), 1929-1958 (2014)", + "30. Sun, P., Kretzschmar, H., Dotiwalla, X., Chouard, A., Patnaik, V., Tsui, P., Guo, J., Zhou, Y., Chai, Y., Caine, B., et al.: Scalability in perception for autonomous driving: Waymo open dataset. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 2446-2454 (2020)", + "31. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. Advances in neural information processing systems 30 (2017)", + "32. Wang, Q., Chen, Y., Pang, Z., Wang, N., Zhang, Z.: Immortal tracker: Tracklet never dies. arXiv preprint arXiv:2111.13672 (2021)" + ], + "bbox": [ + 215, + 146, + 785, + 840 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Q. Wu et al.", + "bbox": [ + 271, + 114, + 354, + 128 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "33. Wang, Y., Sun, Y., Liu, Z., Sarma, S.E., Bronstein, M.M., Solomon, J.M.: Dynamic graph cnn for learning on point clouds. ACM Transactions on Graphics (tog) 38(5), 1-12 (2019)", + "34. Wang, Z., Xie, Q., Lai, Y.K., Wu, J., Long, K., Wang, J.: Mlvsnet: Multi-level voting siamese network for 3d visual tracking. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 3101-3110 (2021)", + "35. Weng, X., Wang, J., Held, D., Kitani, K.: 3d multi-object tracking: A baseline and new evaluation metrics. In: 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 10359-10366. IEEE (2020)", + "36. 
Weng, X., Wang, Y., Man, Y., Kitani, K.M.: Gnn3dmot: Graph neural network for 3d multi-object tracking with 2d-3d multi-feature learning. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 6499-6508 (2020)", + "37. Wu, Q., Yang, J., Sun, K., Zhang, C., Zhang, Y., Salzmann, M.: Mixcycle: Mixup assisted semi-supervised 3d single object tracking with cycle consistency. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). pp. 13956-13966 (2023)", + "38. Wu, Y., Lim, J., Yang, M.H.: Online object tracking: A benchmark. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 2411-2418 (2013)", + "39. Xu, T.X., Guo, Y.C., Lai, Y.K., Zhang, S.H.: Cxtrack: Improving 3d point cloud tracking with contextual information. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 1084-1093 (2023)", + "40. Yin, T., Zhou, X., Krahenbuhl, P.: Center-based 3d object detection and tracking. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 11784-11793 (2021)", + "41. Yoo, J.S., Lee, H., Jung, S.W.: Video object segmentation-aware video frame interpolation. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 12322-12333 (2023)", + "42. Zarzar, J., Giancola, S., Ghanem, B.: Efficient bird eye view proposals for 3d siamese tracking. arXiv preprint arXiv:1903.10168 (2019)", + "43. Zhang, X., Yang, J., Zhang, S., Zhang, Y.: 3d registration with maximal cliques. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 17745-17754 (2023)", + "44. Zheng, C., Yan, X., Gao, J., Zhao, W., Zhang, W., Li, Z., Cui, S.: Box-aware feature enhancement for single object tracking on point clouds. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 13199-13208 (2021)", + "45. Zheng, C., Yan, X., Zhang, H., Wang, B., Cheng, S., Cui, S., Li, Z.: Beyond 3d siamese tracking: A motion-centric paradigm for 3d single object tracking in point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8111-8120 (2022)", + "46. Zhou, C., Luo, Z., Luo, Y., Liu, T., Pan, L., Cai, Z., Zhao, H., Lu, S.: Ptttr: Relational 3d point cloud object tracking with transformer. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8531-8540 (2022)", + "47. Zhou, X., Koltun, V., Krähenbuhl, P.: Tracking objects as points. In: European conference on computer vision. pp. 474-490. 
Springer (2020)" + ], + "bbox": [ + 215, + 146, + 785, + 811 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "3D SOT in Point Clouds with High Temporal Variation", + "bbox": [ + 359, + 114, + 730, + 128 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 16 + } +] \ No newline at end of file diff --git a/2024/3D Single-object Tracking in Point Clouds with High Temporal Variation/c2204d64-6706-4e48-94d5-09db9f8770f0_model.json b/2024/3D Single-object Tracking in Point Clouds with High Temporal Variation/c2204d64-6706-4e48-94d5-09db9f8770f0_model.json new file mode 100644 index 0000000000000000000000000000000000000000..e6d9184f334074c77e4cd59e284fa3283354c34b --- /dev/null +++ b/2024/3D Single-object Tracking in Point Clouds with High Temporal Variation/c2204d64-6706-4e48-94d5-09db9f8770f0_model.json @@ -0,0 +1,2280 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.232, + 0.141, + 0.773, + 0.187 + ], + "angle": 0, + "content": "3D Single-object Tracking in Point Clouds with High Temporal Variation" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.212, + 0.77, + 0.245 + ], + "angle": 0, + "content": "Qiao Wu\\(^{1}\\), Kun Sun\\(^{2}\\), Pei An\\(^{3}\\), Mathieu Salzmann\\(^{4}\\), Yanning Zhang\\(^{1}\\), and Jiaqi Yang\\(^{1\\star}\\)" + }, + { + "type": "text", + "bbox": [ + 0.363, + 0.255, + 0.639, + 0.269 + ], + "angle": 0, + "content": "1 Northwestern Polytechnical University" + }, + { + "type": "text", + "bbox": [ + 0.358, + 0.269, + 0.644, + 0.283 + ], + "angle": 0, + "content": "\\(^{2}\\) China University of Geosciences, Wuhan" + }, + { + "type": "text", + "bbox": [ + 0.332, + 0.283, + 0.67, + 0.297 + ], + "angle": 0, + "content": "3 HuaZhong University of Science and Technology" + }, + { + "type": "text", + "bbox": [ + 0.332, + 0.297, + 0.668, + 0.325 + ], + "angle": 0, + "content": "4 École Polytechnique Fédérale de Lausanne qiaowu@mail.nwu.edu.cn, jqyang@nwpu.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.261, + 0.361, + 0.744, + 0.584 + ], + "angle": 0, + "content": "Abstract. The high temporal variation of the point clouds is the key challenge of 3D single-object tracking (3D SOT). Existing approaches rely on the assumption that the shape variation of the point clouds and the motion of the objects across neighboring frames are smooth, failing to cope with high temporal variation data. In this paper, we present a novel framework for 3D SOT in point clouds with high temporal variation, called HVTrack. HVTrack proposes three novel components to tackle the challenges in the high temporal variation scenario: 1) A Relative-Pose-Aware Memory module to handle temporal point cloud shape variations; 2) a Base-Expansion Feature Cross-Attention module to deal with similar object distractions in expanded search areas; 3) a Contextual Point Guided Self-Attention module for suppressing heavy background noise. We construct a dataset with high temporal variation (KITTI-HV) by setting different frame intervals for sampling in the KITTI dataset. On the KITTI-HV with 5 frame intervals, our HVTrack surpasses the state-of-the-art tracker CXTracker by \\(11.3\\% / 15.7\\%\\) in Success/Precision." 
+ }, + { + "type": "text", + "bbox": [ + 0.261, + 0.596, + 0.741, + 0.625 + ], + "angle": 0, + "content": "Keywords: 3D single-object tracking \\(\\cdot\\) High temporal variation \\(\\cdot\\) Point cloud" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.65, + 0.377, + 0.667 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.681, + 0.788, + 0.818 + ], + "angle": 0, + "content": "3D single-object tracking (3D SOT) is pivotal for autonomous driving [2,40] and robotics [16, 19, 25, 43]. Given the target point cloud and 3D bounding box as template, the goal of 3D SOT is to regress the target 3D poses in the tracking point cloud sequence. Existing approaches [5,7,9-12,24,28,34,39,44-46] rely on the assumption that the point cloud variations and motion of the object across neighboring frames are relatively smooth. They crop out a small search area around the last proposal for tracking, thus dramatically reducing the complexity of the problem. The template and search area features are then typically correlated as shown in Fig. 1a, and used to regress the 3D bounding box." + }, + { + "type": "page_footnote", + "bbox": [ + 0.218, + 0.825, + 0.386, + 0.841 + ], + "angle": 0, + "content": "* Corresponding author." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.358, + 0.129 + ], + "angle": 0, + "content": "Q. Wu et al." + }, + { + "type": "image", + "bbox": [ + 0.217, + 0.145, + 0.787, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.242, + 0.788, + 0.34 + ], + "angle": 0, + "content": "Fig. 1: Feature correlation in 3D SOT. (a) Feature correlation in the smooth case (1 frame interval). Correlating the features is relatively trivial as the target undergoes only small shape variations, and the observation angles are consistent in the three frames. (b-c) Feature correlation in high temporal variation cases (10 frames interval). The pose relative to the camera changes rapidly. Correlating the features using historical information is highly challenging (b). We encode the historical observation angles \\(\\alpha\\) into the features to guide the variation of relative pose to the camera (c)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.369, + 0.788, + 0.46 + ], + "angle": 0, + "content": "In practice, these approaches are challenged by the presence of large point cloud variations due to the limited sensor temporal resolution and the moving speed of objects as shown in Fig. 1b. We refer to this significant variation in point cloud and object position between two frames as the high temporal variation (HV). The high temporal variation challenge is non-negligible in existing benchmarks, and exists in other scenarios not yet covered by them, such as:" + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.469, + 0.784, + 0.514 + ], + "angle": 0, + "content": "- Skipped-tracking, which can greatly reduce computational consumption in tracking and serve a wide range of other tasks such as detection [20] and segmentation [41]." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.515, + 0.785, + 0.543 + ], + "angle": 0, + "content": "- Tracking in edge devices, which is essential for deploying trackers on common devices with limited frame rate, resolution, computation, and power etc." 
+ }, + { + "type": "text", + "bbox": [ + 0.225, + 0.545, + 0.784, + 0.573 + ], + "angle": 0, + "content": "- Tracking in highly dynamic scenarios [15], which is common in life. For example, tracking in sports events, highway, and UAV scenarios." + }, + { + "type": "list", + "bbox": [ + 0.225, + 0.469, + 0.785, + 0.573 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.584, + 0.789, + 0.841 + ], + "angle": 0, + "content": "There are three challenges for 3D SOT in HV point clouds, and existing approaches are not sufficient to address these challenges. 1) Strong shape variations of the point clouds: Point cloud shape variations are usually caused by the occlusion and relative pose transformation between the object and the sensor. As illustrated in Fig. 1b, feature correlation in existing approaches fails because of the dramatic change in the density and distribution of points. 2) Distractions due to similar objects: When objects suffer from a significant motion, the search area needs to be enlarged to incorporate the target, thus introducing more distractions from similar objects. Most of the existing trackers focus on local scale features, which discards environmental spatial contextual information to handle distractions. 3) Heavy background noise: The expansion of the search area further reduces the proportion of target information in the scene. While aiming to find the high template-response features in the feature correlation stage, existing methods then neglect to suppress the noise interference and reduce the impact of noise features. We evaluate state-of-the-art (SOTA) trackers [24,39,44,45] in the high temporal variation scenario as shown in Fig. 2. Their performance drops dramatically as the temporal variation of scene point clouds enlarges." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.36, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D SOT in Point Clouds with High Temporal Variation" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "image", + "bbox": [ + 0.333, + 0.144, + 0.505, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.144, + 0.672, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.313, + 0.785, + 0.342 + ], + "angle": 0, + "content": "Fig. 2: Comparison of HVTrack with the SOTAs [24,39,44,45] on 'Car' from KITTI-HV (KITTI [8] with different frame intervals, see Sec. 4)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.373, + 0.788, + 0.691 + ], + "angle": 0, + "content": "To address the above challenges, we propose a novel framework for 3D SOT in point clouds with High temporal Variation, which we call HVTrack. Specifically, we propose three novel modules to address each of the three above-mentioned challenges. 1) A Relative-Pose-Aware Memory (RPM) module to handle the strong shape variations of the point clouds. Different from [17], we integrate the foreground masks and observation angles into the memory bank. Therefore, the model can implicitly learn the distribution variation of point clouds from the relative pose in time. The information arising from observation angles has been overlooked by all existing trackers. 2) A Base-Expansion Feature Cross-Attention (BEA) module to deal with the problem of similar object distractions occurring in large scenes. 
We synchronize the correlation of the hybrid scales features (base and expansion scales, Sec. 3.4) in the cross-attention, and efficiently utilize spatial contextual information. 3) A Contextual Point Guided Self-Attention (CPA) module to suppress the background noise introduced by the expanded search area. It aggregates the features of points into contextual points according to their importance. Less important points share fewer contextual points and vice versa, thus suppressing most of the background noise. BEA and CPA are inspired by the SGFormer [26], which utilizes hybrid scale significance maps to assign more tokens to salient regions of 2D images. Our experiments clearly demonstrate the remarkable performance of HVTrack in high temporal variation scenarios, as illustrated in Fig. 2. Our contributions can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.703, + 0.785, + 0.746 + ], + "angle": 0, + "content": "- For the first time, to the best of our knowledge, we explore the new 3D SOT task for high temporal variation scenarios, and propose a novel framework called HVTrack for the task." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.749, + 0.785, + 0.793 + ], + "angle": 0, + "content": "- We propose three novel modules, RPM, BEA, and CPA, to address three challenges for 3D SOT in HV point clouds: strong point cloud variations, similar object distractions, and heavy background noise." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.795, + 0.785, + 0.839 + ], + "angle": 0, + "content": "- HVTrack yields state-of-the-art results on KITTI-HV and Waymo, and ranks second on KITTI. Our experimental results demonstrate the robustness of HVTrack in both smooth and high temporal variation cases." + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.703, + 0.785, + 0.839 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.232, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.358, + 0.129 + ], + "angle": 0, + "content": "Q. Wu et al." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.145, + 0.39, + 0.161 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.181, + 0.479, + 0.198 + ], + "angle": 0, + "content": "2.1 3D Single-object Tracking" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.21, + 0.789, + 0.542 + ], + "angle": 0, + "content": "Most of the 3D SOT approaches are based on a Siamese framework, because the appearance variations of the target between neighboring frames are not significant. The work of Giancola et al. [9] constitutes the pioneering method in 3D SOT. However, it only solved the discriminative feature learning problem, and used a time-consuming and inaccurate heuristic matching to locate the target. Zarzar et al. [42] utilized a 2D RPN in bird's eyes view to build an end-to-end tracker. The P2B network [24] employs VoteNet [22] as RPN and constructs the first point-based tracker. The following works [7, 11, 12, 28, 34, 44] develop different architectures of trackers based on P2B [24]. V2B [11] leverages the target completion model to generate the dense and complete targets and proposes a simple yet effective voxel-to-BEV target localization network. BAT [44] utilizes the relationship between points and the bounding box, integrating the box information into the point clouds. 
With the development of transformer networks, a number of works [5, 10, 12, 28, 39, 46] have proposed to exploit various attention mechanisms. STNet [12] forms an iterative coarse-to-fine cross-and self-attention to correlate the target and search area. CXTrack [39] employs a target-centric transformer to integrate targetness information and contextual information. TAT [17] leverages the temporal information to integrate target cues by applying an RNN-based [4] correlation module. Zheng et al. [45] presented a motion-centric method M2-Track, which is appearance matching-free and has made great progress in dealing with the sparse point cloud tracking problem. Wu et al. [37] proposed the first semi-supervised framework in 3D SOT." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.543, + 0.79, + 0.634 + ], + "angle": 0, + "content": "While effective in their context, the above methods are designed based on the assumption that the point cloud variation and motion of the objects across neighboring frames are not significant. In high temporal variation scenarios, this assumption will lead to performance degradation because of the point cloud variations and interference naturally occurring in large scenes. Here, we introduce HVTrack to tackle the challenges of 3D SOT in high temporal variation scenarios." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.66, + 0.475, + 0.677 + ], + "angle": 0, + "content": "2.2 3D Multi-object Tracking" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.689, + 0.789, + 0.841 + ], + "angle": 0, + "content": "3D multi-object tracking (MOT) in point clouds follows two main streams: Tracking-by-detection, and learning-based methods. Tracking-by-detection [1, 3, 32, 35] usually exploits methods such as Kalman filtering to correlate the detection results and track the targets. CenterTrack [47], CenterPoint [40], and SimTrack [18] replace the filter by leveraging deep networks to predict the velocity and motion of the objects. The learning-based methods [6, 27, 36] typically apply a Graph Neural Network to tackle the association challenge in MOT. GNN3DMOT [36] leverages both 2D images and 3D point clouds to obtain a robust association. 3DMOTFormer [6] constructs a graph transformer framework and achieves a good performance using only 3D point clouds." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.36, + 0.115, + 0.732, + 0.13 + ], + "angle": 0, + "content": "3D SOT in Point Clouds with High Temporal Variation" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.253 + ], + "angle": 0, + "content": "3D MOT and 3D SOT have different purposes and their own challenges [13]. 3D MOT is object-level and focuses on correlating detected objects, whereas 3D SOT is intra-object-level [14] and aims to track a single object given a template. 3D SOT methods usually come with much lower computational consumption and higher throughput [46]. Also, 3D MOT is free from the challenges posed by the dynamic change in the search area size, as MOT is not required to adopt the search area cropping strategy in SOT." 
+ }, + { + "type": "title", + "bbox": [ + 0.216, + 0.277, + 0.331, + 0.293 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.309, + 0.422, + 0.324 + ], + "angle": 0, + "content": "3.1 Problem Definition" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.335, + 0.789, + 0.471 + ], + "angle": 0, + "content": "Given the template of the target, the goal of 3D SOT is to continually locate the poses of the target in the search area point cloud sequence \\(\\mathbf{P}^s = \\{P_0^s,\\dots ,P_t^s,\\dots ,P_n^s |P_t^s\\in \\mathbb{R}^{N_s\\times 3}\\}\\). Usually, the target point cloud with labels in the first frame is regarded as the template. Former trackers [5,7,9-12,24,28, 34,39,44-46] leverage a 3D bounding box label \\(B_{0} = (x,y,z,w,l,h,\\theta)\\in \\mathbb{R}^{7}\\) to generate the template in the input. Here, \\((x,y,z)\\), \\((w,l,h)\\) and \\(\\theta\\) are the center location, bounding box size (width, length, and height), and rotation angle of the target, respectively. As objects can be assumed to be rigid, the trackers only need to regress the center and rotation angle of the target." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.494, + 0.341, + 0.508 + ], + "angle": 0, + "content": "3.2 Overview" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.52, + 0.789, + 0.763 + ], + "angle": 0, + "content": "We propose HVTrack to exploit both temporal and spatial information and achieve robust tracking in high temporal variation scenarios. As shown in Fig. 3, we take the point cloud \\( P_{t}^{s} \\) at time \\( t \\) as the search area, and leverage memory banks as the template. We first employ a backbone to extract the local spatial features \\( \\mathcal{X}_0 \\in \\mathbb{R}^{N \\times C} \\) of \\( P_{t}^{s} \\), with \\( N \\) and \\( C \\) the point number and feature channel, respectively. Then, \\( L \\) transformer layers are employed to extract spatio-temporal information. For each layer \\( l \\), (i) we capture the template information \\( Mem_{l} \\in \\mathbb{R}^{KN \\times C} \\) from the Relative-Pose-Aware Memory module, with \\( K \\) the memory bank size (Sec. 3.3); (ii) the memory features and search area features \\( \\mathcal{X}_{l-1} \\) are correlated in the Base-Expansion Features Cross-Attention (Sec. 3.4); (iii) the Contextual Point Guided Self-Attention (Sec. 3.5) leverages the attention map in the Base-Expansion Features Cross-Attention to suppress the noise features; (iv) we update the Layer Features memory bank using \\( \\mathcal{X}_{l-1} \\). After the transformer layers, an RPN is applied to regress the location \\( (x_{t}, y_{t}, z_{t}, \\theta_{t}) \\), the mask \\( \\mathcal{M}_{t} \\in \\mathbb{R}^{N \\times 1} \\), and the observation angle \\( \\alpha \\in \\mathbb{R}^2 \\). Finally, the mask and observation angle memory banks are updated using the predicted results." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.785, + 0.577, + 0.8 + ], + "angle": 0, + "content": "3.3 Relative-Pose-Aware Memory Module" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.787, + 0.842 + ], + "angle": 0, + "content": "As shown in Fig. 1(b), rapid changes in relative pose lead to large variations in the shape of the object point cloud across the frames. 
Correlating the object" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.358, + 0.129 + ], + "angle": 0, + "content": "Q. Wu et al." + }, + { + "type": "image", + "bbox": [ + 0.231, + 0.144, + 0.77, + 0.281 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.289, + 0.789, + 0.43 + ], + "angle": 0, + "content": "Fig. 3: HVTrack framework. We first utilize a backbone to extract the local embedding features of the search area. Then, we construct \\( L \\) transformer layers to fuse spatio-temporal information. For each transformer layer, (i) we apply three memory bank features in the Relative-Pose-Aware Memory module to generate temporal template information; (ii) we employ the Base-Expansion Feature Cross-Attention to correlate the template and search area by leveraging hybrid scale spatial context-aware features; (iii) we introduce a Contextual Point Guided Self-Attention to suppress unimportant noise. After each layer, we update the layer features memory bank using the layer input. Finally, we apply an RPN to regress the 3D bounding box, and update the mask and observation angle memory banks." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.462, + 0.789, + 0.538 + ], + "angle": 0, + "content": "features in \\((t - 2, t - 1, t)\\) then becomes difficult, as they have a low overlap with each other. To address this, we introduce the observation angle into the memory bank. The observation angle gives us knowledge of the coarse distribution of an object's point cloud. Thus, the model can learn the variations in point cloud distribution from the historical changes of observation angle." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.54, + 0.788, + 0.661 + ], + "angle": 0, + "content": "To exploit the temporal information as the template, we propose a Relative-Pose-Aware Memory (RPM) module. RPM contains 3 memory banks. 1) A layer features memory bank (LM) \\(\\in \\mathbb{R}^{L\\times K\\times N\\times C}\\): We leverage the historical transformer layer features as the template features to reduce the template inference time in former trackers [5,9-12,24,28,34,44,46]. 2) A mask memory bank (MM) \\(\\in \\mathbb{R}^{K\\times N\\times 1}\\): Inspired by the mask-based trackers [39,45], we utilize the mask as the foreground representation. 3) An observation angle memory bank (OM) \\(\\in \\mathbb{R}^{K\\times 2}\\). For each transformer layer \\(l\\), we process the memory features as" + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.676, + 0.787, + 0.693 + ], + "angle": 0, + "content": "\\[\nT _ {l} = \\operatorname {L i n e a r} ([ \\mathrm {L M} _ {1}, \\mathrm {M M}, \\operatorname {R e p e a t} (\\mathrm {O M}) ]), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.707, + 0.789, + 0.769 + ], + "angle": 0, + "content": "where \\( T_{l} \\in \\mathbb{R}^{KN \\times C} \\) denotes the template features, Linear(\\cdot) is a linear layer that projects the features from \\( \\mathbb{R}^{KN \\times (C + 3)} \\) to \\( \\mathbb{R}^{KN \\times C} \\), [.] is the concatenation operation, and Repeat(\\cdot) stacks the OM to \\( \\mathbb{R}^{K \\times N \\times 2} \\). 
Then, we project \\( T_{l} \\) into Query (Q), Key (K), and Value (V) using the learnable parameter matrices as" + }, + { + "type": "equation", + "bbox": [ + 0.386, + 0.782, + 0.618, + 0.802 + ], + "angle": 0, + "content": "\\[\nQ _ {l} ^ {T} = \\mathrm {L N} (\\mathrm {L N} (T _ {l}) W _ {l} ^ {T Q} + \\mathrm {P E} ^ {T}),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.386, + 0.804, + 0.785, + 0.823 + ], + "angle": 0, + "content": "\\[\nK _ {l} ^ {T} = \\operatorname {L N} \\left(T _ {l}\\right) W _ {l} ^ {T K}, \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.388, + 0.824, + 0.534, + 0.843 + ], + "angle": 0, + "content": "\\[\nV _ {l} ^ {T} = \\mathrm {L N} (T _ {l}) W _ {l} ^ {T V},\n\\]" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.36, + 0.115, + 0.733, + 0.13 + ], + "angle": 0, + "content": "3D SOT in Point Clouds with High Temporal Variation" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "image", + "bbox": [ + 0.217, + 0.145, + 0.481, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.319, + 0.317, + 0.374, + 0.329 + ], + "angle": 0, + "content": "(a) BEA." + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.145, + 0.784, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.63, + 0.317, + 0.685, + 0.328 + ], + "angle": 0, + "content": "(b) CPA." + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.34, + 0.788, + 0.493 + ], + "angle": 0, + "content": "Fig. 4: (a) Base-Expansion Feature Cross-Attention (BEA). The \\( H \\) heads in the multi-head attention (MHA) are split to process hybrid scale features. For the base scale branch, we directly put the local features into the MHA. For the expansion scale branch, we apply an EdgeConv [33] to expand the receptive field of each point and extract more abstract features before MHA. BEA captures the spatial context-aware information with a humble extra computational cost. (b) Contextual Point Guided Self-Attention (CPA). We determine the importance of each point by both base and expansion scale attention maps. Then, we aggregate all the points into \\( U \\) clusters (contextual points) according to their importance and project the clusters to \\( K \\) and \\( V \\). We assign fewer contextual points for low-importance points, and vice versa. CPA not only suppresses the noise but also reduces the computational cost of the attention." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.522, + 0.788, + 0.584 + ], + "angle": 0, + "content": "where \\(\\mathrm{LN}(\\cdot)\\) is the layer norm, and \\(\\mathrm{PE}^T \\in \\mathbb{R}^{KN \\times C}\\) is the positional embedding of the historical point cloud coordinates. We utilize a linear layer to project the point cloud coordinates to their positional embedding. Finally, a self-attention is applied for internal interactions between temporal information as" + }, + { + "type": "equation", + "bbox": [ + 0.335, + 0.594, + 0.785, + 0.613 + ], + "angle": 0, + "content": "\\[\nM e m _ {l} ^ {*} = T _ {l} + \\operatorname {D r o p o u t} \\left(\\mathrm {M H A} \\left(Q _ {l} ^ {T}, K _ {l} ^ {T}, V _ {l} ^ {T}\\right)\\right), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.623, + 0.788, + 0.67 + ], + "angle": 0, + "content": "where MHA is the multi-head attention in [31], and Dropout is the random dropping operation in [29]. 
Following CXTrack [39], we apply dropout and feedforward network (FFN) after self-attention, i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.328, + 0.681, + 0.785, + 0.699 + ], + "angle": 0, + "content": "\\[\nM e m _ {l} = M e m _ {l} ^ {*} + \\operatorname {D r o p o u t} (\\operatorname {F F N} (\\operatorname {L N} (M e m _ {l} ^ {*}))), \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.365, + 0.724, + 0.785, + 0.741 + ], + "angle": 0, + "content": "\\[\n\\operatorname {F F N} (x) = \\max \\left(0, x W _ {1} + b _ {1}\\right) W _ {2} + b _ {2}. \\tag {5}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.77, + 0.599, + 0.785 + ], + "angle": 0, + "content": "3.4 Base-Expansion Feature Cross-Attention" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Most of the existing trackers [11,24,28,34,39,44,46] employ a point based backbone [23,33] and focus on local region features, which we call base scale features. Using only base scale features in the whole pipeline is quite efficient and effective" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.357, + 0.129 + ], + "angle": 0, + "content": "Q. Wu et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.268 + ], + "angle": 0, + "content": "in small scenes. However, the base scale features are limited in representing the neighboring environment features around the object in large search areas. To tackle the challenge of similar object distractions, spatial context information across consecutive frames is crucial for effective object tracking [39]. Expanding the receptive field of features can help capture spatial contextual information, and such features are called expansion scale features. Inspired by [26], we propose Base-Expansion Feature Cross-Attention (BEA) to capture both local and more abstract features, and exploit spatial context-aware information." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.268, + 0.789, + 0.419 + ], + "angle": 0, + "content": "As shown in Fig. 4a, the input features \\(X_{l-1}\\) are projected into \\(\\mathbf{Q}\\). Usually, the memory features Meml would be projected into \\(\\mathbf{K}\\) and \\(\\mathbf{V}\\). Then, multi-head cross-attention adopts \\(H\\) independent heads, and processes them using the same base scale features. By contrast, we split the \\(H\\) heads into 2 groups. \\(H/2\\) heads exploit local spatial context information. We directly process the base scale features with normal cross-attention, and output base scale features \\(\\hat{X}_{l-1}^{base} \\in \\mathbb{R}^{N \\times C/2}\\) and attention map Attn\\(^{base} \\in \\mathbb{R}^{N \\times KN}\\). The other \\(H/2\\) heads capture environment context features. We first apply an EdgeConv [33] to extract more abstract features Meml\\(^{expan} \\in \\mathbb{R}^{KN/8 \\times C}\\), which are expansion scale features, i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.386, + 0.42, + 0.785, + 0.438 + ], + "angle": 0, + "content": "\\[\nM e m _ {l} ^ {e x p a n} = \\operatorname {E d g e C o n v} \\left(M e m _ {l}\\right). \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.442, + 0.788, + 0.489 + ], + "angle": 0, + "content": "Then, we project the expansion features into K and V, and perform multi-head cross-attention with Q. 
Specifically, for the \\(i\\)-th head belonging to the expansion scale branch, we generate Q, K, and V as" + }, + { + "type": "equation", + "bbox": [ + 0.384, + 0.497, + 0.785, + 0.555 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} Q _ {i} = L N (L N (X _ {l - 1}) W _ {i} ^ {Q} + \\mathrm {P E} _ {i} ^ {S}), \\\\ K _ {i} = L N \\left(M e m _ {l} ^ {e x p a n}\\right) W _ {i} ^ {K}, \\tag {7} \\\\ V _ {i} = L N (M e m _ {l} ^ {e x p a n}) W _ {i} ^ {V}, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.566, + 0.784, + 0.598 + ], + "angle": 0, + "content": "where \\(\\mathrm{PE}_i^S\\) is the positional embedding of search area point cloud coordinates. Then, cross-attention is performed as" + }, + { + "type": "equation", + "bbox": [ + 0.397, + 0.606, + 0.785, + 0.639 + ], + "angle": 0, + "content": "\\[\nA t t n _ {i} ^ {e x p a n} = \\operatorname {S o f t m a x} \\left(\\frac {Q _ {i} K _ {i}}{\\sqrt {d _ {h}}}\\right), \\tag {8}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.422, + 0.652, + 0.785, + 0.671 + ], + "angle": 0, + "content": "\\[\nh _ {i} ^ {e x p a n} = A t t n _ {i} ^ {e x p a n} V _ {i}, \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.674, + 0.788, + 0.72 + ], + "angle": 0, + "content": "where \\( d_h \\) is the feature dimension of the heads, and \\( h_i^{expan} \\) is the output features of the \\( i \\)-th head. After that, we concatenate the output features and attention map of each head as" + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.729, + 0.785, + 0.769 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\hat {X} _ {l - 1} ^ {\\text {e x p a n}} = \\left[ h _ {1}, \\dots , h _ {H / 2} \\right], \\\\ A t t n ^ {e x p a n} = \\left[ A t t n _ {1} ^ {e x p a n}, \\dots , A t t n _ {H / 2} ^ {e x p a n} \\right], \\tag {10} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.778, + 0.788, + 0.842 + ], + "angle": 0, + "content": "where \\(\\hat{X}_{l-1}^{expan} \\in \\mathbb{R}^{N \\times C/2}\\), and \\(Attn^{expan} \\in \\mathbb{R}^{N \\times KN/8}\\). Finally, we concatenate the base scale and expansion scale outputs as the resulting correlation feature \\(\\hat{X}_{l-1} \\in \\mathbb{R}^{N \\times C}\\). Thus, BEA provides rich hybrid scale spatial contextual information for each point, with a very humble extra computational cost." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.36, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D SOT in Point Clouds with High Temporal Variation" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.147, + 0.592, + 0.162 + ], + "angle": 0, + "content": "3.5 Contextual Point Guided Self-Attention" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.168, + 0.788, + 0.273 + ], + "angle": 0, + "content": "Most of the information in the search area will be regarded as noise, because we are only interested in one single object to be tracked. Existing trackers [11,24,28, 34,44] aim to find the features with high template-response in the search area, but neglect the suppress to the noise. Zhou et al. [46] proposed a Relation-Aware Sampling for preserving more template-relevant points in the search area before inputting it to the backbone. By contrast, we focus on suppressing the noise after feature correlation via a Contextual Point Guided Self-Attention (CPA)." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.274, + 0.788, + 0.304 + ], + "angle": 0, + "content": "As shown in Fig. 4b, we leverage the base and expansion scale attention maps to generate the importance map \\( I \\in \\mathbb{R}^{N \\times 1} \\) as" + }, + { + "type": "equation", + "bbox": [ + 0.357, + 0.311, + 0.786, + 0.327 + ], + "angle": 0, + "content": "\\[\nI = \\operatorname {M e a n} \\left(A t t n ^ {\\text {b a s e}}\\right) + \\operatorname {M e a n} \\left(A t t n ^ {\\text {e x p a n}}\\right). \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.334, + 0.789, + 0.53 + ], + "angle": 0, + "content": "The higher the importance of the point, the more spatial context-aware information related to the target it contains. We sort the points according to the magnitude of their importance values. Then, all the points will be separated into \\( G \\) groups according to their importance. For each group with points \\( P_{i}^{G} \\in \\mathbb{R}^{G_{i} \\times C} \\), we aggregate the points into \\( U_{i} \\) clusters, which we call contextual points. Specifically, we first reshape the points as \\( P_{i}^{G} \\in \\mathbb{R}^{U_{i} \\times C \\times G_{i} / U_{i}} \\). Second, a linear layer is employed to project the group to the contextual points \\( P_{i}^{U} \\in \\mathbb{R}^{U_{i} \\times C} \\). We assign fewer contextual points for the groups with lower importance, and suppress the noise feature expression. Finally, all the contextual points are concatenated and projected into Key \\( K^{U} \\in \\mathbb{R}^{U \\times C} \\) and Value \\( V^{U} \\in \\mathbb{R}^{U \\times C} \\). We project \\( \\hat{X}_{l-1} \\) to Q and perform a multi-head attention with \\( K^{U} \\) and \\( V^{U} \\), and an FFN is applied after attention. CPA shrinks the length of K and V, and leads to a computational cost decrease in self-attention." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.548, + 0.457, + 0.563 + ], + "angle": 0, + "content": "3.6 Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.569, + 0.788, + 0.631 + ], + "angle": 0, + "content": "Backbone & Loss Functions. Following CXTrack [39], we adopt DGCNN [33] as our backbone, and apply X-RPN [39] as the RPN of our framework. We add two Shared MLP layers to X-RPN for predicting the observation angles \\((\\alpha)\\) and the masks. Therefore, the overall loss is expressed as" + }, + { + "type": "equation", + "bbox": [ + 0.317, + 0.639, + 0.785, + 0.653 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\gamma_ {1} \\mathcal {L} _ {c c} + \\gamma_ {2} \\mathcal {L} _ {\\text {m a s k}} + \\gamma_ {3} \\mathcal {L} _ {\\text {a l p h a}} + \\gamma_ {4} \\mathcal {L} _ {\\text {r m}} + \\gamma_ {5} \\mathcal {L} _ {\\text {b o x}}, \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.659, + 0.788, + 0.735 + ], + "angle": 0, + "content": "where \\(\\mathcal{L}_{cc}\\), \\(\\mathcal{L}_{mask}\\), \\(\\mathcal{L}_{alpha}\\), \\(\\mathcal{L}_{box}\\), and \\(\\mathcal{L}_{box}\\) are the loss for the coarse center, foreground mask, observation angle, targetness mask, and bounding box, respectively. We apply the \\(L_{2}\\) loss for \\(\\mathcal{L}_{cc}\\), the standard cross entropy loss for \\(\\mathcal{L}_{mask}\\) and \\(\\mathcal{L}_{rm}\\), and the Huber loss for \\(\\mathcal{L}_{alpha}\\) and \\(\\mathcal{L}_{box}\\). \\(\\gamma_{1}, \\gamma_{2}, \\gamma_{3}, \\gamma_{4}\\), and \\(\\gamma_{5}\\) are empirically set as 10.0, 0.2, 1.0, 1.0, and 1.0." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.735, + 0.788, + 0.843 + ], + "angle": 0, + "content": "Training & Testing. We train our model on NVIDIA RTX-3090 GPUs with the Adam optimizer and an initial learning rate of 0.001. Due to GPU memory limitation, we construct point cloud sequences with 8 frames for training, and set \\( K = 2 \\) in training, and \\( K = 6 \\) in testing. Following existing methods [39,45], we set \\( N \\) and \\( C \\) to 128. We stack \\( L = 2 \\) transformer layers and apply \\( H = 4 \\) heads in BEA and CPA. We adopt \\( G = 3 \\) groups in CPA, and assign [32,64,32] points and \\( U = [4,32,16] \\) contextual points for the groups, respectively." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.356, + 0.128 + ], + "angle": 0, + "content": "Q. Wu et al." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.145, + 0.375, + 0.163 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.219, + 0.175, + 0.785, + 0.235 + ], + "angle": 0, + "content": "We leverage two famous 3D tracking benchmarks of KITTI [8] and Waymo [30] to evaluate the general performance of our approach in regular 3D SOT. In addition, we establish a new KITTI-HV dataset to test our performance in high temporal variation scenarios." + }, + { + "type": "text", + "bbox": [ + 0.219, + 0.236, + 0.785, + 0.341 + ], + "angle": 0, + "content": "Regular Datasets. The KITTI tracking dataset comprises 21 training sequences and 29 test sequences, encompassing eight object types. Following prior studies [9,24,34,39,44,45], we use the sequences 0-16 as training data, 17-18 for validation, and 19-20 for testing. The Waymo dataset is large-scale. We adopt the approach outlined in LiDAR-SOT [21] to utilize 1121 tracklets, which are subsequently categorized into easy, medium, and hard subsets based on the number of points in the first frame of each tracklet." + }, + { + "type": "text", + "bbox": [ + 0.219, + 0.342, + 0.785, + 0.537 + ], + "angle": 0, + "content": "HV Dataset. We build a dataset with high temporal variation for 3D SOT based on KITTI, called KITTI-HV. Although high temporal variation scenarios are present in the existing benchmarks, there is no exact threshold to determine whether the scenario is a high temporal variation scenario or not. Large point cloud variations and significant object motions are two major challenges in high temporal variation scenarios. Sampling at frame intervals is a good way to simulate these two challenges. Also, the constructed KITTI-HV can provide a preliminary platform for exploring tracking in scenarios such as skipped-tracking, edge devices, and high dynamics. For a fairer comparison with existing methods, we set the frame interval to 2, 3, 5, and 10. We set up more dense testings at low frame intervals to exploit the performance of the existing methods in point cloud variations close to smooth scenarios. We train and test all methods from scratch individually on each frame interval." + }, + { + "type": "text", + "bbox": [ + 0.219, + 0.538, + 0.785, + 0.628 + ], + "angle": 0, + "content": "Evaluation Metrics. We employ One Pass Evaluation [38] to evaluate the different methods in terms of Success and Precision. Success is determined by measuring the Intersection Over Union between the proposed bounding box and the ground-truth (GT) bounding box. 
Precision is evaluated by computing the Area Under the Curve of the distance error between the centers of the two bounding boxes, ranging from 0 to 2 meters." + }, + { + "type": "title", + "bbox": [ + 0.219, + 0.65, + 0.573, + 0.666 + ], + "angle": 0, + "content": "4.1 Comparison with the State of the Art" + }, + { + "type": "text", + "bbox": [ + 0.219, + 0.673, + 0.785, + 0.84 + ], + "angle": 0, + "content": "Results on HV tracking. We evaluate our HVTrack in 4 categories ('Car', 'Pedestrian', 'Van', and 'Cyclist') following existing methods [24, 39, 44, 45] in the KITTI-HV dataset. The methods we choose to compare with HVTrack are the most representative SOT methods from 2020 to 2023 (the most-cited methods published in each year, according to Google Scholar). As illustrated in Tab. 1, our approach consistently outperforms the state-of-the-art methods [24, 39, 44, 45] across all frame intervals, confirming the effectiveness of the proposed tracking framework for high temporal variation scenarios. Notably, the performance gap between our HVTrack and existing trackers widens as the variations are exacerbated. In the particularly challenging scenario of 10 frame intervals, we achieve a substantial \\(9.1\\%\\) ↑ improvement in success and a remarkable \\(10.4\\%\\) ↑ enhancement" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.361, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D SOT in Point Clouds with High Temporal Variation" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "table_caption", + "bbox": [ + 0.217, + 0.145, + 0.788, + 0.228 + ], + "angle": 0, + "content": "Table 1: Comparison of HVTrack with the state-of-the-art methods on each category of the KITTI-HV dataset. We construct the HV dataset KITTI-HV for training and testing by setting different frame intervals for sampling in the KITTI dataset. Bold and underline denote the best and second-best performance, respectively. Success/Precision are used for evaluation. Improvement and deterioration are shown in green and red, respectively." + }, + { + "type": "table", + "bbox": [ + 0.22, + 0.242, + 0.788, + 0.442 + ], + "angle": 0, + "content": "
Frame Intervals2 Intervals3 Intervals
Category Frame NumberCar 6424Pedestrian 6088Van 1248Cyclist 308Mean 14068Car 6424Pedestrian 6088Van 1248Cyclist 308Mean 14068
P2B [24]56.3/71.030.8/53.033.4/38.441.8/61.442.9/60.143.4/51.827.9/46.827.9/31.844.8/64.435.4/48.1
BAT [44]61.8/74.236.5/61.126.8/30.454.1/78.747.6/64.751.7/61.931.8/53.524.0/28.250.5/72.640.6/55.5
M2-Track [45]63.0/76.654.6/81.752.8/66.568.3/89.358.6/78.262.1/72.751.8/74.333.6/41.664.7/82.055.1/70.8
CXTrack [39]61.4/70.962.6/86.356.0/69.159.2/76.961.4/77.547.4/53.157.9/79.348.5/58.840.7/58.451.9/65.1
HVTrack Improvement67.1/77.560.0/84.050.6/61.773.9/93.662.7/79.366.8/76.551.1/71.938.7/46.966.5/89.757.5/72.2
4.1↑/0.9↑2.6↓/2.3↓6.0↓/7.4↓5.6↑/4.3↑1.3↑/1.1↑4.7↑/3.8↑6.8↓/7.4↓9.8↓/11.9↓1.8↑/7.7↑2.4↑/1.4↑
Frame Intervals5 Intervals10 Intervals
Category Frame NumberCar 6424Pedestrian 6088Van 1248Cyclist 308Mean 14068Car 6424Pedestrian 6088Van 1248Cyclist 308Mean 14068
P2B [24]39.3/46.127.4/43.527.2/30.435.0/44.433.0/43.528.6/29.223.1/31.125.9/27.329.1/28.326.0/29.8
BAT [44]44.1/51.121.1/32.826.1/29.535.7/46.332.4/41.130.6/33.121.7/29.220.8/20.729.3/29.125.9/30.2
M2-Track [45]50.9/58.631.6/45.430.0/36.547.4/61.040.6/51.033.0/35.117.5/24.120.7/20.827.7/26.625.0/28.9
CXTrack [39]38.6/42.235.0/47.821.6/24.325.7/33.335.3/42.830.2/32.418.2/21.417.5/17.927.7/26.523.8/26.2
HVTrack Improvement60.3/68.935.1/52.128.7/32.458.2/71.746.6/58.549.4/54.722.5/29.122.2/23.439.5/45.435.1/40.6
9.4↑/10.3↑0.1↑/4.3↑1.3↓/4.1↓10.8↑/10.7↑6.0↑/7.5↑16.4↑/19.6↑0.6↓/0.1↓3.7↓/3.9↓10.2↑/16.3↑9.1↑/10.4↑
" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.476, + 0.788, + 0.702 + ], + "angle": 0, + "content": "in precision. This showcases the robustness of our method in accommodating various levels of point cloud variation. Our method delivers outstanding performance on 'Car' and 'Cyclist', in which we gain a great improvement in 5 frame intervals (9.4%↑/10.3%↑ for 'Car' and 10.8%↑/10.7%↑ for 'Cyclist') and 10 frame intervals (16.4%↑/19.6%↑ for 'Car' and 10.2%↑/16.3%↑ for 'Cyclist'). However, the challenge of tracking large objects persists in high temporal variation cases for our method. Note that the performance of CXTrack drops dramatically after 3 frame intervals. In particular, in the medium variation case of 5 frame intervals, we achieve 11.3%↑/15.7%↑ improvement in overall success/precision compared to CXTrack, despite the fact that our HVTrack shares the same backbone and RPN with CXTrack [39]. Furthermore, HVTrack surpasses CXTrack on 'Car' and 'Cyclist' by a very large margin (21.7%↑/26.7%↑ for 'Car' and 32.5%↑/38.4%↑ for 'Cyclist'). The distinct performance gap between HVTrack and CXTrack in HV tracking showcases the effectiveness of our feature correlation module design." + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.705, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Results on regular tracking. For the KITTI dataset, we compare HVTrack with 12 top-performing trackers [7,9,11,12,17,24,28,34,39,44-46]. As shown in Tab. 2, our overall performance is close to the SOTA tracker CXTrack [39], and achieves the second best result on the average in success (2.0%↓ w.r.t. CXTrack). Note that HVTrack outperforms TAT [17] on average (0.8%↑/0.3%↑), which utilizes temporal information by concatenating historical template features. This demonstrates our better design for leveraging the spatio-temporal context information. However, the performance of HVTrack drops when dealing with large objects ('Van'). We conjecture this performance drop to be caused by CPA," + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.356, + 0.129 + ], + "angle": 0, + "content": "Q. Wu et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.785, + 0.172 + ], + "angle": 0, + "content": "Table 2: Comparison of HVTrack with the SOTA methods on each category of the KITTI dataset." + }, + { + "type": "table", + "bbox": [ + 0.348, + 0.185, + 0.66, + 0.335 + ], + "angle": 0, + "content": "
Category Frame NumberCar 6424Pedestrian 6088Van 1248Cyclist 308Mean 14068
SC3D [9]41.3/57.918.2/37.840.4/47.041.5/70.431.2/48.5
P2B [24]56.2/72.828.7/49.640.8/48.432.1/44.742.4/60.0
3DSiamRPN [7]58.2/76.235.2/56.245.7/52.936.2/49.046.7/64.9
MLVSNet [34]56.0/74.034.1/61.152.0/61.434.3/44.545.7/66.7
BAT [44]60.5/77.742.1/70.152.4/67.033.7/45.451.2/72.8
PTT [28]67.8/81.844.9/72.043.6/52.537.2/47.355.1/74.2
V2B [11]70.5/81.348.3/73.550.1/58.040.8/49.758.4/75.2
PTTR [46]65.2/77.450.9/81.652.5/61.865.1/90.557.9/78.1
STNet [12]72.1/84.049.9/77.258.0/70.673.5/93.761.3/80.1
TAT [17]72.2/83.357.4/84.458.9/69.274.2/93.964.7/82.8
M2-Track [45]65.5/80.861.5/88.253.8/70.773.2/93.562.9/83.4
CXTrack [39]69.1/81.667.0/91.560.0/71.874.2/94.367.5/85.3
HVTrack68.2/79.264.6/90.654.8/63.872.4/93.765.5/83.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.223, + 0.349, + 0.779, + 0.364 + ], + "angle": 0, + "content": "Table 3: Comparison of HVTrack with the SOTA methods on the Waymo dataset." + }, + { + "type": "table", + "bbox": [ + 0.232, + 0.376, + 0.77, + 0.467 + ], + "angle": 0, + "content": "
MethodVehicle (185632)Pedestrian (241168)Mean
EasyMediumHardMeanEasyMediumHardMeanMean (426800)
P2B [24]57.1/65.452.0/60.747.9/58.552.6/61.718.1/30.817.8/30.017.7/29.317.9/30.133.0/43.8
BAT [44]61.0/68.353.3/60.948.9/57.854.7/62.719.3/32.617.8/29.817.2/28.318.2/30.334.1/44.4
V2B [11]64.5/71.555.1/63.252.0/62.057.6/65.927.9/43.922.5/36.220.1/33.123.7/37.938.4/50.1
STNet [12]65.9/72.757.5/66.054.6/64.759.7/68.029.2/45.324.7/38.222.2/35.825.5/39.940.4/52.1
CXTrack [39]63.9/71.154.2/62.752.1/63.757.1/66.135.4/55.329.7/47.926.3/44.430.7/49.442.2/56.7
HVTrack(Ours)66.2/75.257.0/66.055.3/67.159.8/69.734.2/53.528.7/47.926.7/45.230.0/49.143.0/58.1
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.493, + 0.788, + 0.706 + ], + "angle": 0, + "content": "which will be further explored in Sec. 4.2. For the Waymo dataset, following the benchmark setting in LiDAR-SOT [21] and STNet [12], we test our HVTrack in 2 categories ('Vehicle', 'Pedestrian') with 3 difficulty levels. All the methods are pre-trained on KITTI. The results of P2B [24], BAT [44], and V2B [11] on Waymo are provided by STNet [12]. As shown in Tab. 3, our method achieves the best performance in success \\((0.8\\% \\uparrow)\\) and precision \\((1.4\\% \\uparrow)\\). Notably, HVTrack does not surpass CXTrack and reach SOTA on the KTTTI benchmark, while the opposite situation occurs in the larger dataset of Waymo. The improvement on Waymo clearly demonstrates the robustness of our method in the large-scale dataset. Also, HVTrack surpasses other SOTA methods on all categories of 'Hard' difficulty, revealing our excellent ability to handle sparse cases. The experimental results show that our method can generally solve the problem of 3D SOT under various levels of point cloud variations, and achieve outstanding performance." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.726, + 0.442, + 0.741 + ], + "angle": 0, + "content": "4.2 Analysis Experiments" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.784, + 0.794 + ], + "angle": 0, + "content": "In this section, we extensively analyze HVTrack via a series of experiments. All the experiments are conducted on KITTI-HV with 5 frame intervals unless otherwise stated." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.785, + 0.84 + ], + "angle": 0, + "content": "Ablation Study. We conduct experiments to analyze the effectiveness of different modules in HVTrack. As shown in Tab. 4, we respectively ablate OM, BEA, and CPA from HVTrack. We only ablate OM in RPM because LM and MM" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.36, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D SOT in Point Clouds with High Temporal Variation" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "table_caption", + "bbox": [ + 0.365, + 0.145, + 0.637, + 0.159 + ], + "angle": 0, + "content": "Table 4: Ablation analysis of HVTrack." + }, + { + "type": "table", + "bbox": [ + 0.318, + 0.171, + 0.687, + 0.242 + ], + "angle": 0, + "content": "
OMBEACPACarPedestrianVanCyclistMean
60.0/69.033.9/50.028.4/32.254.2/67.145.8/57.5
60.3/69.435.0/50.226.7/30.743.9/61.546.0/57.5
58.2/66.934.7/49.828.1/33.547.7/63.945.1/56.5
60.3/68.935.1/52.128.7/32.458.2/71.746.6/58.5
" + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.258, + 0.785, + 0.286 + ], + "angle": 0, + "content": "Table 5: Ablation experiment of BEA. 'Base'/'Expansion' denotes only using the base/expansion branch in BEA." + }, + { + "type": "table", + "bbox": [ + 0.331, + 0.299, + 0.673, + 0.351 + ], + "angle": 0, + "content": "
CategoryCarPedestrianVanCyclistMean
Base60.3/69.435.0/50.226.7/30.743.9/61.546.0/57.5
Expansion60.0/68.634.7/50.531.4/36.854.5/67.546.4/57.9
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.382, + 0.785, + 0.442 + ], + "angle": 0, + "content": "serve as the template and are the indivisible parts of HVTrack. BEA and CPA are replaced by vanilla cross-attention and self-attention. In general, all components have been proven to be effective; removing an arbitrary module degrades the 'mean' performance." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.445, + 0.788, + 0.642 + ], + "angle": 0, + "content": "Analysis Experiment of BEA. The performance slightly drops on the 'Car' when we apply BEA on HVTrack as shown in Tab. 4. We conjecture this to be caused by the side effect of aggregating larger scale features in BEA, which will involve more background noise at each point. Further, 'Car' has a medium size and does not have the distraction of crowded similar objects like small objects ('Pedestrian' and 'Cyclist'), nor does it require a larger receptive field like large objects ('Van'). To verify this issue, we further analyze each branch of BEA as shown in Tab. 5. 'Pedestrian', 'Van', and 'Cyclist' benefit from the expansion branch and achieve a better performance compared to using only the base branch in BEA. On the other hand, the performance in the 'Car' category has the opposite behavior to the other categories. The experimental results validate our hypothesis that BEA is beneficial to small and large objects, while negatively affecting medium-sized objects." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.644, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Analysis Experiment of CPA. Our method yields better results on 'Van' after we remove CPA as shown in Tab. 4, which reveals the relation between CPA and the large object tracking challenge. We believe that this is caused by the suppressing strategy in CPA. Large objects usually have more points, and under the same probability of misclassification of importance, they will have more foreground points assigned as low importance in the attention map, resulting in a part of useful information being suppressed in CPA. As shown in Fig. 5b, the importance conflict in the object leads to tracking failure. That part of the information will be further suppressed when stacking multiple transformer layers. However, the performance drops in other categories, without CPA to suppress the background noise for medium and small objects. As shown in Fig. 5a, most of the background points are assigned with low importance and suppressed in the success case, which proves our idea of CPA." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.356, + 0.129 + ], + "angle": 0, + "content": "Q. Wu et al." + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.147, + 0.511, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.147, + 0.665, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.146, + 0.684, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.35, + 0.243, + 0.651, + 0.258 + ], + "angle": 0, + "content": "Fig. 5: The attention maps of 'Van' in CPA." + }, + { + "type": "table_caption", + "bbox": [ + 0.215, + 0.273, + 0.788, + 0.314 + ], + "angle": 0, + "content": "Table 6: Results of HVTrack when using different memory sizes. 
We train HVTrack with a memory size of 2, and evaluate it with memory sizes ranging from 1 to 8." + }, + { + "type": "table", + "bbox": [ + 0.317, + 0.328, + 0.688, + 0.443 + ], + "angle": 0, + "content": "
Memory SizeCarPedestrianVanCyclistMean
158.3/66.530.9/46.226.8/29.857.1/70.543.6/54.6
258.6/67.031.7/47.927.1/30.657.6/70.944.1/55.6
359.2/67.633.8/49.927.7/31.055.8/67.745.3/56.7
460.0/68.533.7/50.629.5/33.657.9/71.345.9/57.7
560.0/68.533.8/51.228.7/32.657.8/70.845.8/57.9
660.3/68.935.1/52.128.7/32.458.2/71.746.6/58.5
759.7/68.235.6/52.928.0/31.558.1/71.446.4/58.4
859.8/68.335.1/52.428.2/32.058.1/71.446.3/58.3
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.473, + 0.788, + 0.593 + ], + "angle": 0, + "content": "Memory Size. Intuitively, trackers will achieve better performance when leveraging more temporal information. However, the performance of the trackers cannot continuously improve with the accumulation of historical information, due to inaccuracies in the historical tracklets. As shown in Tab. 6, we train HVTrack with a memory size of 2 due to the GPU memory limitation, and evaluate it with memory sizes from 1 to 8. The performance peaks for a memory size of 6, which is consistent with our assumption. Thus, we set 6 as our memory size and achieve a tracking speed of 31 FPS." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.621, + 0.36, + 0.637 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.658, + 0.788, + 0.764 + ], + "angle": 0, + "content": "In this paper, we have explored a new task in 3D SOT, and presented the first 3D SOT framework for high temporal variation scenarios, HVTrack. Its three main components, RPM, BEA, and CPA, allow HVTrack to achieve robustness to point cloud variations, similar object distractions, and background noise. Our experiments have demonstrated that HVTrack significantly outperforms the state of the art in high temporal variation scenarios, and achieves remarkable performance in regular tracking." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.788, + 0.84 + ], + "angle": 0, + "content": "**Limitation.** Our CPA relies on fixed manual hyperparameters to suppress noise. This makes it difficult to balance the performance in different object and search area sizes, leading to a performance drop in tracking large objects. In the future, we will therefore explore the use of a learnable function to replace the manual hyperparameters in CPA and overcome the large object tracking challenge." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.36, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D SOT in Point Clouds with High Temporal Variation" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.145, + 0.403, + 0.163 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.181, + 0.788, + 0.211 + ], + "angle": 0, + "content": "This work is supported in part by the National Natural Science Foundation of China (NFSC) under Grants 62372377 and 62176242." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.24, + 0.323, + 0.254 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.274, + 0.785, + 0.317 + ], + "angle": 0, + "content": "1. Chen, X., Shi, S., Zhang, C., Zhu, B., Wang, Q., Cheung, K.C., See, S., Li, H.: Trajectoryformer: 3d object tracking transformer with predictive trajectory hypotheses. arXiv preprint arXiv:2306.05888 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.318, + 0.785, + 0.345 + ], + "angle": 0, + "content": "2. Cheng, R., Wang, X., Sohel, F., Lei, H.: Topology-aware universal adversarial attack on 3d object tracking. Visual Intelligence 1(1), 31 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.347, + 0.785, + 0.373 + ], + "angle": 0, + "content": "3. Chiu, H.k., Prioletti, A., Li, J., Bohg, J.: Probabilistic 3d multi-object tracking for autonomous driving. 
arXiv preprint arXiv:2001.05673 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.375, + 0.785, + 0.402 + ], + "angle": 0, + "content": "4. Chung, J., Gulcehre, C., Cho, K., Bengio, Y.: Empirical evaluation of gated recurrent neural networks on sequence modeling. arXiv preprint arXiv:1412.3555 (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.404, + 0.785, + 0.431 + ], + "angle": 0, + "content": "5. Cui, Y., Fang, Z., Shan, J., Gu, Z., Zhou, S.: 3d object tracking with transformer. arXiv preprint arXiv:2110.14921 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.432, + 0.785, + 0.472 + ], + "angle": 0, + "content": "6. Ding, S., Rehder, E., Schneider, L., Cordts, M., Gall, J.: 3dmotformer: Graph transformer for online 3d multi-object tracking. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9784-9794 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.474, + 0.785, + 0.514 + ], + "angle": 0, + "content": "7. Fang, Z., Zhou, S., Cui, Y., Scherer, S.: 3d-siamrpn: An end-to-end learning method for real-time 3d single object tracking using raw point cloud. IEEE Sensors Journal 21(4), 4995-5011 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.517, + 0.785, + 0.557 + ], + "angle": 0, + "content": "8. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? the KITTI vision benchmark suite. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 3354-3361 (2012)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.559, + 0.785, + 0.6 + ], + "angle": 0, + "content": "9. Giancola, S., Zarzar, J., Ghanem, B.: Leveraging shape completion for 3d siamese tracking. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1359-1368 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.601, + 0.785, + 0.642 + ], + "angle": 0, + "content": "10. Guo, Z., Mao, Y., Zhou, W., Wang, M., Li, H.: Cmt: Context-matching-guided transformer for 3d tracking in point clouds. In: European Conference on Computer Vision. pp. 95-111. Springer (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.643, + 0.785, + 0.684 + ], + "angle": 0, + "content": "11. Hui, L., Wang, L., Cheng, M., Xie, J., Yang, J.: 3d siamese voxel-to-bev tracker for sparse point clouds. Advances in Neural Information Processing Systems 34, 28714-28727 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.686, + 0.785, + 0.727 + ], + "angle": 0, + "content": "12. Hui, L., Wang, L., Tang, L., Lan, K., Xie, J., Yang, J.: 3d siamese transformer network for single object tracking on point clouds. arXiv preprint arXiv:2207.11995 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.728, + 0.785, + 0.755 + ], + "angle": 0, + "content": "13. Jiao, L., Wang, D., Bai, Y., Chen, P., Liu, F.: Deep learning in visual tracking: A review. IEEE transactions on neural networks and learning systems (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.757, + 0.785, + 0.783 + ], + "angle": 0, + "content": "14. Jiayao, S., Zhou, S., Cui, Y., Fang, Z.: Real-time 3d single object tracking with transformer. IEEE Transactions on Multimedia (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.785, + 0.785, + 0.84 + ], + "angle": 0, + "content": "15. Kapania, S., Saini, D., Goyal, S., Thakur, N., Jain, R., Nagrath, P.: Multi object tracking with uavs using deep sort and yolov3 retina detection framework.
In: Proceedings of the 1st ACM Workshop on Autonomous and Intelligent Mobile Systems. pp. 1-6 (2020)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.274, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.356, + 0.129 + ], + "angle": 0, + "content": "Q. Wu et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.147, + 0.787, + 0.203 + ], + "angle": 0, + "content": "16. Kart, U., Lukezic, A., Kristan, M., Kamarainen, J.K., Matas, J.: Object tracking by reconstruction with view-specific discriminative correlation filters. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 1339-1348 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.203, + 0.787, + 0.244 + ], + "angle": 0, + "content": "17. Lan, K., Jiang, H., Xie, J.: Temporal-aware siamese tracker: Integrate temporal context for 3d object tracking. In: Proceedings of the Asian Conference on Computer Vision. pp. 399-414 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.244, + 0.787, + 0.285 + ], + "angle": 0, + "content": "18. Luo, C., Yang, X., Yuille, A.: Exploring simple 3d multi-object tracking for autonomous driving. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10488-10497 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.285, + 0.787, + 0.325 + ], + "angle": 0, + "content": "19. Machida, E., Cao, M., Murao, T., Hashimoto, H.: Human motion tracking of mobile robot with Kinect 3d sensor. In: Proceedings of SICE Annual Conference (SICE). pp. 2207-2211. IEEE (2012)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.325, + 0.787, + 0.366 + ], + "angle": 0, + "content": "20. Nishimura, H., Komorita, S., Kawanishi, Y., Murase, H.: Sdof-tracker: Fast and accurate multiple human tracking by skipped-detection and optical-flow. IEICE TRANSACTIONS on Information and Systems 105(11), 1938-1946 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.366, + 0.787, + 0.407 + ], + "angle": 0, + "content": "21. Pang, Z., Li, Z., Wang, N.: Model-free vehicle tracking and state estimation in point cloud sequences. In: 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 8075-8082. IEEE (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.407, + 0.787, + 0.447 + ], + "angle": 0, + "content": "22. Qi, C.R., Litany, O., He, K., Guibas, L.J.: Deep hough voting for 3d object detection in point clouds. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9277-9286 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.447, + 0.787, + 0.488 + ], + "angle": 0, + "content": "23. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems 30 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.488, + 0.787, + 0.529 + ], + "angle": 0, + "content": "24. Qi, H., Feng, C., Cao, Z., Zhao, F., Xiao, Y.: P2b: Point-to-box network for 3d object tracking in point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 6329-6338 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.529, + 0.787, + 0.57 + ], + "angle": 0, + "content": "25. 
Ren, C., Xu, Q., Zhang, S., Yang, J.: Hierarchical prior mining for non-local multiview stereo. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 3611-3620 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.569, + 0.787, + 0.597 + ], + "angle": 0, + "content": "26. Ren, S., Yang, X., Liu, S., Wang, X.: Sg-former: Self-guided transformer with evolving token reallocation. arXiv preprint arXiv:2308.12216 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.596, + 0.787, + 0.637 + ], + "angle": 0, + "content": "27. Sadjadpour, T., Li, J., Ambrus, R., Bohg, J.: Shasta: Modeling shape and spatiotemporal affinities for 3d multi-object tracking. IEEE Robotics and Automation Letters (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.636, + 0.787, + 0.678 + ], + "angle": 0, + "content": "28. Shan, J., Zhou, S., Fang, Z., Cui, Y.: Ptt: Point-track-transformer module for 3d single object tracking in point clouds. In: Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 1310-1316 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.678, + 0.787, + 0.718 + ], + "angle": 0, + "content": "29. Srivastava, N., Hinton, G., Krizhevsky, A., Sutskever, I., Salakhutdinov, R.: Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research 15(1), 1929-1958 (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.718, + 0.787, + 0.773 + ], + "angle": 0, + "content": "30. Sun, P., Kretzschmar, H., Dotiwalla, X., Chouard, A., Patnaik, V., Tsui, P., Guo, J., Zhou, Y., Chai, Y., Caine, B., et al.: Scalability in perception for autonomous driving: Waymo open dataset. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 2446-2454 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.773, + 0.787, + 0.813 + ], + "angle": 0, + "content": "31. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. Advances in neural information processing systems 30 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.813, + 0.787, + 0.841 + ], + "angle": 0, + "content": "32. Wang, Q., Chen, Y., Pang, Z., Wang, N., Zhang, Z.: Immortal tracker: Tracklet never dies. arXiv preprint arXiv:2111.13672 (2021)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.841 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.36, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3D SOT in Point Clouds with High Temporal Variation" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.147, + 0.786, + 0.189 + ], + "angle": 0, + "content": "33. Wang, Y., Sun, Y., Liu, Z., Sarma, S.E., Bronstein, M.M., Solomon, J.M.: Dynamic graph cnn for learning on point clouds. ACM Transactions on Graphics (tog) 38(5), 1-12 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.19, + 0.787, + 0.232 + ], + "angle": 0, + "content": "34. Wang, Z., Xie, Q., Lai, Y.K., Wu, J., Long, K., Wang, J.: Mlvsnet: Multi-level voting siamese network for 3d visual tracking. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 3101-3110 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.232, + 0.786, + 0.273 + ], + "angle": 0, + "content": "35. 
Weng, X., Wang, J., Held, D., Kitani, K.: 3d multi-object tracking: A baseline and new evaluation metrics. In: 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 10359-10366. IEEE (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.273, + 0.786, + 0.327 + ], + "angle": 0, + "content": "36. Weng, X., Wang, Y., Man, Y., Kitani, K.M.: Gnn3dmot: Graph neural network for 3d multi-object tracking with 2d-3d multi-feature learning. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 6499-6508 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.328, + 0.786, + 0.383 + ], + "angle": 0, + "content": "37. Wu, Q., Yang, J., Sun, K., Zhang, C., Zhang, Y., Salzmann, M.: Mixcycle: Mixup assisted semi-supervised 3d single object tracking with cycle consistency. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). pp. 13956-13966 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.384, + 0.786, + 0.424 + ], + "angle": 0, + "content": "38. Wu, Y., Lim, J., Yang, M.H.: Online object tracking: A benchmark. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 2411-2418 (2013)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.425, + 0.786, + 0.467 + ], + "angle": 0, + "content": "39. Xu, T.X., Guo, Y.C., Lai, Y.K., Zhang, S.H.: Cxtrack: Improving 3d point cloud tracking with contextual information. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 1084-1093 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.467, + 0.786, + 0.508 + ], + "angle": 0, + "content": "40. Yin, T., Zhou, X., Krahenbuhl, P.: Center-based 3d object detection and tracking. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 11784-11793 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.508, + 0.786, + 0.55 + ], + "angle": 0, + "content": "41. Yoo, J.S., Lee, H., Jung, S.W.: Video object segmentation-aware video frame interpolation. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 12322-12333 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.55, + 0.786, + 0.577 + ], + "angle": 0, + "content": "42. Zarzar, J., Giancola, S., Ghanem, B.: Efficient bird eye view proposals for 3d siamese tracking. arXiv preprint arXiv:1903.10168 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.577, + 0.786, + 0.619 + ], + "angle": 0, + "content": "43. Zhang, X., Yang, J., Zhang, S., Zhang, Y.: 3d registration with maximal cliques. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 17745-17754 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.619, + 0.786, + 0.674 + ], + "angle": 0, + "content": "44. Zheng, C., Yan, X., Gao, J., Zhao, W., Zhang, W., Li, Z., Cui, S.: Box-aware feature enhancement for single object tracking on point clouds. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 13199-13208 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.674, + 0.786, + 0.731 + ], + "angle": 0, + "content": "45. Zheng, C., Yan, X., Zhang, H., Wang, B., Cheng, S., Cui, S., Li, Z.: Beyond 3d siamese tracking: A motion-centric paradigm for 3d single object tracking in point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
8111-8120 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.731, + 0.786, + 0.784 + ], + "angle": 0, + "content": "46. Zhou, C., Luo, Z., Luo, Y., Liu, T., Pan, L., Cai, Z., Zhao, H., Lu, S.: Pttr: Relational 3d point cloud object tracking with transformer. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8531-8540 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.785, + 0.786, + 0.813 + ], + "angle": 0, + "content": "47. Zhou, X., Koltun, V., Krähenbühl, P.: Tracking objects as points. In: European conference on computer vision. pp. 474-490. Springer (2020)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.813 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/3D Single-object Tracking in Point Clouds with High Temporal Variation/c2204d64-6706-4e48-94d5-09db9f8770f0_origin.pdf b/2024/3D Single-object Tracking in Point Clouds with High Temporal Variation/c2204d64-6706-4e48-94d5-09db9f8770f0_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..61334c1d1bca50b6a44099366ee8d8549c368c5e --- /dev/null +++ b/2024/3D Single-object Tracking in Point Clouds with High Temporal Variation/c2204d64-6706-4e48-94d5-09db9f8770f0_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f640f0f04f025169711d3f469a1949169ea7c257b7b489de6699f4a9649a33a0 +size 840840 diff --git a/2024/3D Single-object Tracking in Point Clouds with High Temporal Variation/full.md b/2024/3D Single-object Tracking in Point Clouds with High Temporal Variation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..b8c0305be9a804a87dbd3061096700e8a39c61f2 --- /dev/null +++ b/2024/3D Single-object Tracking in Point Clouds with High Temporal Variation/full.md @@ -0,0 +1,304 @@ +# 3D Single-object Tracking in Point Clouds with High Temporal Variation + +Qiao Wu $^{1}$ , Kun Sun $^{2}$ , Pei An $^{3}$ , Mathieu Salzmann $^{4}$ , Yanning Zhang $^{1}$ , and Jiaqi Yang $^{1\star}$ + +1 Northwestern Polytechnical University + +$^{2}$ China University of Geosciences, Wuhan + +3 HuaZhong University of Science and Technology + +4 École Polytechnique Fédérale de Lausanne qiaowu@mail.nwu.edu.cn, jqyang@nwpu.edu.cn + +Abstract. The high temporal variation of the point clouds is the key challenge of 3D single-object tracking (3D SOT). Existing approaches rely on the assumption that the shape variation of the point clouds and the motion of the objects across neighboring frames are smooth, failing to cope with high temporal variation data. In this paper, we present a novel framework for 3D SOT in point clouds with high temporal variation, called HVTrack. HVTrack proposes three novel components to tackle the challenges in the high temporal variation scenario: 1) A Relative-Pose-Aware Memory module to handle temporal point cloud shape variations; 2) a Base-Expansion Feature Cross-Attention module to deal with similar object distractions in expanded search areas; 3) a Contextual Point Guided Self-Attention module for suppressing heavy background noise. We construct a dataset with high temporal variation (KITTI-HV) by setting different frame intervals for sampling in the KITTI dataset. On the KITTI-HV with 5 frame intervals, our HVTrack surpasses the state-of-the-art tracker CXTrack by $11.3\% / 15.7\%$ in Success/Precision.
+
+Keywords: 3D single-object tracking $\cdot$ High temporal variation $\cdot$ Point cloud
+
+# 1 Introduction
+
+3D single-object tracking (3D SOT) is pivotal for autonomous driving [2,40] and robotics [16, 19, 25, 43]. Given the target point cloud and 3D bounding box as template, the goal of 3D SOT is to regress the target 3D poses in the tracking point cloud sequence. Existing approaches [5,7,9-12,24,28,34,39,44-46] rely on the assumption that the point cloud variations and motion of the object across neighboring frames are relatively smooth. They crop out a small search area around the last proposal for tracking, thus dramatically reducing the complexity of the problem. The template and search area features are then typically correlated as shown in Fig. 1a, and used to regress the 3D bounding box.
+
+![](images/b61358caaf88a58a50d563d5ad68178071fa021fe560a3ab69e67133729ccba2.jpg)
+Fig. 1: Feature correlation in 3D SOT. (a) Feature correlation in the smooth case (1 frame interval). Correlating the features is relatively trivial as the target undergoes only small shape variations, and the observation angles are consistent in the three frames. (b-c) Feature correlation in high temporal variation cases (10 frame intervals). The pose relative to the camera changes rapidly. Correlating the features using historical information is highly challenging (b). We encode the historical observation angles $\alpha$ into the features to guide the variation of the relative pose to the camera (c).
+
+In practice, these approaches are challenged by the presence of large point cloud variations due to the limited sensor temporal resolution and the moving speed of objects, as shown in Fig. 1b. We refer to this significant variation in point cloud and object position between two frames as high temporal variation (HV). The high temporal variation challenge is non-negligible in existing benchmarks, and exists in other scenarios not yet covered by them, such as:
+
+- Skipped-tracking, which can greatly reduce computational consumption in tracking and serve a wide range of other tasks such as detection [20] and segmentation [41].
+- Tracking on edge devices, which is essential for deploying trackers on common devices with limited frame rate, resolution, computation, and power.
+- Tracking in highly dynamic scenarios [15], which is common in daily life, e.g., tracking in sports events, highways, and UAV scenarios.
+
+There are three challenges for 3D SOT in HV point clouds, and existing approaches are not sufficient to address them. 1) Strong shape variations of the point clouds: Point cloud shape variations are usually caused by the occlusion and relative pose transformation between the object and the sensor. As illustrated in Fig. 1b, feature correlation in existing approaches fails because of the dramatic change in the density and distribution of points. 2) Distractions due to similar objects: When objects suffer from a significant motion, the search area needs to be enlarged to incorporate the target, thus introducing more distractions from similar objects. Most of the existing trackers focus on local scale features, which discards the environmental spatial contextual information needed to handle distractions. 3) Heavy background noise: The expansion of the search area further reduces the proportion of target information in the scene.
+Existing methods aim to find features with a high template response in the feature correlation stage, but neglect to suppress noise interference and reduce the impact of noise features. We evaluate state-of-the-art (SOTA) trackers [24,39,44,45] in the high temporal variation scenario as shown in Fig. 2. Their performance drops dramatically as the temporal variation of the scene point clouds increases.
+
+![](images/0a79de94d6bee311fd49a27bd2643270542e0f6a519c9ac96df629629bc43d48.jpg)
+Fig. 2: Comparison of HVTrack with the SOTAs [24,39,44,45] on 'Car' from KITTI-HV (KITTI [8] with different frame intervals, see Sec. 4).
+
+![](images/979aa47cf04a5ba1c9a7068f41de170d603ca2467e05c7be82fb26d172676d5a.jpg)
+
+To address the above challenges, we propose a novel framework for 3D SOT in point clouds with High temporal Variation, which we call HVTrack. Specifically, we propose three novel modules to address each of the three above-mentioned challenges. 1) A Relative-Pose-Aware Memory (RPM) module to handle the strong shape variations of the point clouds. Different from [17], we integrate the foreground masks and observation angles into the memory bank. Therefore, the model can implicitly learn the distribution variation of point clouds from the relative pose in time. The information arising from observation angles has been overlooked by all existing trackers. 2) A Base-Expansion Feature Cross-Attention (BEA) module to deal with the problem of similar object distractions occurring in large scenes. We synchronize the correlation of the hybrid-scale features (base and expansion scales, Sec. 3.4) in the cross-attention, and efficiently utilize spatial contextual information. 3) A Contextual Point Guided Self-Attention (CPA) module to suppress the background noise introduced by the expanded search area. It aggregates the features of points into contextual points according to their importance. Less important points share fewer contextual points and vice versa, thus suppressing most of the background noise. BEA and CPA are inspired by SGFormer [26], which utilizes hybrid scale significance maps to assign more tokens to salient regions of 2D images. Our experiments clearly demonstrate the remarkable performance of HVTrack in high temporal variation scenarios, as illustrated in Fig. 2. Our contributions can be summarized as follows:
+
+- For the first time, to the best of our knowledge, we explore the new 3D SOT task for high temporal variation scenarios, and propose a novel framework called HVTrack for the task.
+- We propose three novel modules, RPM, BEA, and CPA, to address three challenges for 3D SOT in HV point clouds: strong point cloud variations, similar object distractions, and heavy background noise.
+- HVTrack yields state-of-the-art results on KITTI-HV and Waymo, and ranks second on KITTI. Our experimental results demonstrate the robustness of HVTrack in both smooth and high temporal variation cases.
+
+# 2 Related Work
+
+# 2.1 3D Single-object Tracking
+
+Most of the 3D SOT approaches are based on a Siamese framework, because the appearance variations of the target between neighboring frames are not significant. The work of Giancola et al. [9] constitutes the pioneering method in 3D SOT. However, it only solved the discriminative feature learning problem, and used a time-consuming and inaccurate heuristic matching to locate the target. Zarzar et al. [42] utilized a 2D RPN in bird's eye view to build an end-to-end tracker.
+The P2B network [24] employs VoteNet [22] as RPN and constructs the first point-based tracker. The following works [7, 11, 12, 28, 34, 44] develop different architectures of trackers based on P2B [24]. V2B [11] leverages the target completion model to generate dense and complete targets and proposes a simple yet effective voxel-to-BEV target localization network. BAT [44] utilizes the relationship between points and the bounding box, integrating the box information into the point clouds. With the development of transformer networks, a number of works [5, 10, 12, 28, 39, 46] have proposed to exploit various attention mechanisms. STNet [12] forms an iterative coarse-to-fine cross- and self-attention to correlate the target and search area. CXTrack [39] employs a target-centric transformer to integrate targetness information and contextual information. TAT [17] leverages the temporal information to integrate target cues by applying an RNN-based [4] correlation module. Zheng et al. [45] presented a motion-centric method, M2-Track, which is appearance matching-free and has made great progress in dealing with the sparse point cloud tracking problem. Wu et al. [37] proposed the first semi-supervised framework in 3D SOT.
+
+While effective in their context, the above methods are designed based on the assumption that the point cloud variation and motion of the objects across neighboring frames are not significant. In high temporal variation scenarios, this assumption leads to performance degradation because of the point cloud variations and interference naturally occurring in large scenes. Here, we introduce HVTrack to tackle the challenges of 3D SOT in high temporal variation scenarios.
+
+# 2.2 3D Multi-object Tracking
+
+3D multi-object tracking (MOT) in point clouds follows two main streams: Tracking-by-detection, and learning-based methods. Tracking-by-detection [1, 3, 32, 35] usually exploits methods such as Kalman filtering to correlate the detection results and track the targets. CenterTrack [47], CenterPoint [40], and SimTrack [18] replace the filter by leveraging deep networks to predict the velocity and motion of the objects. The learning-based methods [6, 27, 36] typically apply a Graph Neural Network to tackle the association challenge in MOT. GNN3DMOT [36] leverages both 2D images and 3D point clouds to obtain a robust association. 3DMOTFormer [6] constructs a graph transformer framework and achieves a good performance using only 3D point clouds.
+
+3D MOT and 3D SOT have different purposes and their own challenges [13]. 3D MOT is object-level and focuses on correlating detected objects, whereas 3D SOT is intra-object-level [14] and aims to track a single object given a template. 3D SOT methods usually come with much lower computational consumption and higher throughput [46]. Also, 3D MOT is free from the challenges posed by the dynamic change in the search area size, as MOT is not required to adopt the search area cropping strategy of SOT.
+
+# 3 Method
+
+# 3.1 Problem Definition
+
+Given the template of the target, the goal of 3D SOT is to continually locate the poses of the target in the search area point cloud sequence $\mathbf{P}^s = \{P_0^s,\dots ,P_t^s,\dots ,P_n^s |P_t^s\in \mathbb{R}^{N_s\times 3}\}$ . Usually, the target point cloud with labels in the first frame is regarded as the template. Prior trackers [5,7,9-12,24,28,34,39,44-46] leverage a 3D bounding box label $B_{0} = (x,y,z,w,l,h,\theta)\in \mathbb{R}^{7}$ to generate the template in the input. Here, $(x,y,z)$ , $(w,l,h)$ and $\theta$ are the center location, bounding box size (width, length, and height), and rotation angle of the target, respectively. As objects can be assumed to be rigid, the trackers only need to regress the center and rotation angle of the target.
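+
+As a concrete illustration of this parameterization, here is a minimal Python sketch (ours, purely illustrative; the names do not come from any released code) encoding the rigid-object assumption that only the center and yaw are updated per frame:
+
+```python
+from dataclasses import dataclass, replace
+
+
+@dataclass(frozen=True)
+class Box3D:
+    """Target state B = (x, y, z, w, l, h, theta)."""
+    x: float  # center location
+    y: float
+    z: float
+    w: float  # box size (width, length, height), fixed for a rigid object
+    l: float
+    h: float
+    theta: float  # rotation (yaw) angle
+
+
+def apply_regression(prev: Box3D, dx: float, dy: float,
+                     dz: float, dtheta: float) -> Box3D:
+    """Only the center and rotation angle are regressed per frame;
+    the size (w, l, h) is carried over from the template box B_0."""
+    return replace(prev, x=prev.x + dx, y=prev.y + dy,
+                   z=prev.z + dz, theta=prev.theta + dtheta)
+```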
+# 3.2 Overview
+
+We propose HVTrack to exploit both temporal and spatial information and achieve robust tracking in high temporal variation scenarios. As shown in Fig. 3, we take the point cloud $P_{t}^{s}$ at time $t$ as the search area, and leverage memory banks as the template. We first employ a backbone to extract the local spatial features $\mathcal{X}_0 \in \mathbb{R}^{N \times C}$ of $P_{t}^{s}$ , with $N$ and $C$ the point number and feature channel, respectively. Then, $L$ transformer layers are employed to extract spatio-temporal information. For each layer $l$ , (i) we capture the template information $Mem_{l} \in \mathbb{R}^{KN \times C}$ from the Relative-Pose-Aware Memory module, with $K$ the memory bank size (Sec. 3.3); (ii) the memory features and search area features $\mathcal{X}_{l-1}$ are correlated in the Base-Expansion Feature Cross-Attention (Sec. 3.4); (iii) the Contextual Point Guided Self-Attention (Sec. 3.5) leverages the attention map of the Base-Expansion Feature Cross-Attention to suppress the noise features; (iv) we update the layer features memory bank using $\mathcal{X}_{l-1}$ . After the transformer layers, an RPN is applied to regress the location $(x_{t}, y_{t}, z_{t}, \theta_{t})$ , the mask $\mathcal{M}_{t} \in \mathbb{R}^{N \times 1}$ , and the observation angle $\alpha \in \mathbb{R}^2$ . Finally, the mask and observation angle memory banks are updated using the predicted results.
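+
+To make the dataflow concrete, here is a minimal, self-contained PyTorch sketch of this per-frame loop (our illustration, not the authors' code: the linear backbone, plain cross-attention in place of BEA/CPA, and the crudely pooled head are stand-ins):
+
+```python
+import torch
+import torch.nn as nn
+
+
+class HVTrackStep(nn.Module):
+    """One tracking step: backbone -> L correlation layers -> RPN head."""
+
+    def __init__(self, C: int = 128, L: int = 2, heads: int = 4):
+        super().__init__()
+        self.backbone = nn.Linear(3, C)  # stand-in for the DGCNN backbone
+        self.layers = nn.ModuleList(
+            nn.MultiheadAttention(C, heads, batch_first=True)
+            for _ in range(L))
+        self.head = nn.Linear(C, 4 + 1 + 2)  # (x, y, z, theta), mask, alpha
+
+    def forward(self, points, layer_mem):
+        # points: (1, N, 3); layer_mem[l]: (1, K*N, C) template features.
+        x = self.backbone(points)  # X_0, local spatial features (1, N, C)
+        for l, attn in enumerate(self.layers):
+            mem = layer_mem[l]
+            # FIFO update of the layer features memory bank with X_{l-1}.
+            layer_mem[l] = torch.cat([mem, x], dim=1)[:, x.shape[1]:]
+            x, _ = attn(x, mem, mem)  # correlate search area with memory
+        out = self.head(x)                # per-point predictions, (1, N, 7)
+        loc = out[..., :4].mean(dim=1)    # pooled (x_t, y_t, z_t, theta_t)
+        mask = out[..., 4]                # per-point foreground mask logits
+        alpha = out[..., 5:].mean(dim=1)  # observation angle, 2-d encoding
+        return loc, mask, alpha
+
+
+# One step with K = 2, N = 128, C = 128; the predicted masks and angles
+# would then be pushed into the MM and OM banks described in Sec. 3.3.
+step = HVTrackStep()
+mem = [torch.zeros(1, 2 * 128, 128) for _ in range(2)]
+loc, mask, alpha = step(torch.randn(1, 128, 3), mem)
+```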
+# 3.3 Relative-Pose-Aware Memory Module
+
+As shown in Fig. 1(b), rapid changes in relative pose lead to large variations in the shape of the object point cloud across the frames. Correlating the object features in $(t - 2, t - 1, t)$ then becomes difficult, as they have a low overlap with each other. To address this, we introduce the observation angle into the memory bank. The observation angle gives us knowledge of the coarse distribution of an object's point cloud. Thus, the model can learn the variations in point cloud distribution from the historical changes of the observation angle.
+
+![](images/c0cc4a36f7d581f333a86b58541c438110d97eeed1b64756128fa6dd0d4642a2.jpg)
+Fig. 3: HVTrack framework. We first utilize a backbone to extract the local embedding features of the search area. Then, we construct $L$ transformer layers to fuse spatio-temporal information. For each transformer layer, (i) we apply three memory bank features in the Relative-Pose-Aware Memory module to generate temporal template information; (ii) we employ the Base-Expansion Feature Cross-Attention to correlate the template and search area by leveraging hybrid scale spatial context-aware features; (iii) we introduce a Contextual Point Guided Self-Attention to suppress unimportant noise. After each layer, we update the layer features memory bank using the layer input. Finally, we apply an RPN to regress the 3D bounding box, and update the mask and observation angle memory banks.
+
+To exploit the temporal information as the template, we propose a Relative-Pose-Aware Memory (RPM) module. RPM contains 3 memory banks. 1) A layer features memory bank (LM) $\in \mathbb{R}^{L\times K\times N\times C}$ : We leverage the historical transformer layer features as the template features to reduce the template inference time of former trackers [5,9-12,24,28,34,44,46]. 2) A mask memory bank (MM) $\in \mathbb{R}^{K\times N\times 1}$ : Inspired by the mask-based trackers [39,45], we utilize the mask as the foreground representation. 3) An observation angle memory bank (OM) $\in \mathbb{R}^{K\times 2}$ . For each transformer layer $l$ , we process the memory features as
+
+$$
+T_{l} = \operatorname{Linear}([\mathrm{LM}_{l}, \mathrm{MM}, \operatorname{Repeat}(\mathrm{OM})]), \tag{1}
+$$
+
+where $T_{l} \in \mathbb{R}^{KN \times C}$ denotes the template features, $\operatorname{Linear}(\cdot)$ is a linear layer that projects the features from $\mathbb{R}^{KN \times (C + 3)}$ to $\mathbb{R}^{KN \times C}$ , $[\cdot]$ is the concatenation operation, and $\operatorname{Repeat}(\cdot)$ stacks the OM to $\mathbb{R}^{K \times N \times 2}$ . Then, we project $T_{l}$ into Query (Q), Key (K), and Value (V) using the learnable parameter matrices as
+
+$$
+\begin{array}{l}
+Q_{l}^{T} = \mathrm{LN}(\mathrm{LN}(T_{l}) W_{l}^{TQ} + \mathrm{PE}^{T}), \\
+K_{l}^{T} = \mathrm{LN}(T_{l}) W_{l}^{TK}, \\
+V_{l}^{T} = \mathrm{LN}(T_{l}) W_{l}^{TV},
+\end{array} \tag{2}
+$$
+
+![](images/e0a53b32019808bd9d52aad04d8ccbb081dd8e0591cafb5da842715f08728267.jpg)
+(a) BEA.
+
+![](images/dc2e9b1c243ffa8aaaef51d094460cf0a5d9216ebef62d9a3834fefd7ed2d73b.jpg)
+(b) CPA.
+Fig. 4: (a) Base-Expansion Feature Cross-Attention (BEA). The $H$ heads in the multi-head attention (MHA) are split to process hybrid scale features. For the base scale branch, we directly put the local features into the MHA. For the expansion scale branch, we apply an EdgeConv [33] to expand the receptive field of each point and extract more abstract features before the MHA. BEA captures the spatial context-aware information with a modest extra computational cost. (b) Contextual Point Guided Self-Attention (CPA). We determine the importance of each point by both the base and expansion scale attention maps. Then, we aggregate all the points into $U$ clusters (contextual points) according to their importance and project the clusters to $K$ and $V$ . We assign fewer contextual points to low-importance points, and vice versa. CPA not only suppresses the noise but also reduces the computational cost of the attention.
+
+where $\mathrm{LN}(\cdot)$ is the layer norm, and $\mathrm{PE}^T \in \mathbb{R}^{KN \times C}$ is the positional embedding of the historical point cloud coordinates. We utilize a linear layer to project the point cloud coordinates to their positional embedding. Finally, a self-attention is applied for internal interactions between temporal information as
+
+$$
+Mem_{l}^{*} = T_{l} + \operatorname{Dropout}\left(\mathrm{MHA}\left(Q_{l}^{T}, K_{l}^{T}, V_{l}^{T}\right)\right), \tag{3}
+$$
+
+where MHA is the multi-head attention in [31], and Dropout is the random dropping operation in [29]. Following CXTrack [39], we apply dropout and a feed-forward network (FFN) after the self-attention, i.e.,
+
+$$
+Mem_{l} = Mem_{l}^{*} + \operatorname{Dropout}(\operatorname{FFN}(\mathrm{LN}(Mem_{l}^{*}))), \tag{4}
+$$
+
+$$
+\operatorname{FFN}(x) = \max\left(0, x W_{1} + b_{1}\right) W_{2} + b_{2}. \tag{5}
+$$
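+
+For reference, here is a minimal PyTorch sketch of this template branch (Eqs. (1)-(4)); it is our illustration rather than the authors' released code, `nn.MultiheadAttention` stands in for MHA with its internal projections playing the role of $W_l^{TQ}, W_l^{TK}, W_l^{TV}$, and Eq. (2) is simplified to a single normalization:
+
+```python
+import torch
+import torch.nn as nn
+
+
+class RPM(nn.Module):
+    """Relative-Pose-Aware Memory: fuse LM, MM, OM into template features."""
+
+    def __init__(self, C: int, num_heads: int = 4, p_drop: float = 0.1):
+        super().__init__()
+        self.fuse = nn.Linear(C + 3, C)   # Eq. (1): (C + 1 + 2) -> C
+        self.pos = nn.Linear(3, C)        # coordinates -> positional embedding
+        self.norm_q = nn.LayerNorm(C)
+        self.norm_kv = nn.LayerNorm(C)
+        self.attn = nn.MultiheadAttention(C, num_heads, batch_first=True)
+        self.drop = nn.Dropout(p_drop)
+        self.norm_ffn = nn.LayerNorm(C)
+        self.ffn = nn.Sequential(nn.Linear(C, C), nn.ReLU(), nn.Linear(C, C))
+
+    def forward(self, lm, mm, om, coords):
+        # lm: (K, N, C) layer features; mm: (K, N, 1) masks; om: (K, 2)
+        # observation angles; coords: (K, N, 3) historical coordinates.
+        K, N, C = lm.shape
+        om = om[:, None, :].expand(K, N, 2)                   # Repeat(OM)
+        t = self.fuse(torch.cat([lm, mm, om], dim=-1))        # Eq. (1)
+        t = t.reshape(1, K * N, C)
+        q = self.norm_q(t) + self.pos(coords.reshape(1, K * N, 3))
+        kv = self.norm_kv(t)
+        mem = t + self.drop(self.attn(q, kv, kv)[0])          # Eq. (3)
+        return mem + self.drop(self.ffn(self.norm_ffn(mem)))  # Eq. (4)
+
+
+# Usage with K = 2, N = 128, C = 128 (the values used in Sec. 3.6):
+rpm = RPM(C=128)
+mem = rpm(torch.randn(2, 128, 128), torch.rand(2, 128, 1),
+          torch.rand(2, 2), torch.randn(2, 128, 3))  # -> (1, 256, 128)
+```
+
+# 3.4 Base-Expansion Feature Cross-Attention
+
+Most of the existing trackers [11,24,28,34,39,44,46] employ a point-based backbone [23,33] and focus on local region features, which we call base scale features. Using only base scale features in the whole pipeline is quite efficient and effective in small scenes. However, the base scale features are limited in representing the neighboring environment features around the object in large search areas. To tackle the challenge of similar object distractions, spatial context information across consecutive frames is crucial for effective object tracking [39]. Expanding the receptive field of features can help capture spatial contextual information, and such features are called expansion scale features. Inspired by [26], we propose Base-Expansion Feature Cross-Attention (BEA) to capture both local and more abstract features, and exploit spatial context-aware information.
+
+As shown in Fig. 4a, the input features $X_{l-1}$ are projected into $\mathbf{Q}$ . Usually, the memory features $Mem_l$ would be projected into $\mathbf{K}$ and $\mathbf{V}$ . Then, multi-head cross-attention adopts $H$ independent heads, and processes them using the same base scale features. By contrast, we split the $H$ heads into 2 groups. $H/2$ heads exploit local spatial context information. We directly process the base scale features with normal cross-attention, and output base scale features $\hat{X}_{l-1}^{base} \in \mathbb{R}^{N \times C/2}$ and an attention map $Attn^{base} \in \mathbb{R}^{N \times KN}$ . The other $H/2$ heads capture environment context features. We first apply an EdgeConv [33] to extract more abstract features $Mem_l^{expan} \in \mathbb{R}^{KN/8 \times C}$ , which are expansion scale features, i.e.,
+
+$$
+Mem_{l}^{expan} = \operatorname{EdgeConv}\left(Mem_{l}\right). \tag{6}
+$$
+
+Then, we project the expansion features into K and V, and perform multi-head cross-attention with Q. Specifically, for the $i$ -th head belonging to the expansion scale branch, we generate Q, K, and V as
+
+$$
+\begin{array}{l}
+Q_{i} = \mathrm{LN}(\mathrm{LN}(X_{l-1}) W_{i}^{Q} + \mathrm{PE}_{i}^{S}), \\
+K_{i} = \mathrm{LN}\left(Mem_{l}^{expan}\right) W_{i}^{K}, \\
+V_{i} = \mathrm{LN}(Mem_{l}^{expan}) W_{i}^{V},
+\end{array} \tag{7}
+$$
+
+where $\mathrm{PE}_i^S$ is the positional embedding of the search area point cloud coordinates. Then, cross-attention is performed as
+
+$$
+Attn_{i}^{expan} = \operatorname{Softmax}\left(\frac{Q_{i} K_{i}^{\top}}{\sqrt{d_{h}}}\right), \tag{8}
+$$
+
+$$
+h_{i}^{expan} = Attn_{i}^{expan} V_{i}, \tag{9}
+$$
+
+where $d_h$ is the feature dimension of the heads, and $h_i^{expan}$ is the output features of the $i$ -th head. After that, we concatenate the output features and attention maps of the heads as
+
+$$
+\begin{array}{l}
+\hat{X}_{l-1}^{expan} = \left[h_{1}, \dots, h_{H/2}\right], \\
+Attn^{expan} = \left[Attn_{1}^{expan}, \dots, Attn_{H/2}^{expan}\right],
+\end{array} \tag{10}
+$$
+
+where $\hat{X}_{l-1}^{expan} \in \mathbb{R}^{N \times C/2}$ , and $Attn^{expan} \in \mathbb{R}^{N \times KN/8}$ . Finally, we concatenate the base scale and expansion scale outputs as the resulting correlation feature $\hat{X}_{l-1} \in \mathbb{R}^{N \times C}$ . Thus, BEA provides rich hybrid scale spatial contextual information for each point, with a very modest extra computational cost.
+
+A minimal PyTorch sketch of this two-branch attention follows (again our illustration, not the released implementation): the channel/head split and the hybrid-scale K/V are kept, while EdgeConv is replaced by a strided max-pooling plus a linear layer as a stand-in abstraction, and positional embeddings are omitted:
+
+```python
+import torch
+import torch.nn as nn
+
+
+class BEA(nn.Module):
+    """Base-Expansion Feature Cross-Attention with an H/2 + H/2 head split."""
+
+    def __init__(self, C: int, heads: int = 4, stride: int = 8):
+        super().__init__()
+        assert C % 2 == 0 and heads % 2 == 0
+        self.base = nn.MultiheadAttention(C // 2, heads // 2, batch_first=True)
+        self.expan = nn.MultiheadAttention(C // 2, heads // 2, batch_first=True)
+        self.pool = nn.MaxPool1d(stride)      # stand-in for EdgeConv (Eq. (6))
+        self.abstract = nn.Linear(C // 2, C // 2)
+
+    def forward(self, x, mem):
+        # x: (1, N, C) search features; mem: (1, K*N, C) template features.
+        xb, xe = x.chunk(2, dim=-1)           # split channels across branches
+        mb, me = mem.chunk(2, dim=-1)
+        hb, attn_base = self.base(xb, mb, mb)  # base scale: local features
+        me = self.pool(me.transpose(1, 2)).transpose(1, 2)  # K*N -> K*N/8
+        me = self.abstract(me)                # expansion scale features
+        he, attn_expan = self.expan(xe, me, me)
+        # (1, N, C) correlation features plus the two attention maps,
+        # (1, N, K*N) and (1, N, K*N/8), which CPA reuses below.
+        return torch.cat([hb, he], dim=-1), attn_base, attn_expan
+
+
+bea = BEA(C=128)
+out, a_b, a_e = bea(torch.randn(1, 128, 128), torch.randn(1, 256, 128))
+```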
# 3.5 Contextual Point Guided Self-Attention

Most of the information in the search area must be regarded as noise, because we are only interested in a single object to be tracked. Existing trackers [11,24,28,34,44] aim to find the features with a high template response in the search area, but neglect to suppress the noise. Zhou et al. [46] proposed Relation-Aware Sampling to preserve more template-relevant points in the search area before feeding it to the backbone. By contrast, we focus on suppressing the noise after feature correlation via a Contextual Point Guided Self-Attention (CPA).

As shown in Fig. 4b, we leverage the base and expansion scale attention maps to generate the importance map $I \in \mathbb{R}^{N \times 1}$ as

$$
I = \mathrm{Mean}(Attn^{base}) + \mathrm{Mean}(Attn^{expan}). \tag{11}
$$

The higher the importance of a point, the more target-related spatial context-aware information it contains. We sort the points according to the magnitude of their importance values. Then, all the points are separated into $G$ groups according to their importance. For each group with points $P_{i}^{G} \in \mathbb{R}^{G_{i} \times C}$, we aggregate the points into $U_{i}$ clusters, which we call contextual points. Specifically, we first reshape the points as $P_{i}^{G} \in \mathbb{R}^{U_{i} \times C \times G_{i}/U_{i}}$. Second, a linear layer is employed to project each group to the contextual points $P_{i}^{U} \in \mathbb{R}^{U_{i} \times C}$. We assign fewer contextual points to the groups with lower importance, thereby suppressing the expression of noise features. Finally, all the contextual points are concatenated and projected into Key $K^{U} \in \mathbb{R}^{U \times C}$ and Value $V^{U} \in \mathbb{R}^{U \times C}$. We project $\hat{X}_{l-1}$ to Q and perform multi-head attention with $K^{U}$ and $V^{U}$, followed by an FFN. CPA shrinks the length of K and V, which decreases the computational cost of the self-attention.
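A sketch of CPA, using the group sizes ([32, 64, 32] points) and contextual point counts ($U = [4, 32, 16]$) reported later in Sec. 3.6. Single-head attention, the FFN width, and the clustering of consecutive sorted points are simplifications, not the paper's exact design.

```python
import torch
import torch.nn as nn

class CPA(nn.Module):
    """Sketch of Contextual Point Guided Self-Attention. Group sizes and
    contextual point counts follow Sec. 3.6."""

    def __init__(self, dim=128, groups=(32, 64, 32), clusters=(4, 32, 16)):
        super().__init__()
        self.groups, self.clusters = groups, clusters
        # one aggregation layer per group: projects G_i/U_i points to 1
        self.agg = nn.ModuleList(nn.Linear(g // u, 1)
                                 for g, u in zip(groups, clusters))
        self.w_q = nn.Linear(dim, dim, bias=False)
        self.w_k = nn.Linear(dim, dim, bias=False)
        self.w_v = nn.Linear(dim, dim, bias=False)
        self.ln = nn.LayerNorm(dim)
        self.ffn = nn.Sequential(nn.Linear(dim, 2 * dim), nn.ReLU(),
                                 nn.Linear(2 * dim, dim))

    def forward(self, x, attn_base, attn_exp):
        # x: (N, C) correlated features from BEA; attn_*: (N, M) maps.
        assert x.shape[0] == sum(self.groups)          # N = 128 in Sec. 3.6
        imp = attn_base.mean(-1) + attn_exp.mean(-1)   # Eq. (11)
        order = imp.argsort(descending=True)           # high -> low importance
        ctx, start = [], 0
        for g, u, lin in zip(self.groups, self.clusters, self.agg):
            pts = x[order[start:start + g]]            # (G_i, C)
            pts = pts.view(u, g // u, -1).transpose(1, 2)  # (U_i, C, G_i/U_i)
            ctx.append(lin(pts).squeeze(-1))           # (U_i, C) contextual pts
            start += g
        ctx = torch.cat(ctx, 0)                        # all contextual points
        q, k, v = self.w_q(x), self.w_k(ctx), self.w_v(ctx)
        out = x + torch.softmax(q @ k.t() / q.shape[-1] ** 0.5, -1) @ v
        return out + self.ffn(self.ln(out))
```

Note how the attention length shrinks from $N = 128$ points to $U = 52$ contextual points, which is where the computational saving comes from.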
# 3.6 Implementation Details

Backbone & Loss Functions. Following CXTrack [39], we adopt DGCNN [33] as our backbone, and apply X-RPN [39] as the RPN of our framework. We add two shared MLP layers to X-RPN to predict the observation angles ($\alpha$) and the masks. The overall loss is therefore expressed as

$$
\mathcal{L} = \gamma_{1}\mathcal{L}_{cc} + \gamma_{2}\mathcal{L}_{mask} + \gamma_{3}\mathcal{L}_{alpha} + \gamma_{4}\mathcal{L}_{rm} + \gamma_{5}\mathcal{L}_{box}, \tag{12}
$$

where $\mathcal{L}_{cc}$, $\mathcal{L}_{mask}$, $\mathcal{L}_{alpha}$, $\mathcal{L}_{rm}$, and $\mathcal{L}_{box}$ are the losses for the coarse center, foreground mask, observation angle, targetness mask, and bounding box, respectively. We apply the $L_{2}$ loss for $\mathcal{L}_{cc}$, the standard cross-entropy loss for $\mathcal{L}_{mask}$ and $\mathcal{L}_{rm}$, and the Huber loss for $\mathcal{L}_{alpha}$ and $\mathcal{L}_{box}$. $\gamma_{1}$, $\gamma_{2}$, $\gamma_{3}$, $\gamma_{4}$, and $\gamma_{5}$ are empirically set to 10.0, 0.2, 1.0, 1.0, and 1.0.

Training & Testing. We train our model on NVIDIA RTX-3090 GPUs with the Adam optimizer and an initial learning rate of 0.001. Due to GPU memory limitations, we construct point cloud sequences of 8 frames for training, and set $K = 2$ in training and $K = 6$ in testing. Following existing methods [39,45], we set $N$ and $C$ to 128. We stack $L = 2$ transformer layers and apply $H = 4$ heads in BEA and CPA. We adopt $G = 3$ groups in CPA, and assign [32,64,32] points and $U = [4,32,16]$ contextual points to the groups, respectively.

# 4 Experiments

We leverage two widely used 3D tracking benchmarks, KITTI [8] and Waymo [30], to evaluate the general performance of our approach in regular 3D SOT. In addition, we establish a new KITTI-HV dataset to test our performance in high temporal variation scenarios.

Regular Datasets. The KITTI tracking dataset comprises 21 training sequences and 29 test sequences, encompassing eight object types. Following prior studies [9,24,34,39,44,45], we use sequences 0-16 as training data, 17-18 for validation, and 19-20 for testing. The Waymo dataset is large-scale; we adopt the approach outlined in LiDAR-SOT [21] and utilize 1121 tracklets, which are categorized into easy, medium, and hard subsets based on the number of points in the first frame of each tracklet.

HV Dataset. We build a high temporal variation dataset for 3D SOT based on KITTI, called KITTI-HV. Although high temporal variation scenarios are present in the existing benchmarks, there is no exact threshold determining whether a scenario exhibits high temporal variation or not. Large point cloud variations and significant object motions are the two major challenges in high temporal variation scenarios, and sampling at frame intervals is a natural way to simulate both. Moreover, KITTI-HV provides a preliminary platform for exploring tracking in scenarios such as skipped-tracking, edge devices, and highly dynamic scenes. For a fair comparison with existing methods, we set the frame interval to 2, 3, 5, and 10; we test more densely at low frame intervals to probe the behavior of existing methods under point cloud variations close to the smooth scenario. We train and test all methods from scratch on each frame interval individually.

Evaluation Metrics. We employ One Pass Evaluation [38] to compare the different methods in terms of Success and Precision. Success is determined by measuring the Intersection over Union (IoU) between the proposed bounding box and the ground-truth (GT) bounding box. Precision is evaluated by computing the Area Under the Curve (AUC) of the distance error between the centers of the two bounding boxes, ranging from 0 to 2 meters.
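For reference, the following is a small NumPy sketch of these two metrics computed from per-frame tracking results; the number of threshold samples is an assumption, not a value prescribed by [38].

```python
import numpy as np

def ope_metrics(ious, center_errors, n_thresh=21):
    """Success: AUC of the fraction of frames whose 3D IoU exceeds a
    threshold swept over [0, 1]. Precision: AUC of the fraction of frames
    whose center error (meters) falls below a threshold swept over [0, 2]."""
    ious = np.asarray(ious, dtype=float)
    errs = np.asarray(center_errors, dtype=float)
    success = np.mean([(ious > t).mean() for t in np.linspace(0, 1, n_thresh)])
    precision = np.mean([(errs < t).mean() for t in np.linspace(0, 2, n_thresh)])
    return 100.0 * success, 100.0 * precision

# e.g., a 4-frame tracklet:
# ope_metrics([0.8, 0.6, 0.3, 0.0], [0.1, 0.3, 0.9, 2.4])
```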
# 4.1 Comparison with the State of the Art

Results on HV tracking. We evaluate HVTrack on 4 categories ('Car', 'Pedestrian', 'Van', and 'Cyclist') of the KITTI-HV dataset, following existing methods [24,39,44,45]. The methods we compare with are the most representative SOT methods from 2020 to 2023 (the most cited methods published in each year according to Google Scholar). As illustrated in Tab. 1, our approach consistently outperforms the state-of-the-art methods [24,39,44,45] across all frame intervals, confirming the effectiveness of the proposed tracking framework for high temporal variation scenarios. Notably, the performance gap between HVTrack and the existing trackers widens as the variations are exacerbated. In the particularly challenging scenario of 10 frame intervals, we achieve a substantial 9.1%↑ improvement in Success and a remarkable 10.4%↑ improvement in Precision.

Table 1: Comparison of HVTrack with the state-of-the-art methods on each category of the KITTI-HV dataset. We construct KITTI-HV for training and testing by sampling the KITTI dataset at different frame intervals. Results are reported as Success/Precision; the number of frames per category is given in parentheses. Improvement and deterioration relative to the best existing method are marked with ↑ and ↓, respectively.

2 frame intervals:

| Method | Car (6424) | Pedestrian (6088) | Van (1248) | Cyclist (308) | Mean (14068) |
|---|---|---|---|---|---|
| P2B [24] | 56.3/71.0 | 30.8/53.0 | 33.4/38.4 | 41.8/61.4 | 42.9/60.1 |
| BAT [44] | 61.8/74.2 | 36.5/61.1 | 26.8/30.4 | 54.1/78.7 | 47.6/64.7 |
| M2-Track [45] | 63.0/76.6 | 54.6/81.7 | 52.8/66.5 | 68.3/89.3 | 58.6/78.2 |
| CXTrack [39] | 61.4/70.9 | 62.6/86.3 | 56.0/69.1 | 59.2/76.9 | 61.4/77.5 |
| HVTrack | 67.1/77.5 | 60.0/84.0 | 50.6/61.7 | 73.9/93.6 | 62.7/79.3 |
| Improvement | 4.1↑/0.9↑ | 2.6↓/2.3↓ | 6.0↓/7.4↓ | 5.6↑/4.3↑ | 1.3↑/1.1↑ |

3 frame intervals:

| Method | Car (6424) | Pedestrian (6088) | Van (1248) | Cyclist (308) | Mean (14068) |
|---|---|---|---|---|---|
| P2B [24] | 43.4/51.8 | 27.9/46.8 | 27.9/31.8 | 44.8/64.4 | 35.4/48.1 |
| BAT [44] | 51.7/61.9 | 31.8/53.5 | 24.0/28.2 | 50.5/72.6 | 40.6/55.5 |
| M2-Track [45] | 62.1/72.7 | 51.8/74.3 | 33.6/41.6 | 64.7/82.0 | 55.1/70.8 |
| CXTrack [39] | 47.4/53.1 | 57.9/79.3 | 48.5/58.8 | 40.7/58.4 | 51.9/65.1 |
| HVTrack | 66.8/76.5 | 51.1/71.9 | 38.7/46.9 | 66.5/89.7 | 57.5/72.2 |
| Improvement | 4.7↑/3.8↑ | 6.8↓/7.4↓ | 9.8↓/11.9↓ | 1.8↑/7.7↑ | 2.4↑/1.4↑ |

5 frame intervals:

| Method | Car (6424) | Pedestrian (6088) | Van (1248) | Cyclist (308) | Mean (14068) |
|---|---|---|---|---|---|
| P2B [24] | 39.3/46.1 | 27.4/43.5 | 27.2/30.4 | 35.0/44.4 | 33.0/43.5 |
| BAT [44] | 44.1/51.1 | 21.1/32.8 | 26.1/29.5 | 35.7/46.3 | 32.4/41.1 |
| M2-Track [45] | 50.9/58.6 | 31.6/45.4 | 30.0/36.5 | 47.4/61.0 | 40.6/51.0 |
| CXTrack [39] | 38.6/42.2 | 35.0/47.8 | 21.6/24.3 | 25.7/33.3 | 35.3/42.8 |
| HVTrack | 60.3/68.9 | 35.1/52.1 | 28.7/32.4 | 58.2/71.7 | 46.6/58.5 |
| Improvement | 9.4↑/10.3↑ | 0.1↑/4.3↑ | 1.3↓/4.1↓ | 10.8↑/10.7↑ | 6.0↑/7.5↑ |

10 frame intervals:

| Method | Car (6424) | Pedestrian (6088) | Van (1248) | Cyclist (308) | Mean (14068) |
|---|---|---|---|---|---|
| P2B [24] | 28.6/29.2 | 23.1/31.1 | 25.9/27.3 | 29.1/28.3 | 26.0/29.8 |
| BAT [44] | 30.6/33.1 | 21.7/29.2 | 20.8/20.7 | 29.3/29.1 | 25.9/30.2 |
| M2-Track [45] | 33.0/35.1 | 17.5/24.1 | 20.7/20.8 | 27.7/26.6 | 25.0/28.9 |
| CXTrack [39] | 30.2/32.4 | 18.2/21.4 | 17.5/17.9 | 27.7/26.5 | 23.8/26.2 |
| HVTrack | 49.4/54.7 | 22.5/29.1 | 22.2/23.4 | 39.5/45.4 | 35.1/40.6 |
| Improvement | 16.4↑/19.6↑ | 0.6↓/0.1↓ | 3.7↓/3.9↓ | 10.2↑/16.3↑ | 9.1↑/10.4↑ |
This showcases the robustness of our method under various levels of point cloud variation. Our method delivers outstanding performance on 'Car' and 'Cyclist', with large gains at 5 frame intervals (9.4%↑/10.3%↑ for 'Car' and 10.8%↑/10.7%↑ for 'Cyclist') and 10 frame intervals (16.4%↑/19.6%↑ for 'Car' and 10.2%↑/16.3%↑ for 'Cyclist'). However, the challenge of tracking large objects persists for our method in high temporal variation cases. Note that the performance of CXTrack drops dramatically beyond 3 frame intervals. In particular, in the medium-variation case of 5 frame intervals, we achieve an 11.3%↑/15.7%↑ improvement in overall Success/Precision compared to CXTrack, despite the fact that HVTrack shares the same backbone and RPN as CXTrack [39]. Furthermore, HVTrack surpasses CXTrack on 'Car' and 'Cyclist' by a very large margin (21.7%↑/26.7%↑ for 'Car' and 32.5%↑/38.4%↑ for 'Cyclist'). This distinct performance gap in HV tracking showcases the effectiveness of our feature correlation module design.

Results on regular tracking. For the KITTI dataset, we compare HVTrack with 12 top-performing trackers [7,9,11,12,17,24,28,34,39,44-46]. As shown in Tab. 2, our overall performance is close to that of the SOTA tracker CXTrack [39], and we achieve the second-best average Success (2.0%↓ w.r.t. CXTrack). Note that HVTrack outperforms TAT [17] on average (0.8%↑/0.3%↑), although TAT also utilizes temporal information by concatenating historical template features. This demonstrates our better design for leveraging spatio-temporal context information. However, the performance of HVTrack drops when dealing with large objects ('Van'). We conjecture that this performance drop is caused by CPA, which we explore further in Sec. 4.2.

Table 2: Comparison of HVTrack with the SOTA methods on each category of the KITTI dataset. Results are reported as Success/Precision; the number of frames per category is given in parentheses.
| Method | Car (6424) | Pedestrian (6088) | Van (1248) | Cyclist (308) | Mean (14068) |
|---|---|---|---|---|---|
| SC3D [9] | 41.3/57.9 | 18.2/37.8 | 40.4/47.0 | 41.5/70.4 | 31.2/48.5 |
| P2B [24] | 56.2/72.8 | 28.7/49.6 | 40.8/48.4 | 32.1/44.7 | 42.4/60.0 |
| 3DSiamRPN [7] | 58.2/76.2 | 35.2/56.2 | 45.7/52.9 | 36.2/49.0 | 46.7/64.9 |
| MLVSNet [34] | 56.0/74.0 | 34.1/61.1 | 52.0/61.4 | 34.3/44.5 | 45.7/66.7 |
| BAT [44] | 60.5/77.7 | 42.1/70.1 | 52.4/67.0 | 33.7/45.4 | 51.2/72.8 |
| PTT [28] | 67.8/81.8 | 44.9/72.0 | 43.6/52.5 | 37.2/47.3 | 55.1/74.2 |
| V2B [11] | 70.5/81.3 | 48.3/73.5 | 50.1/58.0 | 40.8/49.7 | 58.4/75.2 |
| PTTR [46] | 65.2/77.4 | 50.9/81.6 | 52.5/61.8 | 65.1/90.5 | 57.9/78.1 |
| STNet [12] | 72.1/84.0 | 49.9/77.2 | 58.0/70.6 | 73.5/93.7 | 61.3/80.1 |
| TAT [17] | 72.2/83.3 | 57.4/84.4 | 58.9/69.2 | 74.2/93.9 | 64.7/82.8 |
| M2-Track [45] | 65.5/80.8 | 61.5/88.2 | 53.8/70.7 | 73.2/93.5 | 62.9/83.4 |
| CXTrack [39] | 69.1/81.6 | 67.0/91.5 | 60.0/71.8 | 74.2/94.3 | 67.5/85.3 |
| HVTrack | 68.2/79.2 | 64.6/90.6 | 54.8/63.8 | 72.4/93.7 | 65.5/83.1 |

Table 3: Comparison of HVTrack with the SOTA methods on the Waymo dataset. Results are reported as Success/Precision over 185,632 'Vehicle' frames and 241,168 'Pedestrian' frames (426,800 in total).
| Method | Vehicle Easy | Vehicle Medium | Vehicle Hard | Vehicle Mean | Pedestrian Easy | Pedestrian Medium | Pedestrian Hard | Pedestrian Mean | Mean |
|---|---|---|---|---|---|---|---|---|---|
| P2B [24] | 57.1/65.4 | 52.0/60.7 | 47.9/58.5 | 52.6/61.7 | 18.1/30.8 | 17.8/30.0 | 17.7/29.3 | 17.9/30.1 | 33.0/43.8 |
| BAT [44] | 61.0/68.3 | 53.3/60.9 | 48.9/57.8 | 54.7/62.7 | 19.3/32.6 | 17.8/29.8 | 17.2/28.3 | 18.2/30.3 | 34.1/44.4 |
| V2B [11] | 64.5/71.5 | 55.1/63.2 | 52.0/62.0 | 57.6/65.9 | 27.9/43.9 | 22.5/36.2 | 20.1/33.1 | 23.7/37.9 | 38.4/50.1 |
| STNet [12] | 65.9/72.7 | 57.5/66.0 | 54.6/64.7 | 59.7/68.0 | 29.2/45.3 | 24.7/38.2 | 22.2/35.8 | 25.5/39.9 | 40.4/52.1 |
| CXTrack [39] | 63.9/71.1 | 54.2/62.7 | 52.1/63.7 | 57.1/66.1 | 35.4/55.3 | 29.7/47.9 | 26.3/44.4 | 30.7/49.4 | 42.2/56.7 |
| HVTrack (Ours) | 66.2/75.2 | 57.0/66.0 | 55.3/67.1 | 59.8/69.7 | 34.2/53.5 | 28.7/47.9 | 26.7/45.2 | 30.0/49.1 | 43.0/58.1 |
For the Waymo dataset, following the benchmark setting of LiDAR-SOT [21] and STNet [12], we test HVTrack on 2 categories ('Vehicle', 'Pedestrian') with 3 difficulty levels. All the methods are pre-trained on KITTI. The results of P2B [24], BAT [44], and V2B [11] on Waymo are provided by STNet [12]. As shown in Tab. 3, our method achieves the best overall performance in Success (0.8%↑) and Precision (1.4%↑). Notably, while HVTrack does not surpass CXTrack on the KITTI benchmark, the opposite holds on the larger Waymo dataset; the improvement on Waymo clearly demonstrates the robustness of our method at large scale. Also, HVTrack surpasses the other SOTA methods on all categories at the 'Hard' difficulty level, revealing its strong ability to handle sparse cases. Overall, the experimental results show that our method handles 3D SOT under various levels of point cloud variation and achieves outstanding performance.

# 4.2 Analysis Experiments

In this section, we extensively analyze HVTrack via a series of experiments. All the experiments are conducted on KITTI-HV with 5 frame intervals unless otherwise stated.

Ablation Study. We conduct experiments to analyze the effectiveness of the different modules in HVTrack. As shown in Tab. 4, we respectively ablate OM, BEA, and CPA from HVTrack. We only ablate OM in RPM because LM and MM serve as the template and are indivisible parts of HVTrack. BEA and CPA are replaced by vanilla cross-attention and self-attention, respectively. In general, all components prove effective: removing any single module degrades the mean performance.

Table 4: Ablation analysis of HVTrack. A ✓ indicates that the corresponding module is enabled.
| OM | BEA | CPA | Car | Pedestrian | Van | Cyclist | Mean |
|---|---|---|---|---|---|---|---|
|   | ✓ | ✓ | 60.0/69.0 | 33.9/50.0 | 28.4/32.2 | 54.2/67.1 | 45.8/57.5 |
| ✓ |   | ✓ | 60.3/69.4 | 35.0/50.2 | 26.7/30.7 | 43.9/61.5 | 46.0/57.5 |
| ✓ | ✓ |   | 58.2/66.9 | 34.7/49.8 | 28.1/33.5 | 47.7/63.9 | 45.1/56.5 |
| ✓ | ✓ | ✓ | 60.3/68.9 | 35.1/52.1 | 28.7/32.4 | 58.2/71.7 | 46.6/58.5 |

Table 5: Ablation experiment of BEA. 'Base'/'Expansion' denotes only using the base/expansion branch in BEA.
| Category | Car | Pedestrian | Van | Cyclist | Mean |
|---|---|---|---|---|---|
| Base | 60.3/69.4 | 35.0/50.2 | 26.7/30.7 | 43.9/61.5 | 46.0/57.5 |
| Expansion | 60.0/68.6 | 34.7/50.5 | 31.4/36.8 | 54.5/67.5 | 46.4/57.9 |
Analysis Experiment of BEA. As shown in Tab. 4, the performance on 'Car' slightly drops when we apply BEA to HVTrack. We conjecture that this is a side effect of aggregating larger scale features in BEA, which introduces more background noise at each point. Moreover, 'Car' is of medium size: it neither suffers from the distraction of crowded similar objects like the small objects ('Pedestrian' and 'Cyclist'), nor requires a larger receptive field like the large objects ('Van'). To verify this, we further analyze each branch of BEA in Tab. 5. 'Pedestrian', 'Van', and 'Cyclist' benefit from the expansion branch, achieving better performance than with only the base branch, whereas the 'Car' category behaves in the opposite way. These results validate our hypothesis that BEA benefits small and large objects, while slightly hurting medium-sized ones.

Analysis Experiment of CPA. As shown in Tab. 4, our method yields better results on 'Van' after removing CPA, which reveals a relation between CPA and the challenge of tracking large objects. We believe this is caused by the suppression strategy in CPA. Large objects usually contain more points, so under the same probability of importance misclassification, more of their foreground points are assigned low importance in the attention map, and part of the useful information is thus suppressed by CPA. As shown in Fig. 5b, such an importance conflict within the object leads to tracking failure. This information is suppressed further when multiple transformer layers are stacked. For the medium and small object categories, however, the performance drops without CPA to suppress the background noise. As shown in Fig. 5a, most of the background points are assigned low importance and suppressed in the success case, which supports the design of CPA.

![](images/f392ef54e3800668ca3216913b5128bcabd2bf09928622bc88a2f19690d53e23.jpg)
Fig. 5: The attention maps of 'Van' in CPA.

![](images/a4c7ba14639332f576747313a68f34f186a9f622a44f1775cfb2fb46fa6bf851.jpg)

![](images/6e12dc51e1aefee041843a0adbe73eafe29db5f056fd2aad348590f15ab93a9b.jpg)

Table 6: Results of HVTrack when using different memory sizes. We train HVTrack with a memory size of 2, and evaluate it with memory sizes ranging from 1 to 8.
| Memory Size | Car | Pedestrian | Van | Cyclist | Mean |
|---|---|---|---|---|---|
| 1 | 58.3/66.5 | 30.9/46.2 | 26.8/29.8 | 57.1/70.5 | 43.6/54.6 |
| 2 | 58.6/67.0 | 31.7/47.9 | 27.1/30.6 | 57.6/70.9 | 44.1/55.6 |
| 3 | 59.2/67.6 | 33.8/49.9 | 27.7/31 | 55.8/67.7 | 45.3/56.7 |
| 4 | 60.0/68.5 | 33.7/50.6 | 29.5/33.6 | 57.9/71.3 | 45.9/57.7 |
| 5 | 60.0/68.5 | 33.8/51.2 | 28.7/32.6 | 57.8/70.8 | 45.8/57.9 |
| 6 | 60.3/68.9 | 35.1/52.1 | 28.7/32.4 | 58.2/71.7 | 46.6/58.5 |
| 7 | 59.7/68.2 | 35.6/52.9 | 28.0/31.5 | 58.1/71.4 | 46.4/58.4 |
| 8 | 59.8/68.3 | 35.1/52.4 | 28.2/32.0 | 58.1/71.4 | 46.3/58.3 |
Memory Size. Intuitively, trackers should achieve better performance when leveraging more temporal information. However, performance cannot improve indefinitely as historical information accumulates, due to inaccuracies in the historical tracklets. As shown in Tab. 6, we train HVTrack with a memory size of 2 due to the GPU memory limitation, and evaluate it with memory sizes from 1 to 8. The performance peaks at a memory size of 6, which is consistent with our assumption. Thus, we set the memory size to 6 and achieve a tracking speed of 31 FPS.

# 5 Conclusion

In this paper, we have explored a new task in 3D SOT, and presented the first 3D SOT framework for high temporal variation scenarios, HVTrack. Its three main components, RPM, BEA, and CPA, make HVTrack robust to point cloud variations, similar object distractions, and background noise. Our experiments have demonstrated that HVTrack significantly outperforms the state of the art in high temporal variation scenarios, and achieves remarkable performance in regular tracking.

**Limitation.** Our CPA relies on fixed manual hyperparameters to suppress noise. This makes it difficult to balance the performance across different object and search area sizes, leading to a performance drop when tracking large objects. In the future, we will therefore explore the use of a learnable function to replace the manual hyperparameters in CPA and overcome the large object tracking challenge.

# Acknowledgements

This work is supported in part by the National Natural Science Foundation of China (NSFC) under Grants 62372377 and 62176242.

# References

1. Chen, X., Shi, S., Zhang, C., Zhu, B., Wang, Q., Cheung, K.C., See, S., Li, H.: Trajectoryformer: 3d object tracking transformer with predictive trajectory hypotheses. arXiv preprint arXiv:2306.05888 (2023)
2. Cheng, R., Wang, X., Sohel, F., Lei, H.: Topology-aware universal adversarial attack on 3d object tracking. Visual Intelligence 1(1), 31 (2023)
3. Chiu, H.k., Prioletti, A., Li, J., Bohg, J.: Probabilistic 3d multi-object tracking for autonomous driving. arXiv preprint arXiv:2001.05673 (2020)
4. Chung, J., Gulcehre, C., Cho, K., Bengio, Y.: Empirical evaluation of gated recurrent neural networks on sequence modeling. arXiv preprint arXiv:1412.3555 (2014)
5. Cui, Y., Fang, Z., Shan, J., Gu, Z., Zhou, S.: 3d object tracking with transformer. arXiv preprint arXiv:2110.14921 (2021)
6. Ding, S., Rehder, E., Schneider, L., Cordts, M., Gall, J.: 3dmotformer: Graph transformer for online 3d multi-object tracking. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9784-9794 (2023)
7. Fang, Z., Zhou, S., Cui, Y., Scherer, S.: 3d-siamrpn: An end-to-end learning method for real-time 3d single object tracking using raw point cloud. IEEE Sensors Journal 21(4), 4995-5011 (2020)
8. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? The KITTI vision benchmark suite. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 3354-3361 (2012)
9. Giancola, S., Zarzar, J., Ghanem, B.: Leveraging shape completion for 3d siamese tracking. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1359-1368 (2019)
10. Guo, Z., Mao, Y., Zhou, W., Wang, M., Li, H.: Cmt: Context-matching-guided transformer for 3d tracking in point clouds. In: European Conference on Computer Vision. pp. 95-111. Springer (2022)
11. Hui, L., Wang, L., Cheng, M., Xie, J., Yang, J.: 3d siamese voxel-to-bev tracker for sparse point clouds. Advances in Neural Information Processing Systems 34, 28714-28727 (2021)
12. Hui, L., Wang, L., Tang, L., Lan, K., Xie, J., Yang, J.: 3d siamese transformer network for single object tracking on point clouds. arXiv preprint arXiv:2207.11995 (2022)
13. Jiao, L., Wang, D., Bai, Y., Chen, P., Liu, F.: Deep learning in visual tracking: A review. IEEE Transactions on Neural Networks and Learning Systems (2021)
14. Jiayao, S., Zhou, S., Cui, Y., Fang, Z.: Real-time 3d single object tracking with transformer. IEEE Transactions on Multimedia (2022)
15. Kapania, S., Saini, D., Goyal, S., Thakur, N., Jain, R., Nagrath, P.: Multi object tracking with uavs using deep sort and yolov3 retina detection framework. In: Proceedings of the 1st ACM Workshop on Autonomous and Intelligent Mobile Systems. pp. 1-6 (2020)
16. Kart, U., Lukezic, A., Kristan, M., Kamarainen, J.K., Matas, J.: Object tracking by reconstruction with view-specific discriminative correlation filters. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 1339-1348 (2019)
17. Lan, K., Jiang, H., Xie, J.: Temporal-aware siamese tracker: Integrate temporal context for 3d object tracking. In: Proceedings of the Asian Conference on Computer Vision. pp. 399-414 (2022)
18. Luo, C., Yang, X., Yuille, A.: Exploring simple 3d multi-object tracking for autonomous driving. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10488-10497 (2021)
19. Machida, E., Cao, M., Murao, T., Hashimoto, H.: Human motion tracking of mobile robot with Kinect 3d sensor. In: Proceedings of SICE Annual Conference (SICE). pp. 2207-2211. IEEE (2012)
20. Nishimura, H., Komorita, S., Kawanishi, Y., Murase, H.: Sdof-tracker: Fast and accurate multiple human tracking by skipped-detection and optical-flow. IEICE Transactions on Information and Systems 105(11), 1938-1946 (2022)
21. Pang, Z., Li, Z., Wang, N.: Model-free vehicle tracking and state estimation in point cloud sequences. In: 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 8075-8082. IEEE (2021)
22. Qi, C.R., Litany, O., He, K., Guibas, L.J.: Deep hough voting for 3d object detection in point clouds. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9277-9286 (2019)
23. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in Neural Information Processing Systems 30 (2017)
24. Qi, H., Feng, C., Cao, Z., Zhao, F., Xiao, Y.: P2b: Point-to-box network for 3d object tracking in point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 6329-6338 (2020)
25. Ren, C., Xu, Q., Zhang, S., Yang, J.: Hierarchical prior mining for non-local multi-view stereo. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 3611-3620 (2023)
26. Ren, S., Yang, X., Liu, S., Wang, X.: Sg-former: Self-guided transformer with evolving token reallocation. arXiv preprint arXiv:2308.12216 (2023)
27. Sadjadpour, T., Li, J., Ambrus, R., Bohg, J.: Shasta: Modeling shape and spatio-temporal affinities for 3d multi-object tracking. IEEE Robotics and Automation Letters (2023)
28. Shan, J., Zhou, S., Fang, Z., Cui, Y.: Ptt: Point-track-transformer module for 3d single object tracking in point clouds. In: Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 1310-1316 (2021)
29. Srivastava, N., Hinton, G., Krizhevsky, A., Sutskever, I., Salakhutdinov, R.: Dropout: A simple way to prevent neural networks from overfitting. The Journal of Machine Learning Research 15(1), 1929-1958 (2014)
30. Sun, P., Kretzschmar, H., Dotiwalla, X., Chouard, A., Patnaik, V., Tsui, P., Guo, J., Zhou, Y., Chai, Y., Caine, B., et al.: Scalability in perception for autonomous driving: Waymo open dataset. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2446-2454 (2020)
31. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. Advances in Neural Information Processing Systems 30 (2017)
32. Wang, Q., Chen, Y., Pang, Z., Wang, N., Zhang, Z.: Immortal tracker: Tracklet never dies. arXiv preprint arXiv:2111.13672 (2021)
33. Wang, Y., Sun, Y., Liu, Z., Sarma, S.E., Bronstein, M.M., Solomon, J.M.: Dynamic graph cnn for learning on point clouds. ACM Transactions on Graphics (TOG) 38(5), 1-12 (2019)
34. Wang, Z., Xie, Q., Lai, Y.K., Wu, J., Long, K., Wang, J.: Mlvsnet: Multi-level voting siamese network for 3d visual tracking. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 3101-3110 (2021)
35. Weng, X., Wang, J., Held, D., Kitani, K.: 3d multi-object tracking: A baseline and new evaluation metrics. In: 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 10359-10366. IEEE (2020)
36. Weng, X., Wang, Y., Man, Y., Kitani, K.M.: Gnn3dmot: Graph neural network for 3d multi-object tracking with 2d-3d multi-feature learning. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 6499-6508 (2020)
37. Wu, Q., Yang, J., Sun, K., Zhang, C., Zhang, Y., Salzmann, M.: Mixcycle: Mixup assisted semi-supervised 3d single object tracking with cycle consistency. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). pp. 13956-13966 (2023)
38. Wu, Y., Lim, J., Yang, M.H.: Online object tracking: A benchmark. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 2411-2418 (2013)
39. Xu, T.X., Guo, Y.C., Lai, Y.K., Zhang, S.H.: Cxtrack: Improving 3d point cloud tracking with contextual information. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 1084-1093 (2023)
40. Yin, T., Zhou, X., Krähenbühl, P.: Center-based 3d object detection and tracking. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11784-11793 (2021)
41. Yoo, J.S., Lee, H., Jung, S.W.: Video object segmentation-aware video frame interpolation. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 12322-12333 (2023)
42. Zarzar, J., Giancola, S., Ghanem, B.: Efficient bird eye view proposals for 3d siamese tracking. arXiv preprint arXiv:1903.10168 (2019)
43. Zhang, X., Yang, J., Zhang, S., Zhang, Y.: 3d registration with maximal cliques. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 17745-17754 (2023)
44. Zheng, C., Yan, X., Gao, J., Zhao, W., Zhang, W., Li, Z., Cui, S.: Box-aware feature enhancement for single object tracking on point clouds. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 13199-13208 (2021)
45. Zheng, C., Yan, X., Zhang, H., Wang, B., Cheng, S., Cui, S., Li, Z.: Beyond 3d siamese tracking: A motion-centric paradigm for 3d single object tracking in point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8111-8120 (2022)
46. Zhou, C., Luo, Z., Luo, Y., Liu, T., Pan, L., Cai, Z., Zhao, H., Lu, S.: Pttr: Relational 3d point cloud object tracking with transformer. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8531-8540 (2022)
47. Zhou, X., Koltun, V., Krähenbühl, P.: Tracking objects as points. In: European Conference on Computer Vision. pp. 474-490. Springer (2020)
"lines": [ + { + "bbox": [ + 222, + 201, + 391, + 213 + ], + "spans": [ + { + "bbox": [ + 222, + 201, + 391, + 213 + ], + "type": "text", + "content": "1 Northwestern Polytechnical University" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 219, + 213, + 394, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 213, + 394, + 224 + ], + "spans": [ + { + "bbox": [ + 219, + 213, + 394, + 224 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 219, + 213, + 394, + 224 + ], + "type": "text", + "content": " China University of Geosciences, Wuhan" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 203, + 224, + 410, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 224, + 410, + 235 + ], + "spans": [ + { + "bbox": [ + 203, + 224, + 410, + 235 + ], + "type": "text", + "content": "3 HuaZhong University of Science and Technology" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 203, + 235, + 408, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 235, + 408, + 257 + ], + "spans": [ + { + "bbox": [ + 203, + 235, + 408, + 257 + ], + "type": "text", + "content": "4 École Polytechnique Fédérale de Lausanne qiaowu@mail.nwu.edu.cn, jqyang@nwpu.edu.cn" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 159, + 285, + 455, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 285, + 455, + 462 + ], + "spans": [ + { + "bbox": [ + 159, + 285, + 455, + 462 + ], + "type": "text", + "content": "Abstract. The high temporal variation of the point clouds is the key challenge of 3D single-object tracking (3D SOT). Existing approaches rely on the assumption that the shape variation of the point clouds and the motion of the objects across neighboring frames are smooth, failing to cope with high temporal variation data. In this paper, we present a novel framework for 3D SOT in point clouds with high temporal variation, called HVTrack. HVTrack proposes three novel components to tackle the challenges in the high temporal variation scenario: 1) A Relative-Pose-Aware Memory module to handle temporal point cloud shape variations; 2) a Base-Expansion Feature Cross-Attention module to deal with similar object distractions in expanded search areas; 3) a Contextual Point Guided Self-Attention module for suppressing heavy background noise. We construct a dataset with high temporal variation (KITTI-HV) by setting different frame intervals for sampling in the KITTI dataset. On the KITTI-HV with 5 frame intervals, our HVTrack surpasses the state-of-the-art tracker CXTracker by " + }, + { + "bbox": [ + 159, + 285, + 455, + 462 + ], + "type": "inline_equation", + "content": "11.3\\% / 15.7\\%" + }, + { + "bbox": [ + 159, + 285, + 455, + 462 + ], + "type": "text", + "content": " in Success/Precision." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 159, + 472, + 453, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 472, + 453, + 495 + ], + "spans": [ + { + "bbox": [ + 159, + 472, + 453, + 495 + ], + "type": "text", + "content": "Keywords: 3D single-object tracking " + }, + { + "bbox": [ + 159, + 472, + 453, + 495 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 159, + 472, + 453, + 495 + ], + "type": "text", + "content": " High temporal variation " + }, + { + "bbox": [ + 159, + 472, + 453, + 495 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 159, + 472, + 453, + 495 + ], + "type": "text", + "content": " Point cloud" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 514, + 230, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 514, + 230, + 528 + ], + "spans": [ + { + "bbox": [ + 132, + 514, + 230, + 528 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 539, + 482, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 539, + 482, + 647 + ], + "spans": [ + { + "bbox": [ + 130, + 539, + 482, + 647 + ], + "type": "text", + "content": "3D single-object tracking (3D SOT) is pivotal for autonomous driving [2,40] and robotics [16, 19, 25, 43]. Given the target point cloud and 3D bounding box as template, the goal of 3D SOT is to regress the target 3D poses in the tracking point cloud sequence. Existing approaches [5,7,9-12,24,28,34,39,44-46] rely on the assumption that the point cloud variations and motion of the object across neighboring frames are relatively smooth. They crop out a small search area around the last proposal for tracking, thus dramatically reducing the complexity of the problem. The template and search area features are then typically correlated as shown in Fig. 1a, and used to regress the 3D bounding box." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 653, + 236, + 666 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 653, + 236, + 666 + ], + "spans": [ + { + "bbox": [ + 133, + 653, + 236, + 666 + ], + "type": "text", + "content": "* Corresponding author." + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 132, + 114, + 481, + 181 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 481, + 181 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 481, + 181 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 481, + 181 + ], + "type": "image", + "image_path": "b61358caaf88a58a50d563d5ad68178071fa021fe560a3ab69e67133729ccba2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 191, + 482, + 269 + ], + "lines": [ + { + "bbox": [ + 130, + 191, + 482, + 269 + ], + "spans": [ + { + "bbox": [ + 130, + 191, + 482, + 269 + ], + "type": "text", + "content": "Fig. 1: Feature correlation in 3D SOT. (a) Feature correlation in the smooth case (1 frame interval). Correlating the features is relatively trivial as the target undergoes only small shape variations, and the observation angles are consistent in the three frames. (b-c) Feature correlation in high temporal variation cases (10 frames interval). The pose relative to the camera changes rapidly. Correlating the features using historical information is highly challenging (b). 
We encode the historical observation angles " + }, + { + "bbox": [ + 130, + 191, + 482, + 269 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 130, + 191, + 482, + 269 + ], + "type": "text", + "content": " into the features to guide the variation of relative pose to the camera (c)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 292, + 482, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 292, + 482, + 364 + ], + "spans": [ + { + "bbox": [ + 130, + 292, + 482, + 364 + ], + "type": "text", + "content": "In practice, these approaches are challenged by the presence of large point cloud variations due to the limited sensor temporal resolution and the moving speed of objects as shown in Fig. 1b. We refer to this significant variation in point cloud and object position between two frames as the high temporal variation (HV). The high temporal variation challenge is non-negligible in existing benchmarks, and exists in other scenarios not yet covered by them, such as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 137, + 371, + 480, + 453 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 137, + 371, + 479, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 371, + 479, + 407 + ], + "spans": [ + { + "bbox": [ + 137, + 371, + 479, + 407 + ], + "type": "text", + "content": "- Skipped-tracking, which can greatly reduce computational consumption in tracking and serve a wide range of other tasks such as detection [20] and segmentation [41]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 407, + 480, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 407, + 480, + 430 + ], + "spans": [ + { + "bbox": [ + 138, + 407, + 480, + 430 + ], + "type": "text", + "content": "- Tracking in edge devices, which is essential for deploying trackers on common devices with limited frame rate, resolution, computation, and power etc." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 137, + 431, + 479, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 431, + 479, + 453 + ], + "spans": [ + { + "bbox": [ + 137, + 431, + 479, + 453 + ], + "type": "text", + "content": "- Tracking in highly dynamic scenarios [15], which is common in life. For example, tracking in sports events, highway, and UAV scenarios." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 130, + 462, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 462, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 462, + 482, + 666 + ], + "type": "text", + "content": "There are three challenges for 3D SOT in HV point clouds, and existing approaches are not sufficient to address these challenges. 1) Strong shape variations of the point clouds: Point cloud shape variations are usually caused by the occlusion and relative pose transformation between the object and the sensor. As illustrated in Fig. 1b, feature correlation in existing approaches fails because of the dramatic change in the density and distribution of points. 2) Distractions due to similar objects: When objects suffer from a significant motion, the search area needs to be enlarged to incorporate the target, thus introducing more distractions from similar objects. 
Most of the existing trackers focus on local scale features, which discards environmental spatial contextual information to handle distractions. 3) Heavy background noise: The expansion of the search area further reduces the proportion of target information in the scene. While aiming to find the high template-response features in the feature correlation stage, existing methods then neglect to suppress the noise interference and reduce the impact of noise features. We evaluate state-of-the-art (SOTA) trackers [24,39,44,45] in the high temporal variation scenario as shown in Fig. 2. Their performance drops dramatically as the temporal variation of scene point clouds enlarges." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "text", + "content": "Q. Wu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 203, + 114, + 309, + 238 + ], + "blocks": [ + { + "bbox": [ + 203, + 114, + 309, + 238 + ], + "lines": [ + { + "bbox": [ + 203, + 114, + 309, + 238 + ], + "spans": [ + { + "bbox": [ + 203, + 114, + 309, + 238 + ], + "type": "image", + "image_path": "0a79de94d6bee311fd49a27bd2643270542e0f6a519c9ac96df629629bc43d48.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 247, + 480, + 270 + ], + "lines": [ + { + "bbox": [ + 131, + 247, + 480, + 270 + ], + "spans": [ + { + "bbox": [ + 131, + 247, + 480, + 270 + ], + "type": "text", + "content": "Fig. 2: Comparison of HVTrack with the SOTAs [24,39,44,45] on 'Car' from KITTI-HV (KITTI [8] with different frame intervals, see Sec. 4)." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 309, + 114, + 411, + 238 + ], + "blocks": [ + { + "bbox": [ + 309, + 114, + 411, + 238 + ], + "lines": [ + { + "bbox": [ + 309, + 114, + 411, + 238 + ], + "spans": [ + { + "bbox": [ + 309, + 114, + 411, + 238 + ], + "type": "image", + "image_path": "979aa47cf04a5ba1c9a7068f41de170d603ca2467e05c7be82fb26d172676d5a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 295, + 482, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 295, + 482, + 547 + ], + "spans": [ + { + "bbox": [ + 130, + 295, + 482, + 547 + ], + "type": "text", + "content": "To address the above challenges, we propose a novel framework for 3D SOT in point clouds with High temporal Variation, which we call HVTrack. Specifically, we propose three novel modules to address each of the three above-mentioned challenges. 1) A Relative-Pose-Aware Memory (RPM) module to handle the strong shape variations of the point clouds. Different from [17], we integrate the foreground masks and observation angles into the memory bank. Therefore, the model can implicitly learn the distribution variation of point clouds from the relative pose in time. 
The information arising from observation angles has been overlooked by all existing trackers. 2) A Base-Expansion Feature Cross-Attention (BEA) module to deal with the problem of similar object distractions occurring in large scenes. We synchronize the correlation of the hybrid scales features (base and expansion scales, Sec. 3.4) in the cross-attention, and efficiently utilize spatial contextual information. 3) A Contextual Point Guided Self-Attention (CPA) module to suppress the background noise introduced by the expanded search area. It aggregates the features of points into contextual points according to their importance. Less important points share fewer contextual points and vice versa, thus suppressing most of the background noise. BEA and CPA are inspired by the SGFormer [26], which utilizes hybrid scale significance maps to assign more tokens to salient regions of 2D images. Our experiments clearly demonstrate the remarkable performance of HVTrack in high temporal variation scenarios, as illustrated in Fig. 2. Our contributions can be summarized as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 556, + 480, + 664 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 138, + 556, + 480, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 556, + 480, + 590 + ], + "spans": [ + { + "bbox": [ + 138, + 556, + 480, + 590 + ], + "type": "text", + "content": "- For the first time, to the best of our knowledge, we explore the new 3D SOT task for high temporal variation scenarios, and propose a novel framework called HVTrack for the task." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 593, + 480, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 593, + 480, + 628 + ], + "spans": [ + { + "bbox": [ + 138, + 593, + 480, + 628 + ], + "type": "text", + "content": "- We propose three novel modules, RPM, BEA, and CPA, to address three challenges for 3D SOT in HV point clouds: strong point cloud variations, similar object distractions, and heavy background noise." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 629, + 480, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 629, + 480, + 664 + ], + "spans": [ + { + "bbox": [ + 138, + 629, + 480, + 664 + ], + "type": "text", + "content": "- HVTrack yields state-of-the-art results on KITTI-HV and Waymo, and ranks second on KITTI. Our experimental results demonstrate the robustness of HVTrack in both smooth and high temporal variation cases." 
+ } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D SOT in Point Clouds with High Temporal Variation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 114, + 238, + 127 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 114, + 238, + 127 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 238, + 127 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 143, + 293, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 143, + 293, + 156 + ], + "spans": [ + { + "bbox": [ + 132, + 143, + 293, + 156 + ], + "type": "text", + "content": "2.1 3D Single-object Tracking" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 166, + 482, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 166, + 482, + 429 + ], + "spans": [ + { + "bbox": [ + 130, + 166, + 482, + 429 + ], + "type": "text", + "content": "Most of the 3D SOT approaches are based on a Siamese framework, because the appearance variations of the target between neighboring frames are not significant. The work of Giancola et al. [9] constitutes the pioneering method in 3D SOT. However, it only solved the discriminative feature learning problem, and used a time-consuming and inaccurate heuristic matching to locate the target. Zarzar et al. [42] utilized a 2D RPN in bird's eyes view to build an end-to-end tracker. The P2B network [24] employs VoteNet [22] as RPN and constructs the first point-based tracker. The following works [7, 11, 12, 28, 34, 44] develop different architectures of trackers based on P2B [24]. V2B [11] leverages the target completion model to generate the dense and complete targets and proposes a simple yet effective voxel-to-BEV target localization network. BAT [44] utilizes the relationship between points and the bounding box, integrating the box information into the point clouds. With the development of transformer networks, a number of works [5, 10, 12, 28, 39, 46] have proposed to exploit various attention mechanisms. STNet [12] forms an iterative coarse-to-fine cross-and self-attention to correlate the target and search area. CXTrack [39] employs a target-centric transformer to integrate targetness information and contextual information. TAT [17] leverages the temporal information to integrate target cues by applying an RNN-based [4] correlation module. Zheng et al. [45] presented a motion-centric method M2-Track, which is appearance matching-free and has made great progress in dealing with the sparse point cloud tracking problem. Wu et al. [37] proposed the first semi-supervised framework in 3D SOT." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 430, + 483, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 430, + 483, + 502 + ], + "spans": [ + { + "bbox": [ + 130, + 430, + 483, + 502 + ], + "type": "text", + "content": "While effective in their context, the above methods are designed based on the assumption that the point cloud variation and motion of the objects across neighboring frames are not significant. In high temporal variation scenarios, this assumption will lead to performance degradation because of the point cloud variations and interference naturally occurring in large scenes. Here, we introduce HVTrack to tackle the challenges of 3D SOT in high temporal variation scenarios." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 522, + 290, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 522, + 290, + 536 + ], + "spans": [ + { + "bbox": [ + 132, + 522, + 290, + 536 + ], + "type": "text", + "content": "2.2 3D Multi-object Tracking" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "type": "text", + "content": "3D multi-object tracking (MOT) in point clouds follows two main streams: Tracking-by-detection, and learning-based methods. Tracking-by-detection [1, 3, 32, 35] usually exploits methods such as Kalman filtering to correlate the detection results and track the targets. CenterTrack [47], CenterPoint [40], and SimTrack [18] replace the filter by leveraging deep networks to predict the velocity and motion of the objects. The learning-based methods [6, 27, 36] typically apply a Graph Neural Network to tackle the association challenge in MOT. GNN3DMOT [36] leverages both 2D images and 3D point clouds to obtain a robust association. 3DMOTFormer [6] constructs a graph transformer framework and achieves a good performance using only 3D point clouds." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 141, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 141, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 141, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "text", + "content": "Q. Wu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 200 + ], + "type": "text", + "content": "3D MOT and 3D SOT have different purposes and their own challenges [13]. 3D MOT is object-level and focuses on correlating detected objects, whereas 3D SOT is intra-object-level [14] and aims to track a single object given a template. 3D SOT methods usually come with much lower computational consumption and higher throughput [46]. Also, 3D MOT is free from the challenges posed by the dynamic change in the search area size, as MOT is not required to adopt the search area cropping strategy in SOT." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 219, + 202, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 219, + 202, + 232 + ], + "spans": [ + { + "bbox": [ + 132, + 219, + 202, + 232 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 244, + 258, + 256 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 244, + 258, + 256 + ], + "spans": [ + { + "bbox": [ + 132, + 244, + 258, + 256 + ], + "type": "text", + "content": "3.1 Problem Definition" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 265, + 482, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 265, + 482, + 373 + ], + "spans": [ + { + "bbox": [ + 130, + 265, + 482, + 373 + ], + "type": "text", + "content": "Given the template of the target, the goal of 3D SOT is to continually locate the poses of the target in the search area point cloud sequence " + }, + { + "bbox": [ + 130, + 265, + 482, + 373 + ], + "type": "inline_equation", + "content": "\\mathbf{P}^s = \\{P_0^s,\\dots ,P_t^s,\\dots ,P_n^s |P_t^s\\in \\mathbb{R}^{N_s\\times 3}\\}" + }, + { + "bbox": [ + 130, + 265, + 482, + 373 + ], + "type": "text", + "content": ". Usually, the target point cloud with labels in the first frame is regarded as the template. Former trackers [5,7,9-12,24,28, 34,39,44-46] leverage a 3D bounding box label " + }, + { + "bbox": [ + 130, + 265, + 482, + 373 + ], + "type": "inline_equation", + "content": "B_{0} = (x,y,z,w,l,h,\\theta)\\in \\mathbb{R}^{7}" + }, + { + "bbox": [ + 130, + 265, + 482, + 373 + ], + "type": "text", + "content": " to generate the template in the input. Here, " + }, + { + "bbox": [ + 130, + 265, + 482, + 373 + ], + "type": "inline_equation", + "content": "(x,y,z)" + }, + { + "bbox": [ + 130, + 265, + 482, + 373 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 265, + 482, + 373 + ], + "type": "inline_equation", + "content": "(w,l,h)" + }, + { + "bbox": [ + 130, + 265, + 482, + 373 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 265, + 482, + 373 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 130, + 265, + 482, + 373 + ], + "type": "text", + "content": " are the center location, bounding box size (width, length, and height), and rotation angle of the target, respectively. As objects can be assumed to be rigid, the trackers only need to regress the center and rotation angle of the target." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 391, + 208, + 402 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 391, + 208, + 402 + ], + "spans": [ + { + "bbox": [ + 132, + 391, + 208, + 402 + ], + "type": "text", + "content": "3.2 Overview" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "spans": [ + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "text", + "content": "We propose HVTrack to exploit both temporal and spatial information and achieve robust tracking in high temporal variation scenarios. As shown in Fig. 
3, we take the point cloud " + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "inline_equation", + "content": "P_{t}^{s}" + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "text", + "content": " at time " + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "text", + "content": " as the search area, and leverage memory banks as the template. We first employ a backbone to extract the local spatial features " + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "inline_equation", + "content": "\\mathcal{X}_0 \\in \\mathbb{R}^{N \\times C}" + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "inline_equation", + "content": "P_{t}^{s}" + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "text", + "content": " the point number and feature channel, respectively. Then, " + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "text", + "content": " transformer layers are employed to extract spatio-temporal information. For each layer " + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "text", + "content": ", (i) we capture the template information " + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "inline_equation", + "content": "Mem_{l} \\in \\mathbb{R}^{KN \\times C}" + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "text", + "content": " from the Relative-Pose-Aware Memory module, with " + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "text", + "content": " the memory bank size (Sec. 3.3); (ii) the memory features and search area features " + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "inline_equation", + "content": "\\mathcal{X}_{l-1}" + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "text", + "content": " are correlated in the Base-Expansion Features Cross-Attention (Sec. 3.4); (iii) the Contextual Point Guided Self-Attention (Sec. 3.5) leverages the attention map in the Base-Expansion Features Cross-Attention to suppress the noise features; (iv) we update the Layer Features memory bank using " + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "inline_equation", + "content": "\\mathcal{X}_{l-1}" + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "text", + "content": ". 
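To make this per-frame pipeline concrete, the following is a minimal PyTorch-style sketch of one tracking step, placed before the RPN stage described next. Every name here (backbone, rpm, bea, cpa, rpn, the banks.update_* methods) is a hypothetical placeholder for the components of Secs. 3.3-3.5, not the authors' released code:

import torch
from torch import nn

class HVTrackStep(nn.Module):
    # Sketch of one HVTrack forward step (Fig. 3); submodules are assumed
    # to implement RPM (Sec. 3.3), BEA (Sec. 3.4), and CPA (Sec. 3.5).
    def __init__(self, backbone, rpm, bea_layers, cpa_layers, rpn):
        super().__init__()
        self.backbone, self.rpm, self.rpn = backbone, rpm, rpn
        self.bea = nn.ModuleList(bea_layers)
        self.cpa = nn.ModuleList(cpa_layers)

    def forward(self, search_pts, banks):
        x = self.backbone(search_pts)          # local features X_0, (N, C)
        for l in range(len(self.bea)):
            mem = self.rpm(banks, layer=l)     # template features, (K*N, C)
            x_hat, attn = self.bea[l](x, mem)  # hybrid-scale cross-attention
            banks.update_layer_features(l, x)  # LM stores the layer input
            x = self.cpa[l](x_hat, attn)       # contextual-point suppression
        box, mask, alpha = self.rpn(x)         # location, mask, obs. angle
        banks.update_mask_and_angle(mask, alpha)
        return box, mask, alpha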
After the transformer layers, an RPN is applied to regress the location " + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "inline_equation", + "content": "(x_{t}, y_{t}, z_{t}, \\theta_{t})" + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "text", + "content": ", the mask " + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{t} \\in \\mathbb{R}^{N \\times 1}" + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "text", + "content": ", and the observation angle " + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "inline_equation", + "content": "\\alpha \\in \\mathbb{R}^2" + }, + { + "bbox": [ + 130, + 411, + 482, + 604 + ], + "type": "text", + "content": ". Finally, the mask and observation angle memory banks are updated using the predicted results." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 621, + 353, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 621, + 353, + 633 + ], + "spans": [ + { + "bbox": [ + 132, + 621, + 353, + 633 + ], + "type": "text", + "content": "3.3 Relative-Pose-Aware Memory Module" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 641, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 481, + 666 + ], + "type": "text", + "content": "As shown in Fig. 1(b), rapid changes in relative pose lead to large variations in the shape of the object point cloud across the frames. Correlating the object" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D SOT in Point Clouds with High Temporal Variation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 141, + 114, + 471, + 222 + ], + "blocks": [ + { + "bbox": [ + 141, + 114, + 471, + 222 + ], + "lines": [ + { + "bbox": [ + 141, + 114, + 471, + 222 + ], + "spans": [ + { + "bbox": [ + 141, + 114, + 471, + 222 + ], + "type": "image", + "image_path": "c0cc4a36f7d581f333a86b58541c438110d97eeed1b64756128fa6dd0d4642a2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 228, + 482, + 340 + ], + "lines": [ + { + "bbox": [ + 130, + 228, + 482, + 340 + ], + "spans": [ + { + "bbox": [ + 130, + 228, + 482, + 340 + ], + "type": "text", + "content": "Fig. 3: HVTrack framework. We first utilize a backbone to extract the local embedding features of the search area. Then, we construct " + }, + { + "bbox": [ + 130, + 228, + 482, + 340 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 130, + 228, + 482, + 340 + ], + "type": "text", + "content": " transformer layers to fuse spatio-temporal information. 
For each transformer layer, (i) we apply three memory bank features in the Relative-Pose-Aware Memory module to generate temporal template information; (ii) we employ the Base-Expansion Feature Cross-Attention to correlate the template and search area by leveraging hybrid scale spatial context-aware features; (iii) we introduce a Contextual Point Guided Self-Attention to suppress unimportant noise. After each layer, we update the layer features memory bank using the layer input. Finally, we apply an RPN to regress the 3D bounding box, and update the mask and observation angle memory banks." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 365, + 482, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 365, + 482, + 426 + ], + "spans": [ + { + "bbox": [ + 130, + 365, + 482, + 426 + ], + "type": "text", + "content": "features in " + }, + { + "bbox": [ + 130, + 365, + 482, + 426 + ], + "type": "inline_equation", + "content": "(t - 2, t - 1, t)" + }, + { + "bbox": [ + 130, + 365, + 482, + 426 + ], + "type": "text", + "content": " then becomes difficult, as they have a low overlap with each other. To address this, we introduce the observation angle into the memory bank. The observation angle gives us knowledge of the coarse distribution of an object's point cloud. Thus, the model can learn the variations in point cloud distribution from the historical changes of observation angle." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 427, + 482, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 427, + 482, + 523 + ], + "spans": [ + { + "bbox": [ + 130, + 427, + 482, + 523 + ], + "type": "text", + "content": "To exploit the temporal information as the template, we propose a Relative-Pose-Aware Memory (RPM) module. RPM contains 3 memory banks. 1) A layer features memory bank (LM) " + }, + { + "bbox": [ + 130, + 427, + 482, + 523 + ], + "type": "inline_equation", + "content": "\\in \\mathbb{R}^{L\\times K\\times N\\times C}" + }, + { + "bbox": [ + 130, + 427, + 482, + 523 + ], + "type": "text", + "content": ": We leverage the historical transformer layer features as the template features to reduce the template inference time in former trackers [5,9-12,24,28,34,44,46]. 2) A mask memory bank (MM) " + }, + { + "bbox": [ + 130, + 427, + 482, + 523 + ], + "type": "inline_equation", + "content": "\\in \\mathbb{R}^{K\\times N\\times 1}" + }, + { + "bbox": [ + 130, + 427, + 482, + 523 + ], + "type": "text", + "content": ": Inspired by the mask-based trackers [39,45], we utilize the mask as the foreground representation. 3) An observation angle memory bank (OM) " + }, + { + "bbox": [ + 130, + 427, + 482, + 523 + ], + "type": "inline_equation", + "content": "\\in \\mathbb{R}^{K\\times 2}" + }, + { + "bbox": [ + 130, + 427, + 482, + 523 + ], + "type": "text", + "content": ". 
For each transformer layer " + }, + { + "bbox": [ + 130, + 427, + 482, + 523 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 130, + 427, + 482, + 523 + ], + "type": "text", + "content": ", we process the memory features as" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 220, + 535, + 481, + 548 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 535, + 481, + 548 + ], + "spans": [ + { + "bbox": [ + 220, + 535, + 481, + 548 + ], + "type": "interline_equation", + "content": "T_{l} = \operatorname{Linear}([\mathrm{LM}_{l}, \mathrm{MM}, \operatorname{Repeat}(\mathrm{OM})]), \tag{1}", + "image_path": "9d77c883d910bac92d69e3429f3c98e796f10a8308027cb31308464e408800ec.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 559, + 482, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 559, + 482, + 609 + ], + "spans": [ + { + "bbox": [ + 130, + 559, + 482, + 609 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 559, + 482, + 609 + ], + "type": "inline_equation", + "content": "T_{l} \in \mathbb{R}^{KN \times C}" + }, + { + "bbox": [ + 130, + 559, + 482, + 609 + ], + "type": "text", + "content": " denotes the template features, Linear(·) is a linear layer that projects the features from " + }, + { + "bbox": [ + 130, + 559, + 482, + 609 + ], + "type": "inline_equation", + "content": "\mathbb{R}^{KN \times (C + 3)}" + }, + { + "bbox": [ + 130, + 559, + 482, + 609 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 130, + 559, + 482, + 609 + ], + "type": "inline_equation", + "content": "\mathbb{R}^{KN \times C}" + }, + { + "bbox": [ + 130, + 559, + 482, + 609 + ], + "type": "text", + "content": ", [·] is the concatenation operation, and Repeat(·) stacks the OM to " + }, + { + "bbox": [ + 130, + 559, + 482, + 609 + ], + "type": "inline_equation", + "content": "\mathbb{R}^{K \times N \times 2}" + }, + { + "bbox": [ + 130, + 559, + 482, + 609 + ], + "type": "text", + "content": ". 
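As a shape check on Eq. (1), here is a minimal PyTorch sketch, assuming K = 6 memory frames and N = C = 128 as in Sec. 3.6; the tensor names mirror LM, MM, and OM, and the random inputs are placeholders:

import torch
from torch import nn

K, N, C = 6, 128, 128
lm_l = torch.randn(K, N, C)   # layer-features memory entry for layer l
mm = torch.randn(K, N, 1)     # mask memory
om = torch.randn(K, 2)        # observation-angle memory

# Repeat(OM): broadcast each frame's angle to all N points -> (K, N, 2)
om_rep = om.unsqueeze(1).expand(K, N, 2)
# [LM_l, MM, Repeat(OM)]: concatenate channels, flatten to (K*N, C+3)
cat = torch.cat([lm_l, mm, om_rep], dim=-1).reshape(K * N, C + 3)
# Linear: project R^{KN x (C+3)} -> R^{KN x C}; inline module for brevity
t_l = nn.Linear(C + 3, C)(cat)  # template features T_l, (768, 128)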
Then, we project " + }, + { + "bbox": [ + 130, + 559, + 482, + 609 + ], + "type": "inline_equation", + "content": "T_{l}" + }, + { + "bbox": [ + 130, + 559, + 482, + 609 + ], + "type": "text", + "content": " into Query (Q), Key (K), and Value (V) using the learnable parameter matrices as" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 236, + 619, + 378, + 635 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 619, + 378, + 635 + ], + "spans": [ + { + "bbox": [ + 236, + 619, + 378, + 635 + ], + "type": "interline_equation", + "content": "Q_{l}^{T} = \mathrm{LN}(\mathrm{LN}(T_{l}) W_{l}^{TQ} + \mathrm{PE}^{T}),", + "image_path": "51cb2dae3ab3e8050fa010855f9812657c13ec8aaf4638f6eda5f1eda2be964b.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 236, + 636, + 480, + 651 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 636, + 480, + 651 + ], + "spans": [ + { + "bbox": [ + 236, + 636, + 480, + 651 + ], + "type": "interline_equation", + "content": "K_{l}^{T} = \mathrm{LN}(T_{l}) W_{l}^{TK}, \tag{2}", + "image_path": "5ece6e554473de519c94edcebb8de2040d7468bd14930c2b85eab0e491501741.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 237, + 652, + 326, + 667 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 652, + 326, + 667 + ], + "spans": [ + { + "bbox": [ + 237, + 652, + 326, + 667 + ], + "type": "interline_equation", + "content": "V_{l}^{T} = \mathrm{LN}(T_{l}) W_{l}^{TV},", + "image_path": "c0eb0096bb0ee1eb01bc186c7cfe9c7ae44cc840e9215664b9b52331e2b59256.jpg" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "text", + "content": "Q. Wu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 132, + 114, + 294, + 249 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 294, + 249 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 294, + 249 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 294, + 249 + ], + "type": "image", + "image_path": "e0a53b32019808bd9d52aad04d8ccbb081dd8e0591cafb5da842715f08728267.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 195, + 251, + 228, + 260 + ], + "lines": [ + { + "bbox": [ + 195, + 251, + 228, + 260 + ], + "spans": [ + { + "bbox": [ + 195, + 251, + 228, + 260 + ], + "type": "text", + "content": "(a) BEA."
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 322, + 114, + 479, + 249 + ], + "blocks": [ + { + "bbox": [ + 322, + 114, + 479, + 249 + ], + "lines": [ + { + "bbox": [ + 322, + 114, + 479, + 249 + ], + "spans": [ + { + "bbox": [ + 322, + 114, + 479, + 249 + ], + "type": "image", + "image_path": "dc2e9b1c243ffa8aaaef51d094460cf0a5d9216ebef62d9a3834fefd7ed2d73b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 385, + 251, + 419, + 259 + ], + "lines": [ + { + "bbox": [ + 385, + 251, + 419, + 259 + ], + "spans": [ + { + "bbox": [ + 385, + 251, + 419, + 259 + ], + "type": "text", + "content": "(b) CPA." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 130, + 269, + 482, + 390 + ], + "lines": [ + { + "bbox": [ + 130, + 269, + 482, + 390 + ], + "spans": [ + { + "bbox": [ + 130, + 269, + 482, + 390 + ], + "type": "text", + "content": "Fig. 4: (a) Base-Expansion Feature Cross-Attention (BEA). The " + }, + { + "bbox": [ + 130, + 269, + 482, + 390 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 130, + 269, + 482, + 390 + ], + "type": "text", + "content": " heads in the multi-head attention (MHA) are split to process hybrid scale features. For the base scale branch, we directly put the local features into the MHA. For the expansion scale branch, we apply an EdgeConv [33] to expand the receptive field of each point and extract more abstract features before MHA. BEA captures spatial context-aware information at a modest extra computational cost. (b) Contextual Point Guided Self-Attention (CPA). We determine the importance of each point from both the base and expansion scale attention maps. Then, we aggregate all the points into " + }, + { + "bbox": [ + 130, + 269, + 482, + 390 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 130, + 269, + 482, + 390 + ], + "type": "text", + "content": " clusters (contextual points) according to their importance and project the clusters to " + }, + { + "bbox": [ + 130, + 269, + 482, + 390 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 130, + 269, + 482, + 390 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 269, + 482, + 390 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 130, + 269, + 482, + 390 + ], + "type": "text", + "content": ". We assign fewer contextual points to low-importance points, and vice versa. CPA not only suppresses the noise but also reduces the computational cost of the attention." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 413, + 482, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 413, + 482, + 462 + ], + "spans": [ + { + "bbox": [ + 130, + 413, + 482, + 462 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 413, + 482, + 462 + ], + "type": "inline_equation", + "content": "\mathrm{LN}(\cdot)" + }, + { + "bbox": [ + 130, + 413, + 482, + 462 + ], + "type": "text", + "content": " is the layer norm, and " + }, + { + "bbox": [ + 130, + 413, + 482, + 462 + ], + "type": "inline_equation", + "content": "\mathrm{PE}^T \in \mathbb{R}^{KN \times C}" + }, + { + "bbox": [ + 130, + 413, + 482, + 462 + ], + "type": "text", + "content": " is the positional embedding of the historical point cloud coordinates. 
We utilize a linear layer to project the point cloud coordinates to their positional embedding. Finally, a self-attention is applied for internal interactions among the temporal information as" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 205, + 470, + 480, + 485 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 470, + 480, + 485 + ], + "spans": [ + { + "bbox": [ + 205, + 470, + 480, + 485 + ], + "type": "interline_equation", + "content": "Mem_{l}^{*} = T_{l} + \operatorname{Dropout}(\mathrm{MHA}(Q_{l}^{T}, K_{l}^{T}, V_{l}^{T})), \tag{3}", + "image_path": "34adbbe616815ebfaa72a7cbc57f340c05c8635f1f410848413711f92dd5fcb7.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 493, + 482, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 493, + 482, + 530 + ], + "spans": [ + { + "bbox": [ + 130, + 493, + 482, + 530 + ], + "type": "text", + "content": "where MHA is the multi-head attention in [31], and Dropout is the random dropping operation in [29]. Following CXTrack [39], we apply dropout and feedforward network (FFN) after self-attention, i.e.," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 200, + 539, + 480, + 553 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 539, + 480, + 553 + ], + "spans": [ + { + "bbox": [ + 200, + 539, + 480, + 553 + ], + "type": "interline_equation", + "content": "Mem_{l} = Mem_{l}^{*} + \operatorname{Dropout}(\operatorname{FFN}(\operatorname{LN}(Mem_{l}^{*}))), \tag{4}", + "image_path": "3c284054ed9febef065f2334952984c74347ec5228b67710d28c117b5e1f2a01.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 223, + 573, + 480, + 586 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 573, + 480, + 586 + ], + "spans": [ + { + "bbox": [ + 223, + 573, + 480, + 586 + ], + "type": "interline_equation", + "content": "\operatorname{FFN}(x) = \max(0, x W_{1} + b_{1}) W_{2} + b_{2}. \tag{5}", + "image_path": "06fc25a8788997d40426b9192d2d602f41c70d24c0af6195e162e46d5773c492.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 131, + 609, + 366, + 621 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 609, + 366, + 621 + ], + "spans": [ + { + "bbox": [ + 131, + 609, + 366, + 621 + ], + "type": "text", + "content": "3.4 Base-Expansion Feature Cross-Attention" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "content": "Most of the existing trackers [11,24,28,34,39,44,46] employ a point-based backbone [23,33] and focus on local region features, which we call base scale features. 
Using only base scale features in the whole pipeline is quite efficient and effective" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 220, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 220, + 91, + 448, + 102 + ], + "type": "text", + "content": "3D SOT in Point Clouds with High Temporal Variation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 212 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 212 + ], + "type": "text", + "content": "in small scenes. However, the base scale features are limited in representing the neighboring environment features around the object in large search areas. To tackle the challenge of similar object distractions, spatial context information across consecutive frames is crucial for effective object tracking [39]. Expanding the receptive field of features can help capture spatial contextual information, and such features are called expansion scale features. Inspired by [26], we propose Base-Expansion Feature Cross-Attention (BEA) to capture both local and more abstract features, and exploit spatial context-aware information." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "spans": [ + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "text", + "content": "As shown in Fig. 4a, the input features " + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "inline_equation", + "content": "X_{l-1}" + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "text", + "content": " are projected into " + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "inline_equation", + "content": "\mathbf{Q}" + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "text", + "content": ". Usually, the memory features " + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "inline_equation", + "content": "Mem_{l}" + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "text", + "content": " would be projected into " + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "inline_equation", + "content": "\mathbf{K}" + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "inline_equation", + "content": "\mathbf{V}" + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "text", + "content": ". Then, multi-head cross-attention adopts " + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "text", + "content": " independent heads, and processes them using the same base scale features. By contrast, we split the " + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "text", + "content": " heads into 2 groups. 
" + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "inline_equation", + "content": "H/2" + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "text", + "content": " heads exploit local spatial context information. We directly process the base scale features with normal cross-attention, and output base scale features " + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "inline_equation", + "content": "\\hat{X}_{l-1}^{base} \\in \\mathbb{R}^{N \\times C/2}" + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "text", + "content": " and attention map Attn" + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "inline_equation", + "content": "^{base} \\in \\mathbb{R}^{N \\times KN}" + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "text", + "content": ". The other " + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "inline_equation", + "content": "H/2" + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "text", + "content": " heads capture environment context features. We first apply an EdgeConv [33] to extract more abstract features Meml" + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "inline_equation", + "content": "^{expan} \\in \\mathbb{R}^{KN/8 \\times C}" + }, + { + "bbox": [ + 130, + 212, + 482, + 331 + ], + "type": "text", + "content": ", which are expansion scale features, i.e.," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 236, + 332, + 480, + 346 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 332, + 480, + 346 + ], + "spans": [ + { + "bbox": [ + 236, + 332, + 480, + 346 + ], + "type": "interline_equation", + "content": "M e m _ {l} ^ {e x p a n} = \\operatorname {E d g e C o n v} \\left(M e m _ {l}\\right). \\tag {6}", + "image_path": "3c0a2b5221f2ad99abe16befc787d1c885681119cb91a27800340d43c9fb083e.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 350, + 482, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 350, + 482, + 387 + ], + "spans": [ + { + "bbox": [ + 131, + 350, + 482, + 387 + ], + "type": "text", + "content": "Then, we project the expansion features into K and V, and perform multi-head cross-attention with Q. 
Specifically, for the " + }, + { + "bbox": [ + 131, + 350, + 482, + 387 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 131, + 350, + 482, + 387 + ], + "type": "text", + "content": "-th head belonging to the expansion scale branch, we generate Q, K, and V as" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 235, + 393, + 480, + 439 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 393, + 480, + 439 + ], + "spans": [ + { + "bbox": [ + 235, + 393, + 480, + 439 + ], + "type": "interline_equation", + "content": "\begin{array}{l} Q_{i} = \mathrm{LN}(\mathrm{LN}(X_{l-1}) W_{i}^{Q} + \mathrm{PE}_{i}^{S}), \\ K_{i} = \mathrm{LN}(Mem_{l}^{expan}) W_{i}^{K}, \tag{7} \\ V_{i} = \mathrm{LN}(Mem_{l}^{expan}) W_{i}^{V}, \end{array}", + "image_path": "891f0f9cf9c909dd8aaeabd12451aac2060e8dca202b9f19f28162bb50db9fd3.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 131, + 448, + 479, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 448, + 479, + 473 + ], + "spans": [ + { + "bbox": [ + 131, + 448, + 479, + 473 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 448, + 479, + 473 + ], + "type": "inline_equation", + "content": "\mathrm{PE}_i^S" + }, + { + "bbox": [ + 131, + 448, + 479, + 473 + ], + "type": "text", + "content": " is the positional embedding of the search area point cloud coordinates. Then, cross-attention is performed as" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 242, + 479, + 480, + 506 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 479, + 480, + 506 + ], + "spans": [ + { + "bbox": [ + 242, + 479, + 480, + 506 + ], + "type": "interline_equation", + "content": "Attn_{i}^{expan} = \operatorname{Softmax}\left(\frac{Q_{i} K_{i}^{\top}}{\sqrt{d_{h}}}\right), \tag{8}", + "image_path": "ed7bfaa721eb400003505c160e0566310edafda565ae1305df2e1910bdebe6eb.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 258, + 516, + 480, + 531 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 516, + 480, + 531 + ], + "spans": [ + { + "bbox": [ + 258, + 516, + 480, + 531 + ], + "type": "interline_equation", + "content": "h_{i}^{expan} = Attn_{i}^{expan} V_{i}, \tag{9}", + "image_path": "2d3ae44974ef62de667529646e8621226e5f3367d9de576e01e12436e66ab74b.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 533, + 482, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 570 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 570 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 533, + 482, + 570 + ], + "type": "inline_equation", + "content": "d_h" + }, + { + "bbox": [ + 130, + 533, + 482, + 570 + ], + "type": "text", + "content": " is the feature dimension of the heads, and " + }, + { + "bbox": [ + 130, + 533, + 482, + 570 + ], + "type": "inline_equation", + "content": "h_i^{expan}" + }, + { + "bbox": [ + 130, + 533, + 482, + 570 + ], + "type": "text", + "content": " denotes the output features of the " + }, + { + "bbox": [ + 130, + 533, + 482, + 570 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 130, + 533, + 482, + 570 + ], + "type": "text", + "content": "-th head. 
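Before the concatenation in Eq. (10) below, a minimal sketch of one expansion-scale head (Eqs. (7)-(9)), assuming Mem_l^{expan} from Eq. (6) carries KN/8 tokens; the per-head dimension d_h and all layer objects are illustrative assumptions, not the authors' code:

import torch
from torch import nn

N, C, K, d_h = 128, 128, 6, 64
x_prev = torch.randn(N, C)              # search-area features X_{l-1}
mem_expan = torch.randn(K * N // 8, C)  # expansion-scale memory, Eq. (6)
pe_s = torch.randn(N, d_h)              # positional embedding PE_i^S

ln_x, ln_m, ln_q = nn.LayerNorm(C), nn.LayerNorm(C), nn.LayerNorm(d_h)
w_q, w_k, w_v = (nn.Linear(C, d_h, bias=False) for _ in range(3))

q_i = ln_q(w_q(ln_x(x_prev)) + pe_s)     # Eq. (7), (N, d_h)
k_i = w_k(ln_m(mem_expan))               # (KN/8, d_h)
v_i = w_v(ln_m(mem_expan))               # (KN/8, d_h)

attn_i = torch.softmax(q_i @ k_i.T / d_h ** 0.5, dim=-1)  # Eq. (8), (N, KN/8)
h_i = attn_i @ v_i                                        # Eq. (9), (N, d_h)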
After that, we concatenate the output features and attention map of each head as" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 220, + 577, + 480, + 609 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 577, + 480, + 609 + ], + "spans": [ + { + "bbox": [ + 220, + 577, + 480, + 609 + ], + "type": "interline_equation", + "content": "\begin{array}{l} \hat{X}_{l-1}^{expan} = [h_{1}, \dots, h_{H/2}], \\ Attn^{expan} = [Attn_{1}^{expan}, \dots, Attn_{H/2}^{expan}], \tag{10} \end{array}", + "image_path": "5be004a2868633d948be95af42ec8dc23aeeb3a335fd1bbcd2dd352a689c7138.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 131, + 616, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 616, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 131, + 616, + 482, + 666 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 616, + 482, + 666 + ], + "type": "inline_equation", + "content": "\hat{X}_{l-1}^{expan} \in \mathbb{R}^{N \times C/2}" + }, + { + "bbox": [ + 131, + 616, + 482, + 666 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 131, + 616, + 482, + 666 + ], + "type": "inline_equation", + "content": "Attn^{expan} \in \mathbb{R}^{N \times KN/8}" + }, + { + "bbox": [ + 131, + 616, + 482, + 666 + ], + "type": "text", + "content": ". Finally, we concatenate the base scale and expansion scale outputs as the resulting correlation feature " + }, + { + "bbox": [ + 131, + 616, + 482, + 666 + ], + "type": "inline_equation", + "content": "\hat{X}_{l-1} \in \mathbb{R}^{N \times C}" + }, + { + "bbox": [ + 131, + 616, + 482, + 666 + ], + "type": "text", + "content": ". Thus, BEA provides rich hybrid scale spatial contextual information for each point, at a very modest extra computational cost." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 218, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 218, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 218, + 102 + ], + "type": "text", + "content": "Q. Wu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 362, + 128 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 362, + 128 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 362, + 128 + ], + "type": "text", + "content": "3.5 Contextual Point Guided Self-Attention" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 133, + 482, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 133, + 482, + 216 + ], + "spans": [ + { + "bbox": [ + 130, + 133, + 482, + 216 + ], + "type": "text", + "content": "Most of the information in the search area will be regarded as noise, since we are only interested in a single object to be tracked. Existing trackers [11,24,28, 34,44] aim to find the features with high template-response in the search area, but neglect to suppress the noise. Zhou et al. 
[46] proposed a Relation-Aware Sampling for preserving more template-relevant points in the search area before inputting it to the backbone. By contrast, we focus on suppressing the noise after feature correlation via a Contextual Point Guided Self-Attention (CPA)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 217, + 482, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 217, + 482, + 240 + ], + "spans": [ + { + "bbox": [ + 130, + 217, + 482, + 240 + ], + "type": "text", + "content": "As shown in Fig. 4b, we leverage the base and expansion scale attention maps to generate the importance map " + }, + { + "bbox": [ + 130, + 217, + 482, + 240 + ], + "type": "inline_equation", + "content": "I \in \mathbb{R}^{N \times 1}" + }, + { + "bbox": [ + 130, + 217, + 482, + 240 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 218, + 246, + 481, + 258 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 218, + 246, + 481, + 258 + ], + "spans": [ + { + "bbox": [ + 218, + 246, + 481, + 258 + ], + "type": "interline_equation", + "content": "I = \operatorname{Mean}(Attn^{base}) + \operatorname{Mean}(Attn^{expan}). \tag{11}", + "image_path": "f4975e1c2a0e939610510507f03e33cf890bf5a04b37fd3f13b93fac04f0af17.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "spans": [ + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "text", + "content": "The higher the importance of the point, the more spatial context-aware information related to the target it contains. We sort the points according to the magnitude of their importance values. Then, all the points will be separated into " + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "text", + "content": " groups according to their importance. For each group with points " + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "inline_equation", + "content": "P_{i}^{G} \in \mathbb{R}^{G_{i} \times C}" + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "text", + "content": ", we aggregate the points into " + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "inline_equation", + "content": "U_{i}" + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "text", + "content": " clusters, which we call contextual points. Specifically, we first reshape the points as " + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "inline_equation", + "content": "P_{i}^{G} \in \mathbb{R}^{U_{i} \times C \times G_{i} / U_{i}}" + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "text", + "content": ". Second, a linear layer is employed to project the group to the contextual points " + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "inline_equation", + "content": "P_{i}^{U} \in \mathbb{R}^{U_{i} \times C}" + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "text", + "content": ". We assign fewer contextual points to the groups with lower importance, thereby suppressing the noise feature expression. 
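A minimal sketch of Eq. (11) and the importance-sorted grouping just described, using the Sec. 3.6 settings (G = 3 groups of [32, 64, 32] points, U = [4, 32, 16] contextual points). The reshape order and the per-group linear projection are our reading of the text, not the authors' code; in practice the projections would be learned module parameters:

import torch
from torch import nn

N, C, KN, KN8 = 128, 128, 768, 96
attn_base = torch.rand(N, KN)       # base-scale attention map
attn_expan = torch.rand(N, KN8)     # expansion-scale attention map
x_hat = torch.randn(N, C)           # correlated features from BEA

imp = attn_base.mean(dim=1) + attn_expan.mean(dim=1)  # Eq. (11), (N,)
order = imp.argsort(descending=True)                  # sort by importance

sizes, clusters, ctx, start = [32, 64, 32], [4, 32, 16], [], 0
for g_i, u_i in zip(sizes, clusters):
    idx = order[start:start + g_i]
    start += g_i
    p_g = x_hat[idx].reshape(u_i, C, g_i // u_i)      # (U_i, C, G_i/U_i)
    proj = nn.Linear(g_i // u_i, 1)                   # group -> contextual pts
    ctx.append(proj(p_g).squeeze(-1))                 # (U_i, C)
ctx = torch.cat(ctx, dim=0)                           # (U, C) with U = 52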
Finally, all the contextual points are concatenated and projected into Key " + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "inline_equation", + "content": "K^{U} \\in \\mathbb{R}^{U \\times C}" + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "text", + "content": " and Value " + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "inline_equation", + "content": "V^{U} \\in \\mathbb{R}^{U \\times C}" + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "text", + "content": ". We project " + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "inline_equation", + "content": "\\hat{X}_{l-1}" + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "text", + "content": " to Q and perform a multi-head attention with " + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "inline_equation", + "content": "K^{U}" + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "inline_equation", + "content": "V^{U}" + }, + { + "bbox": [ + 130, + 264, + 482, + 419 + ], + "type": "text", + "content": ", and an FFN is applied after attention. CPA shrinks the length of K and V, and leads to a computational cost decrease in self-attention." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 434, + 279, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 434, + 279, + 445 + ], + "spans": [ + { + "bbox": [ + 132, + 434, + 279, + 445 + ], + "type": "text", + "content": "3.6 Implementation Details" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 450, + 482, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 450, + 482, + 499 + ], + "spans": [ + { + "bbox": [ + 130, + 450, + 482, + 499 + ], + "type": "text", + "content": "Backbone & Loss Functions. Following CXTrack [39], we adopt DGCNN [33] as our backbone, and apply X-RPN [39] as the RPN of our framework. We add two Shared MLP layers to X-RPN for predicting the observation angles " + }, + { + "bbox": [ + 130, + 450, + 482, + 499 + ], + "type": "inline_equation", + "content": "(\\alpha)" + }, + { + "bbox": [ + 130, + 450, + 482, + 499 + ], + "type": "text", + "content": " and the masks. 
Therefore, the overall loss is expressed as" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 194, + 506, + 480, + 517 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 506, + 480, + 517 + ], + "spans": [ + { + "bbox": [ + 194, + 506, + 480, + 517 + ], + "type": "interline_equation", + "content": "\mathcal{L} = \gamma_{1} \mathcal{L}_{cc} + \gamma_{2} \mathcal{L}_{mask} + \gamma_{3} \mathcal{L}_{alpha} + \gamma_{4} \mathcal{L}_{rm} + \gamma_{5} \mathcal{L}_{box}, \tag{12}", + "image_path": "df7c9b6423a57ca61577da4035102a426cd638a15524f546686023ca31abb073.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "spans": [ + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "inline_equation", + "content": "\mathcal{L}_{cc}" + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "inline_equation", + "content": "\mathcal{L}_{mask}" + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "inline_equation", + "content": "\mathcal{L}_{alpha}" + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "inline_equation", + "content": "\mathcal{L}_{rm}" + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "inline_equation", + "content": "\mathcal{L}_{box}" + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "text", + "content": " are the losses for the coarse center, foreground mask, observation angle, targetness mask, and bounding box, respectively. We apply the " + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "inline_equation", + "content": "L_{2}" + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "text", + "content": " loss for " + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "inline_equation", + "content": "\mathcal{L}_{cc}" + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "text", + "content": ", the standard cross entropy loss for " + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "inline_equation", + "content": "\mathcal{L}_{mask}" + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "inline_equation", + "content": "\mathcal{L}_{rm}" + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "text", + "content": ", and the Huber loss for " + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "inline_equation", + "content": "\mathcal{L}_{alpha}" + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "inline_equation", + "content": "\mathcal{L}_{box}" + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "inline_equation", + "content": "\\gamma_{1}, \\gamma_{2}, \\gamma_{3}, \\gamma_{4}" + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "inline_equation", + "content": "\\gamma_{5}" + }, + { + "bbox": [ + 130, + 521, + 482, + 582 + ], + "type": "text", + "content": " are empirically set as 10.0, 0.2, 1.0, 1.0, and 1.0." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "spans": [ + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "text", + "content": "Training & Testing. We train our model on NVIDIA RTX-3090 GPUs with the Adam optimizer and an initial learning rate of 0.001. Due to GPU memory limitation, we construct point cloud sequences with 8 frames for training, and set " + }, + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "inline_equation", + "content": "K = 2" + }, + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "text", + "content": " in training, and " + }, + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "inline_equation", + "content": "K = 6" + }, + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "text", + "content": " in testing. Following existing methods [39,45], we set " + }, + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "text", + "content": " to 128. We stack " + }, + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "inline_equation", + "content": "L = 2" + }, + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "text", + "content": " transformer layers and apply " + }, + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "inline_equation", + "content": "H = 4" + }, + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "text", + "content": " heads in BEA and CPA. We adopt " + }, + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "inline_equation", + "content": "G = 3" + }, + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "text", + "content": " groups in CPA, and assign [32,64,32] points and " + }, + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "inline_equation", + "content": "U = [4,32,16]" + }, + { + "bbox": [ + 130, + 582, + 482, + 667 + ], + "type": "text", + "content": " contextual points for the groups, respectively." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D SOT in Point Clouds with High Temporal Variation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 114, + 229, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 114, + 229, + 129 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 229, + 129 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 134, + 138, + 480, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 138, + 480, + 186 + ], + "spans": [ + { + "bbox": [ + 134, + 138, + 480, + 186 + ], + "type": "text", + "content": "We leverage two well-known 3D tracking benchmarks, KITTI [8] and Waymo [30], to evaluate the general performance of our approach in regular 3D SOT. In addition, we establish a new KITTI-HV dataset to test our performance in high temporal variation scenarios." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 134, + 186, + 480, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 186, + 480, + 270 + ], + "spans": [ + { + "bbox": [ + 134, + 186, + 480, + 270 + ], + "type": "text", + "content": "Regular Datasets. The KITTI tracking dataset comprises 21 training sequences and 29 test sequences, encompassing eight object types. Following prior studies [9,24,34,39,44,45], we use the sequences 0-16 as training data, 17-18 for validation, and 19-20 for testing. The Waymo dataset is large-scale. We adopt the approach outlined in LiDAR-SOT [21] to utilize 1121 tracklets, which are subsequently categorized into easy, medium, and hard subsets based on the number of points in the first frame of each tracklet." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 134, + 270, + 480, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 270, + 480, + 425 + ], + "spans": [ + { + "bbox": [ + 134, + 270, + 480, + 425 + ], + "type": "text", + "content": "HV Dataset. We build a dataset with high temporal variation for 3D SOT based on KITTI, called KITTI-HV. Although high temporal variation scenarios are present in the existing benchmarks, there is no exact threshold for determining whether a scenario exhibits high temporal variation or not. Large point cloud variations and significant object motions are the two major challenges in high temporal variation scenarios, and sampling at frame intervals is an effective way to simulate both. Also, the constructed KITTI-HV can provide a preliminary platform for exploring tracking in scenarios such as skipped-tracking, edge devices, and high dynamics. For a fairer comparison with existing methods, we set the frame interval to 2, 3, 5, and 10. We set up denser tests at low frame intervals to probe the performance of the existing methods under point cloud variations close to smooth scenarios. 
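In the simplest reading, the interval sampling that builds KITTI-HV reduces to keeping every k-th frame of a tracklet; a tiny illustrative sketch (the function name is ours, not from the paper):

def subsample_tracklet(frames, interval):
    # Keep every `interval`-th frame to simulate high temporal variation;
    # KITTI-HV uses intervals of 2, 3, 5, and 10.
    return frames[::interval]

print(subsample_tracklet(list(range(20)), 5))  # [0, 5, 10, 15]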
We train and test all methods from scratch individually on each frame interval." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 134, + 426, + 480, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 426, + 480, + 497 + ], + "spans": [ + { + "bbox": [ + 134, + 426, + 480, + 497 + ], + "type": "text", + "content": "Evaluation Metrics. We employ One Pass Evaluation [38] to evaluate the different methods in terms of Success and Precision. Success is determined by measuring the Intersection Over Union between the proposed bounding box and the ground-truth (GT) bounding box. Precision is evaluated by computing the Area Under the Curve of the distance error between the centers of the two bounding boxes, ranging from 0 to 2 meters." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 134, + 514, + 350, + 527 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 514, + 350, + 527 + ], + "spans": [ + { + "bbox": [ + 134, + 514, + 350, + 527 + ], + "type": "text", + "content": "4.1 Comparison with the State of the Art" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 134, + 533, + 480, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 533, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 134, + 533, + 480, + 665 + ], + "type": "text", + "content": "Results on HV tracking. We evaluate our HVTrack in 4 categories ('Car', 'Pedestrian', 'Van', and 'Cyclist') following existing methods [24, 39, 44, 45] in the KITTI-HV dataset. The methods we choose to compare with HVTrack are the most representative SOT methods from 2020 to 2023 (Most cited methods published in each year according to Google Scholar). As illustrated in Tab. 1, our approach consistently outperforms the state-of-the-art methods [24, 39, 44, 45] across all frame intervals, confirming the effectiveness of the proposed tracking framework for high temporal variation scenarios. Notably, the performance gap between our HVTrack and existing trackers widens as variations are exacerbated. In the particularly challenging scenario of 10 frame intervals, we achieve a substantial " + }, + { + "bbox": [ + 134, + 533, + 480, + 665 + ], + "type": "inline_equation", + "content": "9.1\\%" + }, + { + "bbox": [ + 134, + 533, + 480, + 665 + ], + "type": "text", + "content": " ↑ improvement in success and a remarkable " + }, + { + "bbox": [ + 134, + 533, + 480, + 665 + ], + "type": "inline_equation", + "content": "10.4\\%" + }, + { + "bbox": [ + 134, + 533, + 480, + 665 + ], + "type": "text", + "content": " ↑ enhancement" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 217, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 217, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 217, + 101 + ], + "type": "text", + "content": "Q. Wu et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 134, + 191, + 482, + 350 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 482, + 180 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 482, + 180 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 482, + 180 + ], + "type": "text", + "content": "Table 1: Comparison of HVTrack with the state-of-the-art methods on each category of the KITTI-HV dataset. We construct the HV dataset KITTI-HV for training and testing by setting different frame intervals for sampling in the KITTI dataset. Bold and underline denote the best and second-best performance, respectively. Success/Precision are used for evaluation. Improvement and deterioration are shown in green and red, respectively." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 134, + 191, + 482, + 350 + ], + "lines": [ + { + "bbox": [ + 134, + 191, + 482, + 350 + ], + "spans": [ + { + "bbox": [ + 134, + 191, + 482, + 350 + ], + "type": "table", + "html": "
<tr><td>Frame Intervals</td><td colspan=5>2 Intervals</td><td colspan=5>3 Intervals</td></tr>
<tr><td>Category (Frame Number)</td><td>Car (6424)</td><td>Pedestrian (6088)</td><td>Van (1248)</td><td>Cyclist (308)</td><td>Mean (14068)</td><td>Car (6424)</td><td>Pedestrian (6088)</td><td>Van (1248)</td><td>Cyclist (308)</td><td>Mean (14068)</td></tr>
<tr><td>P2B [24]</td><td>56.3/71.0</td><td>30.8/53.0</td><td>33.4/38.4</td><td>41.8/61.4</td><td>42.9/60.1</td><td>43.4/51.8</td><td>27.9/46.8</td><td>27.9/31.8</td><td>44.8/64.4</td><td>35.4/48.1</td></tr>
<tr><td>BAT [44]</td><td>61.8/74.2</td><td>36.5/61.1</td><td>26.8/30.4</td><td>54.1/78.7</td><td>47.6/64.7</td><td>51.7/61.9</td><td>31.8/53.5</td><td>24.0/28.2</td><td>50.5/72.6</td><td>40.6/55.5</td></tr>
<tr><td>M2-Track [45]</td><td>63.0/76.6</td><td>54.6/81.7</td><td>52.8/66.5</td><td>68.3/89.3</td><td>58.6/78.2</td><td>62.1/72.7</td><td>51.8/74.3</td><td>33.6/41.6</td><td>64.7/82.0</td><td>55.1/70.8</td></tr>
<tr><td>CXTrack [39]</td><td>61.4/70.9</td><td>62.6/86.3</td><td>56.0/69.1</td><td>59.2/76.9</td><td>61.4/77.5</td><td>47.4/53.1</td><td>57.9/79.3</td><td>48.5/58.8</td><td>40.7/58.4</td><td>51.9/65.1</td></tr>
<tr><td>HVTrack</td><td>67.1/77.5</td><td>60.0/84.0</td><td>50.6/61.7</td><td>73.9/93.6</td><td>62.7/79.3</td><td>66.8/76.5</td><td>51.1/71.9</td><td>38.7/46.9</td><td>66.5/89.7</td><td>57.5/72.2</td></tr>
<tr><td>Improvement</td><td>4.1↑/0.9↑</td><td>2.6↓/2.3↓</td><td>6.0↓/7.4↓</td><td>5.6↑/4.3↑</td><td>1.3↑/1.1↑</td><td>4.7↑/3.8↑</td><td>6.8↓/7.4↓</td><td>9.8↓/11.9↓</td><td>1.8↑/7.7↑</td><td>2.4↑/1.4↑</td></tr>
<tr><td>Frame Intervals</td><td colspan=5>5 Intervals</td><td colspan=5>10 Intervals</td></tr>
<tr><td>Category (Frame Number)</td><td>Car (6424)</td><td>Pedestrian (6088)</td><td>Van (1248)</td><td>Cyclist (308)</td><td>Mean (14068)</td><td>Car (6424)</td><td>Pedestrian (6088)</td><td>Van (1248)</td><td>Cyclist (308)</td><td>Mean (14068)</td></tr>
<tr><td>P2B [24]</td><td>39.3/46.1</td><td>27.4/43.5</td><td>27.2/30.4</td><td>35.0/44.4</td><td>33.0/43.5</td><td>28.6/29.2</td><td>23.1/31.1</td><td>25.9/27.3</td><td>29.1/28.3</td><td>26.0/29.8</td></tr>
<tr><td>BAT [44]</td><td>44.1/51.1</td><td>21.1/32.8</td><td>26.1/29.5</td><td>35.7/46.3</td><td>32.4/41.1</td><td>30.6/33.1</td><td>21.7/29.2</td><td>20.8/20.7</td><td>29.3/29.1</td><td>25.9/30.2</td></tr>
<tr><td>M2-Track [45]</td><td>50.9/58.6</td><td>31.6/45.4</td><td>30.0/36.5</td><td>47.4/61.0</td><td>40.6/51.0</td><td>33.0/35.1</td><td>17.5/24.1</td><td>20.7/20.8</td><td>27.7/26.6</td><td>25.0/28.9</td></tr>
<tr><td>CXTrack [39]</td><td>38.6/42.2</td><td>35.0/47.8</td><td>21.6/24.3</td><td>25.7/33.3</td><td>35.3/42.8</td><td>30.2/32.4</td><td>18.2/21.4</td><td>17.5/17.9</td><td>27.7/26.5</td><td>23.8/26.2</td></tr>
<tr><td>HVTrack</td><td>60.3/68.9</td><td>35.1/52.1</td><td>28.7/32.4</td><td>58.2/71.7</td><td>46.6/58.5</td><td>49.4/54.7</td><td>22.5/29.1</td><td>22.2/23.4</td><td>39.5/45.4</td><td>35.1/40.6</td></tr>
<tr><td>Improvement</td><td>9.4↑/10.3↑</td><td>0.1↑/4.3↑</td><td>1.3↓/4.1↓</td><td>10.8↑/10.7↑</td><td>6.0↑/7.5↑</td><td>16.4↑/19.6↑</td><td>0.6↓/0.1↓</td><td>3.7↓/3.9↓</td><td>10.2↑/16.3↑</td><td>9.1↑/10.4↑</td></tr>
", + "image_path": "7bb53e197a6e703f83209d54c3107b45446a524259e5c0386dd6b4fa8efd9891.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 376, + 482, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 376, + 482, + 555 + ], + "spans": [ + { + "bbox": [ + 133, + 376, + 482, + 555 + ], + "type": "text", + "content": "in precision. This showcases the robustness of our method in accommodating various levels of point cloud variation. Our method delivers outstanding performance on 'Car' and 'Cyclist', in which we gain a great improvement in 5 frame intervals (9.4%↑/10.3%↑ for 'Car' and 10.8%↑/10.7%↑ for 'Cyclist') and 10 frame intervals (16.4%↑/19.6%↑ for 'Car' and 10.2%↑/16.3%↑ for 'Cyclist'). However, the challenge of tracking large objects persists in high temporal variation cases for our method. Note that the performance of CXTrack drops dramatically after 3 frame intervals. In particular, in the medium variation case of 5 frame intervals, we achieve 11.3%↑/15.7%↑ improvement in overall success/precision compared to CXTrack, despite the fact that our HVTrack shares the same backbone and RPN with CXTrack [39]. Furthermore, HVTrack surpasses CXTrack on 'Car' and 'Cyclist' by a very large margin (21.7%↑/26.7%↑ for 'Car' and 32.5%↑/38.4%↑ for 'Cyclist'). The distinct performance gap between HVTrack and CXTrack in HV tracking showcases the effectiveness of our feature correlation module design." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 133, + 558, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 558, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 133, + 558, + 482, + 666 + ], + "type": "text", + "content": "Results on regular tracking. For the KITTI dataset, we compare HVTrack with 12 top-performing trackers [7,9,11,12,17,24,28,34,39,44-46]. As shown in Tab. 2, our overall performance is close to the SOTA tracker CXTrack [39], and achieves the second best result on the average in success (2.0%↓ w.r.t. CXTrack). Note that HVTrack outperforms TAT [17] on average (0.8%↑/0.3%↑), which utilizes temporal information by concatenating historical template features. This demonstrates our better design for leveraging the spatio-temporal context information. However, the performance of HVTrack drops when dealing with large objects ('Van'). 
We conjecture this performance drop to be caused by CPA," + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D SOT in Point Clouds with High Temporal Variation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 212, + 146, + 403, + 265 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 480, + 136 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 480, + 136 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 480, + 136 + ], + "type": "text", + "content": "Table 2: Comparison of HVTrack with the SOTA methods on each category of the KITTI dataset." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 212, + 146, + 403, + 265 + ], + "lines": [ + { + "bbox": [ + 212, + 146, + 403, + 265 + ], + "spans": [ + { + "bbox": [ + 212, + 146, + 403, + 265 + ], + "type": "table", + "html": "
Category (Frame Number) | Car (6424) | Pedestrian (6088) | Van (1248) | Cyclist (308) | Mean (14068)
SC3D [9] | 41.3/57.9 | 18.2/37.8 | 40.4/47.0 | 41.5/70.4 | 31.2/48.5
P2B [24] | 56.2/72.8 | 28.7/49.6 | 40.8/48.4 | 32.1/44.7 | 42.4/60.0
3DSiamRPN [7] | 58.2/76.2 | 35.2/56.2 | 45.7/52.9 | 36.2/49.0 | 46.7/64.9
MLVSNet [34] | 56.0/74.0 | 34.1/61.1 | 52.0/61.4 | 34.3/44.5 | 45.7/66.7
BAT [44] | 60.5/77.7 | 42.1/70.1 | 52.4/67.0 | 33.7/45.4 | 51.2/72.8
PTT [28] | 67.8/81.8 | 44.9/72.0 | 43.6/52.5 | 37.2/47.3 | 55.1/74.2
V2B [11] | 70.5/81.3 | 48.3/73.5 | 50.1/58.0 | 40.8/49.7 | 58.4/75.2
PTTR [46] | 65.2/77.4 | 50.9/81.6 | 52.5/61.8 | 65.1/90.5 | 57.9/78.1
STNet [12] | 72.1/84.0 | 49.9/77.2 | 58.0/70.6 | 73.5/93.7 | 61.3/80.1
TAT [17] | 72.2/83.3 | 57.4/84.4 | 58.9/69.2 | 74.2/93.9 | 64.7/82.8
M2-Track [45] | 65.5/80.8 | 61.5/88.2 | 53.8/70.7 | 73.2/93.5 | 62.9/83.4
CXTrack [39] | 69.1/81.6 | 67.0/91.5 | 60.0/71.8 | 74.2/94.3 | 67.5/85.3
HVTrack | 68.2/79.2 | 64.6/90.6 | 54.8/63.8 | 72.4/93.7 | 65.5/83.1
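(Reading note: the "Mean (14068)" column above is consistent with a frame-count-weighted average of the four per-category results. This aggregation rule is inferred from the numbers, not defined in the extracted text; a quick check in Python:)

```python
# Frame counts from the table header and CXTrack's per-category success.
counts = {"Car": 6424, "Pedestrian": 6088, "Van": 1248, "Cyclist": 308}
success = {"Car": 69.1, "Pedestrian": 67.0, "Van": 60.0, "Cyclist": 74.2}

weighted = sum(success[c] * counts[c] for c in counts) / sum(counts.values())
print(round(weighted, 1))  # -> 67.5, matching CXTrack's reported mean success
```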
", + "image_path": "7e5b04df2a48a564d827f1eda8c66fffb8c9c3332b3ae69ac37f56cf48b6cd3a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 141, + 297, + 471, + 369 + ], + "blocks": [ + { + "bbox": [ + 136, + 276, + 476, + 288 + ], + "lines": [ + { + "bbox": [ + 136, + 276, + 476, + 288 + ], + "spans": [ + { + "bbox": [ + 136, + 276, + 476, + 288 + ], + "type": "text", + "content": "Table 3: Comparison of HVTrack with the SOTA methods on the Waymo dataset." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 141, + 297, + 471, + 369 + ], + "lines": [ + { + "bbox": [ + 141, + 297, + 471, + 369 + ], + "spans": [ + { + "bbox": [ + 141, + 297, + 471, + 369 + ], + "type": "table", + "html": "
Method | Vehicle (185632) | Pedestrian (241168) | Mean
 | Easy | Medium | Hard | Mean | Easy | Medium | Hard | Mean | Mean (426800)
P2B [24] | 57.1/65.4 | 52.0/60.7 | 47.9/58.5 | 52.6/61.7 | 18.1/30.8 | 17.8/30.0 | 17.7/29.3 | 17.9/30.1 | 33.0/43.8
BAT [44] | 61.0/68.3 | 53.3/60.9 | 48.9/57.8 | 54.7/62.7 | 19.3/32.6 | 17.8/29.8 | 17.2/28.3 | 18.2/30.3 | 34.1/44.4
V2B [11] | 64.5/71.5 | 55.1/63.2 | 52.0/62.0 | 57.6/65.9 | 27.9/43.9 | 22.5/36.2 | 20.1/33.1 | 23.7/37.9 | 38.4/50.1
STNet [12] | 65.9/72.7 | 57.5/66.0 | 54.6/64.7 | 59.7/68.0 | 29.2/45.3 | 24.7/38.2 | 22.2/35.8 | 25.5/39.9 | 40.4/52.1
CXTrack [39] | 63.9/71.1 | 54.2/62.7 | 52.1/63.7 | 57.1/66.1 | 35.4/55.3 | 29.7/47.9 | 26.3/44.4 | 30.7/49.4 | 42.2/56.7
HVTrack (Ours) | 66.2/75.2 | 57.0/66.0 | 55.3/67.1 | 59.8/69.7 | 34.2/53.5 | 28.7/47.9 | 26.7/45.2 | 30.0/49.1 | 43.0/58.1
", + "image_path": "98d53ff1a89c76f4f47177c984addfe0dc695f3960e5b93a255404995f394da2.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 390, + 482, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 390, + 482, + 559 + ], + "spans": [ + { + "bbox": [ + 130, + 390, + 482, + 559 + ], + "type": "text", + "content": "which will be further explored in Sec. 4.2. For the Waymo dataset, following the benchmark setting in LiDAR-SOT [21] and STNet [12], we test our HVTrack in 2 categories ('Vehicle', 'Pedestrian') with 3 difficulty levels. All the methods are pre-trained on KITTI. The results of P2B [24], BAT [44], and V2B [11] on Waymo are provided by STNet [12]. As shown in Tab. 3, our method achieves the best performance in success " + }, + { + "bbox": [ + 130, + 390, + 482, + 559 + ], + "type": "inline_equation", + "content": "(0.8\\% \\uparrow)" + }, + { + "bbox": [ + 130, + 390, + 482, + 559 + ], + "type": "text", + "content": " and precision " + }, + { + "bbox": [ + 130, + 390, + 482, + 559 + ], + "type": "inline_equation", + "content": "(1.4\\% \\uparrow)" + }, + { + "bbox": [ + 130, + 390, + 482, + 559 + ], + "type": "text", + "content": ". Notably, HVTrack does not surpass CXTrack and reach SOTA on the KTTTI benchmark, while the opposite situation occurs in the larger dataset of Waymo. The improvement on Waymo clearly demonstrates the robustness of our method in the large-scale dataset. Also, HVTrack surpasses other SOTA methods on all categories of 'Hard' difficulty, revealing our excellent ability to handle sparse cases. The experimental results show that our method can generally solve the problem of 3D SOT under various levels of point cloud variations, and achieve outstanding performance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 574, + 270, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 574, + 270, + 586 + ], + "spans": [ + { + "bbox": [ + 132, + 574, + 270, + 586 + ], + "type": "text", + "content": "4.2 Analysis Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 594, + 479, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 479, + 628 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 479, + 628 + ], + "type": "text", + "content": "In this section, we extensively analyze HVTrack via a series of experiments. All the experiments are conducted on KITTI-HV with 5 frame intervals unless otherwise stated." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 629, + 480, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 480, + 665 + ], + "type": "text", + "content": "Ablation Study. We conduct experiments to analyze the effectiveness of different modules in HVTrack. As shown in Tab. 4, we respectively ablate OM, BEA, and CPA from HVTrack. 
We only ablate OM in RPM because LM and MM" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 217, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 217, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 217, + 102 + ], + "type": "text", + "content": "Q. Wu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 194, + 135, + 420, + 191 + ], + "blocks": [ + { + "bbox": [ + 223, + 114, + 389, + 125 + ], + "lines": [ + { + "bbox": [ + 223, + 114, + 389, + 125 + ], + "spans": [ + { + "bbox": [ + 223, + 114, + 389, + 125 + ], + "type": "text", + "content": "Table 4: Ablation analysis of HVTrack." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 194, + 135, + 420, + 191 + ], + "lines": [ + { + "bbox": [ + 194, + 135, + 420, + 191 + ], + "spans": [ + { + "bbox": [ + 194, + 135, + 420, + 191 + ], + "type": "table", + "html": "
OM | BEA | CPA | Car | Pedestrian | Van | Cyclist | Mean
✗ | ✓ | ✓ | 60.0/69.0 | 33.9/50.0 | 28.4/32.2 | 54.2/67.1 | 45.8/57.5
✓ | ✗ | ✓ | 60.3/69.4 | 35.0/50.2 | 26.7/30.7 | 43.9/61.5 | 46.0/57.5
✓ | ✓ | ✗ | 58.2/66.9 | 34.7/49.8 | 28.1/33.5 | 47.7/63.9 | 45.1/56.5
✓ | ✓ | ✓ | 60.3/68.9 | 35.1/52.1 | 28.7/32.4 | 58.2/71.7 | 46.6/58.5
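(Reading note: the ✓/✗ layout restored in the ablation table above is an inference from context rather than something preserved by the extraction. One supporting cross-check is that the row ablating BEA should coincide with the 'Base' row of the BEA table below, since both keep only BEA's base branch:)

```python
# Row 2 of Table 4 (BEA ablated) vs. Table 5 'Base': the identical values
# support reading row 2 as the "without BEA" configuration.
tab4_wo_bea = ("60.3/69.4", "35.0/50.2", "26.7/30.7", "43.9/61.5", "46.0/57.5")
tab5_base = ("60.3/69.4", "35.0/50.2", "26.7/30.7", "43.9/61.5", "46.0/57.5")
assert tab4_wo_bea == tab5_base
```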
", + "image_path": "52bb8e7385d65b382c934da9bd9ee66461f0bf42328c5f158edad885b8e3a7cc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 202, + 236, + 411, + 277 + ], + "blocks": [ + { + "bbox": [ + 132, + 204, + 480, + 226 + ], + "lines": [ + { + "bbox": [ + 132, + 204, + 480, + 226 + ], + "spans": [ + { + "bbox": [ + 132, + 204, + 480, + 226 + ], + "type": "text", + "content": "Table 5: Ablation experiment of BEA. 'Base'/'Expansion' denotes only using the base/expansion branch in BEA." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 202, + 236, + 411, + 277 + ], + "lines": [ + { + "bbox": [ + 202, + 236, + 411, + 277 + ], + "spans": [ + { + "bbox": [ + 202, + 236, + 411, + 277 + ], + "type": "table", + "html": "
Category | Car | Pedestrian | Van | Cyclist | Mean
Base | 60.3/69.4 | 35.0/50.2 | 26.7/30.7 | 43.9/61.5 | 46.0/57.5
Expansion | 60.0/68.6 | 34.7/50.5 | 31.4/36.8 | 54.5/67.5 | 46.4/57.9
", + "image_path": "5d03b1bff40f46684a7e6e107572079fe47b658f0387601eb4a910735ae9748d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 302, + 480, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 302, + 480, + 350 + ], + "spans": [ + { + "bbox": [ + 130, + 302, + 480, + 350 + ], + "type": "text", + "content": "serve as the template and are the indivisible parts of HVTrack. BEA and CPA are replaced by vanilla cross-attention and self-attention. In general, all components have been proven to be effective; removing an arbitrary module degrades the 'mean' performance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 352, + 482, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 352, + 482, + 508 + ], + "spans": [ + { + "bbox": [ + 130, + 352, + 482, + 508 + ], + "type": "text", + "content": "Analysis Experiment of BEA. The performance slightly drops on the 'Car' when we apply BEA on HVTrack as shown in Tab. 4. We conjecture this to be caused by the side effect of aggregating larger scale features in BEA, which will involve more background noise at each point. Further, 'Car' has a medium size and does not have the distraction of crowded similar objects like small objects ('Pedestrian' and 'Cyclist'), nor does it require a larger receptive field like large objects ('Van'). To verify this issue, we further analyze each branch of BEA as shown in Tab. 5. 'Pedestrian', 'Van', and 'Cyclist' benefit from the expansion branch and achieve a better performance compared to using only the base branch in BEA. On the other hand, the performance in the 'Car' category has the opposite behavior to the other categories. The experimental results validate our hypothesis that BEA is beneficial to small and large objects, while negatively affecting medium-sized objects." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": "Analysis Experiment of CPA. Our method yields better results on 'Van' after we remove CPA as shown in Tab. 4, which reveals the relation between CPA and the large object tracking challenge. We believe that this is caused by the suppressing strategy in CPA. Large objects usually have more points, and under the same probability of misclassification of importance, they will have more foreground points assigned as low importance in the attention map, resulting in a part of useful information being suppressed in CPA. As shown in Fig. 5b, the importance conflict in the object leads to tracking failure. That part of the information will be further suppressed when stacking multiple transformer layers. However, the performance drops in other categories, without CPA to suppress the background noise for medium and small objects. As shown in Fig. 5a, most of the background points are assigned with low importance and suppressed in the success case, which proves our idea of CPA." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D SOT in Point Clouds with High Temporal Variation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 192, + 116, + 312, + 181 + ], + "blocks": [ + { + "bbox": [ + 192, + 116, + 312, + 181 + ], + "lines": [ + { + "bbox": [ + 192, + 116, + 312, + 181 + ], + "spans": [ + { + "bbox": [ + 192, + 116, + 312, + 181 + ], + "type": "image", + "image_path": "f392ef54e3800668ca3216913b5128bcabd2bf09928622bc88a2f19690d53e23.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 192, + 398, + 204 + ], + "lines": [ + { + "bbox": [ + 214, + 192, + 398, + 204 + ], + "spans": [ + { + "bbox": [ + 214, + 192, + 398, + 204 + ], + "type": "text", + "content": "Fig. 5: The attention maps of 'Van' in CPA." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 318, + 116, + 406, + 182 + ], + "blocks": [ + { + "bbox": [ + 318, + 116, + 406, + 182 + ], + "lines": [ + { + "bbox": [ + 318, + 116, + 406, + 182 + ], + "spans": [ + { + "bbox": [ + 318, + 116, + 406, + 182 + ], + "type": "image", + "image_path": "a4c7ba14639332f576747313a68f34f186a9f622a44f1775cfb2fb46fa6bf851.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 408, + 115, + 418, + 182 + ], + "blocks": [ + { + "bbox": [ + 408, + 115, + 418, + 182 + ], + "lines": [ + { + "bbox": [ + 408, + 115, + 418, + 182 + ], + "spans": [ + { + "bbox": [ + 408, + 115, + 418, + 182 + ], + "type": "image", + "image_path": "6e12dc51e1aefee041843a0adbe73eafe29db5f056fd2aad348590f15ab93a9b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 194, + 259, + 421, + 350 + ], + "blocks": [ + { + "bbox": [ + 131, + 216, + 482, + 248 + ], + "lines": [ + { + "bbox": [ + 131, + 216, + 482, + 248 + ], + "spans": [ + { + "bbox": [ + 131, + 216, + 482, + 248 + ], + "type": "text", + "content": "Table 6: Results of HVTrack when using different memory sizes. We train HVTrack with a memory size of 2, and evaluate it with memory sizes ranging from 1 to 8." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 194, + 259, + 421, + 350 + ], + "lines": [ + { + "bbox": [ + 194, + 259, + 421, + 350 + ], + "spans": [ + { + "bbox": [ + 194, + 259, + 421, + 350 + ], + "type": "table", + "html": "
Memory Size | Car | Pedestrian | Van | Cyclist | Mean
1 | 58.3/66.5 | 30.9/46.2 | 26.8/29.8 | 57.1/70.5 | 43.6/54.6
2 | 58.6/67.0 | 31.7/47.9 | 27.1/30.6 | 57.6/70.9 | 44.1/55.6
3 | 59.2/67.6 | 33.8/49.9 | 27.7/31 | 55.8/67.7 | 45.3/56.7
4 | 60.0/68.5 | 33.7/50.6 | 29.5/33.6 | 57.9/71.3 | 45.9/57.7
5 | 60.0/68.5 | 33.8/51.2 | 28.7/32.6 | 57.8/70.8 | 45.8/57.9
6 | 60.3/68.9 | 35.1/52.1 | 28.7/32.4 | 58.2/71.7 | 46.6/58.5
7 | 59.7/68.2 | 35.6/52.9 | 28.0/31.5 | 58.1/71.4 | 46.4/58.4
8 | 59.8/68.3 | 35.1/52.4 | 28.2/32.0 | 58.1/71.4 | 46.3/58.3
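(Reading note: this table sweeps the number of historical frames kept at inference; the surrounding text describes training with a memory size of 2, evaluating with sizes 1 to 8, and peaking at 6. A hedged sketch of what such an evaluation-time memory could look like; the class and method names are illustrative, not the paper's code:)

```python
from collections import deque

class TemplateMemory:
    """FIFO buffer of the most recent K tracked frames (K = "memory size")."""

    def __init__(self, size: int = 6):      # 6 gave the best mean in the sweep
        self.buffer = deque(maxlen=size)    # oldest frame is evicted first

    def update(self, template_feats):
        self.buffer.append(template_feats)  # push features of the newest frame

    def read(self):
        return list(self.buffer)            # historical templates for matching
```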
", + "image_path": "cc47e9e88f0ca5e74568cf84967b47c44992a7b3b980ab006a56a5b5abbb5e4b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 374, + 482, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 374, + 482, + 469 + ], + "spans": [ + { + "bbox": [ + 130, + 374, + 482, + 469 + ], + "type": "text", + "content": "Memory Size. Intuitively, trackers will achieve better performance when leveraging more temporal information. However, the performance of the trackers cannot continuously improve with the accumulation of historical information, due to inaccuracies in the historical tracklets. As shown in Tab. 6, we train HVTrack with a memory size of 2 due to the GPU memory limitation, and evaluate it with memory sizes from 1 to 8. The performance peaks for a memory size of 6, which is consistent with our assumption. Thus, we set 6 as our memory size and achieve a tracking speed of 31 FPS." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 491, + 220, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 491, + 220, + 504 + ], + "spans": [ + { + "bbox": [ + 132, + 491, + 220, + 504 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 521, + 482, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 521, + 482, + 605 + ], + "spans": [ + { + "bbox": [ + 130, + 521, + 482, + 605 + ], + "type": "text", + "content": "In this paper, we have explored a new task in 3D SOT, and presented the first 3D SOT framework for high temporal variation scenarios, HVTrack. Its three main components, RPM, BEA, and CPA, allow HVTrack to achieve robustness to point cloud variations, similar object distractions, and background noise. Our experiments have demonstrated that HVTrack significantly outperforms the state of the art in high temporal variation scenarios, and achieves remarkable performance in regular tracking." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 605, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 482, + 665 + ], + "type": "text", + "content": "**Limitation.** Our CPA relies on fixed manual hyperparameters to suppress noise. This makes it difficult to balance the performance in different object and search area sizes, leading to a performance drop in tracking large objects. In the future, we will therefore explore the use of a learnable function to replace the manual hyperparameters in CPA and overcome the large object tracking challenge." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 217, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 217, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 217, + 102 + ], + "type": "text", + "content": "Q. Wu et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 143, + 482, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 143, + 482, + 167 + ], + "spans": [ + { + "bbox": [ + 132, + 143, + 482, + 167 + ], + "type": "text", + "content": "This work is supported in part by the National Natural Science Foundation of China (NFSC) under Grants 62372377 and 62176242." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 190, + 197, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 190, + 197, + 201 + ], + "spans": [ + { + "bbox": [ + 133, + 190, + 197, + 201 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 217, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 138, + 217, + 480, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 217, + 480, + 251 + ], + "spans": [ + { + "bbox": [ + 138, + 217, + 480, + 251 + ], + "type": "text", + "content": "1. Chen, X., Shi, S., Zhang, C., Zhu, B., Wang, Q., Cheung, K.C., See, S., Li, H.: Trajectoryformer: 3d object tracking transformer with predictive trajectory hypotheses. arXiv preprint arXiv:2306.05888 (2023)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 251, + 480, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 251, + 480, + 273 + ], + "spans": [ + { + "bbox": [ + 138, + 251, + 480, + 273 + ], + "type": "text", + "content": "2. Cheng, R., Wang, X., Sohel, F., Lei, H.: Topology-aware universal adversarial attack on 3d object tracking. Visual Intelligence 1(1), 31 (2023)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 274, + 480, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 274, + 480, + 295 + ], + "spans": [ + { + "bbox": [ + 138, + 274, + 480, + 295 + ], + "type": "text", + "content": "3. Chiu, H.k., Prioletti, A., Li, J., Bohg, J.: Probabilistic 3d multi-object tracking for autonomous driving. arXiv preprint arXiv:2001.05673 (2020)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 297, + 480, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 297, + 480, + 318 + ], + "spans": [ + { + "bbox": [ + 138, + 297, + 480, + 318 + ], + "type": "text", + "content": "4. Chung, J., Gulcehre, C., Cho, K., Bengio, Y.: Empirical evaluation of gated recurrent neural networks on sequence modeling. arXiv preprint arXiv:1412.3555 (2014)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 319, + 480, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 319, + 480, + 341 + ], + "spans": [ + { + "bbox": [ + 138, + 319, + 480, + 341 + ], + "type": "text", + "content": "5. Cui, Y., Fang, Z., Shan, J., Gu, Z., Zhou, S.: 3d object tracking with transformer. 
arXiv preprint arXiv:2110.14921 (2021)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 342, + 480, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 342, + 480, + 373 + ], + "spans": [ + { + "bbox": [ + 138, + 342, + 480, + 373 + ], + "type": "text", + "content": "6. Ding, S., Rehder, E., Schneider, L., Cordts, M., Gall, J.: 3dmotformer: Graph transformer for online 3d multi-object tracking. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9784-9794 (2023)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 375, + 480, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 375, + 480, + 407 + ], + "spans": [ + { + "bbox": [ + 138, + 375, + 480, + 407 + ], + "type": "text", + "content": "7. Fang, Z., Zhou, S., Cui, Y., Scherer, S.: 3d-siamrpn: An end-to-end learning method for real-time 3d single object tracking using raw point cloud. IEEE Sensors Journal 21(4), 4995-5011 (2020)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 409, + 480, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 409, + 480, + 441 + ], + "spans": [ + { + "bbox": [ + 138, + 409, + 480, + 441 + ], + "type": "text", + "content": "8. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? the KITTI vision benchmark suite. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 3354-3361 (2012)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 442, + 480, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 442, + 480, + 475 + ], + "spans": [ + { + "bbox": [ + 138, + 442, + 480, + 475 + ], + "type": "text", + "content": "9. Giancola, S., Zarzar, J., Ghanem, B.: Leveraging shape completion for 3d siamese tracking. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1359-1368 (2019)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 475, + 480, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 475, + 480, + 508 + ], + "spans": [ + { + "bbox": [ + 138, + 475, + 480, + 508 + ], + "type": "text", + "content": "10. Guo, Z., Mao, Y., Zhou, W., Wang, M., Li, H.: Cmt: Context-matching-guided transformer for 3d tracking in point clouds. In: European Conference on Computer Vision. pp. 95-111. Springer (2022)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 509, + 480, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 509, + 480, + 541 + ], + "spans": [ + { + "bbox": [ + 138, + 509, + 480, + 541 + ], + "type": "text", + "content": "1. Hui, L., Wang, L., Cheng, M., Xie, J., Yang, J.: 3d siamese voxel-to-bev tracker for sparse point clouds. Advances in Neural Information Processing Systems 34, 28714-28727 (2021)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 543, + 480, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 543, + 480, + 575 + ], + "spans": [ + { + "bbox": [ + 138, + 543, + 480, + 575 + ], + "type": "text", + "content": "2. Hui, L., Wang, L., Tang, L., Lan, K., Xie, J., Yang, J.: 3d siamese transformer network for single object tracking on point clouds. 
arXiv preprint arXiv:2207.11995 (2022)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 576, + 480, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 576, + 480, + 597 + ], + "spans": [ + { + "bbox": [ + 138, + 576, + 480, + 597 + ], + "type": "text", + "content": "3. Jiao, L., Wang, D., Bai, Y., Chen, P., Liu, F.: Deep learning in visual tracking: A review. IEEE transactions on neural networks and learning systems (2021)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 138, + 599, + 480, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 599, + 480, + 620 + ], + "spans": [ + { + "bbox": [ + 138, + 599, + 480, + 620 + ], + "type": "text", + "content": "4. Jiayao, S., Zhou, S., Cui, Y., Fang, Z.: Real-time 3d single object tracking with transformer. IEEE Transactions on Multimedia (2022)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 138, + 621, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 621, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 621, + 480, + 665 + ], + "type": "text", + "content": "5. Kapania, S., Saini, D., Goyal, S., Thakur, N., Jain, R., Nagrath, P.: Multi object tracking with uavs using deep sort and yolov3 retina detection framework. In: Proceedings of the 1st ACM Workshop on Autonomous and Intelligent Mobile Systems. pp. 1-6 (2020)" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D SOT in Point Clouds with High Temporal Variation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 666 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 133, + 116, + 481, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 116, + 481, + 160 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 481, + 160 + ], + "type": "text", + "content": "16. Kart, U., Lukezic, A., Kristan, M., Kamarainen, J.K., Matas, J.: Object tracking by reconstruction with view-specific discriminative correlation filters. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 1339-1348 (2019)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 160, + 481, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 160, + 481, + 193 + ], + "spans": [ + { + "bbox": [ + 133, + 160, + 481, + 193 + ], + "type": "text", + "content": "17. Lan, K., Jiang, H., Xie, J.: Temporal-aware siamese tracker: Integrate temporal context for 3d object tracking. In: Proceedings of the Asian Conference on Computer Vision. pp. 
399-414 (2022)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 193, + 481, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 193, + 481, + 225 + ], + "spans": [ + { + "bbox": [ + 133, + 193, + 481, + 225 + ], + "type": "text", + "content": "18. Luo, C., Yang, X., Yuille, A.: Exploring simple 3d multi-object tracking for autonomous driving. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10488-10497 (2021)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 133, + 225, + 481, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 225, + 481, + 257 + ], + "spans": [ + { + "bbox": [ + 133, + 225, + 481, + 257 + ], + "type": "text", + "content": "19. Machida, E., Cao, M., Murao, T., Hashimoto, H.: Human motion tracking of mobile robot with Kinect 3d sensor. In: Proceedings of SICE Annual Conference (SICE). pp. 2207-2211. IEEE (2012)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 257, + 481, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 257, + 481, + 289 + ], + "spans": [ + { + "bbox": [ + 132, + 257, + 481, + 289 + ], + "type": "text", + "content": "20. Nishimura, H., Komorita, S., Kawanishi, Y., Murase, H.: Sdof-tracker: Fast and accurate multiple human tracking by skipped-detection and optical-flow. IEICE TRANSACTIONS on Information and Systems 105(11), 1938-1946 (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 289, + 481, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 289, + 481, + 322 + ], + "spans": [ + { + "bbox": [ + 132, + 289, + 481, + 322 + ], + "type": "text", + "content": "21. Pang, Z., Li, Z., Wang, N.: Model-free vehicle tracking and state estimation in point cloud sequences. In: 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 8075-8082. IEEE (2021)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 322, + 481, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 322, + 481, + 354 + ], + "spans": [ + { + "bbox": [ + 132, + 322, + 481, + 354 + ], + "type": "text", + "content": "22. Qi, C.R., Litany, O., He, K., Guibas, L.J.: Deep hough voting for 3d object detection in point clouds. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9277-9286 (2019)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 354, + 481, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 354, + 481, + 386 + ], + "spans": [ + { + "bbox": [ + 132, + 354, + 481, + 386 + ], + "type": "text", + "content": "23. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems 30 (2017)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 386, + 481, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 386, + 481, + 418 + ], + "spans": [ + { + "bbox": [ + 132, + 386, + 481, + 418 + ], + "type": "text", + "content": "24. Qi, H., Feng, C., Cao, Z., Zhao, F., Xiao, Y.: P2b: Point-to-box network for 3d object tracking in point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
6329-6338 (2020)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 418, + 481, + 451 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 418, + 481, + 451 + ], + "spans": [ + { + "bbox": [ + 132, + 418, + 481, + 451 + ], + "type": "text", + "content": "25. Ren, C., Xu, Q., Zhang, S., Yang, J.: Hierarchical prior mining for non-local multiview stereo. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 3611-3620 (2023)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 450, + 481, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 450, + 481, + 472 + ], + "spans": [ + { + "bbox": [ + 132, + 450, + 481, + 472 + ], + "type": "text", + "content": "26. Ren, S., Yang, X., Liu, S., Wang, X.: Sg-former: Self-guided transformer with evolving token reallocation. arXiv preprint arXiv:2308.12216 (2023)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 472, + 481, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 472, + 481, + 504 + ], + "spans": [ + { + "bbox": [ + 132, + 472, + 481, + 504 + ], + "type": "text", + "content": "27. Sadjadpour, T., Li, J., Ambrus, R., Bohg, J.: Shasta: Modeling shape and spatiotemporal affinities for 3d multi-object tracking. IEEE Robotics and Automation Letters (2023)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 503, + 481, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 503, + 481, + 536 + ], + "spans": [ + { + "bbox": [ + 132, + 503, + 481, + 536 + ], + "type": "text", + "content": "28. Shan, J., Zhou, S., Fang, Z., Cui, Y.: Ptt: Point-track-transformer module for 3d single object tracking in point clouds. In: Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 1310-1316 (2021)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 536, + 481, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 536, + 481, + 568 + ], + "spans": [ + { + "bbox": [ + 132, + 536, + 481, + 568 + ], + "type": "text", + "content": "29. Srivastava, N., Hinton, G., Krizhevsky, A., Sutskever, I., Salakhutdinov, R.: Dropout: a simple way to prevent neural networks from overfitting. The journal of machine learning research 15(1), 1929-1958 (2014)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 568, + 481, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 568, + 481, + 612 + ], + "spans": [ + { + "bbox": [ + 132, + 568, + 481, + 612 + ], + "type": "text", + "content": "30. Sun, P., Kretzschmar, H., Dotiwalla, X., Chouard, A., Patnaik, V., Tsui, P., Guo, J., Zhou, Y., Chai, Y., Caine, B., et al.: Scalability in perception for autonomous driving: Waymo open dataset. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 2446-2454 (2020)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 612, + 481, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 612, + 481, + 643 + ], + "spans": [ + { + "bbox": [ + 132, + 612, + 481, + 643 + ], + "type": "text", + "content": "31. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. 
Advances in neural information processing systems 30 (2017)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 643, + 481, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 643, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 132, + 643, + 481, + 666 + ], + "type": "text", + "content": "32. Wang, Q., Chen, Y., Pang, Z., Wang, N., Zhang, Z.: Immortal tracker: Tracklet never dies. arXiv preprint arXiv:2111.13672 (2021)" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 217, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 217, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 217, + 102 + ], + "type": "text", + "content": "Q. Wu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 643 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 481, + 149 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 481, + 149 + ], + "type": "text", + "content": "33. Wang, Y., Sun, Y., Liu, Z., Sarma, S.E., Bronstein, M.M., Solomon, J.M.: Dynamic graph cnn for learning on point clouds. ACM Transactions on Graphics (tog) 38(5), 1-12 (2019)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 150, + 481, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 150, + 481, + 183 + ], + "spans": [ + { + "bbox": [ + 132, + 150, + 481, + 183 + ], + "type": "text", + "content": "34. Wang, Z., Xie, Q., Lai, Y.K., Wu, J., Long, K., Wang, J.: Mlvsnet: Multi-level voting siamese network for 3d visual tracking. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 3101-3110 (2021)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 183, + 481, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 183, + 481, + 216 + ], + "spans": [ + { + "bbox": [ + 132, + 183, + 481, + 216 + ], + "type": "text", + "content": "35. Weng, X., Wang, J., Held, D., Kitani, K.: 3d multi-object tracking: A baseline and new evaluation metrics. In: 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 10359-10366. IEEE (2020)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 216, + 481, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 216, + 481, + 258 + ], + "spans": [ + { + "bbox": [ + 132, + 216, + 481, + 258 + ], + "type": "text", + "content": "36. Weng, X., Wang, Y., Man, Y., Kitani, K.M.: Gnn3dmot: Graph neural network for 3d multi-object tracking with 2d-3d multi-feature learning. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 6499-6508 (2020)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 259, + 481, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 259, + 481, + 303 + ], + "spans": [ + { + "bbox": [ + 132, + 259, + 481, + 303 + ], + "type": "text", + "content": "37. 
Wu, Q., Yang, J., Sun, K., Zhang, C., Zhang, Y., Salzmann, M.: Mixcycle: Mixup assisted semi-supervised 3d single object tracking with cycle consistency. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). pp. 13956-13966 (2023)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 304, + 481, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 304, + 481, + 335 + ], + "spans": [ + { + "bbox": [ + 132, + 304, + 481, + 335 + ], + "type": "text", + "content": "38. Wu, Y., Lim, J., Yang, M.H.: Online object tracking: A benchmark. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 2411-2418 (2013)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 336, + 481, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 336, + 481, + 369 + ], + "spans": [ + { + "bbox": [ + 132, + 336, + 481, + 369 + ], + "type": "text", + "content": "39. Xu, T.X., Guo, Y.C., Lai, Y.K., Zhang, S.H.: Cxtrack: Improving 3d point cloud tracking with contextual information. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 1084-1093 (2023)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 369, + 481, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 369, + 481, + 402 + ], + "spans": [ + { + "bbox": [ + 132, + 369, + 481, + 402 + ], + "type": "text", + "content": "40. Yin, T., Zhou, X., Krahenbuhl, P.: Center-based 3d object detection and tracking. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 11784-11793 (2021)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 402, + 481, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 402, + 481, + 435 + ], + "spans": [ + { + "bbox": [ + 132, + 402, + 481, + 435 + ], + "type": "text", + "content": "41. Yoo, J.S., Lee, H., Jung, S.W.: Video object segmentation-aware video frame interpolation. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 12322-12333 (2023)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 435, + 481, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 435, + 481, + 456 + ], + "spans": [ + { + "bbox": [ + 132, + 435, + 481, + 456 + ], + "type": "text", + "content": "42. Zarzar, J., Giancola, S., Ghanem, B.: Efficient bird eye view proposals for 3d siamese tracking. arXiv preprint arXiv:1903.10168 (2019)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 456, + 481, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 456, + 481, + 490 + ], + "spans": [ + { + "bbox": [ + 132, + 456, + 481, + 490 + ], + "type": "text", + "content": "43. Zhang, X., Yang, J., Zhang, S., Zhang, Y.: 3d registration with maximal cliques. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 17745-17754 (2023)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 490, + 481, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 490, + 481, + 533 + ], + "spans": [ + { + "bbox": [ + 132, + 490, + 481, + 533 + ], + "type": "text", + "content": "44. Zheng, C., Yan, X., Gao, J., Zhao, W., Zhang, W., Li, Z., Cui, S.: Box-aware feature enhancement for single object tracking on point clouds. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 
13199-13208 (2021)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 533, + 481, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 533, + 481, + 578 + ], + "spans": [ + { + "bbox": [ + 132, + 533, + 481, + 578 + ], + "type": "text", + "content": "45. Zheng, C., Yan, X., Zhang, H., Wang, B., Cheng, S., Cui, S., Li, Z.: Beyond 3d siamese tracking: A motion-centric paradigm for 3d single object tracking in point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8111-8120 (2022)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 578, + 481, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 578, + 481, + 620 + ], + "spans": [ + { + "bbox": [ + 132, + 578, + 481, + 620 + ], + "type": "text", + "content": "46. Zhou, C., Luo, Z., Luo, Y., Liu, T., Pan, L., Cai, Z., Zhao, H., Lu, S.: Ptttr: Relational 3d point cloud object tracking with transformer. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8531-8540 (2022)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 621, + 481, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 621, + 481, + 643 + ], + "spans": [ + { + "bbox": [ + 132, + 621, + 481, + 643 + ], + "type": "text", + "content": "47. Zhou, X., Koltun, V., Krähenbuhl, P.: Tracking objects as points. In: European conference on computer vision. pp. 474-490. Springer (2020)" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 220, + 91, + 447, + 102 + ], + "type": "text", + "content": "3D SOT in Point Clouds with High Temporal Variation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Small Object Detection with Dynamic Spatial Pruning/2b6d04de-265e-4c48-b6b1-d03973f89d8a_content_list.json b/2024/3D Small Object Detection with Dynamic Spatial Pruning/2b6d04de-265e-4c48-b6b1-d03973f89d8a_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..587094b31d96ce3cc71d593601247a67a4179108 --- /dev/null +++ b/2024/3D Small Object Detection with Dynamic Spatial Pruning/2b6d04de-265e-4c48-b6b1-d03973f89d8a_content_list.json @@ -0,0 +1,1649 @@ +[ + { + "type": "text", + "text": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning", + "text_level": 1, + "bbox": [ + 232, + 140, + 772, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiuwei $\\mathrm{Xu}^{1\\star}$ , Zhihao $\\mathrm{Sun}^{2*}$ , Ziwei Wang $^3$ , Hongmin Liu $^{2\\dagger}$ , Jie Zhou $^1$ , Jiwen Lu $^{1\\dagger}$", + "bbox": [ + 220, + 210, + 781, + 227 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Tsinghua University", + "bbox": [ + 431, + 227, + 573, + 241 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ University of Science and Technology Beijing", + "bbox": [ + 346, + 242, + 
658, + 258 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{3}$ Carnegie Mellon University", + "bbox": [ + 406, + 258, + 599, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "xxw21@mails.tsinghua.edu.cn; d202210361@xs.ustb.edu.cn;", + "bbox": [ + 259, + 275, + 738, + 287 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ziweiwa2@andrew.cmu.edu; hmliu@ustb.edu.cn;", + "bbox": [ + 313, + 290, + 687, + 303 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{jzhou,lujiwen}@tsinghua.edu.cn", + "bbox": [ + 361, + 305, + 642, + 318 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. In this paper, we propose an efficient feature pruning strategy for 3D small object detection. Conventional 3D object detection methods struggle on small objects due to the weak geometric information from a small number of points. Although increasing the spatial resolution of feature representations can improve the detection performance on small objects, the additional computational overhead is unaffordable. With in-depth study, we observe the growth of computation mainly comes from the upsampling operation in the decoder of 3D detector. Motivated by this, we present a multi-level 3D detector named DSPDet3D which benefits from high spatial resolution to achieve high accuracy on small object detection, while reducing redundant computation by only focusing on small object areas. Specifically, we theoretically derive a dynamic spatial pruning (DSP) strategy to prune the redundant spatial representation of 3D scene in a cascade manner according to the distribution of objects. Then we design DSP module following this strategy and construct DSPDet3D with this efficient module. On ScanNet and TO-SCENE dataset, our method achieves leading performance on small object detection. Moreover, DSPDet3D trained with only ScanNet rooms can generalize well to scenes in larger scale. It takes less than 2s to directly process a whole building consisting of more than $4500\\mathrm{k}$ points while detecting out almost all objects, ranging from cups to beds, on a single RTX 3090 GPU. Code.", + "bbox": [ + 259, + 358, + 743, + 623 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: 3D small object detection $\\cdot$ Spatial pruning $\\cdot$ Efficient inference", + "bbox": [ + 261, + 641, + 714, + 655 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 675, + 356, + 691 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D object detection is a fundamental scene understanding problem, which aims to detect 3D bounding boxes and semantic labels from a point cloud of 3D scene. With the recent advances of deep learning techniques on point cloud understanding [7, 13, 34, 35], 3D detection methods have shown remarkable progress [39, 40, 46, 56]. However, with 3D object detection being widely adopted in fields like robotics [30, 57] and autonomous driving [2] which require highly precise and fine-grained perception, small object detection becomes one of the most important yet unsolved problems. In autonomous driving", + "bbox": [ + 212, + 709, + 787, + 815 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Equal contribution. 
† Corresponding author.", + "bbox": [ + 218, + 824, + 501, + 840 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/750fbe39739c992072150d0c57b35ff3953b106544110e86c67298469a085483.jpg", + "image_caption": [ + "Fig.1: Trained with only rooms from ScanNet, our DSPDet3D generalizes well to process a whole house with dozens of rooms. It takes less than 2s to generate fine-grained detection results with a RTX 3090 single GPU." + ], + "image_footnote": [], + "bbox": [ + 217, + 143, + 782, + 328 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "scenarios [12], we observe a significant performance gap between cars and pedestrians. In indoor scenes [4,9] where the size variance is much larger (e.g. a bed is 1000x larger than a cup), detecting small objects is more difficult. We focus on indoor 3D object detection task where scenes are crowded with objects of multiple categories and sizes.", + "bbox": [ + 212, + 414, + 784, + 474 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "For indoor 3D object detection, although great improvement has been achieved in both speed and accuracy on previous benchmarks [1,9,43], they are still far from general purpose 3D object detection due to the limited range of object size they can handle. For instance, these methods focus on furniture-level objects such as bed and table, while smaller ones like laptop, keyboard and bottle are ignored. With the arrival of 3D small object benchmarks [37, 50, 51] which contain objects with wider size variance (e.g. from tabletop object like cup to large furniture like bed), it is shown that previous 3D detectors get very low accuracy on small objects and some even fail to detect any small objects. This is because extracting fine-grained representation for a large scene is too computationally expensive, so current methods aggressively downsamples the 3D features, which harms the representation of small objects.", + "bbox": [ + 212, + 476, + 787, + 642 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we propose a dynamic spatial pruning approach for 3D small object detection. Although increasing the spatial resolution of the feature representations is a simple and effective way to boost the performance of 3D small object detection, the large computational overhead makes this plan infeasible for real application. With in-depth study, we observe the memory footprint mainly comes from the huge number of features generated by the upsampling operation in the decoder of 3D detector. Inspired by the fact that small objects only occupy a small proportion of space, we adopt a multi-level detection framework to detect different sizes of objects in different levels. As the multi-level detector has already detected out larger objects in lower resolution, there are many redundant features in the scene representations of higher resolution. To this end, we propose to dynamically prune the features after detecting out objects in each level, which skips the upsampling operation at regions where there is no smaller object. 
Specifically, we first theoretically derive a pruning mask generation strategy to", + "bbox": [ + 212, + 643, + 787, + 840 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Authors Suppressed Due to Excessive Length", + "bbox": [ + 271, + 114, + 542, + 128 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/5ba71b0b125f9fbe0d2601e0cf6b5bd34589191d8f4bee751bfbfbb72fbfda88.jpg", + "image_caption": [ + "Fig. 2: Detection accuracy (mAP@0.25 of all categories) and speed (FPS) of mainstream 3D object detection methods on TO-SCENE dataset. Our DSPDet3D shows absolute advantage on 3D small object detection and provides flexible accuracy-speed tradeoff by simply adjusting the pruning threshold without retraining." + ], + "image_footnote": [], + "bbox": [ + 287, + 148, + 709, + 297 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "supervise the pruning module, which prunes as much features as possible while not affecting the features of object proposals. Then we design a dynamic spatial pruning (DSP) module according to the theoretical analysis and use it to construct a 3D object detector named DSPDet3D. On the popular ScanNet [9] dataset, DSPDet3D improves the mAP of all categories by $3\\%$ and mAP of small object by $14\\%$ compared with current state-of-the-art. On TO-SCENE [50] dataset with more tabletop objects, we improve the mAP of all categories by $8\\%$ while achieving leading inference speed among all mainstream indoor 3D object detection methods.", + "bbox": [ + 215, + 393, + 785, + 513 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 217, + 536, + 364, + 553 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Indoor 3D object detection: Since PointNet and PointNet++ [34, 35], deep learning-based 3D detection methods for point clouds begin to emerge in recent years, which can be mainly divided into three categories: voting-based [6, 33, 47, 49, 54], transformer-based [26, 28] and voxel-based [14, 38, 39, 46] methods. Inspired by 2D hough voting, VoteNet [33] proposes the first voting-based 3D detector, which aggregates the point features on surfaces into object center by 3D voting and predicts bounding boxes from the voted centers. Drawing on the success of transformer-based detector [3] in 2D domain, GroupFree3D [26] and 3DETR [28] adopts transformer architecture to decode the object proposals into 3D boxes. As extracting point features require time-consuming sampling and aggregation operation, GSDN [14] proposes a fully convolutional detection network based on sparse convolution [7, 13, 19, 52], which achieves much faster speed. FCAF3D [38] and TR3D [39] further improves the performance of GSDN with a simple anchor-free architecture. Our method also adopts voxel-based architecture considering its efficiency and scalability.", + "bbox": [ + 215, + 566, + 785, + 779 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Small object detection: Small object detection [45] is a challenging problem in 2D vision due to the low-resolution features. 
To tackle this, a series of methods have been proposed, which can be categorized into three types: (1) small object augmentation and oversampling methods [17, 25, 58]; (2) scale-aware training and inference", + "bbox": [ + 215, + 779, + 785, + 839 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning", + "bbox": [ + 313, + 114, + 730, + 128 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 785, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "strategy [11, 31, 41, 42]; (3) increasing the resolution of features or generating high-resolution features [5, 10, 21, 22, 48, 53]. However, there are far less works about 3D small object detection due to the limit of data and network capability. BackToReality [51] proposes ScanNet-md40 benchmark which contains small objects and finds many current methods suffer a lot in small object detection. TO-SCENE [50] proposes a new dataset and learning strategy for understanding 3D tabletop scenes. However, it relies on densely sampled points from CAD models, which is infeasible in practical scenarios where the points from small objects are very sparse. In contrast, we aim to directly detect small objects from naturally sampled point clouds.", + "bbox": [ + 212, + 146, + 787, + 282 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Network pruning: Network pruning can be divided into two categories: architecture pruning [15, 16, 18, 20, 27, 29] and spatial pruning [24, 36, 55]. Architecture pruning aims to remove a portion of weights from a neural network to shrink the size of a network, which includes unstructured pruning [15, 18, 29] and structured pruning [16, 20, 27]. The former removes network weights without a predefined structure, while the latter removes whole channels or network layers. On the contrary, spatial pruning does not prune the parameters of a network, but spatially removing redundant computation on the feature maps. DynamicViT [36] prunes the tokens in vision transformer with an attention masking strategy. SPS-Conv [24] dynamically prunes the convolutional kernel to suppress the activation on background voxels in sparse convolution layer. Ada3D [55] proposes a pruning framework for 3D and BEV features. Our dynamic spatial pruning method also belongs to spatial pruning, which directly removes redundant voxel features level by level according to the distribution of objects.", + "bbox": [ + 212, + 282, + 787, + 479 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Approach", + "text_level": 1, + "bbox": [ + 215, + 498, + 333, + 516 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we describe our DSPDet3D for efficient 3D small object detection. We first revisit the multi-level 3D detector and analyze the computational cost distribution. Then we propose dynamic spatial pruning with theoretical analysis on how to prune features without affecting detection performance. 
Finally, we design the DSP module according to the theoretical analysis and use it to construct DSPDet3D.", + "bbox": [ + 212, + 527, + 787, + 603 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Analysis on Multi-level 3D Detector", + "text_level": 1, + "bbox": [ + 215, + 621, + 504, + 638 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Preliminaries: We choose a multi-level FCOS-like [44] 3D detector [38, 39] with sparse convolution [7, 13] for small object detection due to its high performance in both accuracy and speed (more details can be found in Tables 1 and 2).", + "bbox": [ + 212, + 643, + 787, + 688 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As shown in Figure 3 (middle), after extracting backbone features, a multi-level detector iteratively upsamples the voxel feature representations to different levels. At each level, all voxels are regarded as object proposals to predict bounding boxes and category scores. Generative upsampling is widely adopted in this kind of architecture [14, 38, 39] to expand the voxels from object surfaces to the whole 3D space, where object proposals located at object centers can produce accurate predictions. During training, ground-truth bounding boxes are assigned to different levels and each box assigns several nearby voxels as positive object proposals. Only box predictions from positive object proposals are supervised. At inference time, all voxel features from the decoder are used to predict bounding boxes, which are then filtered by 3D NMS.", + "bbox": [ + 212, + 689, + 787, + 840 + ], + "page_idx": 3 + },
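To make the pipeline above concrete, here is a minimal, dense-tensor sketch of such a multi-level decoder. It is an illustration only: the detectors discussed here operate on sparse voxels (e.g., via MinkowskiEngine), and the module names, channel widths and head layout below are our own assumptions rather than any reference implementation.

```python
# Minimal dense-tensor analogue of a multi-level decoder with generative
# upsampling; every voxel at every level acts as an object proposal.
import torch
import torch.nn as nn

class ToyMultiLevelDecoder(nn.Module):
    def __init__(self, channels=64, num_levels=4, num_classes=18):
        super().__init__()
        # Transposed convolutions play the role of "generative upsampling":
        # they expand features from object surfaces toward the whole space.
        self.ups = nn.ModuleList(
            nn.ConvTranspose3d(channels, channels, kernel_size=2, stride=2)
            for _ in range(num_levels - 1)
        )
        # A shared head predicts per-voxel box parameters (6) + class scores.
        self.head = nn.Conv3d(channels, 6 + num_classes, kernel_size=1)

    def forward(self, backbone_feats):
        # backbone_feats: list of (B, C, D, H, W) volumes, coarsest first,
        # each level doubling the spatial resolution of the previous one.
        preds, f = [], None
        for level, f_b in enumerate(backbone_feats):
            f = f_b if f is None else f_b + self.ups[level - 1](f)
            preds.append(self.head(f))
        return preds  # at inference, predictions are filtered by 3D NMS
```

Because the number of voxels roughly multiplies with every upsampling step, the later (finer) levels dominate the cost, which is exactly the imbalance discussed next.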
+ { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 116, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Authors Suppressed Due to Excessive Length", + "bbox": [ + 271, + 114, + 542, + 128 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/b45a131204e886a8f409076fc7c514b531393d4deedaba2bde0715dcb52c37c8.jpg", + "image_caption": [ + "Fig. 3: Comparison of the decoder in a typical multi-level 3D object detector [39] and in our DSPDet3D. Note that the sparsity of voxels in the decoder changes due to the generative upsampling operation. After detecting objects at a level, DSPDet3D prunes redundant voxel features according to the distribution of objects before each upsampling operation. Red boxes indicate all pruned voxels and 'scissor' boxes indicate voxels pruned in the previous layer. $\{O\}$ is the set of all objects and $\{O_i\}$ is the set of objects assigned to level $i$." ], + "image_footnote": [], + "bbox": [ + 274, + 143, + 370, + 411 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/ef1b0796b11fb2f8e2b8b609482f7e84b450d9dd07fe044e0e60bb7b000db33d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 383, + 142, + 549, + 412 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1ebd3517ad5e81e0ae90038b2f854a9731ee5e7219e636d0a4e2c2f1ebe0b857.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 563, + 143, + 728, + 412 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Increasing spatial resolution: Based on the multi-level architecture, a simple way to boost the performance of small object detection is to increase the spatial resolution of the feature maps, i.e., to voxelize the point clouds into smaller voxels that better preserve geometric information. Taking TR3D [39] as an example, we double its spatial resolution and show the results in Figure 4. It can be seen that the performance on small objects clearly benefits from higher resolution, but the computational overhead grows dramatically at the same time. As 3D object detection is usually adopted in tasks that require real-time inference under limited resources, such as AR/VR and robotic navigation, directly increasing the spatial resolution is infeasible. Notably, we find the computation growth is imbalanced: the decoder layers (including detection heads) account for most of the memory footprint and have a larger memory growth ratio than the backbone. This indicates that the generative upsampling operation significantly increases the number of voxels when the spatial resolution is high, which is the main challenge for scaling up the spatial resolution of multi-level detectors.", + "bbox": [ + 212, + 537, + 787, + 750 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 Dynamic Spatial Pruning", + "text_level": 1, + "bbox": [ + 214, + 768, + 434, + 786 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Since small objects only occupy a small proportion of space, we assume there is a large amount of redundant computation in the decoder layers, especially when the resolution is high. For instance, if a bed is detected in Layer 4, the region near this bed may be less", + "bbox": [ + 212, + 794, + 787, + 840 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning", + "bbox": [ + 313, + 114, + 732, + 128 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 785, + 126 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/9de2f2472f930b1efb99084b137643f47518bbcd7d1f0bd7af6b0e3642260016.jpg", + "image_caption": [ + "Fig. 4: The memory footprint distribution of different multi-level detectors. Layer 4 to Layer 1 refer to decoder layers (including detection heads) from coarse to fine. When doubling the spatial resolution of TR3D, the performance on 3D small object detection improves from $52.7\%$ to $62.8\%$ while the memory footprint increases dramatically. We find that the decoder layers account for most of the costs. DSPDet3D efficiently reduces redundant computation on these layers, achieving both fast speed and high accuracy." ], + "image_footnote": [], + "bbox": [ + 277, + 147, + 725, + 292 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "informative for detecting other objects in the following decoder layers. If we can skip the upsampling operation at these regions, the voxels will be sparsified level by level, as shown in Figure 3 (right). In this way, small objects can be detected in Layer 1 from only a small number of voxels. Inspired by this, we propose to dynamically prune the voxel features according to the distribution of objects.", + "bbox": [ + 212, + 420, + 787, + 494 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "However, pruning a voxel will not only reduce the number of object proposals in the following levels, but also change the following voxel features computed based on the pruned voxel. Therefore, in order to reduce the redundant computation of the multi-level detector without degrading the detection performance, a carefully designed pruning strategy is required.
We give the theoretical derivation below.", + "bbox": [ + 212, + 497, + 787, + 571 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Problem formulation: For each scene, we denote $\{O\}$ as the set of all objects, $\{O_i\}$ as the set of objects assigned to level $i^1$ during training, and $f_{i} \in \mathbb{R}^{N \times (3 + C)}$ as the voxel features of level $i$. We aim to prune $f_{i}$ after detecting $\{O_i\}$, where the objective is to remove as many voxels as possible while keeping the predictions of $\{O\} \backslash \{O_i\}$ unaffected after the pruning. For each object $o_j$ at level $j$ ($j < i$), we assume its prediction is unaffected if the voxel features at level $j$ near its center $c_j$ are unaffected. We make this assumption because most true positive predictions come from object proposals located at the centers of bounding boxes [14, 44]. We denote the expected unaffected neighborhood as $\mathcal{C}_j(c_j, P)$, a cube centered at $c_j$ with $P \times P \times P$ voxels at level $j$. Given the symmetry, $P$ should be odd. Then we formulate the objective of our pruning strategy at level $i$ as:", + "bbox": [ + 212, + 574, + 789, + 742 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\underset{\mathcal{K}_i}{\text{minimize}} \sum_{x, y, z} M_i[x][y][z], \quad M_i = \bigvee_{j=1}^{i-1} \mathcal{K}_i(\boldsymbol{c_j}),\n$$\n", + "text_format": "latex", + "bbox": [ + 344, + 757, + 658, + 787 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\mathrm{s.t.}\ \forall j < i,\ \mathcal{C}_j(\boldsymbol{c}_j, P) \cap \mathcal{A}_{i,j}(\neg \mathcal{K}_i(\boldsymbol{c}_j) \star f_i) = \varnothing \tag{1}\n$$\n", + "text_format": "latex", + "bbox": [ + 338, + 791, + 785, + 809 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Authors Suppressed Due to Excessive Length", + "bbox": [ + 271, + 114, + 542, + 128 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "1 We adopt the same definition of level as in Figure 3, where level $i$ is finer than level $i + 1$.", + "bbox": [ + 220, + 824, + 767, + 840 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $M_{i} \in \mathbb{R}^{N}$ is a binary pruning mask of the same length as $f_{i}$, in which 0 indicates removal and 1 indicates retention under the pruning operation $\star$. $\mathcal{K}_i(\cdot)$ is the generation strategy of the pruning mask for each object, which generates a binary pruning mask conditioned on the object center. $\mathcal{A}_{i,j}(f)$ is defined as the affecting field of $f$, i.e., the voxels at level $j$ that will be affected by pruning $f$ at level $i$. Without loss of generality, here we choose only one object at each level for simplicity of presentation.", + "bbox": [ + 212, + 145, + 782, + 251 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Overview of problem solving: We solve (1) by mathematical induction. Specifically, for the pruning mask $M_{i}$ at level $i$, we first consider how to generate the per-object pruning mask $\mathcal{K}_i(c_{i-1})$ to ensure the predictions of $\{O_{i-1}\}$ are unaffected.
Then we show that, by following our pruning strategy $\mathcal{K}_i$, 'the predictions of $\{O_j\}$ are unaffected' can be derived from 'the predictions of $\{O_{j+1}\}$ are unaffected'.2", + "bbox": [ + 212, + 252, + 782, + 327 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Solving $\mathcal{K}_i(\pmb{c}_{i-1})$: To ensure $\mathcal{C}_{i-1}(\pmb{c}_{i-1}, P) \cap \mathcal{A}_{i,i-1}(\cdot) = \emptyset$, we need to compute the affecting field of each voxel $v_i$ at level $i$. Obviously, the upper bound of the affecting field of $v_i$ expands as a cube under sparse convolution. Assume there are $m$ sparse convolutions with stride 1 and kernel size $x_k$ ($1 \leq k \leq m$) between pruning and generative upsampling at level $i$, one generative transposed convolution with stride 2 and kernel size $y$, and $n$ sparse convolutions with stride 1 and kernel size $z_k$ ($1 \leq k \leq n$) before objects are detected at level $i-1$. Then the affecting field from pruning (level $i$) to detecting (level $i-1$) can be written as:", + "bbox": [ + 212, + 327, + 784, + 446 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\mathcal{A}_{i, i-1}(v_i) = \mathcal{C}_{i-1}\left(v_i, \mathrm{aff}(\{x_k\}, y, \{z_k\})\right) \tag{2}\n$$\n", + "text_format": "latex", + "bbox": [ + 354, + 455, + 785, + 472 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\mathrm{aff}(\{x_k\}, y, \{z_k\})$ is the range of the affecting field as determined by the kernel sizes, which we detail in the supplementary material. Since the shape of the expected unaffected voxel features is a $P \times P \times P$ cube, $\mathcal{K}_i(\pmb{c}_{i-1})$ can be formulated as:", + "bbox": [ + 214, + 479, + 782, + 525 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\begin{array}{l} \mathcal{K}_i(\boldsymbol{c}_{i-1})[x][y][z] = \mathbb{I}\left(2 \cdot |\boldsymbol{x} - \boldsymbol{c}_{i-1}|_{\infty} \leq r S_i\right) \\ r = \left\lceil \frac{P + \mathrm{aff}(\{x_k\}, y, \{z_k\}) - 2}{2} \right\rceil \tag{3} \\ \end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 532, + 785, + 580 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $S_{i}$ is the voxel size at level $i$, $\mathbb{I}(\cdot)$ is the indicator function, and $\boldsymbol{x} = (x,y,z)$ denotes the voxel coordinates of $f_{i}$.", + "bbox": [ + 212, + 587, + 782, + 617 + ], + "page_idx": 6 + },
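Concretely, the per-object mask in Eq. (3) is nothing more than a Chebyshev-distance test on voxel coordinates. The following is a minimal NumPy sketch of that test, combined across object centers as in Eq. (1); `voxel_xyz`, `centers` and the default values are illustrative assumptions, not the authors' released code:

```python
# Minimal sketch of the keep mask from Eqs. (1) and (3).
import numpy as np

def keep_mask(voxel_xyz, centers, P=7, aff=9, S_i=0.04):
    """voxel_xyz: (N, 3) voxel coordinates of f_i (world units).
    centers: (K, 3) centers of objects assigned to finer levels.
    Returns a boolean (N,) mask: True = keep, False = prune."""
    r = int(np.ceil((P + aff - 2) / 2))  # Eq. (3); r = 7 for P = 7, aff = 9
    if len(centers) == 0:
        return np.zeros(len(voxel_xyz), dtype=bool)
    # Chebyshev (L_inf) distance from every voxel to every object center.
    d = np.abs(voxel_xyz[:, None, :] - centers[None, :, :]).max(-1)
    # K_i(c)[x] = 1 iff 2 * |x - c|_inf <= r * S_i; a voxel is kept if any
    # remaining (smaller) object still needs it.
    return (2.0 * d <= r * S_i).any(axis=1)
```

With the paper's setting ($P = 7$, hence $r = 7$), a voxel therefore survives only if it lies within $3.5$ voxel sizes (in Chebyshev distance) of some remaining object center.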
+ { + "type": "text", + "text": "Recursion of $\mathcal{K}_{i}$: We now derive when the pruning strategy $\mathcal{K}_{i}$ in (3) also works for $c_{j}$ ($j < i - 1$). We can regard $c_{j}$ as the center of an object at level $i - 1$ and use (3) to generate the pruning mask. In this way, $\mathcal{C}_{i - 1}(c_j,P)$ is unaffected. Since $\mathcal{C}_j(c_j,P)$ is covered by $\mathcal{C}_{i - 1}(c_j,P)$, $\mathcal{C}_j(c_j,P)$ is unaffected as well. We should also ensure that pruning at level $i$ has no cumulative impact on pruning at level $i - 1$:", + "bbox": [ + 212, + 618, + 782, + 694 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\left(\mathcal{K}_{i-1}(\boldsymbol{c}_{j}) \star f_{i-1}\right) \subseteq \mathcal{C}_{i-1}(\boldsymbol{c}_{j}, P) \tag{4}\n$$\n", + "text_format": "latex", + "bbox": [ + 385, + 700, + 785, + 718 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "This means that when generating the pruning mask of $c_{j}$ at level $i - 1$ using $\mathcal{K}_{i - 1}$, the kept voxels should be covered by the voxels left unaffected by the pruning at level $i$. So we have:", + "bbox": [ + 214, + 724, + 782, + 755 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nr \cdot S_{i-1} \leq P \cdot S_{i-1} \tag{5}\n$$\n", + "text_format": "latex", + "bbox": [ + 431, + 762, + 785, + 779 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The minimum $P$ can be obtained by solving (5). In this case, the strategy $\mathcal{K}_i$ in (3) works for all $c_j$ ($j < i$).", + "bbox": [ + 212, + 786, + 782, + 818 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning", + "bbox": [ + 313, + 114, + 732, + 130 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 114, + 784, + 126 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "2 We provide illustrated examples in the supplementary material for better understanding.", + "bbox": [ + 220, + 823, + 728, + 840 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6a86af61c73089b12e032991a6e4a5f28a947418d85bc61a7a55621f1a3a99ce.jpg", + "image_caption": [ + "Fig. 5: Illustration of DSPDet3D. The voxelized point clouds are fed into a high-resolution sparse convolutional backbone, which outputs four levels of scene representations. Four dynamic spatial pruning (DSP) modules are stacked to construct a multi-level decoder and detect objects from coarse to fine. The DSP module utilizes a light-weight learnable module to predict the pruning mask. During inference, we discretize the pruning mask and use it to guide pruning before generative upsampling. During training, we instead interpolate the pruning mask to the next level and prune the voxel features after generative upsampling." ], + "image_footnote": [], + "bbox": [ + 225, + 146, + 772, + 321 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.3 DSPDet3D", + "text_level": 1, + "bbox": [ + 215, + 460, + 331, + 474 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Based on the theoretical analysis, we devise a dynamic spatial pruning (DSP) module to approximate the ideal pruning strategy. We further construct a 3D small object detector named DSPDet3D with the proposed DSP module.", + "bbox": [ + 212, + 486, + 785, + 532 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "DSP module: As shown in Figure 3, we modify the layers of a typical multi-level decoder into DSP modules, which prune redundant voxel features after objects are detected at each level for efficient feature upsampling. Formally, given the upsampled voxel feature $f_{i}^{U}$ and the backbone feature $f_{i}^{B}$ at level $i$, the DSP module first adds them for detection. However, since $f_{i}^{U}$ may be much sparser than $f_{i}^{B}$ due to pruning, directly adding them by taking their union is inefficient.
Therefore, we propose a new operator called partial addition to fit our pruning strategy:", + "bbox": [ + 212, + 532, + 787, + 638 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nf_{i} = f_{i}^{B} \ \overrightarrow{+} \ f_{i}^{U} \tag{6}\n$$\n", + "text_format": "latex", + "bbox": [ + 450, + 647, + 785, + 667 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where the addition is constrained to operate only on the voxels of $f_{i}^{U}$. Then objects are detected using a shared detection head across all levels: $\{O_i\} = \text{Detect}(f_i)$. Once objects at level $i$ are detected, we prune the voxel features according to the derived strategy described in Section 3.2. Here we devise a light-weight MLP-based learnable pruning module to decide where smaller objects (i.e., objects at level $j$, $j < i$) may appear, and then prune the other locations:", + "bbox": [ + 212, + 678, + 787, + 770 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\bar{f}_{i} = t(\hat{M}_{i}) \star f_{i}, \quad \hat{M}_{i} = \mathrm{MLP}_{i}(f_{i}) \tag{7}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 779, + 785, + 797 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\hat{M}_i$ is the pruning mask predicted from $f_i$, which represents the probability of retention for each voxel. We utilize FocalLoss [23] to supervise $\hat{M}_i$ with the generated", + "bbox": [ + 212, + 808, + 787, + 840 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 227, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Authors Suppressed Due to Excessive Length", + "bbox": [ + 271, + 114, + 542, + 128 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "$M_{i}$ in (1). During inference, a threshold function $t(\cdot)$ sets probabilities lower than $\tau$ to 0 and the rest to 1 to guide pruning. After pruning, generative upsampling is applied to acquire features for the next level: $f_{i - 1}^{U} = \text{GeV}(\bar{f}_{i})$.", + "bbox": [ + 212, + 146, + 782, + 191 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "During training, as $\hat{M}_i$ may not be accurate (especially at the beginning), we find that applying the above learnable pruning module makes training difficult to converge. Instead, we switch the pruning to a weak mode for context preservation. As shown in Figure 5, the weak pruning is applied after generative upsampling. For level $i$, we upsample the pruning mask $\hat{M}_{i+1}$ to level $i$ with nearest neighbor interpolation. Then we sort the interpolated scores and keep only the $N_{max}$ voxels with the highest scores. This weak pruning mechanism aims to stabilize training, and it only takes effect when the number of voxels is too large for the following operations.", + "bbox": [ + 212, + 194, + 785, + 316 + ], + "page_idx": 8 + },
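Putting Eqs. (6) and (7) together, the inference-time behavior of one DSP module can be sketched as follows. This is a schematic rendition under the same caveats as the earlier snippets: the real module operates on sparse voxel tensors (MinkowskiEngine), and `mlp_i`, `head` and the (coords, feats) layout are illustrative placeholders, not the authors' API:

```python
# Schematic inference step of one DSP module (Eqs. (6)-(7)).
import torch

def dsp_module_inference(coords, feats_up, feats_bb, mlp_i, head, tau=0.3):
    """coords: (N, 3) voxel coordinates shared by this level.
    feats_up: (N, C) upsampled features f_i^U, zero rows where pruned.
    feats_bb: (N, C) backbone features f_i^B."""
    # Eq. (6): partial addition - the sum is taken only on voxels where the
    # (already pruned) upsampled feature survives, so the level stays sparse.
    alive = feats_up.abs().sum(-1) > 0
    feats, coords = feats_bb[alive] + feats_up[alive], coords[alive]

    detections = head(feats)  # {O_i} = Detect(f_i), head shared across levels

    # Eq. (7): a light-weight MLP predicts the per-voxel retention
    # probability, and the threshold t(.) binarizes it with tau.
    keep = torch.sigmoid(mlp_i(feats)).squeeze(-1) > tau
    return detections, coords[keep], feats[keep]  # then generative upsampling
```

Because $\tau$ enters only at this thresholding step, the accuracy-speed tradeoff reported later (the $\tau = 0$ versus $\tau = 0.3/0.5$ rows in Tables 1 and 2) is obtained at inference time without any retraining.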
+ { + "type": "text", + "text": "Since our theoretical analysis sets the expected unaffected neighborhood to be a $P \times P \times P$ cube, we also modify the assigning strategy of positive object proposals accordingly for robust training. Specifically, for a ground-truth bounding box of $o_i$ assigned to level $i$, we sample the nearest $N_{pos}$ voxels to $c_i$ inside the cube centered at $c_i$ with side length $P \cdot S_i$. If there are fewer than $N_{pos}$ voxels in the cube, we simply sample all voxels inside it. Our assigning method is independent of the bounding box size, which ensures there are enough positive proposals even for small objects.", + "bbox": [ + 212, + 316, + 785, + 421 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "DSPDet3D: Based on the top-performing multi-level detector TR3D [39], we remove the max pooling layer to increase the spatial resolution of the backbone features. Then we replace the decoder in TR3D with four stacked DSP modules to remove redundant voxel features level by level, which achieves efficient upsampling without affecting the detection performance. To train DSPDet3D, we keep the same loss for classification and box regression as in TR3D and add an additional FocalLoss to supervise $\hat{M}_i$ with $M_i$.", + "bbox": [ + 212, + 422, + 785, + 513 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Comparison with FCAF3D: Similar to our training-time weak pruning, FCAF3D [38] also adopts a pruning strategy in the decoder to prevent the number of voxels from getting too large, but it is unable to remove redundant features in early decoder layers during inference. Moreover, it directly utilizes the classification scores of the bounding boxes to sort and prune the voxel features, which cannot accurately preserve geometric information for small objects.", + "bbox": [ + 212, + 513, + 785, + 604 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4 Experiment", + "text_level": 1, + "bbox": [ + 214, + 630, + 349, + 647 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this section, we conduct experiments to investigate the performance of our approach on 3D small object detection. We first describe the datasets and experimental settings. Then we compare DSPDet3D with state-of-the-art 3D object detection methods. We also design ablation experiments to study the effectiveness of the proposed methods. Finally, we transfer DSPDet3D to extremely large scenes to show its efficiency and generalization ability.", + "bbox": [ + 212, + 665, + 785, + 756 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.1 Experimental Settings", + "text_level": 1, + "bbox": [ + 214, + 781, + 413, + 797 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Datasets and metrics: We conduct experiments on two indoor datasets, ScanNet [9] and TO-SCENE [50]. ScanNet is a richly annotated dataset of indoor scenes", + "bbox": [ + 212, + 809, + 785, + 839 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning", + "bbox": [ + 313, + 114, + 730, + 130 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 785, + 126 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/04c033936fe80a4760efe0afab76a42aad56b57d84df9a93d815337dc612c352.jpg", + "table_caption": [ + "Table 1: 3D object detection results and computational costs of different methods on ScanNet-md40. DSPDet3D with the best pruning threshold is highlighted in gray. Best scores are in bold, runner-ups underlined." ], + "table_footnote": [], + "table_body": "
Method | Decoder | mAP@0.25 | mAP@0.5 | \( mAP_S \)@0.25 | \( mAP_S \)@0.5 | Speed (FPS) | Memory (MB)
VoteNet | Voting | 51.02 | 33.69 | 0.30 | 0 | 13.4 | 1150
\( VoteNet_S \) | Voting | 48.62 | 31.55 | 1.04 | 0 | 8.5 | 1500
H3DNet | Hybrid | 53.51 | 39.23 | 3.08 | 0.90 | 7.2 | 1550
GroupFree3D | Transformer | 56.77 | 41.39 | 11.7 | 0.81 | 7.8 | 1450
\( GroupFree3D_S \) | Transformer | 29.44 | 11.94 | 0.20 | 0 | 3.2 | 2000
RBGNet | Voting | 55.23 | 32.64 | 5.81 | 0 | 6.6 | 1700
FCAF3D | Multi-level | 59.49 | 48.75 | 18.38 | 8.21 | 12.3 | 850
CAGroup3D | Voting | 60.29 | 49.90 | 16.62 | 8.63 | 3.1 | 3250
TR3D | Multi-level | 61.59 | 49.98 | 27.53 | 12.91 | 10.8 | 1250
FCAF3D-higher | Multi-level | 62.65 | 51.01 | 27.68 | 16.23 | 7.1 | 4000
TR3D-higher | Multi-level | 65.18 | 54.03 | 41.70 | 29.56 | 5.2 | 4450
Ours(τ=0) | Multi-level | 65.39 | 54.59 | 44.79 | 31.55 | 4.4 | 4200
Ours(τ=0.3) | Multi-level | 65.04 | 54.35 | 43.77 | 30.38 | 12.5 | 700
", + "bbox": [ + 245, + 198, + 754, + 429 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "with 1201 training scenes and 312 validation scenes. Each object in the scenes are annotated with texts and then mapped to category IDs. We follow the ScanNet-md40 benchmark proposed by [51], which contains objects in 22 categories with large size variance. TO-SCENE is a mixed reality dataset which provides three variants called TO_Vanilla, TO_Crowd and TO_ScanNet with different numbers of tabletop objects and scene scales. We choose the room-scale TO_ScanNet benchmark, which contains 3600 training scenes and 800 validation scenes with 70 categories. However, TO_ScanNet adopts non-uniform sampling to acquire about 2000 points per tabletop object, which is infeasible in practical settings. To this end, we downsample the small objects and control the density of them to be similar with other objects and backgrounds. We name this modified version as TO-SCENE-down benchmark. We take the point clouds without color as inputs for all methods. More details about ScanNet-md40 and TO-SCENE-down benchmarks can be found in supplementary material.", + "bbox": [ + 212, + 467, + 787, + 664 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We report the mean average precision (mAP) with threshold 0.25 and 0.5. To measure the performance on different categories, we use two kinds of metrics: mAP and $\\mathrm{mAP}_S$ , which refer to the mean AP of all objects and of small objects respectively. Here we define categories of small object as ones with average volume smaller than $0.05m^3$ for both benchmarks.", + "bbox": [ + 212, + 667, + 787, + 743 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Implementation details: We implement our approach with PyTorch [32], MinkowskiEngine [7] and MMDetection3D [8]. We follow the same training strategy / hyperparameters as TR3D [39] for fair comparison. Training converges within 4 hours on a 4 GPU machine. The stride of the sparse convolution in the preencoder of DSPDet3D is set to 2, thus the voxel size of $f_1^B$ is 4cm and $S_i$ equals to $2^i \\cdot 2cm$ . We set $N_{pos} = 6$ and $N_{max} = 100000$ during training. The weight of the FocalLoss between $M_i$ and $\\hat{M}_i$ is", + "bbox": [ + 212, + 747, + 787, + 842 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Authors Suppressed Due to Excessive Length", + "bbox": [ + 271, + 114, + 542, + 128 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/1fe0990212573a14ec09b8b4fa4e0b4250c001076eb3725e19a7874aebbb5664.jpg", + "table_caption": [ + "Table 2: 3D objects detection results and computational costs of different methods on TO-SCENE-down benchmark. DSPDet3D with the best pruning threshold is highlighted in gray. We set best scores in bold, runner-ups underlined." + ], + "table_footnote": [], + "table_body": "
Method | Decoder | mAP@0.25 | mAP@0.5 | \( mAP_S \)@0.25 | \( mAP_S \)@0.5 | Speed (FPS) | Memory (MB)
VoteNet | Voting | 26.72 | 14.01 | 14.51 | 4.78 | 12.8 | 1300
\( VoteNet_S \) | Voting | 31.87 | 14.89 | 21.75 | 7.40 | 7.6 | 1650
H3DNet | Hybrid | 27.69 | 17.38 | 14.83 | 7.39 | 5.1 | 1650
GroupFree3D | Transformer | 32.41 | 20.43 | 20.17 | 10.13 | 7.7 | 1700
\( GroupFree3D_S \) | Transformer | 40.14 | 23.55 | 33.33 | 16.15 | 2.4 | 2200
RBGNet | Voting | 40.42 | 30.27 | 29.69 | 21.61 | 5.0 | 1850
FCAF3D | Multi-level | 45.13 | 37.21 | 37.18 | 31.65 | 11.9 | 1000
CAGroup3D | Voting | 54.28 | 47.58 | 48.49 | 43.85 | 2.2 | 3500
TR3D | Multi-level | 55.58 | 45.95 | 52.72 | 44.01 | 9.9 | 1400
FCAF3D-higher | Multi-level | 57.23 | 50.39 | 53.07 | 48.76 | 6.3 | 4250
TR3D-higher | Multi-level | 63.96 | 56.06 | 62.84 | 57.14 | 4.1 | 4600
Ours(τ=0) | Multi-level | 66.81 | 59.41 | 66.53 | 61.57 | 4.1 | 5300
Ours(τ=0.5) | Multi-level | 66.12 | 58.55 | 65.82 | 60.73 | 13.9 | 800
", + "bbox": [ + 245, + 198, + 754, + 429 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "0.01. In terms of block structure, we have $\\{x_k\\} = \\emptyset$ , $y = 3$ and $\\{z_k\\} = \\{3,3\\}$ . So we set $r = 7$ and $P = 7$ according to (3).", + "bbox": [ + 212, + 458, + 787, + 491 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.2 Comparison with State-of-the-art", + "text_level": 1, + "bbox": [ + 214, + 511, + 491, + 527 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We compare our method with popular and state-of-the-art 3D object detection methods, including VoteNet [33], H3DNet [54], GroupFree3D [26], RBGNet [47], CA-Group3D [46], FCAF3D [38] and TR3D [39]. We also follow [50] to reduce the radius of ball query in the PointNet++ backbone for VoteNet and GroupFree3D. The modified models is distinguished by subscript $S$ . Note that the original TR3D only uses two detection heads at level 2/3 and removes the last generative upsampling. However, detecting small objects heavily relies on high-resolution feature map, so we add the upsampling back. This will make it slightly slower but much more accurate on the 3D small object detection benchmarks.", + "bbox": [ + 212, + 537, + 787, + 672 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "For all methods, we use their official code and the same training strategy / hyperparameters to train them on ScanNet-md40 and TO-SCENE-down.", + "bbox": [ + 212, + 672, + 787, + 702 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Table 1 and 2 shows the experimental results on ScanNet-md40 and TO-SCENEDown respectively. Consistent with the observation of [51], we find point-based (VoteNet, H3DNet, RBGNet) and transformer-based (GroupFree3D) methods almost fail to detect small objects on ScanNet-md40. This is because the PointNet++ backbone used by these methods adopts set abstraction (SA) operation to aggressively downsample the point clouds and extract scene representation. Since the number of small objects in ScanNet is limited, furthest point sampling has a low probability to sample points on small objects, which leads to inaccurate representation of small objects. For methods (CAGroup3D, FCAF3D, TR3D) with sparse convolutional backbone, they achieve", + "bbox": [ + 212, + 704, + 789, + 839 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning", + "bbox": [ + 313, + 114, + 732, + 130 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 114, + 784, + 126 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/1e8d60e37ce8795b764b8e60202220319de5e7c964721a51aaf285c87cc6854d.jpg", + "image_caption": [ + "Fig. 6: Visualization of pruning process on ScanNet. We show the kept voxels in each level under different thresholds. The memory footprint of each level is also listed at bottom." + ], + "image_footnote": [], + "bbox": [ + 217, + 143, + 782, + 474 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "relatively much higher $\\mathrm{mAP}_S$ due to sparse convolution [7, 13] can extract fine-grained scene representation with high efficiency. However, two-stage method like CAGroup3D is both slow and memory-consuming. Multi-level methods like FCAF3D and TR3D are efficient and get good performance on small object detection due to the FPN-like architecture, but they are still limited by resolution. 
On the contrary, our DSPDet3D with a proper threshold takes advantage of the high-resolution scene representation to achieve much higher performance. Furthermore, DSPDet3D is the most memory-efficient model among all mainstream methods.", + "bbox": [ + 212, + 541, + 787, + 662 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.3 Ablation Study", + "text_level": 1, + "bbox": [ + 214, + 681, + 362, + 698 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We conduct ablation studies on ScanNet-md40 to study the effects of hyperparameters and different design choices.", + "bbox": [ + 212, + 704, + 785, + 733 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Pruning process: We visualize the pruning process under different thresholds in Figure 6, where the voxels in each level after pruning are shown. We also list the memory footprint of each level. It can be seen that our method significantly reduces the memory footprint by pruning most of the uninformative voxels. Our pruning module only keeps regions where there are smaller objects than current level.", + "bbox": [ + 212, + 734, + 785, + 809 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Hyperparameters: We study two hyperparameters: $r$ and $N_{pos}$ , which is highly relevant to 3D small object detection. Note that $r = \\left\\lceil \\frac{P + 9 - 2}{2} \\right\\rceil$ , thus $r$ and $P$ should be", + "bbox": [ + 212, + 809, + 785, + 842 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Authors Suppressed Due to Excessive Length", + "bbox": [ + 271, + 114, + 542, + 128 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/12403ecf7a487963078d546f95ae40636a84d7a662fec91d85d6bb43e46c5535.jpg", + "table_caption": [ + "Table 3: Ablation studies on several design choices. We control the speed of each method to 10 FPS and report the accuracy in $\\mathrm{mAP}@\\mathbf{0.25}$ and $\\mathrm{mAP}_S@\\mathbf{0.25}$ ." + ], + "table_footnote": [], + "table_body": "
Method | mAP | \( \mathrm{mAP}_S \)
Remove partial addition | 55.3 | 35.5
Addition by taking union | 57.9 | 36.4
Addition by interpolation | 62.1 | 40.9
Spherical keeping mask | 63.0 | 41.1
Remove training-time pruning | - | -
Positive proposal inside bounding box | 62.4 | 40.7
The full design of DSP module | 65.1 | 44.1
", + "bbox": [ + 320, + 184, + 683, + 313 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/08b388177be4ca13c44331bf65631fb18c481b2e2fa88cf89f6f82706787ce2e.jpg", + "image_caption": [ + "Fig. 7: Ablation studies on the value of $r$ and $N_{pos}$ . For each value we report performance under different pruning threshold $\\tau$ ." + ], + "image_footnote": [], + "bbox": [ + 277, + 328, + 501, + 441 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/44173595e24b288de5d95e4fd0d03cff3cb92786fdc641582234e8efec7b27a6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 514, + 328, + 725, + 441 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "changed simultaneously. As shown in Figure 7 (left), setting $r = 7$ achieves the best performance. If $r$ is smaller than 7 then $r > P$ , which conflicts with Equation (5) and the features will be affected by pruning. While a larger $r$ will make the pruning less aggressive, resulting in a large number of redundant voxel features. Figure 7 (right) shows that the number of positive object proposals should be set properly, which is important to balance the ratio between positive and negative samples during classification.", + "bbox": [ + 212, + 515, + 787, + 606 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Design choices: We also study the design choices of DSPDet3D in Table 3. Observing the second, third and fourth rows, we conclude that the partial addition is important for efficient feature fusion. Although taking union can preserve more information, this operation will reduce the sparsity of voxels and thus make our pruning less efficient. The fifth row shows that generate the keeping mask according to the shape of affecting field is better than using a spherical shape. According to the sixth row, removing training-time pruning will significantly increase the memory footprint during training, which makes the network unable to train. The seventh row validates the effectiveness of our assigning method for positive object proposals.", + "bbox": [ + 212, + 607, + 787, + 743 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.4 Transferring to Larger Scenes", + "text_level": 1, + "bbox": [ + 214, + 767, + 468, + 782 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We further validate the efficiency and generalization ability of different 3D detectors by transferring them to scenes of much larger scale. We first train 3D detectors on rooms from ScanNet training set in a category-agnostic manner, which is done by regarding", + "bbox": [ + 212, + 794, + 785, + 840 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning", + "bbox": [ + 313, + 114, + 732, + 130 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 769, + 114, + 784, + 126 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/8efed2cab5f01737a94450892b72b65dd106d2033bac84a775217e808d97b746.jpg", + "image_caption": [ + "Fig. 8: Visualization of the transferring results of different 3D object detectors. The 3D detector is trained on rooms from ScanNet and directly adopted to process a whole building-level 3D scene from Matterport3D." + ], + "image_footnote": [], + "bbox": [ + 223, + 146, + 777, + 349 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "every labeled object as the same category. 
Then we directly adopt them to process the building-level scenes in Matterport3D [4]. We find that almost all previous methods fail to process such extremely large scenes due to unaffordable memory footprints, so we only compare DSPDet3D with FCAF3D, as shown in Figure 8. It is shown that FCAF3D cannot detect any small objects and even struggles on relatively large objects like chairs when the scene is too large. In contrast, DSPDet3D is able to accurately detect small objects like cups and thin pictures.", + "bbox": [ + 212, + 436, + 787, + 542 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 215, + 563, + 344, + 579 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this paper, we have presented an efficient feature pruning strategy for 3D small object detection. Inspired by the fact that small objects only occupy a small proportion of space, we adopt a multi-level detection framework to detect objects of different sizes at different levels. Then we present a dynamic spatial pruning strategy to prune the voxel features after objects are detected at each level. Specifically, we first design the dynamic spatial pruning strategy through a theoretical analysis of how to prune voxels without affecting the features of object proposals. Then we propose the dynamic spatial pruning (DSP) module according to this strategy and use it to construct DSPDet3D. Extensive experiments on the ScanNet and TO-SCENE datasets show that our DSPDet3D achieves leading detection accuracy and speed. We also conduct a transfer experiment on Matterport3D to show that DSPDet3D generalizes well to extremely large scenes.", + "bbox": [ + 212, + 592, + 787, + 758 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 215, + 780, + 382, + 797 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This work was supported in part by the National Natural Science Foundation of China under Grant 62125603, Grant 62321005, and Grant 62336004.", + "bbox": [ + 212, + 809, + 785, + 839 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Authors Suppressed Due to Excessive Length", + "bbox": [ + 271, + 114, + 542, + 128 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 217, + 143, + 310, + 160 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Armeni, I., Sener, O., Zamir, A.R., Jiang, H., Brilakis, I., Fischer, M., Savarese, S.: 3d semantic parsing of large-scale indoor spaces. In: CVPR. pp. 1534-1543 (2016) 2", + "2. Bansal, M., Krizhevsky, A., Ogale, A.: Chauffeurnet: Learning to drive by imitating the best and synthesizing the worst. arXiv preprint arXiv:1812.03079 (2018) 1", + "3. Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: ECCV. pp. 213-229. Springer (2020) 3", + "4. Chang, A., Dai, A., Funkhouser, T., Halber, M., Niessner, M., Savva, M., Song, S., Zeng, A., Zhang, Y.: Matterport3d: Learning from rgb-d data in indoor environments. 3DV (2017) 2, 14", + "5. Chen, C., Liu, M.Y., Tuzel, O., Xiao, J.: R-cnn for small object detection. In: ACCV. pp. 214-230. Springer (2017) 4", +
"6. Cheng, B., Sheng, L., Shi, S., Yang, M., Xu, D.: Back-tracing representative points for voting-based 3d object detection in point clouds. In: CVPR. pp. 8963-8972 (2021) 3", + "7. Choy, C., Gwak, J., Savarese, S.: 4d spatio-temporal convnets: Minkowski convolutional neural networks. In: CVPR. pp. 3075-3084 (2019) 1, 3, 4, 10, 12", + "8. Contributors, M.: Mmdetection3d: Openmmlab next-generation platform for general 3d object detection (2020) 10", + "9. Dai, A., Chang, A.X., Savva, M., Halber, M., Funkhouser, T., Nießner, M.: Scannet: Richly-annotated 3d reconstructions of indoor scenes. In: CVPR. pp. 5828-5839 (2017) 2, 3, 9", + "10. Deng, C., Wang, M., Liu, L., Liu, Y., Jiang, Y.: Extended feature pyramid network for small object detection. TMM 24, 1968-1979 (2021) 4", + "11. Gao, M., Yu, R., Li, A., Morariu, V.I., Davis, L.S.: Dynamic zoom-in network for fast object detection in large images. In: CVPR. pp. 6926-6935 (2018) 4", + "12. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? the kitti vision benchmark suite. In: CVPR. pp. 3354-3361 (2012) 2", + "13. Graham, B., Engelcke, M., Van Der Maaten, L.: 3d semantic segmentation with submanifold sparse convolutional networks. In: CVPR. pp. 9224-9232 (2018) 1, 3, 4, 12", + "14. Gwak, J., Choy, C., Savarese, S.: Generative sparse detection networks for 3d single-shot object detection. In: ECCV. pp. 297-313. Springer (2020) 3, 4, 6", + "15. Han, S., Pool, J., Tran, J., Dally, W.: Learning both weights and connections for efficient neural network. NeurIPS 28 (2015) 4", + "16. Huang, Z., Wang, N.: Data-driven sparse structure selection for deep neural networks. In: ECCV. pp. 304-320 (2018) 4", + "17. Kisantal, M., Wojna, Z., Murawski, J., Naruniec, J., Cho, K.: Augmentation for small object detection. arXiv preprint arXiv:1902.07296 (2019) 3", + "18. LeCun, Y., Denker, J., Solla, S.: Optimal brain damage. NeurIPS 2 (1989) 4", + "19. Lee, J., Choy, C., Park, J.: Putting 3d spatially sparse networks on a diet. arXiv preprint arXiv:2112.01316 (2021) 3", + "20. Li, H., Kadav, A., Durdanovic, I., Samet, H., Graf, H.P.: Pruning filters for efficient convnets. arXiv preprint arXiv:1608.08710 (2016) 4", + "21. Li, J., Liang, X., Wei, Y., Xu, T., Feng, J., Yan, S.: Perceptual generative adversarial networks for small object detection. In: CVPR. pp. 1222-1230 (2017) 4", + "22. Lin, T.Y., Dollár, P., Girshick, R., He, K., Hariharan, B., Belongie, S.: Feature pyramid networks for object detection. In: CVPR. pp. 2117-2125 (2017) 4", + "23. Lin, T.Y., Goyal, P., Girshick, R., He, K., Dollár, P.: Focal loss for dense object detection. In: ICCV. pp. 2980-2988 (2017) 8" ], + "bbox": [ + 220, + 178, + 784, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning", + "bbox": [ + 313, + 114, + 732, + 128 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 769, + 116, + 784, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "24. Liu, J., Chen, Y., Ye, X., Tian, Z., Tan, X., Qi, X.: Spatial pruned sparse convolution for efficient 3d object detection. In: NeurIPS (2022) 4", + "25. Liu, W., Anguelov, D., Erhan, D., Szegedy, C., Reed, S., Fu, C.Y., Berg, A.C.: Ssd: Single shot multibox detector. In: ECCV. pp. 21-37 (2016) 3", + "26. Liu, Z., Zhang, Z., Cao, Y., Hu, H., Tong, X.: Group-free 3d object detection via transformers. arXiv preprint arXiv:2104.00678 (2021) 3, 11", +
"27. Liu, Z., Li, J., Shen, Z., Huang, G., Yan, S., Zhang, C.: Learning efficient convolutional networks through network slimming. In: ICCV. pp. 2736-2744 (2017) 4", + "28. Misra, I., Girdhar, R., Joulin, A.: An end-to-end transformer model for 3d object detection. In: ICCV. pp. 2906-2917 (2021) 3", + "29. Molchanov, P., Tyree, S., Karras, T., Aila, T., Kautz, J.: Pruning convolutional neural networks for resource efficient inference. arXiv preprint arXiv:1611.06440 (2016) 4", + "30. Mousavian, A., Eppner, C., Fox, D.: 6-dof graspnet: Variational grasp generation for object manipulation. In: ICCV. pp. 2901-2910 (2019) 1", + "31. Najibi, M., Singh, B., Davis, L.S.: Autofocus: Efficient multi-scale inference. In: ICCV. pp. 9745-9755 (2019) 4", + "32. Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., et al.: Pytorch: An imperative style, high-performance deep learning library. NeurIPS 32 (2019) 10", + "33. Qi, C.R., Litany, O., He, K., Guibas, L.J.: Deep hough voting for 3d object detection in point clouds. In: ICCV. pp. 9277-9286 (2019) 3, 11", + "34. Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: Deep learning on point sets for 3d classification and segmentation. In: CVPR. pp. 652-660 (2017) 1, 3", + "35. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In: NeurIPS. pp. 5099-5108 (2017) 1, 3", + "36. Rao, Y., Zhao, W., Liu, B., Lu, J., Zhou, J., Hsieh, C.J.: Dynamicvit: Efficient vision transformers with dynamic token sparsification. NeurIPS 34, 13937-13949 (2021) 4", + "37. Rozenberszki, D., Litany, O., Dai, A.: Language-grounded indoor 3d semantic segmentation in the wild. In: ECCV. pp. 125-141. Springer (2022) 2", + "38. Rukhovich, D., Vorontsova, A., Konushin, A.: Fcaf3d: fully convolutional anchor-free 3d object detection. In: ECCV. pp. 477-493. Springer (2022) 3, 4, 9, 11", + "39. Rukhovich, D., Vorontsova, A., Konushin, A.: Tr3d: Towards real-time indoor 3d object detection. arXiv preprint arXiv:2302.02858 (2023) 1, 3, 4, 5, 9, 10, 11", + "40. Shi, S., Guo, C., Jiang, L., Wang, Z., Shi, J., Wang, X., Li, H.: Pv-rcnn: Point-voxel feature set abstraction for 3d object detection. In: CVPR. pp. 10529-10538 (2020) 1", + "41. Singh, B., Davis, L.S.: An analysis of scale invariance in object detection snip. In: CVPR. pp. 3578-3587 (2018) 4", + "42. Singh, B., Najibi, M., Davis, L.S.: Sniper: Efficient multi-scale training. NeurIPS 31 (2018) 4", + "43. Song, S., Lichtenberg, S.P., Xiao, J.: Sun rgb-d: A rgb-d scene understanding benchmark suite. In: CVPR. pp. 567-576 (2015) 2", + "44. Tian, Z., Shen, C., Chen, H., He, T.: Fcos: Fully convolutional one-stage object detection. In: ICCV. pp. 9627-9636 (2019) 4, 6", + "45. Tong, K., Wu, Y., Zhou, F.: Recent advances in small object detection based on deep learning: A review. IVC 97, 103910 (2020) 3", + "46. Wang, H., Ding, L., Dong, S., Shi, S., Li, A., Li, J., Li, Z., Wang, L.: Cagroup3d: Class-aware grouping for 3d object detection on point clouds. arXiv preprint arXiv:2210.04264 (2022) 1, 3, 11", + "47. Wang, H., Shi, S., Yang, Z., Fang, R., Qian, Q., Li, H., Schiele, B., Wang, L.: Rbgnet: Ray-based grouping for 3d object detection. In: CVPR. pp.
1110-1119 (2022) 3, 11" + ], + "bbox": [ + 215, + 146, + 785, + 839 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Authors Suppressed Due to Excessive Length", + "bbox": [ + 271, + 114, + 542, + 128 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "48. Wang, J., Sun, K., Cheng, T., Jiang, B., Deng, C., Zhao, Y., Liu, D., Mu, Y., Tan, M., Wang, X., et al.: Deep high-resolution representation learning for visual recognition. TPAMI 43(10), 3349-3364 (2020) 4", + "49. Xie, Q., Lai, Y.K., Wu, J., Wang, Z., Zhang, Y., Xu, K., Wang, J.: Mlcvnet: Multi-level context votenet for 3d object detection. In: CVPR. pp. 10447-10456 (2020) 3", + "50. Xu, M., Chen, P., Liu, H., Han, X.: To-scene: A large-scale dataset for understanding 3d tabletop scenes. In: ECCV. pp. 340-356. Springer (2022) 2, 3, 4, 9, 11", + "51. Xu, X., Wang, Y., Zheng, Y., Rao, Y., Zhou, J., Lu, J.: Back to reality: Weakly-supervised 3d object detection with shape-guided label enhancement. In: CVPR. pp. 8438-8447 (2022) 2, 4, 10, 11", + "52. Xu, X., Wang, Z., Zhou, J., Lu, J.: Binarizing sparse convolutional networks for efficient point cloud analysis. arXiv preprint arXiv:2303.15493 (2023) 3", + "53. Yang, C., Huang, Z., Wang, N.: Querydet: Cascaded sparse query for accelerating high-resolution small object detection. In: CVPR. pp. 13668-13677 (2022) 4", + "54. Zhang, Z., Sun, B., Yang, H., Huang, Q.: H3dnet: 3d object detection using hybrid geometric primitives. In: ECCV. pp. 311-329 (2020) 3, 11", + "55. Zhao, T., Ning, X., Hong, K., Qiu, Z., Lu, P., Zhao, Y., Zhang, L., Zhou, L., Dai, G., Yang, H., et al.: Ada3d: Exploiting the spatial redundancy with adaptive inference for efficient 3d object detection. arXiv preprint arXiv:2307.08209 (2023) 4", + "56. Zheng, W., Tang, W., Jiang, L., Fu, C.W.: Se-ssd: Self-ensembling single-stage object detector from point cloud. In: CVPR. pp. 14494–14503 (2021) 1", + "57. Zhu, Y., Mottaghi, R., Kolve, E., Lim, J.J., Gupta, A., Fei-Fei, L., Farhadi, A.: Target-driven visual navigation in indoor scenes using deep reinforcement learning. In: ICRA. pp. 3357-3364 (2017) 1", + "58. Zoph, B., Cubuk, E.D., Ghiasi, G., Lin, T.Y., Shlens, J., Le, Q.V.: Learning data augmentation strategies for object detection. In: ECCV. pp. 566-583. 
Springer (2020) 3" + ], + "bbox": [ + 212, + 146, + 787, + 508 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning", + "bbox": [ + 313, + 114, + 732, + 128 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 769, + 114, + 784, + 126 + ], + "page_idx": 16 + } +] \ No newline at end of file diff --git a/2024/3D Small Object Detection with Dynamic Spatial Pruning/2b6d04de-265e-4c48-b6b1-d03973f89d8a_model.json b/2024/3D Small Object Detection with Dynamic Spatial Pruning/2b6d04de-265e-4c48-b6b1-d03973f89d8a_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a97de0d009e86ae81335115281d911f6ef6d2e8c --- /dev/null +++ b/2024/3D Small Object Detection with Dynamic Spatial Pruning/2b6d04de-265e-4c48-b6b1-d03973f89d8a_model.json @@ -0,0 +1,2302 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.233, + 0.141, + 0.773, + 0.187 + ], + "angle": 0, + "content": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.212, + 0.782, + 0.228 + ], + "angle": 0, + "content": "Xiuwei \\(\\mathrm{Xu}^{1\\star}\\), Zhihao \\(\\mathrm{Sun}^{2*}\\), Ziwei Wang\\(^3\\), Hongmin Liu\\(^{2\\dagger}\\), Jie Zhou\\(^1\\), Jiwen Lu\\(^{1\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.432, + 0.228, + 0.574, + 0.242 + ], + "angle": 0, + "content": "\\(^{1}\\)Tsinghua University" + }, + { + "type": "text", + "bbox": [ + 0.348, + 0.243, + 0.66, + 0.259 + ], + "angle": 0, + "content": "\\(^{2}\\)University of Science and Technology Beijing" + }, + { + "type": "text", + "bbox": [ + 0.408, + 0.259, + 0.6, + 0.274 + ], + "angle": 0, + "content": "\\(^{3}\\)Carnegie Mellon University" + }, + { + "type": "text", + "bbox": [ + 0.261, + 0.276, + 0.74, + 0.289 + ], + "angle": 0, + "content": "xxw21@mails.tsinghua.edu.cn; d202210361@xs.ustb.edu.cn;" + }, + { + "type": "text", + "bbox": [ + 0.315, + 0.291, + 0.688, + 0.304 + ], + "angle": 0, + "content": "ziweiwa2@andrew.cmu.edu; hmliu@ustb.edu.cn;" + }, + { + "type": "text", + "bbox": [ + 0.362, + 0.306, + 0.643, + 0.319 + ], + "angle": 0, + "content": "{jzhou,lujiwen}@tsinghua.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.261, + 0.359, + 0.744, + 0.624 + ], + "angle": 0, + "content": "Abstract. In this paper, we propose an efficient feature pruning strategy for 3D small object detection. Conventional 3D object detection methods struggle on small objects due to the weak geometric information from a small number of points. Although increasing the spatial resolution of feature representations can improve the detection performance on small objects, the additional computational overhead is unaffordable. With in-depth study, we observe the growth of computation mainly comes from the upsampling operation in the decoder of 3D detector. Motivated by this, we present a multi-level 3D detector named DSPDet3D which benefits from high spatial resolution to achieve high accuracy on small object detection, while reducing redundant computation by only focusing on small object areas. Specifically, we theoretically derive a dynamic spatial pruning (DSP) strategy to prune the redundant spatial representation of 3D scene in a cascade manner according to the distribution of objects. Then we design DSP module following this strategy and construct DSPDet3D with this efficient module. On ScanNet and TO-SCENE dataset, our method achieves leading performance on small object detection. 
Moreover, DSPDet3D trained with only ScanNet rooms can generalize well to scenes of larger scale. It takes less than 2s to directly process a whole building consisting of more than \(4500\mathrm{k}\) points while detecting almost all objects, ranging from cups to beds, on a single RTX 3090 GPU. Code." }, { "type": "text", "bbox": [ 0.262, 0.642, 0.715, 0.656 ], "angle": 0, "content": "Keywords: 3D small object detection \(\cdot\) Spatial pruning \(\cdot\) Efficient inference" }, { "type": "title", "bbox": [ 0.216, 0.676, 0.357, 0.692 ], "angle": 0, "content": "1 Introduction" }, { "type": "text", "bbox": [ 0.214, 0.71, 0.788, 0.816 ], "angle": 0, "content": "3D object detection is a fundamental scene understanding problem, which aims to detect 3D bounding boxes and semantic labels from a point cloud of a 3D scene. With the recent advances of deep learning techniques on point cloud understanding [7, 13, 34, 35], 3D detection methods have shown remarkable progress [39, 40, 46, 56]. However, with 3D object detection being widely adopted in fields like robotics [30, 57] and autonomous driving [2], which require highly precise and fine-grained perception, small object detection becomes one of the most important yet unsolved problems. In autonomous driving" }, { "type": "page_footnote", "bbox": [ 0.22, 0.825, 0.503, 0.841 ], "angle": 0, "content": "* Equal contribution. † Corresponding author." } ], [ { "type": "page_number", "bbox": [ 0.218, 0.116, 0.23, 0.127 ], "angle": 0, "content": "2" }, { "type": "header", "bbox": [ 0.272, 0.115, 0.544, 0.129 ], "angle": 0, "content": "Authors Suppressed Due to Excessive Length" }, { "type": "image", "bbox": [ 0.218, 0.145, 0.784, 0.329 ], "angle": 0, "content": null }, { "type": "image_caption", "bbox": [ 0.214, 0.339, 0.788, 0.381 ], "angle": 0, "content": "Fig. 1: Trained with only rooms from ScanNet, our DSPDet3D generalizes well to process a whole house with dozens of rooms. It takes less than 2s to generate fine-grained detection results on a single RTX 3090 GPU." }, { "type": "text", "bbox": [ 0.214, 0.415, 0.785, 0.476 ], "angle": 0, "content": "scenarios [12], we observe a significant performance gap between cars and pedestrians. In indoor scenes [4, 9], where the size variance is much larger (e.g., a bed is 1000x larger than a cup), detecting small objects is even more difficult. We focus on the indoor 3D object detection task, where scenes are crowded with objects of multiple categories and sizes." }, { "type": "text", "bbox": [ 0.214, 0.477, 0.788, 0.643 ], "angle": 0, "content": "For indoor 3D object detection, although great improvements in both speed and accuracy have been achieved on previous benchmarks [1, 9, 43], existing methods are still far from general-purpose 3D object detection due to the limited range of object sizes they can handle. For instance, these methods focus on furniture-level objects such as beds and tables, while smaller ones like laptops, keyboards and bottles are ignored. With the arrival of 3D small object benchmarks [37, 50, 51], which contain objects with wider size variance (e.g., from tabletop objects like cups to large furniture like beds), it has been shown that previous 3D detectors achieve very low accuracy on small objects and some even fail to detect any small objects.
This is because extracting fine-grained representation for a large scene is too computationally expensive, so current methods aggressively downsamples the 3D features, which harms the representation of small objects." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.645, + 0.789, + 0.842 + ], + "angle": 0, + "content": "In this paper, we propose a dynamic spatial pruning approach for 3D small object detection. Although increasing the spatial resolution of the feature representations is a simple and effective way to boost the performance of 3D small object detection, the large computational overhead makes this plan infeasible for real application. With in-depth study, we observe the memory footprint mainly comes from the huge number of features generated by the upsampling operation in the decoder of 3D detector. Inspired by the fact that small objects only occupy a small proportion of space, we adopt a multi-level detection framework to detect different sizes of objects in different levels. As the multi-level detector has already detected out larger objects in lower resolution, there are many redundant features in the scene representations of higher resolution. To this end, we propose to dynamically prune the features after detecting out objects in each level, which skips the upsampling operation at regions where there is no smaller object. Specifically, we first theoretically derive a pruning mask generation strategy to" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.314, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "image", + "bbox": [ + 0.288, + 0.15, + 0.71, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.217, + 0.309, + 0.787, + 0.366 + ], + "angle": 0, + "content": "Fig. 2: Detection accuracy (mAP@0.25 of all categories) and speed (FPS) of mainstream 3D object detection methods on TO-SCENE dataset. Our DSPDet3D shows absolute advantage on 3D small object detection and provides flexible accuracy-speed tradeoff by simply adjusting the pruning threshold without retraining." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.394, + 0.787, + 0.515 + ], + "angle": 0, + "content": "supervise the pruning module, which prunes as much features as possible while not affecting the features of object proposals. Then we design a dynamic spatial pruning (DSP) module according to the theoretical analysis and use it to construct a 3D object detector named DSPDet3D. On the popular ScanNet [9] dataset, DSPDet3D improves the mAP of all categories by \\(3\\%\\) and mAP of small object by \\(14\\%\\) compared with current state-of-the-art. On TO-SCENE [50] dataset with more tabletop objects, we improve the mAP of all categories by \\(8\\%\\) while achieving leading inference speed among all mainstream indoor 3D object detection methods." 
+ }, + { + "type": "title", + "bbox": [ + 0.218, + 0.537, + 0.366, + 0.554 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.568, + 0.787, + 0.78 + ], + "angle": 0, + "content": "Indoor 3D object detection: Since PointNet and PointNet++ [34, 35], deep learning-based 3D detection methods for point clouds begin to emerge in recent years, which can be mainly divided into three categories: voting-based [6, 33, 47, 49, 54], transformer-based [26, 28] and voxel-based [14, 38, 39, 46] methods. Inspired by 2D hough voting, VoteNet [33] proposes the first voting-based 3D detector, which aggregates the point features on surfaces into object center by 3D voting and predicts bounding boxes from the voted centers. Drawing on the success of transformer-based detector [3] in 2D domain, GroupFree3D [26] and 3DETR [28] adopts transformer architecture to decode the object proposals into 3D boxes. As extracting point features require time-consuming sampling and aggregation operation, GSDN [14] proposes a fully convolutional detection network based on sparse convolution [7, 13, 19, 52], which achieves much faster speed. FCAF3D [38] and TR3D [39] further improves the performance of GSDN with a simple anchor-free architecture. Our method also adopts voxel-based architecture considering its efficiency and scalability." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.78, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Small object detection: Small object detection [45] is a challenging problem in 2D vision due to the low-resolution features. To tackle this, a series of methods have been proposed, which can be categorized into three types: (1) small object augmentation and oversampling methods [17, 25, 58]; (2) scale-aware training and inference" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.117, + 0.229, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.116, + 0.544, + 0.129 + ], + "angle": 0, + "content": "Authors Suppressed Due to Excessive Length" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.283 + ], + "angle": 0, + "content": "strategy [11, 31, 41, 42]; (3) increasing the resolution of features or generating high-resolution features [5, 10, 21, 22, 48, 53]. However, there are far less works about 3D small object detection due to the limit of data and network capability. BackToReality [51] proposes ScanNet-md40 benchmark which contains small objects and finds many current methods suffer a lot in small object detection. TO-SCENE [50] proposes a new dataset and learning strategy for understanding 3D tabletop scenes. However, it relies on densely sampled points from CAD models, which is infeasible in practical scenarios where the points from small objects are very sparse. In contrast, we aim to directly detect small objects from naturally sampled point clouds." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.283, + 0.788, + 0.481 + ], + "angle": 0, + "content": "Network pruning: Network pruning can be divided into two categories: architecture pruning [15, 16, 18, 20, 27, 29] and spatial pruning [24, 36, 55]. Architecture pruning aims to remove a portion of weights from a neural network to shrink the size of a network, which includes unstructured pruning [15, 18, 29] and structured pruning [16, 20, 27]. The former removes network weights without a predefined structure, while the latter removes whole channels or network layers. 
On the contrary, spatial pruning does not prune the parameters of a network, but spatially removing redundant computation on the feature maps. DynamicViT [36] prunes the tokens in vision transformer with an attention masking strategy. SPS-Conv [24] dynamically prunes the convolutional kernel to suppress the activation on background voxels in sparse convolution layer. Ada3D [55] proposes a pruning framework for 3D and BEV features. Our dynamic spatial pruning method also belongs to spatial pruning, which directly removes redundant voxel features level by level according to the distribution of objects." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.499, + 0.334, + 0.517 + ], + "angle": 0, + "content": "3 Approach" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.528, + 0.788, + 0.604 + ], + "angle": 0, + "content": "In this section, we describe our DSPDet3D for efficient 3D small object detection. We first revisit the multi-level 3D detector and analyze the computational cost distribution. Then we propose dynamic spatial pruning with theoretical analysis on how to prune features without affecting detection performance. Finally we design DSP module according to the theoretical analysis and use it to construct DSPDet3D." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.622, + 0.506, + 0.639 + ], + "angle": 0, + "content": "3.1 Analysis on Multi-level 3D Detector" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.644, + 0.788, + 0.689 + ], + "angle": 0, + "content": "Preliminaries: We choose multi-level FCOS-like [44] 3D detector [38, 39] with sparse convolution [7, 13] for small object detection due to its high performance on both accuracy and speed (more detail can be found in Table 1 and 2)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.69, + 0.788, + 0.841 + ], + "angle": 0, + "content": "As shown in Figure 3 (middle), after extracting backbone features, multi-level detector iteratively upsamples the voxel feature representations to different levels. In each level, all voxels are regarded as object proposals to predict bounding boxes and category scores. Generative upsampling is widely adopted in this kind of architectures [14,38,39] to expand the voxels from object surfaces to the whole 3D space, where object proposals located at object centers can produce accurate predictions. During training, ground-truth bounding boxes are assigned to different levels and each box assigns several nearby voxels as positive object proposals. Only box predictions from positive object proposals will be supervised. While at inference time all voxel features from the decoder are used to predict bounding boxes, which are then filtered by 3D NMS." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.314, + 0.115, + 0.733, + 0.13 + ], + "angle": 0, + "content": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.144, + 0.371, + 0.412 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.384, + 0.143, + 0.55, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.564, + 0.144, + 0.729, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.425, + 0.788, + 0.509 + ], + "angle": 0, + "content": "Fig.3: Comparison of the decoder in typical multi-level 3D object detector [39] and our DSPDet3D. 
Note that the sparsity of voxels in decoder is changed due to the generative upsampling operation. After detecting out objects in a level, DSPDet3D prunes redundant voxel features according to the distribution of objects before each upsampling operation. Red boxes indicate all pruned voxels and 'scissor' boxes indicate voxels pruned in the previous layer. \\(\\{O\\}\\) is the set of all objects and \\(\\{O_i\\}\\) is the set of objects assigned to level \\(i\\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.538, + 0.789, + 0.75 + ], + "angle": 0, + "content": "Increasing spatial resolution: Based on multi-level architecture, a simple way to boost the performance of small object detection is to increase the spatial resolution of feature maps, i.e., voxelizing the point clouds into smaller voxels to better preserve geometric information. Taking TR3D [39] for example, we double its spatial resolution and show the results in Figure 4. It can be seen that the performance on small object really benefits from larger resolution, but the computational overhead grows dramatically at the same time. As 3D object detection is usually adopted in tasks which requires real-time inference under limited resources, such as AR/VR and robotic navigation, directly increasing spatial resolution is infeasible. Notably, we find the computation growth is imbalanced: the decoder layers (including detection heads) account for the most memory footprint and have larger memory growth ratio than the backbone. This indicates the generative upsampling operation will significantly increase the number of voxels when the spatial resolution is high, which is the main challenge for scaling up the spatial resolution of multi-level detectors." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.77, + 0.436, + 0.787 + ], + "angle": 0, + "content": "3.2 Dynamic Spatial Pruning" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Since small objects only occupy a small proportion of space, we assume there is a large amount of redundant computation in decoder layers, especially when the resolution is high. For instance, if a bed is detected in Layer 4, the region near this bed may be less" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.544, + 0.129 + ], + "angle": 0, + "content": "Authors Suppressed Due to Excessive Length" + }, + { + "type": "image", + "bbox": [ + 0.279, + 0.148, + 0.726, + 0.293 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.303, + 0.789, + 0.388 + ], + "angle": 0, + "content": "Fig. 4: The memory footprint distribution of different multi-level detectors. Layer 4 to Layer 1 refer to decoder layers (including detection heads) from coarse to fine. If doubling the spatial resolution of TR3D, the performance on 3D small object detection improves from \\(52.7\\%\\) to \\(62.8\\%\\) while memory footprint increases dramatically. We find decoder layers accounts for most of the costs. DSPDet3D efficiently reduces redundant computation on these layers, achieving both fast speed and high accuracy." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.421, + 0.789, + 0.496 + ], + "angle": 0, + "content": "informative for detecting other objects in the follow decoder layers. 
If we can skip the upsampling operation at these regions, the voxels will be sparsified level by level, as shown in Figure 3 (right). In this way, small objects can be detected in Layer 1 from only a small number of voxels. Inspired by this, we propose to dynamically prune the voxel features according to the distribution of objects." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.498, + 0.789, + 0.573 + ], + "angle": 0, + "content": "However, pruning a voxel will not only reduce the number of object proposals in the following levels, but also change the following voxel features computed based on the pruned voxel. Therefore, in order to reduce the redundant computation of multi-level detector without degrading the detection performance, a carefully designed pruning strategy is required. We give theoretical derivation as below." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.575, + 0.79, + 0.743 + ], + "angle": 0, + "content": "Problem formulation: For each scene, we denote \\(\\{O\\}\\) as the set of all objects, \\(\\{O_i\\}\\) as the set of objects assigned to level \\(i^1\\) during training, \\(f_{i} \\in \\mathbb{R}^{N \\times (3 + C)}\\) as the voxel features of level \\(i\\). We aim to prune \\(f_{i}\\) after detecting out \\(\\{O_i\\}\\), where the objective is to remove as many voxels as possible while keeping the predictions of \\(\\{O\\} \\backslash \\{O_i\\}\\) unaffected after the pruning. For each object \\(o_j\\) in level \\(j\\) (\\(j < i\\)), we assume the prediction of it is unaffected if the voxel features at level \\(j\\) near its center \\(c_j\\) are unaffected. We make this assumption because most true positive predictions are from object proposals located at the center of bounding boxes [14, 44]. We denote the expected unaffected neighborhood as \\(\\mathcal{C}_j(c_j, P)\\), which means a cube centered at \\(c_j\\) with \\(P \\times P \\times P\\) voxels at level \\(j\\). Given the symmetry, \\(P\\) should be odd. Then we formulate the objective of our pruning strategy at level \\(i\\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.346, + 0.758, + 0.66, + 0.788 + ], + "angle": 0, + "content": "\\[\n\\underset {\\mathcal {K} _ {i}} {\\text {m i n i m i z e}} \\sum_ {x, y, z} M _ {i} [ x ] [ y ] [ z ], M _ {i} = \\bigwedge_ {j = 1} ^ {i - 1} \\mathcal {K} _ {i} (\\boldsymbol {c _ {j}}),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.339, + 0.792, + 0.786, + 0.81 + ], + "angle": 0, + "content": "\\[\ns. t. \\forall j < i, \\mathcal {C} _ {j} (\\boldsymbol {c} _ {j}, P) \\cap \\mathcal {A} _ {i, j} (\\neg \\mathcal {K} _ {i} (\\boldsymbol {c} _ {j}) \\star f _ {i}) = \\varnothing \\tag {1}\n\\]" + }, + { + "type": "page_footnote", + "bbox": [ + 0.221, + 0.825, + 0.768, + 0.841 + ], + "angle": 0, + "content": "1 We adopt the same definition of level as in Figure 3, where level \\( i \\) is finer than level \\( i + 1 \\)." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.314, + 0.115, + 0.733, + 0.131 + ], + "angle": 0, + "content": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.146, + 0.784, + 0.252 + ], + "angle": 0, + "content": "where \\( M_{i} \\in \\mathbb{R}^{N} \\) is a binary pruning mask sharing the same length with \\( f_{i} \\), where 0 indicates removing and 1 indicates keeping during the pruning operation \\( \\star \\). 
\\( \\mathcal{K}_i(\\cdot) \\) is the generation strategy of pruning mask for each object, which generates a binary pruning mask conditioned on the object center. \\( \\mathcal{A}_{i,j}(f) \\) is defined as the affecting field of \\( f \\), which represents the voxels at level \\( j \\) that will be affected by pruning \\( f \\) at level \\( i \\). Without loss of generality, here we choose only one object at each level for simplicity of presentation." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.253, + 0.784, + 0.328 + ], + "angle": 0, + "content": "Overview of problem solving: We solve (1) by mathematical induction. Specifically, for pruning strategy \\( M_{i} \\) at level \\( i \\), we first consider how to generate pruning mask \\( \\mathcal{K}_i(c_{i-1}) \\) to ensure the predictions of \\( \\{O_{i-1}\\} \\) are unaffected. Then we show that by following our pruning strategy \\( \\mathcal{K}_i \\), 'the predictions of \\( \\{O_j\\} \\) are unaffected' can be derived by 'the predictions of \\( \\{O_{j+1}\\} \\) are unaffected'.2" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.328, + 0.785, + 0.448 + ], + "angle": 0, + "content": "Solving \\(\\mathcal{K}_i(\\pmb{c}_{i-1})\\): To make sure \\(\\mathcal{C}_{i-1}(\\pmb{c}_{i-1}, P) \\cap \\mathcal{A}_{i,i-1}(\\cdot) = \\emptyset\\), we need to compute the affecting field of each voxel \\(v_i\\) in level \\(i\\). Obviously, the upper bound of affecting field of \\(v_i\\) expands in shape of cube with sparse convolution. Assume there are \\(m\\) sparse convolutions with stride 1 and kernel \\(x_k\\) (\\(1 \\leq k \\leq m\\)) between pruning and generative upsampling in level \\(i\\), one generative transposed convolution with stride 2 and kernel \\(y\\), and \\(n\\) sparse convolutions with stride 1 and kernel \\(z_k\\) (\\(1 \\leq k \\leq n\\)) until detecting out objects in level \\(i-1\\). Then the affecting field from pruning (level \\(i\\)) to detecting (level \\(i-1\\)) can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.355, + 0.456, + 0.786, + 0.473 + ], + "angle": 0, + "content": "\\[\n\\mathcal {A} _ {i, i - 1} \\left(v _ {i}\\right) = \\mathcal {C} _ {i - 1} \\left(v _ {i}, a f f \\left(\\left\\{x _ {k} \\right\\}, y, \\left\\{z _ {k} \\right\\}\\right)\\right) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.48, + 0.784, + 0.526 + ], + "angle": 0, + "content": "where \\( aff(\\{x_k\\}, y, \\{z_k\\}) \\) is the range of affecting field represented by the kernel sizes, which we will detail in supplementary material. Since the shape of the expected unaffected voxel features is a \\( P \\times P \\times P \\) cube, \\( \\mathcal{K}_i(\\pmb{c}_{i-1}) \\) can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.342, + 0.533, + 0.786, + 0.582 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {K} _ {i} \\left(\\boldsymbol {c} _ {\\boldsymbol {i} - \\boldsymbol {1}}\\right) [ x ] [ y ] [ z ] = \\mathbb {I} \\left(2 \\cdot | \\boldsymbol {x} - \\boldsymbol {c} _ {\\boldsymbol {i} - \\boldsymbol {1}} | _ {\\infty} \\leq r S _ {i}\\right) \\\\ r = \\lceil \\frac {P + a f f (\\{x _ {k} \\} , y , \\{z _ {k} \\}) - 2}{2} \\rceil \\tag {3} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.588, + 0.784, + 0.618 + ], + "angle": 0, + "content": "where \\( S_{i} \\) is the size of voxel in level \\( i \\). \\( \\mathbb{I}(\\cdot) \\) is the indicative function. \\( \\boldsymbol{x} = (x,y,z) \\) is the voxel coordinates of \\( f_{i} \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.619, + 0.784, + 0.695 + ], + "angle": 0, + "content": "Recursion of \\(\\kappa_{i}\\): We now derive when the pruning strategy \\(\\kappa_{i}\\) in (3) also works for \\(c_{j}\\) (\\(j < i - 1\\)). We can regrad \\(c_{j}\\) as the center of object in level \\(i - 1\\) and use (3) to generate the pruning mask. In this way, \\(\\mathcal{C}_{i - 1}(c_j,P)\\) are unaffected. As \\(\\mathcal{C}_j(c_j,P)\\) is covered by \\(\\mathcal{C}_{i - 1}(c_j,P)\\), so \\(\\mathcal{C}_j(c_j,P)\\) is unaffected as well. We should also ensure pruning in level \\(i\\) has no cumulative impact on pruning in level \\(i - 1\\):" + }, + { + "type": "equation", + "bbox": [ + 0.387, + 0.701, + 0.786, + 0.719 + ], + "angle": 0, + "content": "\\[\n\\left(\\mathcal {K} _ {i - 1} \\left(\\boldsymbol {c} _ {\\boldsymbol {j}}\\right) \\star f _ {i - 1}\\right) \\subseteq \\mathcal {C} _ {i - 1} \\left(\\boldsymbol {c} _ {\\boldsymbol {j}}, P\\right) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.726, + 0.784, + 0.756 + ], + "angle": 0, + "content": "this means when generating pruning mask of \\( c_{j} \\) in level \\( i - 1 \\) using \\( \\mathcal{K}_{i - 1} \\), the kept voxels should be covered by the unaffected voxels after pruning in level \\( i \\). So we have:" + }, + { + "type": "equation", + "bbox": [ + 0.433, + 0.763, + 0.786, + 0.78 + ], + "angle": 0, + "content": "\\[\nr \\cdot S _ {i - 1} \\leq P \\cdot S _ {i - 1} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.787, + 0.784, + 0.819 + ], + "angle": 0, + "content": "The minimum \\( P \\) can be acquired by solving (5). In this case, strategy \\( \\mathcal{K}_i \\) in (3) works for all \\( c_j \\) (\\( j < i \\))." + }, + { + "type": "page_footnote", + "bbox": [ + 0.221, + 0.824, + 0.729, + 0.841 + ], + "angle": 0, + "content": "2 We provide illustrated examples in supplementary material for better understanding." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.228, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.544, + 0.129 + ], + "angle": 0, + "content": "Authors Suppressed Due to Excessive Length" + }, + { + "type": "image", + "bbox": [ + 0.226, + 0.147, + 0.773, + 0.322 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.334, + 0.788, + 0.432 + ], + "angle": 0, + "content": "Fig. 5: Illustration of DSPDet3D. The voxelized point clouds are fed into a high-resolution sparse convolutional backbone, which output four levels of scene representations. Four dynamic spatial pruning (DSP) modules are stacked to construct a multi-level decoder and detect objects from coarse to fine. DSP module utilizes a light-weight learnable module to predict the pruning mask. During inference, we discretize the pruning mask and use it to guide pruning before generative upsampling. While during training we interpolate the pruning mask to next level and prune the voxel features after generative upsampling." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.462, + 0.333, + 0.475 + ], + "angle": 0, + "content": "3.3 DSPDet3D" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.487, + 0.787, + 0.533 + ], + "angle": 0, + "content": "Based on the theoretical analysis, we devise a dynamic spatial pruning (DSP) module to approximate the ideal pruning strategy. We further construct a 3D small object detector named DSPDet3D with the proposed DSP module." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.533, + 0.788, + 0.64 + ], + "angle": 0, + "content": "DSP module: As shown in Figure 3, we modify the layers of a typical multi-level decoder to DSP modules, which prunes redundant voxel features after detecting out objects at each level for efficient feature upsampling. Formally, given the upsampled voxel feature \\( f_{i}^{U} \\) and the backbone feature \\( f_{i}^{B} \\) at level \\( i \\), DSP module first add them for detection. However, \\( f_{i}^{U} \\) may be much sparser than \\( f_{i}^{B} \\) due to pruning, directly adding by taking union of them is inefficient. Therefore, we propose a new operator called partial addition to fit our pruning strategy:" + }, + { + "type": "equation", + "bbox": [ + 0.452, + 0.648, + 0.787, + 0.668 + ], + "angle": 0, + "content": "\\[\nf _ {i} = f _ {i} ^ {B \\overrightarrow {\\quad +}} f _ {i} ^ {U} \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.679, + 0.788, + 0.771 + ], + "angle": 0, + "content": "where addition is constrained to be operated only on the voxels of \\( f_{i}^{U} \\). Then objects are detected using a shared detection head across all levels: \\( \\{O_i\\} = \\text{Detect}(f_i) \\). Once objects at level \\( i \\) are detected out, we prune the voxel features according to the derived strategy described in Section 3.2. Here we devise a light-weight MLP-based learnable pruning module to decide where smaller objects (i.e. objects in level \\( j \\) (\\( j < i \\)) ) may appear, and then prune other locations:" + }, + { + "type": "equation", + "bbox": [ + 0.385, + 0.78, + 0.787, + 0.799 + ], + "angle": 0, + "content": "\\[\n\\bar {f} _ {i} = t \\left(\\hat {M} _ {i}\\right) \\star f _ {i}, \\hat {M} _ {i} = \\mathrm {M L P} _ {i} \\left(f _ {i}\\right) \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.809, + 0.789, + 0.842 + ], + "angle": 0, + "content": "where \\(\\hat{M}_i\\) is the pruning mask predicted from \\(f_i\\), which represents the probability of retention for each voxel. We utilize FocalLoss [23] to supervise \\(\\hat{M}_i\\) with the generated" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.314, + 0.115, + 0.732, + 0.131 + ], + "angle": 0, + "content": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.193 + ], + "angle": 0, + "content": "\\(M_{i}\\) in (1). During inference, a threshold function \\(t(\\cdot)\\) sets probability lower than \\(\\tau\\) to be 0, others be 1 to guide pruning. After pruning, the generative upsampling is applied to acquire features for the next level: \\(f_{i - 1}^{U} = \\text{GeV}(\\bar{f}_{i})\\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.195, + 0.787, + 0.317 + ], + "angle": 0, + "content": "During training, as \\(\\hat{M}_i\\) may not be so accurate (especially at beginning), we find applying the above learnable pruning module makes training difficult to converge. Instead, we switch the pruning to weak mode for context preservation. As shown in Figure 5, the weak pruning is applied after generative upsampling. For level \\(i\\), we upsample the pruning mask \\(\\hat{M}_{i+1}\\) to level \\(i\\) with nearest neighbor interpolation. Then we sort the interpolated scores and keep only \\(N_{max}\\) voxels with the highest scores. 
This weak pruning mechanism aims to stabilize training, which only works when the amount of voxels is too large to conduct following operations." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.318, + 0.787, + 0.422 + ], + "angle": 0, + "content": "Since our theoretical analysis sets the expected unaffected neighborhood to be a \\( P \\times P \\times P \\) cube, we also modify the assigning strategy of positive object proposals accordingly for robust training. Specifically, for a ground-truth bounding box of \\( o_i \\) assigned to level \\( i \\), we sample the nearest \\( N_{pos} \\) voxels to \\( c_i \\) inside the cube centered at \\( c_i \\) with length \\( P \\cdot S_i \\). If there are less than \\( N_{pos} \\) voxels in the cube, we simply sample all voxels inside it. Our assigning method is independent of the size of bounding box, which ensures there are enough positive proposals even for small objects." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.423, + 0.787, + 0.514 + ], + "angle": 0, + "content": "DSPDet3D: Based upon the top-performance multi-level detector TR3D [39], we remove the max pooling layer to increase the spatial resolution of backbone features. Then we replace the decoder in TR3D with four stacked DSP modules to remove redundant voxel features level by level, which achieves efficient upsampling without affecting the detection performance. To train DSPDet3D, we keep the same loss for classification and box regression as in TR3D and add additional FocalLoss to supervise \\(\\hat{M}_i\\) with \\(M_i\\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.515, + 0.787, + 0.605 + ], + "angle": 0, + "content": "Compare with FCAF3D: Similar to our training-time weak pruning, FCAF3D [38] also adopts a pruning strategy in the decoder to prevent the number of voxels from getting too large, which is unable to remove redundant features in early decoder layers during inference. Moreover, it directly utilizes the classification scores for bounding boxes to sort and prune the voxel features, which cannot accurately preserve geometric information for small objects." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.631, + 0.35, + 0.648 + ], + "angle": 0, + "content": "4 Experiment" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.666, + 0.787, + 0.757 + ], + "angle": 0, + "content": "In this section, we conduct experiments to investigate the performance of our approach on 3D small object detection. We first describe the datasets and experimental settings. Then we compare DSPDet3D with the state-of-the-art 3D object detection methods. We also design ablation experiments to study the effectiveness of the proposed methods. Finally we transfer DSPDet3D to extremely large scenes to show its efficiency and generalization ability." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.782, + 0.414, + 0.798 + ], + "angle": 0, + "content": "4.1 Experimental Settings" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Datasets and metrics: We conduct experiments on two indoor datasets including ScanNet [9] and TO-SCENE [50]. 
ScanNet is a richly annotated dataset of indoor scenes" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.544, + 0.13 + ], + "angle": 0, + "content": "Authors Suppressed Due to Excessive Length" + }, + { + "type": "table_caption", + "bbox": [ + 0.215, + 0.145, + 0.788, + 0.187 + ], + "angle": 0, + "content": "Table 1: 3D objects detection results and computational costs of different methods on ScanNet.md40. DSPDet3D with the best pruning threshold is highlighted in gray. We set best scores in bold, runner-ups underlined." + }, + { + "type": "table", + "bbox": [ + 0.246, + 0.199, + 0.755, + 0.43 + ], + "angle": 0, + "content": "
Method | Decoder | mAP@0.25 | mAP@0.5 | mAP_S@0.25 | mAP_S@0.5 | Speed (FPS) | Memory
VoteNet | Voting | 51.02 | 33.69 | 0.30 | 0 | 13.4 | 1150
VoteNetS | Voting | 48.62 | 31.55 | 1.04 | 0 | 8.5 | 1500
H3DNet | Hybrid | 53.51 | 39.23 | 3.08 | 0.90 | 7.2 | 1550
GroupFree3D | Transformer | 56.77 | 41.39 | 11.7 | 0.81 | 7.8 | 1450
GroupFree3DS | Transformer | 29.44 | 11.94 | 0.20 | 0 | 3.2 | 2000
RBGNet | Voting | 55.23 | 32.64 | 5.81 | 0 | 6.6 | 1700
FCAF3D | Multi-level | 59.49 | 48.75 | 18.38 | 8.21 | 12.3 | 850
CAGroup3D | Voting | 60.29 | 49.90 | 16.62 | 8.63 | 3.1 | 3250
TR3D | Multi-level | 61.59 | 49.98 | 27.53 | 12.91 | 10.8 | 1250
FCAF3D-higher | Multi-level | 62.65 | 51.01 | 27.68 | 16.23 | 7.1 | 4000
TR3D-higher | Multi-level | 65.18 | 54.03 | 41.70 | 29.56 | 5.2 | 4450
Ours(τ=0) | Multi-level | 65.39 | 54.59 | 44.79 | 31.55 | 4.4 | 4200
Ours(τ=0.3) | Multi-level | 65.04 | 54.35 | 43.77 | 30.38 | 12.5 | 700
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.468, + 0.788, + 0.665 + ], + "angle": 0, + "content": "with 1201 training scenes and 312 validation scenes. Each object in the scenes are annotated with texts and then mapped to category IDs. We follow the ScanNet-md40 benchmark proposed by [51], which contains objects in 22 categories with large size variance. TO-SCENE is a mixed reality dataset which provides three variants called TO_Vanilla, TO_Crowd and TO_ScanNet with different numbers of tabletop objects and scene scales. We choose the room-scale TO_ScanNet benchmark, which contains 3600 training scenes and 800 validation scenes with 70 categories. However, TO_ScanNet adopts non-uniform sampling to acquire about 2000 points per tabletop object, which is infeasible in practical settings. To this end, we downsample the small objects and control the density of them to be similar with other objects and backgrounds. We name this modified version as TO-SCENE-down benchmark. We take the point clouds without color as inputs for all methods. More details about ScanNet-md40 and TO-SCENE-down benchmarks can be found in supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.668, + 0.788, + 0.744 + ], + "angle": 0, + "content": "We report the mean average precision (mAP) with threshold 0.25 and 0.5. To measure the performance on different categories, we use two kinds of metrics: mAP and \\(\\mathrm{mAP}_S\\), which refer to the mean AP of all objects and of small objects respectively. Here we define categories of small object as ones with average volume smaller than \\(0.05m^3\\) for both benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.748, + 0.788, + 0.843 + ], + "angle": 0, + "content": "Implementation details: We implement our approach with PyTorch [32], MinkowskiEngine [7] and MMDetection3D [8]. We follow the same training strategy / hyperparameters as TR3D [39] for fair comparison. Training converges within 4 hours on a 4 GPU machine. The stride of the sparse convolution in the preencoder of DSPDet3D is set to 2, thus the voxel size of \\( f_1^B \\) is 4cm and \\( S_i \\) equals to \\( 2^i \\cdot 2cm \\). We set \\( N_{pos} = 6 \\) and \\( N_{max} = 100000 \\) during training. The weight of the FocalLoss between \\( M_i \\) and \\( \\hat{M}_i \\) is" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.314, + 0.115, + 0.733, + 0.131 + ], + "angle": 0, + "content": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "table_caption", + "bbox": [ + 0.215, + 0.145, + 0.788, + 0.189 + ], + "angle": 0, + "content": "Table 2: 3D objects detection results and computational costs of different methods on TO-SCENE-down benchmark. DSPDet3D with the best pruning threshold is highlighted in gray. We set best scores in bold, runner-ups underlined." + }, + { + "type": "table", + "bbox": [ + 0.246, + 0.199, + 0.755, + 0.43 + ], + "angle": 0, + "content": "
Method | Decoder | mAP@0.25 | mAP@0.5 | mAP_S@0.25 | mAP_S@0.5 | Speed (FPS) | Memory
VoteNet | Voting | 26.72 | 14.01 | 14.51 | 4.78 | 12.8 | 1300
\( VoteNet_S \) | Voting | 31.87 | 14.89 | 21.75 | 7.40 | 7.6 | 1650
H3DNet | Hybrid | 27.69 | 17.38 | 14.83 | 7.39 | 5.1 | 1650
GroupFree3D | Transformer | 32.41 | 20.43 | 20.17 | 10.13 | 7.7 | 1700
\( GroupFree3D_S \) | Transformer | 40.14 | 23.55 | 33.33 | 16.15 | 2.4 | 2200
RBGNet | Voting | 40.42 | 30.27 | 29.69 | 21.61 | 5.0 | 1850
FCAF3D | Multi-level | 45.13 | 37.21 | 37.18 | 31.65 | 11.9 | 1000
CAGroup3D | Voting | 54.28 | 47.58 | 48.49 | 43.85 | 2.2 | 3500
TR3D | Multi-level | 55.58 | 45.95 | 52.72 | 44.01 | 9.9 | 1400
FCAF3D-higher | Multi-level | 57.23 | 50.39 | 53.07 | 48.76 | 6.3 | 4250
TR3D-higher | Multi-level | 63.96 | 56.06 | 62.84 | 57.14 | 4.1 | 4600
Ours(τ=0) | Multi-level | 66.81 | 59.41 | 66.53 | 61.57 | 4.1 | 5300
Ours(τ=0.5) | Multi-level | 66.12 | 58.55 | 65.82 | 60.73 | 13.9 | 800
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.459, + 0.788, + 0.492 + ], + "angle": 0, + "content": "0.01. In terms of block structure, we have \\(\\{x_k\\} = \\emptyset\\), \\(y = 3\\) and \\(\\{z_k\\} = \\{3,3\\}\\). So we set \\(r = 7\\) and \\(P = 7\\) according to (3)." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.512, + 0.492, + 0.528 + ], + "angle": 0, + "content": "4.2 Comparison with State-of-the-art" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.538, + 0.788, + 0.674 + ], + "angle": 0, + "content": "We compare our method with popular and state-of-the-art 3D object detection methods, including VoteNet [33], H3DNet [54], GroupFree3D [26], RBGNet [47], CA-Group3D [46], FCAF3D [38] and TR3D [39]. We also follow [50] to reduce the radius of ball query in the PointNet++ backbone for VoteNet and GroupFree3D. The modified models is distinguished by subscript \\( S \\). Note that the original TR3D only uses two detection heads at level 2/3 and removes the last generative upsampling. However, detecting small objects heavily relies on high-resolution feature map, so we add the upsampling back. This will make it slightly slower but much more accurate on the 3D small object detection benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.674, + 0.788, + 0.703 + ], + "angle": 0, + "content": "For all methods, we use their official code and the same training strategy / hyperparameters to train them on ScanNet-md40 and TO-SCENE-down." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.705, + 0.79, + 0.84 + ], + "angle": 0, + "content": "Table 1 and 2 shows the experimental results on ScanNet-md40 and TO-SCENEDown respectively. Consistent with the observation of [51], we find point-based (VoteNet, H3DNet, RBGNet) and transformer-based (GroupFree3D) methods almost fail to detect small objects on ScanNet-md40. This is because the PointNet++ backbone used by these methods adopts set abstraction (SA) operation to aggressively downsample the point clouds and extract scene representation. Since the number of small objects in ScanNet is limited, furthest point sampling has a low probability to sample points on small objects, which leads to inaccurate representation of small objects. For methods (CAGroup3D, FCAF3D, TR3D) with sparse convolutional backbone, they achieve" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.544, + 0.129 + ], + "angle": 0, + "content": "Authors Suppressed Due to Excessive Length" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.145, + 0.784, + 0.475 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.488, + 0.788, + 0.515 + ], + "angle": 0, + "content": "Fig. 6: Visualization of pruning process on ScanNet. We show the kept voxels in each level under different thresholds. The memory footprint of each level is also listed at bottom." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.542, + 0.788, + 0.664 + ], + "angle": 0, + "content": "relatively much higher \\(\\mathrm{mAP}_S\\) due to sparse convolution [7, 13] can extract fine-grained scene representation with high efficiency. However, two-stage method like CAGroup3D is both slow and memory-consuming. Multi-level methods like FCAF3D and TR3D are efficient and get good performance on small object detection due to the FPN-like architecture, but they are still limited by resolution. 
On the contrary, our DSPDet3D with a proper threshold takes advantage of the high-resolution scene representation to achieve much higher performance. Furthermore, DSPDet3D is the most memory-efficient model among all mainstream methods." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.683, + 0.364, + 0.699 + ], + "angle": 0, + "content": "4.3 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.705, + 0.787, + 0.734 + ], + "angle": 0, + "content": "We conduct ablation studies on ScanNet-md40 to study the effects of hyperparameters and different design choices." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.735, + 0.787, + 0.81 + ], + "angle": 0, + "content": "Pruning process: We visualize the pruning process under different thresholds in Figure 6, where the voxels in each level after pruning are shown. We also list the memory footprint of each level. It can be seen that our method significantly reduces the memory footprint by pruning most of the uninformative voxels. Our pruning module only keeps regions where there are smaller objects than current level." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.787, + 0.843 + ], + "angle": 0, + "content": "Hyperparameters: We study two hyperparameters: \\( r \\) and \\( N_{pos} \\), which is highly relevant to 3D small object detection. Note that \\( r = \\left\\lceil \\frac{P + 9 - 2}{2} \\right\\rceil \\), thus \\( r \\) and \\( P \\) should be" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.314, + 0.115, + 0.733, + 0.131 + ], + "angle": 0, + "content": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning" + }, + { + "type": "page_number", + "bbox": [ + 0.77, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.788, + 0.173 + ], + "angle": 0, + "content": "Table 3: Ablation studies on several design choices. We control the speed of each method to 10 FPS and report the accuracy in \\(\\mathrm{mAP}@\\mathbf{0.25}\\) and \\(\\mathrm{mAP}_S@\\mathbf{0.25}\\)." + }, + { + "type": "table", + "bbox": [ + 0.321, + 0.185, + 0.684, + 0.314 + ], + "angle": 0, + "content": "
Method | mAP | \( \mathrm{mAP}_S \)
Remove partial addition | 55.3 | 35.5
Addition by taking union | 57.9 | 36.4
Addition by interpolation | 62.1 | 40.9
Spherical keeping mask | 63.0 | 41.1
Remove training-time pruning | - | -
Positive proposal inside bounding box | 62.4 | 40.7
The full design of DSP module | 65.1 | 44.1
" + }, + { + "type": "image", + "bbox": [ + 0.278, + 0.329, + 0.502, + 0.443 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.329, + 0.727, + 0.443 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.457, + 0.788, + 0.486 + ], + "angle": 0, + "content": "Fig. 7: Ablation studies on the value of \\( r \\) and \\( N_{pos} \\). For each value we report performance under different pruning threshold \\( \\tau \\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.516, + 0.788, + 0.607 + ], + "angle": 0, + "content": "changed simultaneously. As shown in Figure 7 (left), setting \\( r = 7 \\) achieves the best performance. If \\( r \\) is smaller than 7 then \\( r > P \\), which conflicts with Equation (5) and the features will be affected by pruning. While a larger \\( r \\) will make the pruning less aggressive, resulting in a large number of redundant voxel features. Figure 7 (right) shows that the number of positive object proposals should be set properly, which is important to balance the ratio between positive and negative samples during classification." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.608, + 0.788, + 0.744 + ], + "angle": 0, + "content": "Design choices: We also study the design choices of DSPDet3D in Table 3. Observing the second, third and fourth rows, we conclude that the partial addition is important for efficient feature fusion. Although taking union can preserve more information, this operation will reduce the sparsity of voxels and thus make our pruning less efficient. The fifth row shows that generate the keeping mask according to the shape of affecting field is better than using a spherical shape. According to the sixth row, removing training-time pruning will significantly increase the memory footprint during training, which makes the network unable to train. The seventh row validates the effectiveness of our assigning method for positive object proposals." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.768, + 0.469, + 0.784 + ], + "angle": 0, + "content": "4.4 Transferring to Larger Scenes" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.787, + 0.842 + ], + "angle": 0, + "content": "We further validate the efficiency and generalization ability of different 3D detectors by transferring them to scenes of much larger scale. We first train 3D detectors on rooms from ScanNet training set in a category-agnostic manner, which is done by regarding" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.544, + 0.129 + ], + "angle": 0, + "content": "Authors Suppressed Due to Excessive Length" + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.147, + 0.778, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.366, + 0.788, + 0.409 + ], + "angle": 0, + "content": "Fig. 8: Visualization of the transferring results of different 3D object detectors. The 3D detector is trained on rooms from ScanNet and directly adopted to process a whole building-level 3D scene from Matterport3D." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.437, + 0.788, + 0.543 + ], + "angle": 0, + "content": "every labeled object as the same category. Then we directly adopt them to process the building-level scenes in Matterport3D [4]. 
We find previous methods almost all fail to process the extremely large scenes due to unaffordable memory footprint, so we only compare DSPDet3D with FCAF3D as shown in 8. It is shown that FCAF3D cannot detect out any small object and even struggles on relatively large objects like chairs when the scene is too large. On the contrary, DSPDet3D is able to accurately detect small objects like cups and thin pictures." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.564, + 0.345, + 0.58 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.593, + 0.789, + 0.76 + ], + "angle": 0, + "content": "In this paper, we have presented an efficient feature pruning strategy for 3D small object detection. Inspired by the fact that small objects only occupy a small proportion of space, we adopt a multi-level detection framework to detect different sizes of objects in different levels. Then we present a dynamic spatial pruning strategy to prune the voxel features after detecting out objects in each level. Specifically, we first design the dynamic spatial pruning strategy by theoretical analysis on how to prune voxels without affecting the features of object proposals. Then we propose dynamic spatial pruning (DSP) module according to the strategy and use it to construct DSPDet3D. Extensive experiments on ScanNet and TO-SCENE datasets show that our DSPDet3D achieves leading detection accuracy and speed. We also conduct transferring experiment on Matterport3D to show DSPDet3D also generalizes well to extremely large scenes." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.781, + 0.383, + 0.798 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.787, + 0.84 + ], + "angle": 0, + "content": "This work was supported in part by the National Natural Science Foundation of China under Grant 62125603, Grant 62321005, and Grant 62336004." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.314, + 0.115, + 0.733, + 0.13 + ], + "angle": 0, + "content": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning" + }, + { + "type": "page_number", + "bbox": [ + 0.77, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.145, + 0.312, + 0.161 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.179, + 0.785, + 0.207 + ], + "angle": 0, + "content": "1. Armeni, I., Sener, O., Zamir, A.R., Jiang, H., Brilakis, I., Fischer, M., Savarese, S.: 3d semantic parsing of large-scale indoor spaces. In: ICCV. pp. 1534-1543 (2016) 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.208, + 0.785, + 0.235 + ], + "angle": 0, + "content": "2. Bansal, M., Krizhevsky, A., Ogale, A.: Chauffeurnet: Learning to drive by imitating the best and synthesizing the worst. arXiv preprint arXiv:1812.03079 (2018) 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.236, + 0.785, + 0.263 + ], + "angle": 0, + "content": "3. Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: ECCV. pp. 213-229. Springer (2020) 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.264, + 0.785, + 0.303 + ], + "angle": 0, + "content": "4. Chang, A., Dai, A., Funkhouser, T., Halber, M., Niessner, M., Savva, M., Song, S., Zeng, A., Zhang, Y.: Matterport3d: Learning from rgb-d data in indoor environments. 
3DV (2017) 2, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.305, + 0.785, + 0.332 + ], + "angle": 0, + "content": "5. Chen, C., Liu, M.Y., Tuzel, O., Xiao, J.: R-cnn for small object detection. In: ACCV. pp. 214-230. Springer (2017) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.334, + 0.785, + 0.362 + ], + "angle": 0, + "content": "6. Cheng, B., Sheng, L., Shi, S., Yang, M., Xu, D.: Back-tracing representative points for voting-based 3d object detection in point clouds. In: CVPR. pp. 8963-8972 (2021) 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.363, + 0.785, + 0.39 + ], + "angle": 0, + "content": "7. Choy, C., Gwak, J., Savarese, S.: 4d spatio-temporal convnets: Minkowski convolutional neural networks. In: CVPR. pp. 3075-3084 (2019) 1, 3, 4, 10, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.391, + 0.785, + 0.417 + ], + "angle": 0, + "content": "8. Contributors, M.: Mmdetection3d: Openmmlab next-generation platform for general 3d object detection (2020) 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.418, + 0.785, + 0.457 + ], + "angle": 0, + "content": "9. Dai, A., Chang, A.X., Savva, M., Halber, M., Funkhouser, T., Nießner, M.: Scannet: Richly-annotated 3d reconstructions of indoor scenes. In: CVPR. pp. 5828--5839 (2017) 2, 3, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.221, + 0.46, + 0.785, + 0.487 + ], + "angle": 0, + "content": "10. Deng, C., Wang, M., Liu, L., Liu, Y., Jiang, Y.: Extended feature pyramid network for small object detection. TMM 24, 1968-1979 (2021) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.221, + 0.489, + 0.785, + 0.516 + ], + "angle": 0, + "content": "11. Gao, M., Yu, R., Li, A., Morariu, V.I., Davis, L.S.: Dynamic zoom-in network for fast object detection in large images. In: CVPR. pp. 6926-6935 (2018) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.221, + 0.517, + 0.785, + 0.544 + ], + "angle": 0, + "content": "12. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? the kitti vision benchmark suite. In: CVPR. pp. 3354-3361 (2012) 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.221, + 0.545, + 0.785, + 0.572 + ], + "angle": 0, + "content": "13. Graham, B., Engelcke, M., Van Der Maaten, L.: 3d semantic segmentation with submanifold sparse convolutional networks. In: CVPR. pp. 9224-9232 (2018) 1, 3, 4, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.221, + 0.573, + 0.785, + 0.601 + ], + "angle": 0, + "content": "14. Gwak, J., Choy, C., Savarese, S.: Generative sparse detection networks for 3d single-shot object detection. In: ECCV. pp. 297-313. Springer (2020) 3, 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.221, + 0.602, + 0.785, + 0.629 + ], + "angle": 0, + "content": "15. Han, S., Pool, J., Tran, J., Dally, W.: Learning both weights and connections for efficient neural network. NeurIPS 28 (2015) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.221, + 0.63, + 0.785, + 0.656 + ], + "angle": 0, + "content": "16. Huang, Z., Wang, N.: Data-driven sparse structure selection for deep neural networks. In: ECCV. pp. 304-320 (2018) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.221, + 0.658, + 0.785, + 0.685 + ], + "angle": 0, + "content": "17. Kisantal, M., Wojna, Z., Murawski, J., Naruniec, J., Cho, K.: Augmentation for small object detection. arXiv preprint arXiv:1902.07296 (2019) 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.221, + 0.686, + 0.693, + 0.699 + ], + "angle": 0, + "content": "18. LeCun, Y., Denker, J., Solla, S.: Optimal brain damage. 
NeurIPS 2 (1989) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.221, + 0.7, + 0.785, + 0.727 + ], + "angle": 0, + "content": "19. Lee, J., Choy, C., Park, J.: Putting 3d spatially sparse networks on a diet. arXiv preprint arXiv:2112.01316 (2021) 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.221, + 0.728, + 0.785, + 0.755 + ], + "angle": 0, + "content": "20. Li, H., Kadav, A., Durdanovic, I., Samet, H., Graf, H.P.: Pruning filters for efficient convnets. arXiv preprint arXiv:1608.08710 (2016) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.221, + 0.757, + 0.785, + 0.784 + ], + "angle": 0, + "content": "21. Li, J., Liang, X., Wei, Y., Xu, T., Feng, J., Yan, S.: Perceptual generative adversarial networks for small object detection. In: CVPR. pp. 1222-1230 (2017) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.221, + 0.785, + 0.785, + 0.812 + ], + "angle": 0, + "content": "22. Lin, T.Y., Dollár, P., Girshick, R., He, K., Hariharan, B., Belongie, S.: Feature pyramid networks for object detection. In: CVPR. pp. 2117-2125 (2017) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.221, + 0.813, + 0.785, + 0.84 + ], + "angle": 0, + "content": "23. Lin, T.Y., Goyal, P., Girshick, R., He, K., Dólár, P.: Focal loss for dense object detection. In: ICCV. pp. 2980-2988 (2017) 8" + }, + { + "type": "list", + "bbox": [ + 0.221, + 0.179, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.544, + 0.129 + ], + "angle": 0, + "content": "Authors Suppressed Due to Excessive Length" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.147, + 0.786, + 0.175 + ], + "angle": 0, + "content": "24. Liu, J., Chen, Y., Ye, X., Tian, Z., Tan, X., Qi, X.: Spatial pruned sparse convolution for efficient 3d object detection. In: NeurIPS (2022) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.175, + 0.786, + 0.202 + ], + "angle": 0, + "content": "25. Liu, W., Anguelov, D., Erhan, D., Szegedy, C., Reed, S., Fu, C.Y., Berg, A.C.: Ssd: Single shot multibox detector. In: ECCV. pp. 21-37 (2016) 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.203, + 0.786, + 0.229 + ], + "angle": 0, + "content": "26. Liu, Z., Zhang, Z., Cao, Y., Hu, H., Tong, X.: Group-free 3d object detection via transformers. arXiv preprint arXiv:2104.00678 (2021) 3, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.23, + 0.786, + 0.257 + ], + "angle": 0, + "content": "27. Liu, Z., Li, J., Shen, Z., Huang, G., Yan, S., Zhang, C.: Learning efficient convolutional networks through network slimming. In: ICCV. pp. 2736-2744 (2017) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.258, + 0.786, + 0.283 + ], + "angle": 0, + "content": "28. Misra, I., Girdhar, R., Joulin, A.: An end-to-end transformer model for 3d object detection. In: ICCV. pp. 2906-2917 (2021) 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.284, + 0.786, + 0.311 + ], + "angle": 0, + "content": "29. Molchanov, P., Tyree, S., Karras, T., Aila, T., Kautz, J.: Pruning convolutional neural networks for resource efficient inference. arXiv preprint arXiv:1611.06440 (2016) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.312, + 0.786, + 0.338 + ], + "angle": 0, + "content": "30. Mousavian, A., Eppner, C., Fox, D.: 6-dof grapnet: Variational grasp generation for object manipulation. In: ICCV. pp. 
2901-2910 (2019) 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.339, + 0.786, + 0.364 + ], + "angle": 0, + "content": "31. Najibi, M., Singh, B., Davis, L.S.: Autofocus: Efficient multi-scale inference. In: ICCV. pp. 9745-9755 (2019) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.366, + 0.786, + 0.405 + ], + "angle": 0, + "content": "32. Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., et al.: Pytorch: An imperative style, high-performance deep learning library. NeurIPS 32 (2019) 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.407, + 0.786, + 0.433 + ], + "angle": 0, + "content": "33. Qi, C.R., Litany, O., He, K., Guibas, L.J.: Deep hough voting for 3d object detection in point clouds. In: ICCV. pp. 9277-9286 (2019) 3, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.434, + 0.786, + 0.46 + ], + "angle": 0, + "content": "34. Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: Deep learning on point sets for 3d classification and segmentation. In: CVPR. pp. 652-660 (2017) 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.461, + 0.786, + 0.487 + ], + "angle": 0, + "content": "35. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In: NeurIPS. pp. 5099-5108 (2017) 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.488, + 0.786, + 0.514 + ], + "angle": 0, + "content": "36. Rao, Y., Zhao, W., Liu, B., Lu, J., Zhou, J., Hsieh, C.J.: Dynamicvit: Efficient vision transformers with dynamic token sparsification. NeurIPS 34, 13937-13949 (2021) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.515, + 0.786, + 0.542 + ], + "angle": 0, + "content": "37. Rozenberszki, D., Litany, O., Dai, A.: Language-grounded indoor 3d semantic segmentation in the wild. In: ECCV. pp. 125-141. Springer (2022) 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.543, + 0.786, + 0.569 + ], + "angle": 0, + "content": "38. Rukhovich, D., Vorontsova, A., Konushin, A.: Fcaf3d: fully convolutional anchor-free 3d object detection. In: ECCV. pp. 477-493. Springer (2022) 3, 4, 9, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.569, + 0.786, + 0.595 + ], + "angle": 0, + "content": "39. Rukhovich, D., Vorontsova, A., Konushin, A.: Tr3d: Towards real-time indoor 3d object detection. arXiv preprint arXiv:2302.02858 (2023) 1, 3, 4, 5, 9, 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.597, + 0.786, + 0.623 + ], + "angle": 0, + "content": "40. Shi, S., Guo, C., Jiang, L., Wang, Z., Shi, J., Wang, X., Li, H.: Pv-rcnn: Point-voxel feature set abstraction for 3d object detection. In: CVPR. pp. 10529–10538 (2020) 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.624, + 0.786, + 0.663 + ], + "angle": 0, + "content": "41. Singh, B., Davis, L.S.: An analysis of scale invariance in object detection snip. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3578-3587 (2018) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.664, + 0.786, + 0.689 + ], + "angle": 0, + "content": "42. Singh, B., Najibi, M., Davis, L.S.: Sniper: Efficient multi-scale training. NeurIPS 31 (2018) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.69, + 0.786, + 0.718 + ], + "angle": 0, + "content": "43. Song, S., Lichtenberg, S.P., Xiao, J.: Sun rgb-d: A rgb-d scene understanding benchmark suite. In: CVPR. pp. 
567-576 (2015) 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.719, + 0.786, + 0.744 + ], + "angle": 0, + "content": "44. Tian, Z., Shen, C., Chen, H., He, T.: Fcos: Fully convolutional one-stage object detection. In: ICCV. pp. 9627-9636 (2019) 4, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.745, + 0.786, + 0.771 + ], + "angle": 0, + "content": "45. Tong, K., Wu, Y., Zhou, F.: Recent advances in small object detection based on deep learning: A review. IVC 97, 103910 (2020) 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.772, + 0.786, + 0.812 + ], + "angle": 0, + "content": "46. Wang, H., Ding, L., Dong, S., Shi, S., Li, A., Li, J., Li, Z., Wang, L.: Cagroup3d: Class-aware grouping for 3d object detection on point clouds. arXiv preprint arXiv:2210.04264 (2022) 1, 3, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.813, + 0.786, + 0.84 + ], + "angle": 0, + "content": "47. Wang, H., Shi, S., Yang, Z., Fang, R., Qian, Q., Li, H., Schiele, B., Wang, L.: Rbgnet: Ray-based grouping for 3d object detection. In: CVPR. pp. 1110-1119 (2022) 3, 11" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.786, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.314, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning" + }, + { + "type": "page_number", + "bbox": [ + 0.77, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.188 + ], + "angle": 0, + "content": "48. Wang, J., Sun, K., Cheng, T., Jiang, B., Deng, C., Zhao, Y., Liu, D., Mu, Y., Tan, M., Wang, X., et al.: Deep high-resolution representation learning for visual recognition. TPAMI 43(10), 3349-3364 (2020) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.189, + 0.788, + 0.217 + ], + "angle": 0, + "content": "49. Xie, Q., Lai, Y.K., Wu, J., Wang, Z., Zhang, Y., Xu, K., Wang, J.: Mlcvnet: Multi-level context votenet for 3d object detection. In: CVPR. pp. 10447-10456 (2020) 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.218, + 0.788, + 0.244 + ], + "angle": 0, + "content": "50. Xu, M., Chen, P., Liu, H., Han, X.: To-scene: A large-scale dataset for understanding 3d tabletop scenes. In: ECCV. pp. 340-356. Springer (2022) 2, 3, 4, 9, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.245, + 0.788, + 0.285 + ], + "angle": 0, + "content": "51. Xu, X., Wang, Y., Zheng, Y., Rao, Y., Zhou, J., Lu, J.: Back to reality: Weakly-supervised 3d object detection with shape-guided label enhancement. In: CVPR. pp. 8438-8447 (2022) 2, 4, 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.287, + 0.788, + 0.314 + ], + "angle": 0, + "content": "52. Xu, X., Wang, Z., Zhou, J., Lu, J.: Binarizing sparse convolutional networks for efficient point cloud analysis. arXiv preprint arXiv:2303.15493 (2023) 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.315, + 0.788, + 0.342 + ], + "angle": 0, + "content": "53. Yang, C., Huang, Z., Wang, N.: Querydet: Cascaded sparse query for accelerating high-resolution small object detection. In: CVPR. pp. 13668-13677 (2022) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.343, + 0.788, + 0.37 + ], + "angle": 0, + "content": "54. Zhang, Z., Sun, B., Yang, H., Huang, Q.: H3dnet: 3d object detection using hybrid geometric primitives. In: ECCV. pp. 
311-329 (2020) 3, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.371, + 0.788, + 0.411 + ], + "angle": 0, + "content": "55. Zhao, T., Ning, X., Hong, K., Qiu, Z., Lu, P., Zhao, Y., Zhang, L., Zhou, L., Dai, G., Yang, H., et al.: Ada3d: Exploiting the spatial redundancy with adaptive inference for efficient 3d object detection. arXiv preprint arXiv:2307.08209 (2023) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.412, + 0.788, + 0.439 + ], + "angle": 0, + "content": "56. Zheng, W., Tang, W., Jiang, L., Fu, C.W.: Se-ssd: Self-ensembling single-stage object detector from point cloud. In: CVPR. pp. 14494–14503 (2021) 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.44, + 0.788, + 0.479 + ], + "angle": 0, + "content": "57. Zhu, Y., Mottaghi, R., Kolve, E., Lim, J.J., Gupta, A., Fei-Fei, L., Farhadi, A.: Target-driven visual navigation in indoor scenes using deep reinforcement learning. In: ICRA. pp. 3357-3364 (2017) 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.48, + 0.788, + 0.509 + ], + "angle": 0, + "content": "58. Zoph, B., Cubuk, E.D., Ghiasi, G., Lin, T.Y., Shlens, J., Le, Q.V.: Learning data augmentation strategies for object detection. In: ECCV. pp. 566-583. Springer (2020) 3" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.509 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/3D Small Object Detection with Dynamic Spatial Pruning/2b6d04de-265e-4c48-b6b1-d03973f89d8a_origin.pdf b/2024/3D Small Object Detection with Dynamic Spatial Pruning/2b6d04de-265e-4c48-b6b1-d03973f89d8a_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1abe3c7083a81ff9bc540cbe58c598a1ca3a4150 --- /dev/null +++ b/2024/3D Small Object Detection with Dynamic Spatial Pruning/2b6d04de-265e-4c48-b6b1-d03973f89d8a_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d3c3740897e13e89ef7856b134227e48f557e0ef4f7c13c45868cfada8dbd21 +size 10559459 diff --git a/2024/3D Small Object Detection with Dynamic Spatial Pruning/full.md b/2024/3D Small Object Detection with Dynamic Spatial Pruning/full.md new file mode 100644 index 0000000000000000000000000000000000000000..2cebcdac64608d0306bb38749dd1986f3e69f978 --- /dev/null +++ b/2024/3D Small Object Detection with Dynamic Spatial Pruning/full.md @@ -0,0 +1,288 @@ +# DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning + +Xiuwei $\mathrm{Xu}^{1\star}$ , Zhihao $\mathrm{Sun}^{2*}$ , Ziwei Wang $^3$ , Hongmin Liu $^{2\dagger}$ , Jie Zhou $^1$ , Jiwen Lu $^{1\dagger}$ + +$^{1}$ Tsinghua University + +$^{2}$ University of Science and Technology Beijing + +$^{3}$ Carnegie Mellon University + +xxw21@mails.tsinghua.edu.cn; d202210361@xs.ustb.edu.cn; + +ziweiwa2@andrew.cmu.edu; hmliu@ustb.edu.cn; + +{jzhou,lujiwen}@tsinghua.edu.cn + +Abstract. In this paper, we propose an efficient feature pruning strategy for 3D small object detection. Conventional 3D object detection methods struggle on small objects due to the weak geometric information from a small number of points. Although increasing the spatial resolution of feature representations can improve the detection performance on small objects, the additional computational overhead is unaffordable. With in-depth study, we observe the growth of computation mainly comes from the upsampling operation in the decoder of 3D detector. 
Motivated by this, we present a multi-level 3D detector named DSPDet3D which benefits from high spatial resolution to achieve high accuracy on small object detection, while reducing redundant computation by only focusing on small object areas. Specifically, we theoretically derive a dynamic spatial pruning (DSP) strategy to prune the redundant spatial representation of 3D scene in a cascade manner according to the distribution of objects. Then we design DSP module following this strategy and construct DSPDet3D with this efficient module. On ScanNet and TO-SCENE dataset, our method achieves leading performance on small object detection. Moreover, DSPDet3D trained with only ScanNet rooms can generalize well to scenes in larger scale. It takes less than 2s to directly process a whole building consisting of more than $4500\mathrm{k}$ points while detecting out almost all objects, ranging from cups to beds, on a single RTX 3090 GPU. Code. + +Keywords: 3D small object detection $\cdot$ Spatial pruning $\cdot$ Efficient inference + +# 1 Introduction + +3D object detection is a fundamental scene understanding problem, which aims to detect 3D bounding boxes and semantic labels from a point cloud of 3D scene. With the recent advances of deep learning techniques on point cloud understanding [7, 13, 34, 35], 3D detection methods have shown remarkable progress [39, 40, 46, 56]. However, with 3D object detection being widely adopted in fields like robotics [30, 57] and autonomous driving [2] which require highly precise and fine-grained perception, small object detection becomes one of the most important yet unsolved problems. In autonomous driving + +![](images/750fbe39739c992072150d0c57b35ff3953b106544110e86c67298469a085483.jpg) +Fig.1: Trained with only rooms from ScanNet, our DSPDet3D generalizes well to process a whole house with dozens of rooms. It takes less than 2s to generate fine-grained detection results with a RTX 3090 single GPU. + +scenarios [12], we observe a significant performance gap between cars and pedestrians. In indoor scenes [4,9] where the size variance is much larger (e.g. a bed is 1000x larger than a cup), detecting small objects is more difficult. We focus on indoor 3D object detection task where scenes are crowded with objects of multiple categories and sizes. + +For indoor 3D object detection, although great improvement has been achieved in both speed and accuracy on previous benchmarks [1,9,43], they are still far from general purpose 3D object detection due to the limited range of object size they can handle. For instance, these methods focus on furniture-level objects such as bed and table, while smaller ones like laptop, keyboard and bottle are ignored. With the arrival of 3D small object benchmarks [37, 50, 51] which contain objects with wider size variance (e.g. from tabletop object like cup to large furniture like bed), it is shown that previous 3D detectors get very low accuracy on small objects and some even fail to detect any small objects. This is because extracting fine-grained representation for a large scene is too computationally expensive, so current methods aggressively downsamples the 3D features, which harms the representation of small objects. + +In this paper, we propose a dynamic spatial pruning approach for 3D small object detection. 
Although increasing the spatial resolution of the feature representations is a simple and effective way to boost the performance of 3D small object detection, the large computational overhead makes this plan infeasible for real application. With in-depth study, we observe the memory footprint mainly comes from the huge number of features generated by the upsampling operation in the decoder of 3D detector. Inspired by the fact that small objects only occupy a small proportion of space, we adopt a multi-level detection framework to detect different sizes of objects in different levels. As the multi-level detector has already detected out larger objects in lower resolution, there are many redundant features in the scene representations of higher resolution. To this end, we propose to dynamically prune the features after detecting out objects in each level, which skips the upsampling operation at regions where there is no smaller object. Specifically, we first theoretically derive a pruning mask generation strategy to + +![](images/5ba71b0b125f9fbe0d2601e0cf6b5bd34589191d8f4bee751bfbfbb72fbfda88.jpg) +Fig. 2: Detection accuracy (mAP@0.25 of all categories) and speed (FPS) of mainstream 3D object detection methods on TO-SCENE dataset. Our DSPDet3D shows absolute advantage on 3D small object detection and provides flexible accuracy-speed tradeoff by simply adjusting the pruning threshold without retraining. + +supervise the pruning module, which prunes as much features as possible while not affecting the features of object proposals. Then we design a dynamic spatial pruning (DSP) module according to the theoretical analysis and use it to construct a 3D object detector named DSPDet3D. On the popular ScanNet [9] dataset, DSPDet3D improves the mAP of all categories by $3\%$ and mAP of small object by $14\%$ compared with current state-of-the-art. On TO-SCENE [50] dataset with more tabletop objects, we improve the mAP of all categories by $8\%$ while achieving leading inference speed among all mainstream indoor 3D object detection methods. + +# 2 Related Work + +Indoor 3D object detection: Since PointNet and PointNet++ [34, 35], deep learning-based 3D detection methods for point clouds begin to emerge in recent years, which can be mainly divided into three categories: voting-based [6, 33, 47, 49, 54], transformer-based [26, 28] and voxel-based [14, 38, 39, 46] methods. Inspired by 2D hough voting, VoteNet [33] proposes the first voting-based 3D detector, which aggregates the point features on surfaces into object center by 3D voting and predicts bounding boxes from the voted centers. Drawing on the success of transformer-based detector [3] in 2D domain, GroupFree3D [26] and 3DETR [28] adopts transformer architecture to decode the object proposals into 3D boxes. As extracting point features require time-consuming sampling and aggregation operation, GSDN [14] proposes a fully convolutional detection network based on sparse convolution [7, 13, 19, 52], which achieves much faster speed. FCAF3D [38] and TR3D [39] further improves the performance of GSDN with a simple anchor-free architecture. Our method also adopts voxel-based architecture considering its efficiency and scalability. + +Small object detection: Small object detection [45] is a challenging problem in 2D vision due to the low-resolution features. 
To tackle this, a series of methods have been proposed, which can be categorized into three types: (1) small object augmentation and oversampling methods [17, 25, 58]; (2) scale-aware training and inference + +strategy [11, 31, 41, 42]; (3) increasing the resolution of features or generating high-resolution features [5, 10, 21, 22, 48, 53]. However, there are far less works about 3D small object detection due to the limit of data and network capability. BackToReality [51] proposes ScanNet-md40 benchmark which contains small objects and finds many current methods suffer a lot in small object detection. TO-SCENE [50] proposes a new dataset and learning strategy for understanding 3D tabletop scenes. However, it relies on densely sampled points from CAD models, which is infeasible in practical scenarios where the points from small objects are very sparse. In contrast, we aim to directly detect small objects from naturally sampled point clouds. + +Network pruning: Network pruning can be divided into two categories: architecture pruning [15, 16, 18, 20, 27, 29] and spatial pruning [24, 36, 55]. Architecture pruning aims to remove a portion of weights from a neural network to shrink the size of a network, which includes unstructured pruning [15, 18, 29] and structured pruning [16, 20, 27]. The former removes network weights without a predefined structure, while the latter removes whole channels or network layers. On the contrary, spatial pruning does not prune the parameters of a network, but spatially removing redundant computation on the feature maps. DynamicViT [36] prunes the tokens in vision transformer with an attention masking strategy. SPS-Conv [24] dynamically prunes the convolutional kernel to suppress the activation on background voxels in sparse convolution layer. Ada3D [55] proposes a pruning framework for 3D and BEV features. Our dynamic spatial pruning method also belongs to spatial pruning, which directly removes redundant voxel features level by level according to the distribution of objects. + +# 3 Approach + +In this section, we describe our DSPDet3D for efficient 3D small object detection. We first revisit the multi-level 3D detector and analyze the computational cost distribution. Then we propose dynamic spatial pruning with theoretical analysis on how to prune features without affecting detection performance. Finally we design DSP module according to the theoretical analysis and use it to construct DSPDet3D. + +# 3.1 Analysis on Multi-level 3D Detector + +Preliminaries: We choose multi-level FCOS-like [44] 3D detector [38, 39] with sparse convolution [7, 13] for small object detection due to its high performance on both accuracy and speed (more detail can be found in Table 1 and 2). + +As shown in Figure 3 (middle), after extracting backbone features, multi-level detector iteratively upsamples the voxel feature representations to different levels. In each level, all voxels are regarded as object proposals to predict bounding boxes and category scores. Generative upsampling is widely adopted in this kind of architectures [14,38,39] to expand the voxels from object surfaces to the whole 3D space, where object proposals located at object centers can produce accurate predictions. During training, ground-truth bounding boxes are assigned to different levels and each box assigns several nearby voxels as positive object proposals. Only box predictions from positive object proposals will be supervised. 
While at inference time all voxel features from the decoder are used to predict bounding boxes, which are then filtered by 3D NMS. + +![](images/b45a131204e886a8f409076fc7c514b531393d4deedaba2bde0715dcb52c37c8.jpg) +Fig.3: Comparison of the decoder in typical multi-level 3D object detector [39] and our DSPDet3D. Note that the sparsity of voxels in decoder is changed due to the generative upsampling operation. After detecting out objects in a level, DSPDet3D prunes redundant voxel features according to the distribution of objects before each upsampling operation. Red boxes indicate all pruned voxels and 'scissor' boxes indicate voxels pruned in the previous layer. $\{O\}$ is the set of all objects and $\{O_i\}$ is the set of objects assigned to level $i$ . + +![](images/ef1b0796b11fb2f8e2b8b609482f7e84b450d9dd07fe044e0e60bb7b000db33d.jpg) + +![](images/1ebd3517ad5e81e0ae90038b2f854a9731ee5e7219e636d0a4e2c2f1ebe0b857.jpg) + +Increasing spatial resolution: Based on multi-level architecture, a simple way to boost the performance of small object detection is to increase the spatial resolution of feature maps, i.e., voxelizing the point clouds into smaller voxels to better preserve geometric information. Taking TR3D [39] for example, we double its spatial resolution and show the results in Figure 4. It can be seen that the performance on small object really benefits from larger resolution, but the computational overhead grows dramatically at the same time. As 3D object detection is usually adopted in tasks which requires real-time inference under limited resources, such as AR/VR and robotic navigation, directly increasing spatial resolution is infeasible. Notably, we find the computation growth is imbalanced: the decoder layers (including detection heads) account for the most memory footprint and have larger memory growth ratio than the backbone. This indicates the generative upsampling operation will significantly increase the number of voxels when the spatial resolution is high, which is the main challenge for scaling up the spatial resolution of multi-level detectors. + +# 3.2 Dynamic Spatial Pruning + +Since small objects only occupy a small proportion of space, we assume there is a large amount of redundant computation in decoder layers, especially when the resolution is high. For instance, if a bed is detected in Layer 4, the region near this bed may be less + +![](images/9de2f2472f930b1efb99084b137643f47518bbcd7d1f0bd7af6b0e3642260016.jpg) +Fig. 4: The memory footprint distribution of different multi-level detectors. Layer 4 to Layer 1 refer to decoder layers (including detection heads) from coarse to fine. If doubling the spatial resolution of TR3D, the performance on 3D small object detection improves from $52.7\%$ to $62.8\%$ while memory footprint increases dramatically. We find decoder layers accounts for most of the costs. DSPDet3D efficiently reduces redundant computation on these layers, achieving both fast speed and high accuracy. + +informative for detecting other objects in the follow decoder layers. If we can skip the upsampling operation at these regions, the voxels will be sparsified level by level, as shown in Figure 3 (right). In this way, small objects can be detected in Layer 1 from only a small number of voxels. Inspired by this, we propose to dynamically prune the voxel features according to the distribution of objects. 
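To make the level-by-level prune-then-upsample idea concrete, the following is a minimal PyTorch-style sketch of a coarse-to-fine decoder with dynamic pruning. It is not the authors' implementation: sparse voxel coordinates, the partial addition with backbone features, and the training-time weak pruning are all simplified away, and every name here (`dsp_decoder_sketch`, `prune_mlp`, `tau`, the toy stand-ins) is illustrative only.

```python
import torch

def dsp_decoder_sketch(levels, detect_head, prune_mlp, upsample, tau=0.3):
    """Coarse-to-fine decoding with dynamic spatial pruning (schematic only).

    levels:      per-level voxel features ordered coarse -> fine, each (N, C);
                 voxel coordinates are omitted for brevity.
    detect_head: callable(feats) -> per-voxel box/score predictions.
    prune_mlp:   callable(feats) -> per-voxel keep probability in [0, 1].
    upsample:    callable(feats) -> features of child voxels at the next finer level.
    tau:         pruning threshold; voxels scoring below it are dropped *before*
                 upsampling, so their children are never computed.
    """
    preds, upsampled = [], None
    for feats in levels:
        if upsampled is not None:
            # The real model fuses the surviving upsampled voxels with backbone
            # features ("partial addition"); here we simply continue from them.
            feats = upsampled
        preds.append(detect_head(feats))           # detect objects assigned to this level
        keep = prune_mlp(feats).squeeze(-1) > tau  # keep only regions that may hold smaller objects
        upsampled = upsample(feats[keep])          # generative upsampling restricted to kept voxels
    return preds

# Toy usage with random stand-ins (C = 32 feature channels), purely illustrative.
C = 32
detect_head = torch.nn.Linear(C, 7)                                  # e.g. box parameters + objectness
prune_mlp = torch.nn.Sequential(torch.nn.Linear(C, 1), torch.nn.Sigmoid())
upsample = lambda f: f.repeat_interleave(8, dim=0)                   # each kept voxel spawns 8 children
levels = [torch.randn(64, C), torch.randn(512, C), torch.randn(4096, C)]
box_preds_per_level = dsp_decoder_sketch(levels, detect_head, prune_mlp, upsample)
```

The only point of the sketch is the control flow: detection runs at every level, but generative upsampling is applied solely to voxels whose predicted keep probability exceeds the threshold, so regions that contained only already-detected large objects never reach the fine levels.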
+ +However, pruning a voxel will not only reduce the number of object proposals in the following levels, but also change the following voxel features computed based on the pruned voxel. Therefore, in order to reduce the redundant computation of multi-level detector without degrading the detection performance, a carefully designed pruning strategy is required. We give theoretical derivation as below. + +Problem formulation: For each scene, we denote $\{O\}$ as the set of all objects, $\{O_i\}$ as the set of objects assigned to level $i^1$ during training, $f_{i} \in \mathbb{R}^{N \times (3 + C)}$ as the voxel features of level $i$ . We aim to prune $f_{i}$ after detecting out $\{O_i\}$ , where the objective is to remove as many voxels as possible while keeping the predictions of $\{O\} \backslash \{O_i\}$ unaffected after the pruning. For each object $o_j$ in level $j$ ( $j < i$ ), we assume the prediction of it is unaffected if the voxel features at level $j$ near its center $c_j$ are unaffected. We make this assumption because most true positive predictions are from object proposals located at the center of bounding boxes [14, 44]. We denote the expected unaffected neighborhood as $\mathcal{C}_j(c_j, P)$ , which means a cube centered at $c_j$ with $P \times P \times P$ voxels at level $j$ . Given the symmetry, $P$ should be odd. Then we formulate the objective of our pruning strategy at level $i$ as: + +$$ +\underset {\mathcal {K} _ {i}} {\text {m i n i m i z e}} \sum_ {x, y, z} M _ {i} [ x ] [ y ] [ z ], M _ {i} = \bigwedge_ {j = 1} ^ {i - 1} \mathcal {K} _ {i} (\boldsymbol {c _ {j}}), +$$ + +$$ +s. t. \forall j < i, \mathcal {C} _ {j} (\boldsymbol {c} _ {j}, P) \cap \mathcal {A} _ {i, j} (\neg \mathcal {K} _ {i} (\boldsymbol {c} _ {j}) \star f _ {i}) = \varnothing \tag {1} +$$ + +where $M_{i} \in \mathbb{R}^{N}$ is a binary pruning mask sharing the same length with $f_{i}$ , where 0 indicates removing and 1 indicates keeping during the pruning operation $\star$ . $\mathcal{K}_i(\cdot)$ is the generation strategy of pruning mask for each object, which generates a binary pruning mask conditioned on the object center. $\mathcal{A}_{i,j}(f)$ is defined as the affecting field of $f$ , which represents the voxels at level $j$ that will be affected by pruning $f$ at level $i$ . Without loss of generality, here we choose only one object at each level for simplicity of presentation. + +Overview of problem solving: We solve (1) by mathematical induction. Specifically, for pruning strategy $M_{i}$ at level $i$ , we first consider how to generate pruning mask $\mathcal{K}_i(c_{i-1})$ to ensure the predictions of $\{O_{i-1}\}$ are unaffected. Then we show that by following our pruning strategy $\mathcal{K}_i$ , 'the predictions of $\{O_j\}$ are unaffected' can be derived by 'the predictions of $\{O_{j+1}\}$ are unaffected'.2 + +Solving $\mathcal{K}_i(\pmb{c}_{i-1})$ : To make sure $\mathcal{C}_{i-1}(\pmb{c}_{i-1}, P) \cap \mathcal{A}_{i,i-1}(\cdot) = \emptyset$ , we need to compute the affecting field of each voxel $v_i$ in level $i$ . Obviously, the upper bound of affecting field of $v_i$ expands in shape of cube with sparse convolution. Assume there are $m$ sparse convolutions with stride 1 and kernel $x_k$ ( $1 \leq k \leq m$ ) between pruning and generative upsampling in level $i$ , one generative transposed convolution with stride 2 and kernel $y$ , and $n$ sparse convolutions with stride 1 and kernel $z_k$ ( $1 \leq k \leq n$ ) until detecting out objects in level $i-1$ . 
Then the affecting field from pruning (level $i$ ) to detecting (level $i-1$ ) can be written as: + +$$ +\mathcal {A} _ {i, i - 1} \left(v _ {i}\right) = \mathcal {C} _ {i - 1} \left(v _ {i}, a f f \left(\left\{x _ {k} \right\}, y, \left\{z _ {k} \right\}\right)\right) \tag {2} +$$ + +where $aff(\{x_k\}, y, \{z_k\})$ is the range of affecting field represented by the kernel sizes, which we will detail in supplementary material. Since the shape of the expected unaffected voxel features is a $P \times P \times P$ cube, $\mathcal{K}_i(\pmb{c}_{i-1})$ can be formulated as: + +$$ +\begin{array}{l} \mathcal {K} _ {i} \left(\boldsymbol {c} _ {\boldsymbol {i} - \boldsymbol {1}}\right) [ x ] [ y ] [ z ] = \mathbb {I} \left(2 \cdot | \boldsymbol {x} - \boldsymbol {c} _ {\boldsymbol {i} - \boldsymbol {1}} | _ {\infty} \leq r S _ {i}\right) \\ r = \lceil \frac {P + a f f (\{x _ {k} \} , y , \{z _ {k} \}) - 2}{2} \rceil \tag {3} \\ \end{array} +$$ + +where $S_{i}$ is the size of voxel in level $i$ . $\mathbb{I}(\cdot)$ is the indicative function. $\boldsymbol{x} = (x,y,z)$ is the voxel coordinates of $f_{i}$ . + +Recursion of $\kappa_{i}$ : We now derive when the pruning strategy $\kappa_{i}$ in (3) also works for $c_{j}$ ( $j < i - 1$ ). We can regrad $c_{j}$ as the center of object in level $i - 1$ and use (3) to generate the pruning mask. In this way, $\mathcal{C}_{i - 1}(c_j,P)$ are unaffected. As $\mathcal{C}_j(c_j,P)$ is covered by $\mathcal{C}_{i - 1}(c_j,P)$ , so $\mathcal{C}_j(c_j,P)$ is unaffected as well. We should also ensure pruning in level $i$ has no cumulative impact on pruning in level $i - 1$ : + +$$ +\left(\mathcal {K} _ {i - 1} \left(\boldsymbol {c} _ {\boldsymbol {j}}\right) \star f _ {i - 1}\right) \subseteq \mathcal {C} _ {i - 1} \left(\boldsymbol {c} _ {\boldsymbol {j}}, P\right) \tag {4} +$$ + +this means when generating pruning mask of $c_{j}$ in level $i - 1$ using $\mathcal{K}_{i - 1}$ , the kept voxels should be covered by the unaffected voxels after pruning in level $i$ . So we have: + +$$ +r \cdot S _ {i - 1} \leq P \cdot S _ {i - 1} \tag {5} +$$ + +The minimum $P$ can be acquired by solving (5). In this case, strategy $\mathcal{K}_i$ in (3) works for all $c_j$ ( $j < i$ ). + +![](images/6a86af61c73089b12e032991a6e4a5f28a947418d85bc61a7a55621f1a3a99ce.jpg) +Fig. 5: Illustration of DSPDet3D. The voxelized point clouds are fed into a high-resolution sparse convolutional backbone, which output four levels of scene representations. Four dynamic spatial pruning (DSP) modules are stacked to construct a multi-level decoder and detect objects from coarse to fine. DSP module utilizes a light-weight learnable module to predict the pruning mask. During inference, we discretize the pruning mask and use it to guide pruning before generative upsampling. While during training we interpolate the pruning mask to next level and prune the voxel features after generative upsampling. + +# 3.3 DSPDet3D + +Based on the theoretical analysis, we devise a dynamic spatial pruning (DSP) module to approximate the ideal pruning strategy. We further construct a 3D small object detector named DSPDet3D with the proposed DSP module. + +DSP module: As shown in Figure 3, we modify the layers of a typical multi-level decoder to DSP modules, which prunes redundant voxel features after detecting out objects at each level for efficient feature upsampling. Formally, given the upsampled voxel feature $f_{i}^{U}$ and the backbone feature $f_{i}^{B}$ at level $i$ , DSP module first add them for detection. 
However, $f_{i}^{U}$ may be much sparser than $f_{i}^{B}$ due to pruning, directly adding by taking union of them is inefficient. Therefore, we propose a new operator called partial addition to fit our pruning strategy: + +$$ +f _ {i} = f _ {i} ^ {B \overrightarrow {\quad +}} f _ {i} ^ {U} \tag {6} +$$ + +where addition is constrained to be operated only on the voxels of $f_{i}^{U}$ . Then objects are detected using a shared detection head across all levels: $\{O_i\} = \text{Detect}(f_i)$ . Once objects at level $i$ are detected out, we prune the voxel features according to the derived strategy described in Section 3.2. Here we devise a light-weight MLP-based learnable pruning module to decide where smaller objects (i.e. objects in level $j$ ( $j < i$ ) ) may appear, and then prune other locations: + +$$ +\bar {f} _ {i} = t \left(\hat {M} _ {i}\right) \star f _ {i}, \hat {M} _ {i} = \mathrm {M L P} _ {i} \left(f _ {i}\right) \tag {7} +$$ + +where $\hat{M}_i$ is the pruning mask predicted from $f_i$ , which represents the probability of retention for each voxel. We utilize FocalLoss [23] to supervise $\hat{M}_i$ with the generated + +$M_{i}$ in (1). During inference, a threshold function $t(\cdot)$ sets probability lower than $\tau$ to be 0, others be 1 to guide pruning. After pruning, the generative upsampling is applied to acquire features for the next level: $f_{i - 1}^{U} = \text{GeV}(\bar{f}_{i})$ . + +During training, as $\hat{M}_i$ may not be so accurate (especially at beginning), we find applying the above learnable pruning module makes training difficult to converge. Instead, we switch the pruning to weak mode for context preservation. As shown in Figure 5, the weak pruning is applied after generative upsampling. For level $i$ , we upsample the pruning mask $\hat{M}_{i+1}$ to level $i$ with nearest neighbor interpolation. Then we sort the interpolated scores and keep only $N_{max}$ voxels with the highest scores. This weak pruning mechanism aims to stabilize training, which only works when the amount of voxels is too large to conduct following operations. + +Since our theoretical analysis sets the expected unaffected neighborhood to be a $P \times P \times P$ cube, we also modify the assigning strategy of positive object proposals accordingly for robust training. Specifically, for a ground-truth bounding box of $o_i$ assigned to level $i$ , we sample the nearest $N_{pos}$ voxels to $c_i$ inside the cube centered at $c_i$ with length $P \cdot S_i$ . If there are less than $N_{pos}$ voxels in the cube, we simply sample all voxels inside it. Our assigning method is independent of the size of bounding box, which ensures there are enough positive proposals even for small objects. + +DSPDet3D: Based upon the top-performance multi-level detector TR3D [39], we remove the max pooling layer to increase the spatial resolution of backbone features. Then we replace the decoder in TR3D with four stacked DSP modules to remove redundant voxel features level by level, which achieves efficient upsampling without affecting the detection performance. To train DSPDet3D, we keep the same loss for classification and box regression as in TR3D and add additional FocalLoss to supervise $\hat{M}_i$ with $M_i$ . + +Compare with FCAF3D: Similar to our training-time weak pruning, FCAF3D [38] also adopts a pruning strategy in the decoder to prevent the number of voxels from getting too large, which is unable to remove redundant features in early decoder layers during inference. 
Moreover, it directly utilizes the classification scores for bounding boxes to sort and prune the voxel features, which cannot accurately preserve geometric information for small objects. + +# 4 Experiment + +In this section, we conduct experiments to investigate the performance of our approach on 3D small object detection. We first describe the datasets and experimental settings. Then we compare DSPDet3D with the state-of-the-art 3D object detection methods. We also design ablation experiments to study the effectiveness of the proposed methods. Finally we transfer DSPDet3D to extremely large scenes to show its efficiency and generalization ability. + +# 4.1 Experimental Settings + +Datasets and metrics: We conduct experiments on two indoor datasets including ScanNet [9] and TO-SCENE [50]. ScanNet is a richly annotated dataset of indoor scenes + +Table 1: 3D objects detection results and computational costs of different methods on ScanNet.md40. DSPDet3D with the best pruning threshold is highlighted in gray. We set best scores in bold, runner-ups underlined. + +
| Method | Decoder | mAP@0.25 | mAP@0.5 | $\mathrm{mAP}_S$@0.25 | $\mathrm{mAP}_S$@0.5 | Speed (FPS) | Memory (MB) |
|---|---|---|---|---|---|---|---|
| VoteNet | Voting | 51.02 | 33.69 | 0.30 | 0 | 13.4 | 1150 |
| VoteNet$_S$ | Voting | 48.62 | 31.55 | 1.04 | 0 | 8.5 | 1500 |
| H3DNet | Hybrid | 53.51 | 39.23 | 3.08 | 0.90 | 7.2 | 1550 |
| GroupFree3D | Transformer | 56.77 | 41.39 | 11.7 | 0.81 | 7.8 | 1450 |
| GroupFree3D$_S$ | Transformer | 29.44 | 11.94 | 0.20 | 0 | 3.2 | 2000 |
| RBGNet | Voting | 55.23 | 32.64 | 5.81 | 0 | 6.6 | 1700 |
| FCAF3D | Multi-level | 59.49 | 48.75 | 18.38 | 8.21 | 12.3 | 850 |
| CAGroup3D | Voting | 60.29 | 49.90 | 16.62 | 8.63 | 3.1 | 3250 |
| TR3D | Multi-level | 61.59 | 49.98 | 27.53 | 12.91 | 10.8 | 1250 |
| FCAF3D-higher | Multi-level | 62.65 | 51.01 | 27.68 | 16.23 | 7.1 | 4000 |
| TR3D-higher | Multi-level | 65.18 | 54.03 | 41.70 | 29.56 | 5.2 | 4450 |
| Ours (τ=0) | Multi-level | 65.39 | 54.59 | 44.79 | 31.55 | 4.4 | 4200 |
| Ours (τ=0.3) | Multi-level | 65.04 | 54.35 | 43.77 | 30.38 | 12.5 | 700 |
+ +with 1201 training scenes and 312 validation scenes. Each object in the scenes are annotated with texts and then mapped to category IDs. We follow the ScanNet-md40 benchmark proposed by [51], which contains objects in 22 categories with large size variance. TO-SCENE is a mixed reality dataset which provides three variants called TO_Vanilla, TO_Crowd and TO_ScanNet with different numbers of tabletop objects and scene scales. We choose the room-scale TO_ScanNet benchmark, which contains 3600 training scenes and 800 validation scenes with 70 categories. However, TO_ScanNet adopts non-uniform sampling to acquire about 2000 points per tabletop object, which is infeasible in practical settings. To this end, we downsample the small objects and control the density of them to be similar with other objects and backgrounds. We name this modified version as TO-SCENE-down benchmark. We take the point clouds without color as inputs for all methods. More details about ScanNet-md40 and TO-SCENE-down benchmarks can be found in supplementary material. + +We report the mean average precision (mAP) with threshold 0.25 and 0.5. To measure the performance on different categories, we use two kinds of metrics: mAP and $\mathrm{mAP}_S$ , which refer to the mean AP of all objects and of small objects respectively. Here we define categories of small object as ones with average volume smaller than $0.05m^3$ for both benchmarks. + +Implementation details: We implement our approach with PyTorch [32], MinkowskiEngine [7] and MMDetection3D [8]. We follow the same training strategy / hyperparameters as TR3D [39] for fair comparison. Training converges within 4 hours on a 4 GPU machine. The stride of the sparse convolution in the preencoder of DSPDet3D is set to 2, thus the voxel size of $f_1^B$ is 4cm and $S_i$ equals to $2^i \cdot 2cm$ . We set $N_{pos} = 6$ and $N_{max} = 100000$ during training. The weight of the FocalLoss between $M_i$ and $\hat{M}_i$ is + +Table 2: 3D objects detection results and computational costs of different methods on TO-SCENE-down benchmark. DSPDet3D with the best pruning threshold is highlighted in gray. We set best scores in bold, runner-ups underlined. + +
| Method | Decoder | mAP@0.25 | mAP@0.5 | $\mathrm{mAP}_S$@0.25 | $\mathrm{mAP}_S$@0.5 | Speed (FPS) | Memory (MB) |
|---|---|---|---|---|---|---|---|
| VoteNet | Voting | 26.72 | 14.01 | 14.51 | 4.78 | 12.8 | 1300 |
| VoteNet$_S$ | Voting | 31.87 | 14.89 | 21.75 | 7.40 | 7.6 | 1650 |
| H3DNet | Hybrid | 27.69 | 17.38 | 14.83 | 7.39 | 5.1 | 1650 |
| GroupFree3D | Transformer | 32.41 | 20.43 | 20.17 | 10.13 | 7.7 | 1700 |
| GroupFree3D$_S$ | Transformer | 40.14 | 23.55 | 33.33 | 16.15 | 2.4 | 2200 |
| RBGNet | Voting | 40.42 | 30.27 | 29.69 | 21.61 | 5.0 | 1850 |
| FCAF3D | Multi-level | 45.13 | 37.21 | 37.18 | 31.65 | 11.9 | 1000 |
| CAGroup3D | Voting | 54.28 | 47.58 | 48.49 | 43.85 | 2.2 | 3500 |
| TR3D | Multi-level | 55.58 | 45.95 | 52.72 | 44.01 | 9.9 | 1400 |
| FCAF3D-higher | Multi-level | 57.23 | 50.39 | 53.07 | 48.76 | 6.3 | 4250 |
| TR3D-higher | Multi-level | 63.96 | 56.06 | 62.84 | 57.14 | 4.1 | 4600 |
| Ours (τ=0) | Multi-level | 66.81 | 59.41 | 66.53 | 61.57 | 4.1 | 5300 |
| Ours (τ=0.5) | Multi-level | 66.12 | 58.55 | 65.82 | 60.73 | 13.9 | 800 |
+ +0.01. In terms of block structure, we have $\{x_k\} = \emptyset$ , $y = 3$ and $\{z_k\} = \{3,3\}$ . So we set $r = 7$ and $P = 7$ according to (3). + +# 4.2 Comparison with State-of-the-art + +We compare our method with popular and state-of-the-art 3D object detection methods, including VoteNet [33], H3DNet [54], GroupFree3D [26], RBGNet [47], CA-Group3D [46], FCAF3D [38] and TR3D [39]. We also follow [50] to reduce the radius of ball query in the PointNet++ backbone for VoteNet and GroupFree3D. The modified models is distinguished by subscript $S$ . Note that the original TR3D only uses two detection heads at level 2/3 and removes the last generative upsampling. However, detecting small objects heavily relies on high-resolution feature map, so we add the upsampling back. This will make it slightly slower but much more accurate on the 3D small object detection benchmarks. + +For all methods, we use their official code and the same training strategy / hyperparameters to train them on ScanNet-md40 and TO-SCENE-down. + +Table 1 and 2 shows the experimental results on ScanNet-md40 and TO-SCENEDown respectively. Consistent with the observation of [51], we find point-based (VoteNet, H3DNet, RBGNet) and transformer-based (GroupFree3D) methods almost fail to detect small objects on ScanNet-md40. This is because the PointNet++ backbone used by these methods adopts set abstraction (SA) operation to aggressively downsample the point clouds and extract scene representation. Since the number of small objects in ScanNet is limited, furthest point sampling has a low probability to sample points on small objects, which leads to inaccurate representation of small objects. For methods (CAGroup3D, FCAF3D, TR3D) with sparse convolutional backbone, they achieve + +![](images/1e8d60e37ce8795b764b8e60202220319de5e7c964721a51aaf285c87cc6854d.jpg) +Fig. 6: Visualization of pruning process on ScanNet. We show the kept voxels in each level under different thresholds. The memory footprint of each level is also listed at bottom. + +relatively much higher $\mathrm{mAP}_S$ due to sparse convolution [7, 13] can extract fine-grained scene representation with high efficiency. However, two-stage method like CAGroup3D is both slow and memory-consuming. Multi-level methods like FCAF3D and TR3D are efficient and get good performance on small object detection due to the FPN-like architecture, but they are still limited by resolution. On the contrary, our DSPDet3D with a proper threshold takes advantage of the high-resolution scene representation to achieve much higher performance. Furthermore, DSPDet3D is the most memory-efficient model among all mainstream methods. + +# 4.3 Ablation Study + +We conduct ablation studies on ScanNet-md40 to study the effects of hyperparameters and different design choices. + +Pruning process: We visualize the pruning process under different thresholds in Figure 6, where the voxels in each level after pruning are shown. We also list the memory footprint of each level. It can be seen that our method significantly reduces the memory footprint by pruning most of the uninformative voxels. Our pruning module only keeps regions where there are smaller objects than current level. + +Hyperparameters: We study two hyperparameters: $r$ and $N_{pos}$ , which is highly relevant to 3D small object detection. Note that $r = \left\lceil \frac{P + 9 - 2}{2} \right\rceil$ , thus $r$ and $P$ should be + +Table 3: Ablation studies on several design choices. 
We control the speed of each method to 10 FPS and report the accuracy in $\mathrm{mAP}@\mathbf{0.25}$ and $\mathrm{mAP}_S@\mathbf{0.25}$ . + +
| Method | mAP@0.25 | $\mathrm{mAP}_S$@0.25 |
|---|---|---|
| Remove partial addition | 55.3 | 35.5 |
| Addition by taking union | 57.9 | 36.4 |
| Addition by interpolation | 62.1 | 40.9 |
| Spherical keeping mask | 63.0 | 41.1 |
| Remove training-time pruning | - | - |
| Positive proposal inside bounding box | 62.4 | 40.7 |
| The full design of DSP module | 65.1 | 44.1 |
+ +![](images/08b388177be4ca13c44331bf65631fb18c481b2e2fa88cf89f6f82706787ce2e.jpg) +Fig. 7: Ablation studies on the value of $r$ and $N_{pos}$ . For each value we report performance under different pruning threshold $\tau$ . + +![](images/44173595e24b288de5d95e4fd0d03cff3cb92786fdc641582234e8efec7b27a6.jpg) + +changed simultaneously. As shown in Figure 7 (left), setting $r = 7$ achieves the best performance. If $r$ is smaller than 7 then $r > P$ , which conflicts with Equation (5) and the features will be affected by pruning. While a larger $r$ will make the pruning less aggressive, resulting in a large number of redundant voxel features. Figure 7 (right) shows that the number of positive object proposals should be set properly, which is important to balance the ratio between positive and negative samples during classification. + +Design choices: We also study the design choices of DSPDet3D in Table 3. Observing the second, third and fourth rows, we conclude that the partial addition is important for efficient feature fusion. Although taking union can preserve more information, this operation will reduce the sparsity of voxels and thus make our pruning less efficient. The fifth row shows that generate the keeping mask according to the shape of affecting field is better than using a spherical shape. According to the sixth row, removing training-time pruning will significantly increase the memory footprint during training, which makes the network unable to train. The seventh row validates the effectiveness of our assigning method for positive object proposals. + +# 4.4 Transferring to Larger Scenes + +We further validate the efficiency and generalization ability of different 3D detectors by transferring them to scenes of much larger scale. We first train 3D detectors on rooms from ScanNet training set in a category-agnostic manner, which is done by regarding + +![](images/8efed2cab5f01737a94450892b72b65dd106d2033bac84a775217e808d97b746.jpg) +Fig. 8: Visualization of the transferring results of different 3D object detectors. The 3D detector is trained on rooms from ScanNet and directly adopted to process a whole building-level 3D scene from Matterport3D. + +every labeled object as the same category. Then we directly adopt them to process the building-level scenes in Matterport3D [4]. We find previous methods almost all fail to process the extremely large scenes due to unaffordable memory footprint, so we only compare DSPDet3D with FCAF3D as shown in 8. It is shown that FCAF3D cannot detect out any small object and even struggles on relatively large objects like chairs when the scene is too large. On the contrary, DSPDet3D is able to accurately detect small objects like cups and thin pictures. + +# 5 Conclusion + +In this paper, we have presented an efficient feature pruning strategy for 3D small object detection. Inspired by the fact that small objects only occupy a small proportion of space, we adopt a multi-level detection framework to detect different sizes of objects in different levels. Then we present a dynamic spatial pruning strategy to prune the voxel features after detecting out objects in each level. Specifically, we first design the dynamic spatial pruning strategy by theoretical analysis on how to prune voxels without affecting the features of object proposals. Then we propose dynamic spatial pruning (DSP) module according to the strategy and use it to construct DSPDet3D. 
Extensive experiments on ScanNet and TO-SCENE datasets show that our DSPDet3D achieves leading detection accuracy and speed. We also conduct transferring experiment on Matterport3D to show DSPDet3D also generalizes well to extremely large scenes. + +# Acknowledgements + +This work was supported in part by the National Natural Science Foundation of China under Grant 62125603, Grant 62321005, and Grant 62336004. + +# References + +1. Armeni, I., Sener, O., Zamir, A.R., Jiang, H., Brilakis, I., Fischer, M., Savarese, S.: 3d semantic parsing of large-scale indoor spaces. In: ICCV. pp. 1534-1543 (2016) 2 +2. Bansal, M., Krizhevsky, A., Ogale, A.: Chauffeurnet: Learning to drive by imitating the best and synthesizing the worst. arXiv preprint arXiv:1812.03079 (2018) 1 +3. Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: ECCV. pp. 213-229. Springer (2020) 3 +4. Chang, A., Dai, A., Funkhouser, T., Halber, M., Niessner, M., Savva, M., Song, S., Zeng, A., Zhang, Y.: Matterport3d: Learning from rgb-d data in indoor environments. 3DV (2017) 2, 14 +5. Chen, C., Liu, M.Y., Tuzel, O., Xiao, J.: R-cnn for small object detection. In: ACCV. pp. 214-230. Springer (2017) 4 +6. Cheng, B., Sheng, L., Shi, S., Yang, M., Xu, D.: Back-tracing representative points for voting-based 3d object detection in point clouds. In: CVPR. pp. 8963-8972 (2021) 3 +7. Choy, C., Gwak, J., Savarese, S.: 4d spatio-temporal convnets: Minkowski convolutional neural networks. In: CVPR. pp. 3075-3084 (2019) 1, 3, 4, 10, 12 +8. Contributors, M.: Mmdetection3d: Openmmlab next-generation platform for general 3d object detection (2020) 10 +9. Dai, A., Chang, A.X., Savva, M., Halber, M., Funkhouser, T., Nießner, M.: Scannet: Richly-annotated 3d reconstructions of indoor scenes. In: CVPR. pp. 5828--5839 (2017) 2, 3, 9 +10. Deng, C., Wang, M., Liu, L., Liu, Y., Jiang, Y.: Extended feature pyramid network for small object detection. TMM 24, 1968-1979 (2021) 4 +11. Gao, M., Yu, R., Li, A., Morariu, V.I., Davis, L.S.: Dynamic zoom-in network for fast object detection in large images. In: CVPR. pp. 6926-6935 (2018) 4 +12. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? the kitti vision benchmark suite. In: CVPR. pp. 3354-3361 (2012) 2 +13. Graham, B., Engelcke, M., Van Der Maaten, L.: 3d semantic segmentation with submanifold sparse convolutional networks. In: CVPR. pp. 9224-9232 (2018) 1, 3, 4, 12 +14. Gwak, J., Choy, C., Savarese, S.: Generative sparse detection networks for 3d single-shot object detection. In: ECCV. pp. 297-313. Springer (2020) 3, 4, 6 +15. Han, S., Pool, J., Tran, J., Dally, W.: Learning both weights and connections for efficient neural network. NeurIPS 28 (2015) 4 +16. Huang, Z., Wang, N.: Data-driven sparse structure selection for deep neural networks. In: ECCV. pp. 304-320 (2018) 4 +17. Kisantal, M., Wojna, Z., Murawski, J., Naruniec, J., Cho, K.: Augmentation for small object detection. arXiv preprint arXiv:1902.07296 (2019) 3 +18. LeCun, Y., Denker, J., Solla, S.: Optimal brain damage. NeurIPS 2 (1989) 4 +19. Lee, J., Choy, C., Park, J.: Putting 3d spatially sparse networks on a diet. arXiv preprint arXiv:2112.01316 (2021) 3 +20. Li, H., Kadav, A., Durdanovic, I., Samet, H., Graf, H.P.: Pruning filters for efficient convnets. arXiv preprint arXiv:1608.08710 (2016) 4 +21. Li, J., Liang, X., Wei, Y., Xu, T., Feng, J., Yan, S.: Perceptual generative adversarial networks for small object detection. In: CVPR. pp. 
1222-1230 (2017) 4 +22. Lin, T.Y., Dollár, P., Girshick, R., He, K., Hariharan, B., Belongie, S.: Feature pyramid networks for object detection. In: CVPR. pp. 2117-2125 (2017) 4 +23. Lin, T.Y., Goyal, P., Girshick, R., He, K., Dólár, P.: Focal loss for dense object detection. In: ICCV. pp. 2980-2988 (2017) 8 + +24. Liu, J., Chen, Y., Ye, X., Tian, Z., Tan, X., Qi, X.: Spatial pruned sparse convolution for efficient 3d object detection. In: NeurIPS (2022) 4 +25. Liu, W., Anguelov, D., Erhan, D., Szegedy, C., Reed, S., Fu, C.Y., Berg, A.C.: Ssd: Single shot multibox detector. In: ECCV. pp. 21-37 (2016) 3 +26. Liu, Z., Zhang, Z., Cao, Y., Hu, H., Tong, X.: Group-free 3d object detection via transformers. arXiv preprint arXiv:2104.00678 (2021) 3, 11 +27. Liu, Z., Li, J., Shen, Z., Huang, G., Yan, S., Zhang, C.: Learning efficient convolutional networks through network slimming. In: ICCV. pp. 2736-2744 (2017) 4 +28. Misra, I., Girdhar, R., Joulin, A.: An end-to-end transformer model for 3d object detection. In: ICCV. pp. 2906-2917 (2021) 3 +29. Molchanov, P., Tyree, S., Karras, T., Aila, T., Kautz, J.: Pruning convolutional neural networks for resource efficient inference. arXiv preprint arXiv:1611.06440 (2016) 4 +30. Mousavian, A., Eppner, C., Fox, D.: 6-dof grapnet: Variational grasp generation for object manipulation. In: ICCV. pp. 2901-2910 (2019) 1 +31. Najibi, M., Singh, B., Davis, L.S.: Autofocus: Efficient multi-scale inference. In: ICCV. pp. 9745-9755 (2019) 4 +32. Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., et al.: Pytorch: An imperative style, high-performance deep learning library. NeurIPS 32 (2019) 10 +33. Qi, C.R., Litany, O., He, K., Guibas, L.J.: Deep hough voting for 3d object detection in point clouds. In: ICCV. pp. 9277-9286 (2019) 3, 11 +34. Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: Deep learning on point sets for 3d classification and segmentation. In: CVPR. pp. 652-660 (2017) 1, 3 +35. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In: NeurIPS. pp. 5099-5108 (2017) 1, 3 +36. Rao, Y., Zhao, W., Liu, B., Lu, J., Zhou, J., Hsieh, C.J.: Dynamicvit: Efficient vision transformers with dynamic token sparsification. NeurIPS 34, 13937-13949 (2021) 4 +37. Rozenberszki, D., Litany, O., Dai, A.: Language-grounded indoor 3d semantic segmentation in the wild. In: ECCV. pp. 125-141. Springer (2022) 2 +38. Rukhovich, D., Vorontsova, A., Konushin, A.: Fcaf3d: fully convolutional anchor-free 3d object detection. In: ECCV. pp. 477-493. Springer (2022) 3, 4, 9, 11 +39. Rukhovich, D., Vorontsova, A., Konushin, A.: Tr3d: Towards real-time indoor 3d object detection. arXiv preprint arXiv:2302.02858 (2023) 1, 3, 4, 5, 9, 10, 11 +40. Shi, S., Guo, C., Jiang, L., Wang, Z., Shi, J., Wang, X., Li, H.: Pv-rcnn: Point-voxel feature set abstraction for 3d object detection. In: CVPR. pp. 10529–10538 (2020) 1 +41. Singh, B., Davis, L.S.: An analysis of scale invariance in object detection snip. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3578-3587 (2018) 4 +42. Singh, B., Najibi, M., Davis, L.S.: Sniper: Efficient multi-scale training. NeurIPS 31 (2018) 4 +43. Song, S., Lichtenberg, S.P., Xiao, J.: Sun rgb-d: A rgb-d scene understanding benchmark suite. In: CVPR. pp. 567-576 (2015) 2 +44. Tian, Z., Shen, C., Chen, H., He, T.: Fcos: Fully convolutional one-stage object detection. In: ICCV. pp. 
9627-9636 (2019) 4, 6 +45. Tong, K., Wu, Y., Zhou, F.: Recent advances in small object detection based on deep learning: A review. IVC 97, 103910 (2020) 3 +46. Wang, H., Ding, L., Dong, S., Shi, S., Li, A., Li, J., Li, Z., Wang, L.: Cagroup3d: Class-aware grouping for 3d object detection on point clouds. arXiv preprint arXiv:2210.04264 (2022) 1, 3, 11 +47. Wang, H., Shi, S., Yang, Z., Fang, R., Qian, Q., Li, H., Schiele, B., Wang, L.: Rbgnet: Ray-based grouping for 3d object detection. In: CVPR. pp. 1110-1119 (2022) 3, 11 + +48. Wang, J., Sun, K., Cheng, T., Jiang, B., Deng, C., Zhao, Y., Liu, D., Mu, Y., Tan, M., Wang, X., et al.: Deep high-resolution representation learning for visual recognition. TPAMI 43(10), 3349-3364 (2020) 4 +49. Xie, Q., Lai, Y.K., Wu, J., Wang, Z., Zhang, Y., Xu, K., Wang, J.: Mlcvnet: Multi-level context votenet for 3d object detection. In: CVPR. pp. 10447-10456 (2020) 3 +50. Xu, M., Chen, P., Liu, H., Han, X.: To-scene: A large-scale dataset for understanding 3d tabletop scenes. In: ECCV. pp. 340-356. Springer (2022) 2, 3, 4, 9, 11 +51. Xu, X., Wang, Y., Zheng, Y., Rao, Y., Zhou, J., Lu, J.: Back to reality: Weakly-supervised 3d object detection with shape-guided label enhancement. In: CVPR. pp. 8438-8447 (2022) 2, 4, 10, 11 +52. Xu, X., Wang, Z., Zhou, J., Lu, J.: Binarizing sparse convolutional networks for efficient point cloud analysis. arXiv preprint arXiv:2303.15493 (2023) 3 +53. Yang, C., Huang, Z., Wang, N.: Querydet: Cascaded sparse query for accelerating high-resolution small object detection. In: CVPR. pp. 13668-13677 (2022) 4 +54. Zhang, Z., Sun, B., Yang, H., Huang, Q.: H3dnet: 3d object detection using hybrid geometric primitives. In: ECCV. pp. 311-329 (2020) 3, 11 +55. Zhao, T., Ning, X., Hong, K., Qiu, Z., Lu, P., Zhao, Y., Zhang, L., Zhou, L., Dai, G., Yang, H., et al.: Ada3d: Exploiting the spatial redundancy with adaptive inference for efficient 3d object detection. arXiv preprint arXiv:2307.08209 (2023) 4 +56. Zheng, W., Tang, W., Jiang, L., Fu, C.W.: Se-ssd: Self-ensembling single-stage object detector from point cloud. In: CVPR. pp. 14494–14503 (2021) 1 +57. Zhu, Y., Mottaghi, R., Kolve, E., Lim, J.J., Gupta, A., Fei-Fei, L., Farhadi, A.: Target-driven visual navigation in indoor scenes using deep reinforcement learning. In: ICRA. pp. 3357-3364 (2017) 1 +58. Zoph, B., Cubuk, E.D., Ghiasi, G., Lin, T.Y., Shlens, J., Le, Q.V.: Learning data augmentation strategies for object detection. In: ECCV. pp. 566-583. 
Springer (2020) 3 \ No newline at end of file diff --git a/2024/3D Small Object Detection with Dynamic Spatial Pruning/images.zip b/2024/3D Small Object Detection with Dynamic Spatial Pruning/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..1d51245de373da3c2e0ee2cfb7d7122977fd2e1f --- /dev/null +++ b/2024/3D Small Object Detection with Dynamic Spatial Pruning/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:181cee60907e93a681f12b8c451d30f9f52fbb589dd74fdc47aa37c776e96b0b +size 749443 diff --git a/2024/3D Small Object Detection with Dynamic Spatial Pruning/layout.json b/2024/3D Small Object Detection with Dynamic Spatial Pruning/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..fee5d6bea804717b652324b1e97829e82cd2a1bf --- /dev/null +++ b/2024/3D Small Object Detection with Dynamic Spatial Pruning/layout.json @@ -0,0 +1,10323 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 142, + 111, + 473, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 111, + 473, + 148 + ], + "spans": [ + { + "bbox": [ + 142, + 111, + 473, + 148 + ], + "type": "text", + "content": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 135, + 167, + 478, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 167, + 478, + 180 + ], + "spans": [ + { + "bbox": [ + 135, + 167, + 478, + 180 + ], + "type": "text", + "content": "Xiuwei " + }, + { + "bbox": [ + 135, + 167, + 478, + 180 + ], + "type": "inline_equation", + "content": "\\mathrm{Xu}^{1\\star}" + }, + { + "bbox": [ + 135, + 167, + 478, + 180 + ], + "type": "text", + "content": ", Zhihao " + }, + { + "bbox": [ + 135, + 167, + 478, + 180 + ], + "type": "inline_equation", + "content": "\\mathrm{Sun}^{2*}" + }, + { + "bbox": [ + 135, + 167, + 478, + 180 + ], + "type": "text", + "content": ", Ziwei Wang" + }, + { + "bbox": [ + 135, + 167, + 478, + 180 + ], + "type": "inline_equation", + "content": "^3" + }, + { + "bbox": [ + 135, + 167, + 478, + 180 + ], + "type": "text", + "content": ", Hongmin Liu" + }, + { + "bbox": [ + 135, + 167, + 478, + 180 + ], + "type": "inline_equation", + "content": "^{2\\dagger}" + }, + { + "bbox": [ + 135, + 167, + 478, + 180 + ], + "type": "text", + "content": ", Jie Zhou" + }, + { + "bbox": [ + 135, + 167, + 478, + 180 + ], + "type": "inline_equation", + "content": "^1" + }, + { + "bbox": [ + 135, + 167, + 478, + 180 + ], + "type": "text", + "content": ", Jiwen Lu" + }, + { + "bbox": [ + 135, + 167, + 478, + 180 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 264, + 180, + 351, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 180, + 351, + 191 + ], + "spans": [ + { + "bbox": [ + 264, + 180, + 351, + 191 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 264, + 180, + 351, + 191 + ], + "type": "text", + "content": "Tsinghua University" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 212, + 192, + 403, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 192, + 403, + 205 + ], + "spans": [ + { + "bbox": [ + 212, + 192, + 403, + 205 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 212, + 192, + 403, + 205 + ], + "type": "text", + "content": "University of Science and Technology Beijing" + } + ] + } + ], + "index": 3 + }, + { + 
"bbox": [ + 249, + 205, + 367, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 205, + 367, + 217 + ], + "spans": [ + { + "bbox": [ + 249, + 205, + 367, + 217 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 249, + 205, + 367, + 217 + ], + "type": "text", + "content": "Carnegie Mellon University" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 159, + 218, + 452, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 218, + 452, + 228 + ], + "spans": [ + { + "bbox": [ + 159, + 218, + 452, + 228 + ], + "type": "text", + "content": "xxw21@mails.tsinghua.edu.cn; d202210361@xs.ustb.edu.cn;" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 192, + 230, + 421, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 230, + 421, + 240 + ], + "spans": [ + { + "bbox": [ + 192, + 230, + 421, + 240 + ], + "type": "text", + "content": "ziweiwa2@andrew.cmu.edu; hmliu@ustb.edu.cn;" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 221, + 242, + 393, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 242, + 393, + 252 + ], + "spans": [ + { + "bbox": [ + 221, + 242, + 393, + 252 + ], + "type": "text", + "content": "{jzhou,lujiwen}@tsinghua.edu.cn" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 159, + 284, + 455, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 284, + 455, + 494 + ], + "spans": [ + { + "bbox": [ + 159, + 284, + 455, + 494 + ], + "type": "text", + "content": "Abstract. In this paper, we propose an efficient feature pruning strategy for 3D small object detection. Conventional 3D object detection methods struggle on small objects due to the weak geometric information from a small number of points. Although increasing the spatial resolution of feature representations can improve the detection performance on small objects, the additional computational overhead is unaffordable. With in-depth study, we observe the growth of computation mainly comes from the upsampling operation in the decoder of 3D detector. Motivated by this, we present a multi-level 3D detector named DSPDet3D which benefits from high spatial resolution to achieve high accuracy on small object detection, while reducing redundant computation by only focusing on small object areas. Specifically, we theoretically derive a dynamic spatial pruning (DSP) strategy to prune the redundant spatial representation of 3D scene in a cascade manner according to the distribution of objects. Then we design DSP module following this strategy and construct DSPDet3D with this efficient module. On ScanNet and TO-SCENE dataset, our method achieves leading performance on small object detection. Moreover, DSPDet3D trained with only ScanNet rooms can generalize well to scenes in larger scale. It takes less than 2s to directly process a whole building consisting of more than " + }, + { + "bbox": [ + 159, + 284, + 455, + 494 + ], + "type": "inline_equation", + "content": "4500\\mathrm{k}" + }, + { + "bbox": [ + 159, + 284, + 455, + 494 + ], + "type": "text", + "content": " points while detecting out almost all objects, ranging from cups to beds, on a single RTX 3090 GPU. Code." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 160, + 508, + 437, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 508, + 437, + 519 + ], + "spans": [ + { + "bbox": [ + 160, + 508, + 437, + 519 + ], + "type": "text", + "content": "Keywords: 3D small object detection " + }, + { + "bbox": [ + 160, + 508, + 437, + 519 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 508, + 437, + 519 + ], + "type": "text", + "content": " Spatial pruning " + }, + { + "bbox": [ + 160, + 508, + 437, + 519 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 508, + 437, + 519 + ], + "type": "text", + "content": " Efficient inference" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 535, + 218, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 535, + 218, + 548 + ], + "spans": [ + { + "bbox": [ + 132, + 535, + 218, + 548 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 562, + 482, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 562, + 482, + 646 + ], + "spans": [ + { + "bbox": [ + 130, + 562, + 482, + 646 + ], + "type": "text", + "content": "3D object detection is a fundamental scene understanding problem, which aims to detect 3D bounding boxes and semantic labels from a point cloud of 3D scene. With the recent advances of deep learning techniques on point cloud understanding [7, 13, 34, 35], 3D detection methods have shown remarkable progress [39, 40, 46, 56]. However, with 3D object detection being widely adopted in fields like robotics [30, 57] and autonomous driving [2] which require highly precise and fine-grained perception, small object detection becomes one of the most important yet unsolved problems. In autonomous driving" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 134, + 653, + 307, + 666 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 653, + 307, + 666 + ], + "spans": [ + { + "bbox": [ + 134, + 653, + 307, + 666 + ], + "type": "text", + "content": "* Equal contribution. † Corresponding author." + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 114, + 479, + 260 + ], + "blocks": [ + { + "bbox": [ + 133, + 114, + 479, + 260 + ], + "lines": [ + { + "bbox": [ + 133, + 114, + 479, + 260 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 479, + 260 + ], + "type": "image", + "image_path": "750fbe39739c992072150d0c57b35ff3953b106544110e86c67298469a085483.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 268, + 482, + 301 + ], + "lines": [ + { + "bbox": [ + 130, + 268, + 482, + 301 + ], + "spans": [ + { + "bbox": [ + 130, + 268, + 482, + 301 + ], + "type": "text", + "content": "Fig.1: Trained with only rooms from ScanNet, our DSPDet3D generalizes well to process a whole house with dozens of rooms. It takes less than 2s to generate fine-grained detection results with a RTX 3090 single GPU." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 328, + 480, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 328, + 480, + 376 + ], + "spans": [ + { + "bbox": [ + 130, + 328, + 480, + 376 + ], + "type": "text", + "content": "scenarios [12], we observe a significant performance gap between cars and pedestrians. In indoor scenes [4,9] where the size variance is much larger (e.g. a bed is 1000x larger than a cup), detecting small objects is more difficult. We focus on indoor 3D object detection task where scenes are crowded with objects of multiple categories and sizes." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 377, + 482, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 377, + 482, + 509 + ], + "spans": [ + { + "bbox": [ + 130, + 377, + 482, + 509 + ], + "type": "text", + "content": "For indoor 3D object detection, although great improvement has been achieved in both speed and accuracy on previous benchmarks [1,9,43], they are still far from general purpose 3D object detection due to the limited range of object size they can handle. For instance, these methods focus on furniture-level objects such as bed and table, while smaller ones like laptop, keyboard and bottle are ignored. With the arrival of 3D small object benchmarks [37, 50, 51] which contain objects with wider size variance (e.g. from tabletop object like cup to large furniture like bed), it is shown that previous 3D detectors get very low accuracy on small objects and some even fail to detect any small objects. This is because extracting fine-grained representation for a large scene is too computationally expensive, so current methods aggressively downsamples the 3D features, which harms the representation of small objects." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": "In this paper, we propose a dynamic spatial pruning approach for 3D small object detection. Although increasing the spatial resolution of the feature representations is a simple and effective way to boost the performance of 3D small object detection, the large computational overhead makes this plan infeasible for real application. With in-depth study, we observe the memory footprint mainly comes from the huge number of features generated by the upsampling operation in the decoder of 3D detector. Inspired by the fact that small objects only occupy a small proportion of space, we adopt a multi-level detection framework to detect different sizes of objects in different levels. As the multi-level detector has already detected out larger objects in lower resolution, there are many redundant features in the scene representations of higher resolution. To this end, we propose to dynamically prune the features after detecting out objects in each level, which skips the upsampling operation at regions where there is no smaller object. 
Specifically, we first theoretically derive a pruning mask generation strategy to" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 332, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 332, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 332, + 102 + ], + "type": "text", + "content": "Authors Suppressed Due to Excessive Length" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 176, + 118, + 434, + 236 + ], + "blocks": [ + { + "bbox": [ + 176, + 118, + 434, + 236 + ], + "lines": [ + { + "bbox": [ + 176, + 118, + 434, + 236 + ], + "spans": [ + { + "bbox": [ + 176, + 118, + 434, + 236 + ], + "type": "image", + "image_path": "5ba71b0b125f9fbe0d2601e0cf6b5bd34589191d8f4bee751bfbfbb72fbfda88.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 244, + 481, + 289 + ], + "lines": [ + { + "bbox": [ + 132, + 244, + 481, + 289 + ], + "spans": [ + { + "bbox": [ + 132, + 244, + 481, + 289 + ], + "type": "text", + "content": "Fig. 2: Detection accuracy (mAP@0.25 of all categories) and speed (FPS) of mainstream 3D object detection methods on TO-SCENE dataset. Our DSPDet3D shows absolute advantage on 3D small object detection and provides flexible accuracy-speed tradeoff by simply adjusting the pruning threshold without retraining." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 312, + 481, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 312, + 481, + 407 + ], + "spans": [ + { + "bbox": [ + 132, + 312, + 481, + 407 + ], + "type": "text", + "content": "supervise the pruning module, which prunes as much features as possible while not affecting the features of object proposals. Then we design a dynamic spatial pruning (DSP) module according to the theoretical analysis and use it to construct a 3D object detector named DSPDet3D. On the popular ScanNet [9] dataset, DSPDet3D improves the mAP of all categories by " + }, + { + "bbox": [ + 132, + 312, + 481, + 407 + ], + "type": "inline_equation", + "content": "3\\%" + }, + { + "bbox": [ + 132, + 312, + 481, + 407 + ], + "type": "text", + "content": " and mAP of small object by " + }, + { + "bbox": [ + 132, + 312, + 481, + 407 + ], + "type": "inline_equation", + "content": "14\\%" + }, + { + "bbox": [ + 132, + 312, + 481, + 407 + ], + "type": "text", + "content": " compared with current state-of-the-art. On TO-SCENE [50] dataset with more tabletop objects, we improve the mAP of all categories by " + }, + { + "bbox": [ + 132, + 312, + 481, + 407 + ], + "type": "inline_equation", + "content": "8\\%" + }, + { + "bbox": [ + 132, + 312, + 481, + 407 + ], + "type": "text", + "content": " while achieving leading inference speed among all mainstream indoor 3D object detection methods." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 133, + 425, + 223, + 438 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 425, + 223, + 438 + ], + "spans": [ + { + "bbox": [ + 133, + 425, + 223, + 438 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 449, + 481, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 449, + 481, + 617 + ], + "spans": [ + { + "bbox": [ + 132, + 449, + 481, + 617 + ], + "type": "text", + "content": "Indoor 3D object detection: Since PointNet and PointNet++ [34, 35], deep learning-based 3D detection methods for point clouds begin to emerge in recent years, which can be mainly divided into three categories: voting-based [6, 33, 47, 49, 54], transformer-based [26, 28] and voxel-based [14, 38, 39, 46] methods. Inspired by 2D hough voting, VoteNet [33] proposes the first voting-based 3D detector, which aggregates the point features on surfaces into object center by 3D voting and predicts bounding boxes from the voted centers. Drawing on the success of transformer-based detector [3] in 2D domain, GroupFree3D [26] and 3DETR [28] adopts transformer architecture to decode the object proposals into 3D boxes. As extracting point features require time-consuming sampling and aggregation operation, GSDN [14] proposes a fully convolutional detection network based on sparse convolution [7, 13, 19, 52], which achieves much faster speed. FCAF3D [38] and TR3D [39] further improves the performance of GSDN with a simple anchor-free architecture. Our method also adopts voxel-based architecture considering its efficiency and scalability." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 617, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 617, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 617, + 481, + 665 + ], + "type": "text", + "content": "Small object detection: Small object detection [45] is a challenging problem in 2D vision due to the low-resolution features. To tackle this, a series of methods have been proposed, which can be categorized into three types: (1) small object augmentation and oversampling methods [17, 25, 58]; (2) scale-aware training and inference" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 192, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 192, + 91, + 447, + 102 + ], + "type": "text", + "content": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "type": "text", + "content": "strategy [11, 31, 41, 42]; (3) increasing the resolution of features or generating high-resolution features [5, 10, 21, 22, 48, 53]. However, there are far less works about 3D small object detection due to the limit of data and network capability. 
BackToReality [51] proposes ScanNet-md40 benchmark which contains small objects and finds many current methods suffer a lot in small object detection. TO-SCENE [50] proposes a new dataset and learning strategy for understanding 3D tabletop scenes. However, it relies on densely sampled points from CAD models, which is infeasible in practical scenarios where the points from small objects are very sparse. In contrast, we aim to directly detect small objects from naturally sampled point clouds." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 224, + 482, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 224, + 482, + 380 + ], + "spans": [ + { + "bbox": [ + 130, + 224, + 482, + 380 + ], + "type": "text", + "content": "Network pruning: Network pruning can be divided into two categories: architecture pruning [15, 16, 18, 20, 27, 29] and spatial pruning [24, 36, 55]. Architecture pruning aims to remove a portion of weights from a neural network to shrink the size of a network, which includes unstructured pruning [15, 18, 29] and structured pruning [16, 20, 27]. The former removes network weights without a predefined structure, while the latter removes whole channels or network layers. On the contrary, spatial pruning does not prune the parameters of a network, but spatially removing redundant computation on the feature maps. DynamicViT [36] prunes the tokens in vision transformer with an attention masking strategy. SPS-Conv [24] dynamically prunes the convolutional kernel to suppress the activation on background voxels in sparse convolution layer. Ada3D [55] proposes a pruning framework for 3D and BEV features. Our dynamic spatial pruning method also belongs to spatial pruning, which directly removes redundant voxel features level by level according to the distribution of objects." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 395, + 204, + 409 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 395, + 204, + 409 + ], + "spans": [ + { + "bbox": [ + 132, + 395, + 204, + 409 + ], + "type": "text", + "content": "3 Approach" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 418, + 482, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 418, + 482, + 478 + ], + "spans": [ + { + "bbox": [ + 130, + 418, + 482, + 478 + ], + "type": "text", + "content": "In this section, we describe our DSPDet3D for efficient 3D small object detection. We first revisit the multi-level 3D detector and analyze the computational cost distribution. Then we propose dynamic spatial pruning with theoretical analysis on how to prune features without affecting detection performance. Finally we design DSP module according to the theoretical analysis and use it to construct DSPDet3D." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 492, + 309, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 492, + 309, + 506 + ], + "spans": [ + { + "bbox": [ + 132, + 492, + 309, + 506 + ], + "type": "text", + "content": "3.1 Analysis on Multi-level 3D Detector" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 510, + 482, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 482, + 545 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 482, + 545 + ], + "type": "text", + "content": "Preliminaries: We choose multi-level FCOS-like [44] 3D detector [38, 39] with sparse convolution [7, 13] for small object detection due to its high performance on both accuracy and speed (more detail can be found in Table 1 and 2)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "type": "text", + "content": "As shown in Figure 3 (middle), after extracting backbone features, multi-level detector iteratively upsamples the voxel feature representations to different levels. In each level, all voxels are regarded as object proposals to predict bounding boxes and category scores. Generative upsampling is widely adopted in this kind of architectures [14,38,39] to expand the voxels from object surfaces to the whole 3D space, where object proposals located at object centers can produce accurate predictions. During training, ground-truth bounding boxes are assigned to different levels and each box assigns several nearby voxels as positive object proposals. Only box predictions from positive object proposals will be supervised. While at inference time all voxel features from the decoder are used to predict bounding boxes, which are then filtered by 3D NMS." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 92, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 92, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 92, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 332, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 332, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 332, + 102 + ], + "type": "text", + "content": "Authors Suppressed Due to Excessive Length" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 168, + 114, + 227, + 326 + ], + "blocks": [ + { + "bbox": [ + 168, + 114, + 227, + 326 + ], + "lines": [ + { + "bbox": [ + 168, + 114, + 227, + 326 + ], + "spans": [ + { + "bbox": [ + 168, + 114, + 227, + 326 + ], + "type": "image", + "image_path": "b45a131204e886a8f409076fc7c514b531393d4deedaba2bde0715dcb52c37c8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 336, + 482, + 403 + ], + "lines": [ + { + "bbox": [ + 130, + 336, + 482, + 403 + ], + "spans": [ + { + "bbox": [ + 130, + 336, + 482, + 403 + ], + "type": "text", + "content": "Fig.3: Comparison of the decoder in typical multi-level 3D object detector [39] and our DSPDet3D. Note that the sparsity of voxels in decoder is changed due to the generative upsampling operation. 
After detecting out objects in a level, DSPDet3D prunes redundant voxel features according to the distribution of objects before each upsampling operation. Red boxes indicate all pruned voxels and 'scissor' boxes indicate voxels pruned in the previous layer. " + }, + { + "bbox": [ + 130, + 336, + 482, + 403 + ], + "type": "inline_equation", + "content": "\\{O\\}" + }, + { + "bbox": [ + 130, + 336, + 482, + 403 + ], + "type": "text", + "content": " is the set of all objects and " + }, + { + "bbox": [ + 130, + 336, + 482, + 403 + ], + "type": "inline_equation", + "content": "\\{O_i\\}" + }, + { + "bbox": [ + 130, + 336, + 482, + 403 + ], + "type": "text", + "content": " is the set of objects assigned to level " + }, + { + "bbox": [ + 130, + 336, + 482, + 403 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 130, + 336, + 482, + 403 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 235, + 113, + 336, + 327 + ], + "blocks": [ + { + "bbox": [ + 235, + 113, + 336, + 327 + ], + "lines": [ + { + "bbox": [ + 235, + 113, + 336, + 327 + ], + "spans": [ + { + "bbox": [ + 235, + 113, + 336, + 327 + ], + "type": "image", + "image_path": "ef1b0796b11fb2f8e2b8b609482f7e84b450d9dd07fe044e0e60bb7b000db33d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 345, + 114, + 446, + 327 + ], + "blocks": [ + { + "bbox": [ + 345, + 114, + 446, + 327 + ], + "lines": [ + { + "bbox": [ + 345, + 114, + 446, + 327 + ], + "spans": [ + { + "bbox": [ + 345, + 114, + 446, + 327 + ], + "type": "image", + "image_path": "1ebd3517ad5e81e0ae90038b2f854a9731ee5e7219e636d0a4e2c2f1ebe0b857.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 426, + 482, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 426, + 482, + 594 + ], + "spans": [ + { + "bbox": [ + 130, + 426, + 482, + 594 + ], + "type": "text", + "content": "Increasing spatial resolution: Based on multi-level architecture, a simple way to boost the performance of small object detection is to increase the spatial resolution of feature maps, i.e., voxelizing the point clouds into smaller voxels to better preserve geometric information. Taking TR3D [39] for example, we double its spatial resolution and show the results in Figure 4. It can be seen that the performance on small object really benefits from larger resolution, but the computational overhead grows dramatically at the same time. As 3D object detection is usually adopted in tasks which requires real-time inference under limited resources, such as AR/VR and robotic navigation, directly increasing spatial resolution is infeasible. Notably, we find the computation growth is imbalanced: the decoder layers (including detection heads) account for the most memory footprint and have larger memory growth ratio than the backbone. This indicates the generative upsampling operation will significantly increase the number of voxels when the spatial resolution is high, which is the main challenge for scaling up the spatial resolution of multi-level detectors." 
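The decoder behaviour analysed above (generative upsampling followed by per-level detection from every voxel) can be summarised in a small sketch. This is a toy Python illustration, not the DSPDet3D code: a sparse voxel tensor is mocked as a coordinate-keyed dict, and generative_upsample / detect_head are assumed placeholders; it only shows why the number of decoder voxels, and hence memory, grows with every upsampling step.

    # Toy multi-level decoder: coarse-to-fine generative upsampling,
    # with every voxel treated as an object proposal at each level.
    # A sparse voxel tensor is mocked as {(x, y, z): feature_value}.

    def generative_upsample(voxels):
        # Toy stand-in: each voxel spawns its 2x2x2 children at the next
        # (finer) level, which is why the voxel count grows level by level.
        out = {}
        for (x, y, z), f in voxels.items():
            for dx in (0, 1):
                for dy in (0, 1):
                    for dz in (0, 1):
                        out[(2 * x + dx, 2 * y + dy, 2 * z + dz)] = f
        return out

    def detect_head(voxels, level):
        # Placeholder: every voxel is an object proposal at this level.
        return [{"level": level, "center": c, "score": f} for c, f in voxels.items()]

    def decode(backbone_feats):
        # backbone_feats: {level: sparse_voxels}, level 4 = coarsest, 1 = finest.
        proposals, voxels = [], backbone_feats[4]
        for level in (4, 3, 2, 1):
            proposals += detect_head(voxels, level)
            if level > 1:
                up = generative_upsample(voxels)            # 8x more voxels (toy)
                voxels = {c: up[c] + backbone_feats[level - 1].get(c, 0.0)
                          for c in up}                       # fuse with skip features
        return proposals

    if __name__ == "__main__":
        feats = {lvl: {(0, 0, 0): 1.0} for lvl in (1, 2, 3, 4)}
        print(len(decode(feats)))   # 1 + 8 + 64 + 512 proposals in this toy case

In the real detector the upsampling is a generative sparse transposed convolution and the fused features come from the backbone, but the growth pattern is the same; DSP inserts a pruning step before each upsampling so that most of these voxels are never created.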
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 131, + 609, + 266, + 623 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 609, + 266, + 623 + ], + "spans": [ + { + "bbox": [ + 131, + 609, + 266, + 623 + ], + "type": "text", + "content": "3.2 Dynamic Spatial Pruning" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "content": "Since small objects only occupy a small proportion of space, we assume there is a large amount of redundant computation in decoder layers, especially when the resolution is high. For instance, if a bed is detected in Layer 4, the region near this bed may be less" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 192, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 192, + 91, + 448, + 102 + ], + "type": "text", + "content": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 170, + 117, + 444, + 232 + ], + "blocks": [ + { + "bbox": [ + 170, + 117, + 444, + 232 + ], + "lines": [ + { + "bbox": [ + 170, + 117, + 444, + 232 + ], + "spans": [ + { + "bbox": [ + 170, + 117, + 444, + 232 + ], + "type": "image", + "image_path": "9de2f2472f930b1efb99084b137643f47518bbcd7d1f0bd7af6b0e3642260016.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 239, + 482, + 307 + ], + "lines": [ + { + "bbox": [ + 130, + 239, + 482, + 307 + ], + "spans": [ + { + "bbox": [ + 130, + 239, + 482, + 307 + ], + "type": "text", + "content": "Fig. 4: The memory footprint distribution of different multi-level detectors. Layer 4 to Layer 1 refer to decoder layers (including detection heads) from coarse to fine. If doubling the spatial resolution of TR3D, the performance on 3D small object detection improves from " + }, + { + "bbox": [ + 130, + 239, + 482, + 307 + ], + "type": "inline_equation", + "content": "52.7\\%" + }, + { + "bbox": [ + 130, + 239, + 482, + 307 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 130, + 239, + 482, + 307 + ], + "type": "inline_equation", + "content": "62.8\\%" + }, + { + "bbox": [ + 130, + 239, + 482, + 307 + ], + "type": "text", + "content": " while memory footprint increases dramatically. We find decoder layers accounts for most of the costs. DSPDet3D efficiently reduces redundant computation on these layers, achieving both fast speed and high accuracy." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 333, + 482, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 333, + 482, + 392 + ], + "spans": [ + { + "bbox": [ + 130, + 333, + 482, + 392 + ], + "type": "text", + "content": "informative for detecting other objects in the follow decoder layers. 
If we can skip the upsampling operation at these regions, the voxels will be sparsified level by level, as shown in Figure 3 (right). In this way, small objects can be detected in Layer 1 from only a small number of voxels. Inspired by this, we propose to dynamically prune the voxel features according to the distribution of objects." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 394, + 482, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 394, + 482, + 453 + ], + "spans": [ + { + "bbox": [ + 130, + 394, + 482, + 453 + ], + "type": "text", + "content": "However, pruning a voxel will not only reduce the number of object proposals in the following levels, but also change the following voxel features computed based on the pruned voxel. Therefore, in order to reduce the redundant computation of multi-level detector without degrading the detection performance, a carefully designed pruning strategy is required. We give theoretical derivation as below." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "spans": [ + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": "Problem formulation: For each scene, we denote " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "\\{O\\}" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": " as the set of all objects, " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "\\{O_i\\}" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": " as the set of objects assigned to level " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "i^1" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": " during training, " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "f_{i} \\in \\mathbb{R}^{N \\times (3 + C)}" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": " as the voxel features of level " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": ". We aim to prune " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "f_{i}" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": " after detecting out " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "\\{O_i\\}" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": ", where the objective is to remove as many voxels as possible while keeping the predictions of " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "\\{O\\} \\backslash \\{O_i\\}" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": " unaffected after the pruning. 
For each object " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "o_j" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": " in level " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "j < i" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": "), we assume the prediction of it is unaffected if the voxel features at level " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": " near its center " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "c_j" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": " are unaffected. We make this assumption because most true positive predictions are from object proposals located at the center of bounding boxes [14, 44]. We denote the expected unaffected neighborhood as " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "\\mathcal{C}_j(c_j, P)" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": ", which means a cube centered at " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "c_j" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "P \\times P \\times P" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": " voxels at level " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": ". Given the symmetry, " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": " should be odd. Then we formulate the objective of our pruning strategy at level " + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 130, + 455, + 483, + 588 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 211, + 600, + 403, + 624 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 600, + 403, + 624 + ], + "spans": [ + { + "bbox": [ + 211, + 600, + 403, + 624 + ], + "type": "interline_equation", + "content": "\\underset {\\mathcal {K} _ {i}} {\\text {m i n i m i z e}} \\sum_ {x, y, z} M _ {i} [ x ] [ y ] [ z ], M _ {i} = \\bigwedge_ {j = 1} ^ {i - 1} \\mathcal {K} _ {i} (\\boldsymbol {c _ {j}}),", + "image_path": "6e799e1f19ebafd97f67b31caac8aa3bd981fc1773b8ad747764bca8f5396026.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 207, + 627, + 481, + 641 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 627, + 481, + 641 + ], + "spans": [ + { + "bbox": [ + 207, + 627, + 481, + 641 + ], + "type": "interline_equation", + "content": "s. t. 
\\forall j < i, \\mathcal {C} _ {j} (\\boldsymbol {c} _ {j}, P) \\cap \\mathcal {A} _ {i, j} (\\neg \\mathcal {K} _ {i} (\\boldsymbol {c} _ {j}) \\star f _ {i}) = \\varnothing \\tag {1}", + "image_path": "06c08e89e5bfc439f45c6261ff942b82b868833eb24c5b6939a5f1f6587b8094.jpg" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 332, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 332, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 332, + 102 + ], + "type": "text", + "content": "Authors Suppressed Due to Excessive Length" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 135, + 653, + 470, + 666 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 653, + 470, + 666 + ], + "spans": [ + { + "bbox": [ + 135, + 653, + 470, + 666 + ], + "type": "text", + "content": "1 We adopt the same definition of level as in Figure 3, where level " + }, + { + "bbox": [ + 135, + 653, + 470, + 666 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 135, + 653, + 470, + 666 + ], + "type": "text", + "content": " is finer than level " + }, + { + "bbox": [ + 135, + 653, + 470, + 666 + ], + "type": "inline_equation", + "content": "i + 1" + }, + { + "bbox": [ + 135, + 653, + 470, + 666 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "spans": [ + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "inline_equation", + "content": "M_{i} \\in \\mathbb{R}^{N}" + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "text", + "content": " is a binary pruning mask sharing the same length with " + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "inline_equation", + "content": "f_{i}" + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "text", + "content": ", where 0 indicates removing and 1 indicates keeping during the pruning operation " + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_i(\\cdot)" + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "text", + "content": " is the generation strategy of pruning mask for each object, which generates a binary pruning mask conditioned on the object center. 
" + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{i,j}(f)" + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "text", + "content": " is defined as the affecting field of " + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "text", + "content": ", which represents the voxels at level " + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "text", + "content": " that will be affected by pruning " + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "text", + "content": " at level " + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 130, + 115, + 479, + 199 + ], + "type": "text", + "content": ". Without loss of generality, here we choose only one object at each level for simplicity of presentation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 200, + 479, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 200, + 479, + 259 + ], + "spans": [ + { + "bbox": [ + 130, + 200, + 479, + 259 + ], + "type": "text", + "content": "Overview of problem solving: We solve (1) by mathematical induction. Specifically, for pruning strategy " + }, + { + "bbox": [ + 130, + 200, + 479, + 259 + ], + "type": "inline_equation", + "content": "M_{i}" + }, + { + "bbox": [ + 130, + 200, + 479, + 259 + ], + "type": "text", + "content": " at level " + }, + { + "bbox": [ + 130, + 200, + 479, + 259 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 130, + 200, + 479, + 259 + ], + "type": "text", + "content": ", we first consider how to generate pruning mask " + }, + { + "bbox": [ + 130, + 200, + 479, + 259 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_i(c_{i-1})" + }, + { + "bbox": [ + 130, + 200, + 479, + 259 + ], + "type": "text", + "content": " to ensure the predictions of " + }, + { + "bbox": [ + 130, + 200, + 479, + 259 + ], + "type": "inline_equation", + "content": "\\{O_{i-1}\\}" + }, + { + "bbox": [ + 130, + 200, + 479, + 259 + ], + "type": "text", + "content": " are unaffected. 
Then we show that by following our pruning strategy " + }, + { + "bbox": [ + 130, + 200, + 479, + 259 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_i" + }, + { + "bbox": [ + 130, + 200, + 479, + 259 + ], + "type": "text", + "content": ", 'the predictions of " + }, + { + "bbox": [ + 130, + 200, + 479, + 259 + ], + "type": "inline_equation", + "content": "\\{O_j\\}" + }, + { + "bbox": [ + 130, + 200, + 479, + 259 + ], + "type": "text", + "content": " are unaffected' can be derived by 'the predictions of " + }, + { + "bbox": [ + 130, + 200, + 479, + 259 + ], + "type": "inline_equation", + "content": "\\{O_{j+1}\\}" + }, + { + "bbox": [ + 130, + 200, + 479, + 259 + ], + "type": "text", + "content": " are unaffected'.2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "spans": [ + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": "Solving " + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_i(\\pmb{c}_{i-1})" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": ": To make sure " + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "inline_equation", + "content": "\\mathcal{C}_{i-1}(\\pmb{c}_{i-1}, P) \\cap \\mathcal{A}_{i,i-1}(\\cdot) = \\emptyset" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": ", we need to compute the affecting field of each voxel " + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "inline_equation", + "content": "v_i" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": " in level " + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": ". Obviously, the upper bound of affecting field of " + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "inline_equation", + "content": "v_i" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": " expands in shape of cube with sparse convolution. 
Assume there are " + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": " sparse convolutions with stride 1 and kernel " + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "inline_equation", + "content": "x_k" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "inline_equation", + "content": "1 \\leq k \\leq m" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": ") between pruning and generative upsampling in level " + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": ", one generative transposed convolution with stride 2 and kernel " + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": " sparse convolutions with stride 1 and kernel " + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "inline_equation", + "content": "z_k" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "inline_equation", + "content": "1 \\leq k \\leq n" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": ") until detecting out objects in level " + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "inline_equation", + "content": "i-1" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": ". 
Then the affecting field from pruning (level " + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": ") to detecting (level " + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "inline_equation", + "content": "i-1" + }, + { + "bbox": [ + 130, + 259, + 480, + 354 + ], + "type": "text", + "content": ") can be written as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 217, + 361, + 481, + 374 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 361, + 481, + 374 + ], + "spans": [ + { + "bbox": [ + 217, + 361, + 481, + 374 + ], + "type": "interline_equation", + "content": "\\mathcal {A} _ {i, i - 1} \\left(v _ {i}\\right) = \\mathcal {C} _ {i - 1} \\left(v _ {i}, a f f \\left(\\left\\{x _ {k} \\right\\}, y, \\left\\{z _ {k} \\right\\}\\right)\\right) \\tag {2}", + "image_path": "27632f698750fa41b24c3258f46b3d923de7ae26d8af2f8a4c2f81143215d014.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 131, + 380, + 479, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 380, + 479, + 416 + ], + "spans": [ + { + "bbox": [ + 131, + 380, + 479, + 416 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 380, + 479, + 416 + ], + "type": "inline_equation", + "content": "aff(\\{x_k\\}, y, \\{z_k\\})" + }, + { + "bbox": [ + 131, + 380, + 479, + 416 + ], + "type": "text", + "content": " is the range of affecting field represented by the kernel sizes, which we will detail in supplementary material. Since the shape of the expected unaffected voxel features is a " + }, + { + "bbox": [ + 131, + 380, + 479, + 416 + ], + "type": "inline_equation", + "content": "P \\times P \\times P" + }, + { + "bbox": [ + 131, + 380, + 479, + 416 + ], + "type": "text", + "content": " cube, " + }, + { + "bbox": [ + 131, + 380, + 479, + 416 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_i(\\pmb{c}_{i-1})" + }, + { + "bbox": [ + 131, + 380, + 479, + 416 + ], + "type": "text", + "content": " can be formulated as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 209, + 422, + 481, + 460 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 422, + 481, + 460 + ], + "spans": [ + { + "bbox": [ + 209, + 422, + 481, + 460 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {K} _ {i} \\left(\\boldsymbol {c} _ {\\boldsymbol {i} - \\boldsymbol {1}}\\right) [ x ] [ y ] [ z ] = \\mathbb {I} \\left(2 \\cdot | \\boldsymbol {x} - \\boldsymbol {c} _ {\\boldsymbol {i} - \\boldsymbol {1}} | _ {\\infty} \\leq r S _ {i}\\right) \\\\ r = \\lceil \\frac {P + a f f (\\{x _ {k} \\} , y , \\{z _ {k} \\}) - 2}{2} \\rceil \\tag {3} \\\\ \\end{array}", + "image_path": "d85446d1d665a9d9bb9e2c56dd190904dbcd3fc777610c6250e951748f6cf0b2.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 465, + 479, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 465, + 479, + 489 + ], + "spans": [ + { + "bbox": [ + 130, + 465, + 479, + 489 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 465, + 479, + 489 + ], + "type": "inline_equation", + "content": "S_{i}" + }, + { + "bbox": [ + 130, + 465, + 479, + 489 + ], + "type": "text", + "content": " is the size of voxel in level " + }, + { + "bbox": [ + 130, + 465, + 479, + 489 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 
130, + 465, + 479, + 489 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 130, + 465, + 479, + 489 + ], + "type": "inline_equation", + "content": "\\mathbb{I}(\\cdot)" + }, + { + "bbox": [ + 130, + 465, + 479, + 489 + ], + "type": "text", + "content": " is the indicative function. " + }, + { + "bbox": [ + 130, + 465, + 479, + 489 + ], + "type": "inline_equation", + "content": "\\boldsymbol{x} = (x,y,z)" + }, + { + "bbox": [ + 130, + 465, + 479, + 489 + ], + "type": "text", + "content": " is the voxel coordinates of " + }, + { + "bbox": [ + 130, + 465, + 479, + 489 + ], + "type": "inline_equation", + "content": "f_{i}" + }, + { + "bbox": [ + 130, + 465, + 479, + 489 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "spans": [ + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "text", + "content": "Recursion of " + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "inline_equation", + "content": "\\kappa_{i}" + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "text", + "content": ": We now derive when the pruning strategy " + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "inline_equation", + "content": "\\kappa_{i}" + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "text", + "content": " in (3) also works for " + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "inline_equation", + "content": "c_{j}" + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "inline_equation", + "content": "j < i - 1" + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "text", + "content": "). We can regrad " + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "inline_equation", + "content": "c_{j}" + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "text", + "content": " as the center of object in level " + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "inline_equation", + "content": "i - 1" + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "text", + "content": " and use (3) to generate the pruning mask. In this way, " + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "inline_equation", + "content": "\\mathcal{C}_{i - 1}(c_j,P)" + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "text", + "content": " are unaffected. As " + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "inline_equation", + "content": "\\mathcal{C}_j(c_j,P)" + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "text", + "content": " is covered by " + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "inline_equation", + "content": "\\mathcal{C}_{i - 1}(c_j,P)" + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "text", + "content": ", so " + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "inline_equation", + "content": "\\mathcal{C}_j(c_j,P)" + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "text", + "content": " is unaffected as well. 
We should also ensure pruning in level " + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "text", + "content": " has no cumulative impact on pruning in level " + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "inline_equation", + "content": "i - 1" + }, + { + "bbox": [ + 130, + 490, + 479, + 550 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 236, + 555, + 481, + 569 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 555, + 481, + 569 + ], + "spans": [ + { + "bbox": [ + 236, + 555, + 481, + 569 + ], + "type": "interline_equation", + "content": "\\left(\\mathcal {K} _ {i - 1} \\left(\\boldsymbol {c} _ {\\boldsymbol {j}}\\right) \\star f _ {i - 1}\\right) \\subseteq \\mathcal {C} _ {i - 1} \\left(\\boldsymbol {c} _ {\\boldsymbol {j}}, P\\right) \\tag {4}", + "image_path": "0f09d7577709e7d2f35d2b20621e0984b23f4dc00880d947d72315f6d576d625.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 131, + 574, + 479, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 574, + 479, + 598 + ], + "spans": [ + { + "bbox": [ + 131, + 574, + 479, + 598 + ], + "type": "text", + "content": "this means when generating pruning mask of " + }, + { + "bbox": [ + 131, + 574, + 479, + 598 + ], + "type": "inline_equation", + "content": "c_{j}" + }, + { + "bbox": [ + 131, + 574, + 479, + 598 + ], + "type": "text", + "content": " in level " + }, + { + "bbox": [ + 131, + 574, + 479, + 598 + ], + "type": "inline_equation", + "content": "i - 1" + }, + { + "bbox": [ + 131, + 574, + 479, + 598 + ], + "type": "text", + "content": " using " + }, + { + "bbox": [ + 131, + 574, + 479, + 598 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_{i - 1}" + }, + { + "bbox": [ + 131, + 574, + 479, + 598 + ], + "type": "text", + "content": ", the kept voxels should be covered by the unaffected voxels after pruning in level " + }, + { + "bbox": [ + 131, + 574, + 479, + 598 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 131, + 574, + 479, + 598 + ], + "type": "text", + "content": ". So we have:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 264, + 604, + 481, + 617 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 604, + 481, + 617 + ], + "spans": [ + { + "bbox": [ + 264, + 604, + 481, + 617 + ], + "type": "interline_equation", + "content": "r \\cdot S _ {i - 1} \\leq P \\cdot S _ {i - 1} \\tag {5}", + "image_path": "b2a0351cbd5683378efbbf9e985a9601a1a2d43c6cbeeffce60efd4a5c737072.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 623, + 479, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 623, + 479, + 648 + ], + "spans": [ + { + "bbox": [ + 130, + 623, + 479, + 648 + ], + "type": "text", + "content": "The minimum " + }, + { + "bbox": [ + 130, + 623, + 479, + 648 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 130, + 623, + 479, + 648 + ], + "type": "text", + "content": " can be acquired by solving (5). 
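A small numpy sketch of the keep-mask in (3), assuming P and the affecting-field size aff(...) are given integers (the paper derives the latter from the kernel sizes in its supplementary material). A voxel is kept here when it lies inside the cube of at least one remaining object center, which is one reading of how the per-object masks in (1) are merged; keep_mask and the sample values below are illustrative, not the released code.

    import math
    import numpy as np

    def keep_mask(coords, centers, voxel_size, P, aff):
        """Sketch of Eq. (3): keep voxel x iff 2*||x - c||_inf <= r * S_i
        for some remaining object center c, with r = ceil((P + aff - 2) / 2)."""
        r = math.ceil((P + aff - 2) / 2)
        mask = np.zeros(len(coords), dtype=bool)
        for c in centers:
            cheb = np.max(np.abs(coords - c), axis=1)      # Chebyshev distance
            mask |= (2.0 * cheb <= r * voxel_size)
        return mask

    if __name__ == "__main__":
        # Toy level-i voxel grid (metric coordinates) and two object centers.
        S_i = 0.16                                         # assumed voxel size (m)
        xs = np.arange(0.0, 3.2, S_i)
        coords = np.stack(np.meshgrid(xs, xs, xs), -1).reshape(-1, 3)
        centers = np.array([[0.4, 0.4, 0.4], [2.4, 2.4, 0.4]])
        m = keep_mask(coords, centers, S_i, P=3, aff=5)    # P, aff: assumed values
        print(m.sum(), "of", m.size, "voxels kept")        # most voxels get pruned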
In this case, strategy " + }, + { + "bbox": [ + 130, + 623, + 479, + 648 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_i" + }, + { + "bbox": [ + 130, + 623, + 479, + 648 + ], + "type": "text", + "content": " in (3) works for all " + }, + { + "bbox": [ + 130, + 623, + 479, + 648 + ], + "type": "inline_equation", + "content": "c_j" + }, + { + "bbox": [ + 130, + 623, + 479, + 648 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 130, + 623, + 479, + 648 + ], + "type": "inline_equation", + "content": "j < i" + }, + { + "bbox": [ + 130, + 623, + 479, + 648 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 192, + 91, + 448, + 103 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 91, + 448, + 103 + ], + "spans": [ + { + "bbox": [ + 192, + 91, + 448, + 103 + ], + "type": "text", + "content": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 135, + 652, + 446, + 666 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 652, + 446, + 666 + ], + "spans": [ + { + "bbox": [ + 135, + 652, + 446, + 666 + ], + "type": "text", + "content": "2 We provide illustrated examples in supplementary material for better understanding." + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 138, + 116, + 473, + 255 + ], + "blocks": [ + { + "bbox": [ + 138, + 116, + 473, + 255 + ], + "lines": [ + { + "bbox": [ + 138, + 116, + 473, + 255 + ], + "spans": [ + { + "bbox": [ + 138, + 116, + 473, + 255 + ], + "type": "image", + "image_path": "6a86af61c73089b12e032991a6e4a5f28a947418d85bc61a7a55621f1a3a99ce.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 264, + 482, + 342 + ], + "lines": [ + { + "bbox": [ + 130, + 264, + 482, + 342 + ], + "spans": [ + { + "bbox": [ + 130, + 264, + 482, + 342 + ], + "type": "text", + "content": "Fig. 5: Illustration of DSPDet3D. The voxelized point clouds are fed into a high-resolution sparse convolutional backbone, which output four levels of scene representations. Four dynamic spatial pruning (DSP) modules are stacked to construct a multi-level decoder and detect objects from coarse to fine. DSP module utilizes a light-weight learnable module to predict the pruning mask. During inference, we discretize the pruning mask and use it to guide pruning before generative upsampling. While during training we interpolate the pruning mask to next level and prune the voxel features after generative upsampling." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 365, + 203, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 365, + 203, + 376 + ], + "spans": [ + { + "bbox": [ + 132, + 365, + 203, + 376 + ], + "type": "text", + "content": "3.3 DSPDet3D" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 385, + 481, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 385, + 481, + 422 + ], + "spans": [ + { + "bbox": [ + 130, + 385, + 481, + 422 + ], + "type": "text", + "content": "Based on the theoretical analysis, we devise a dynamic spatial pruning (DSP) module to approximate the ideal pruning strategy. We further construct a 3D small object detector named DSPDet3D with the proposed DSP module." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 422, + 482, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 422, + 482, + 506 + ], + "spans": [ + { + "bbox": [ + 130, + 422, + 482, + 506 + ], + "type": "text", + "content": "DSP module: As shown in Figure 3, we modify the layers of a typical multi-level decoder to DSP modules, which prunes redundant voxel features after detecting out objects at each level for efficient feature upsampling. Formally, given the upsampled voxel feature " + }, + { + "bbox": [ + 130, + 422, + 482, + 506 + ], + "type": "inline_equation", + "content": "f_{i}^{U}" + }, + { + "bbox": [ + 130, + 422, + 482, + 506 + ], + "type": "text", + "content": " and the backbone feature " + }, + { + "bbox": [ + 130, + 422, + 482, + 506 + ], + "type": "inline_equation", + "content": "f_{i}^{B}" + }, + { + "bbox": [ + 130, + 422, + 482, + 506 + ], + "type": "text", + "content": " at level " + }, + { + "bbox": [ + 130, + 422, + 482, + 506 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 130, + 422, + 482, + 506 + ], + "type": "text", + "content": ", DSP module first add them for detection. However, " + }, + { + "bbox": [ + 130, + 422, + 482, + 506 + ], + "type": "inline_equation", + "content": "f_{i}^{U}" + }, + { + "bbox": [ + 130, + 422, + 482, + 506 + ], + "type": "text", + "content": " may be much sparser than " + }, + { + "bbox": [ + 130, + 422, + 482, + 506 + ], + "type": "inline_equation", + "content": "f_{i}^{B}" + }, + { + "bbox": [ + 130, + 422, + 482, + 506 + ], + "type": "text", + "content": " due to pruning, directly adding by taking union of them is inefficient. 
Therefore, we propose a new operator called partial addition to fit our pruning strategy:

$$f_i = f_i^B \,\overrightarrow{+}\, f_i^U \tag{6}$$

where the addition is constrained to operate only on the voxels of $f_i^U$. Then objects are detected using a detection head shared across all levels: $\{O_i\} = \mathrm{Detect}(f_i)$. Once the objects at level $i$ are detected, we prune the voxel features according to the strategy derived in Section 3.2. Here we devise a lightweight MLP-based learnable pruning module to decide where smaller objects (i.e., objects in level $j$, $j < i$) may appear, and then prune the other locations:

$$\bar{f}_i = t\left(\hat{M}_i\right) \star f_i, \quad \hat{M}_i = \mathrm{MLP}_i\left(f_i\right) \tag{7}$$

where $\hat{M}_i$ is the pruning mask predicted from $f_i$, which represents the probability of retention for each voxel.
We utilize FocalLoss [23] to supervise $\hat{M}_i$ with the generated $M_i$ in (1). During inference, a threshold function $t(\cdot)$ sets probabilities lower than $\tau$ to 0 and the others to 1 to guide pruning. After pruning, generative upsampling is applied to acquire the features for the next level: $f_{i-1}^{U} = \mathrm{GenU}(\bar{f}_i)$, where $\mathrm{GenU}$ denotes the generative upsampling operator.
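To make Eqs. (6) and (7) concrete, the following is a minimal PyTorch-style sketch of one inference-time DSP step, with sparse voxel features represented as plain (coords, feats) tensor pairs rather than MinkowskiEngine sparse tensors. The coordinate-hashing trick, all function and argument names, and the sigmoid-MLP head are our own illustrative assumptions, and the shared detection head is omitted; this is not the released implementation.

```python
import torch

def partial_add(coords_b, feats_b, coords_u, feats_u):
    """Eq. (6): add backbone features onto the (sparser) upsampled
    features only at voxels that survive in f^U; no coordinate union."""
    def key(c):  # pack (N, 3) non-negative integer coords into scalars
        return c[:, 0] * 2**42 + c[:, 1] * 2**21 + c[:, 2]
    lut = {int(k): i for i, k in enumerate(key(coords_b))}
    idx = torch.tensor([lut.get(int(k), -1) for k in key(coords_u)])
    out = feats_u.clone()
    hit = idx >= 0
    out[hit] = out[hit] + feats_b[idx[hit]]
    return coords_u, out

def dsp_step(coords_u, feats_u, coords_b, feats_b, mlp, tau=0.3):
    """One inference-time DSP module: partial addition (Eq. 6), then
    mask prediction and hard pruning (Eq. 7). Detection head omitted;
    tau=0.3 matches the best ScanNet threshold reported in Table 1."""
    coords, feats = partial_add(coords_b, feats_b, coords_u, feats_u)
    m_hat = torch.sigmoid(mlp(feats)).squeeze(-1)  # retention probability
    keep = m_hat >= tau                            # t(.): threshold at tau
    return coords[keep], feats[keep]               # f-bar, fed to upsampling
```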
During training, as $\hat{M}_i$ may not yet be accurate (especially at the beginning), we find that applying the learnable pruning module above makes training difficult to converge. Instead, we switch the pruning to a weak mode for context preservation. As shown in Figure 5, the weak pruning is applied after generative upsampling. For level $i$, we upsample the pruning mask $\hat{M}_{i+1}$ to level $i$ with nearest-neighbor interpolation. Then we sort the interpolated scores and keep only the $N_{max}$ voxels with the highest scores. This weak pruning mechanism aims to stabilize training, and it only takes effect when the number of voxels is too large for the following operations.

Since our theoretical analysis sets the expected unaffected neighborhood to be a $P \times P \times P$ cube, we also modify the assignment strategy of positive object proposals accordingly for robust training. Specifically, for a ground-truth bounding box of $o_i$ assigned to level $i$, we sample the $N_{pos}$ voxels nearest to $c_i$ inside the cube centered at $c_i$ with side length $P \cdot S_i$. If there are fewer than $N_{pos}$ voxels in the cube, we simply sample all voxels inside it. Our assignment method is independent of the size of the bounding box, which ensures there are enough positive proposals even for small objects.
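A matching sketch of the training-time weak mode, under the same (coords, feats) representation and assuming the per-voxel scores have already been nearest-neighbor-interpolated from $\hat{M}_{i+1}$; the function name and arguments are illustrative, and $N_{max} = 100000$ follows Section 4.1:

```python
import torch

def weak_prune(coords, feats, scores, n_max=100_000):
    """Training-time weak pruning: keep at most n_max voxels with the
    highest interpolated retention scores instead of hard thresholding,
    so early, inaccurate masks cannot starve the decoder of context."""
    if coords.shape[0] <= n_max:   # only active when voxel count explodes
        return coords, feats, scores
    top = torch.topk(scores, k=n_max).indices
    return coords[top], feats[top], scores[top]
```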
DSPDet3D: Built upon the top-performing multi-level detector TR3D [39], we remove the max-pooling layer to increase the spatial resolution of the backbone features. We then replace the decoder in TR3D with four stacked DSP modules that remove redundant voxel features level by level, which achieves efficient upsampling without affecting the detection performance. To train DSPDet3D, we keep the same losses for classification and box regression as in TR3D and add an additional FocalLoss to supervise $\hat{M}_i$ with $M_i$.

Comparison with FCAF3D: Similar to our training-time weak pruning, FCAF3D [38] also adopts a pruning strategy in the decoder to prevent the number of voxels from growing too large, but it is unable to remove redundant features in early decoder layers during inference. Moreover, it directly uses the classification scores of the bounding boxes to sort and prune the voxel features, which cannot accurately preserve geometric information for small objects.

4 Experiment

In this section, we conduct experiments to investigate the performance of our approach on 3D small object detection. We first describe the datasets and experimental settings. Then we compare DSPDet3D with state-of-the-art 3D object detection methods. We also design ablation experiments to study the effectiveness of the proposed methods. Finally, we transfer DSPDet3D to extremely large scenes to show its efficiency and generalization ability.

4.1 Experimental Settings

Datasets and metrics: We conduct experiments on two indoor datasets, ScanNet [9] and TO-SCENE [50].
ScanNet is a richly annotated dataset of indoor scenes with 1201 training scenes and 312 validation scenes. Each object in the scenes is annotated with text and then mapped to a category ID. We follow the ScanNet-md40 benchmark proposed by [51], which contains objects in 22 categories with large size variance. TO-SCENE is a mixed-reality dataset which provides three variants, TO_Vanilla, TO_Crowd and TO_ScanNet, with different numbers of tabletop objects and scene scales. We choose the room-scale TO_ScanNet benchmark, which contains 3600 training scenes and 800 validation scenes with 70 categories. However, TO_ScanNet adopts non-uniform sampling to acquire about 2000 points per tabletop object, which is infeasible in practical settings. To this end, we downsample the small objects and control their density to be similar to that of the other objects and backgrounds. We name this modified version the TO-SCENE-down benchmark. We take the point clouds without color as inputs for all methods. More details about the ScanNet-md40 and TO-SCENE-down benchmarks can be found in the supplementary material.

Table 1: 3D object detection results and computational costs of different methods on ScanNet-md40. DSPDet3D with the best pruning threshold is highlighted in gray. Best scores in bold, runner-ups underlined.

| Method | Decoder | mAP@0.25 | mAP@0.5 | mAP_S@0.25 | mAP_S@0.5 | Speed (FPS) | Memory (MB) |
|---|---|---|---|---|---|---|---|
| VoteNet | Voting | 51.02 | 33.69 | 0.30 | 0 | 13.4 | 1150 |
| VoteNet_S | Voting | 48.62 | 31.55 | 1.04 | 0 | 8.5 | 1500 |
| H3DNet | Hybrid | 53.51 | 39.23 | 3.08 | 0.90 | 7.2 | 1550 |
| GroupFree3D | Transformer | 56.77 | 41.39 | 11.70 | 0.81 | 7.8 | 1450 |
| GroupFree3D_S | Transformer | 29.44 | 11.94 | 0.20 | 0 | 3.2 | 2000 |
| RBGNet | Voting | 55.23 | 32.64 | 5.81 | 0 | 6.6 | 1700 |
| FCAF3D | Multi-level | 59.49 | 48.75 | 18.38 | 8.21 | 12.3 | 850 |
| CAGroup3D | Voting | 60.29 | 49.90 | 16.62 | 8.63 | 3.1 | 3250 |
| TR3D | Multi-level | 61.59 | 49.98 | 27.53 | 12.91 | 10.8 | 1250 |
| FCAF3D-higher | Multi-level | 62.65 | 51.01 | 27.68 | 16.23 | 7.1 | 4000 |
| TR3D-higher | Multi-level | 65.18 | 54.03 | 41.70 | 29.56 | 5.2 | 4450 |
| Ours (τ=0) | Multi-level | 65.39 | 54.59 | 44.79 | 31.55 | 4.4 | 4200 |
| Ours (τ=0.3) | Multi-level | 65.04 | 54.35 | 43.77 | 30.38 | 12.5 | 700 |

We report the mean average precision (mAP) with IoU thresholds 0.25 and 0.5. To measure the performance on different categories, we use two kinds of metrics, mAP and $\mathrm{mAP}_S$, which refer to the mean AP over all objects and over small objects respectively. For both benchmarks, we define small-object categories as those whose average volume is smaller than $0.05\,m^3$.
Implementation details: We implement our approach with PyTorch [32], MinkowskiEngine [7] and MMDetection3D [8]. We follow the same training strategy and hyperparameters as TR3D [39] for a fair comparison. Training converges within 4 hours on a 4-GPU machine. The stride of the sparse convolution in the pre-encoder of DSPDet3D is set to 2, so the voxel size of $f_1^B$ is 4 cm and $S_i$ equals $2^i \cdot 2\,cm$. We set $N_{pos} = 6$ and $N_{max} = 100000$ during training. The weight of the FocalLoss between $M_i$ and $\hat{M}_i$ is 0.01. In terms of block structure, we have $\{x_k\} = \emptyset$, $y = 3$ and $\{z_k\} = \{3, 3\}$, so we set $r = 7$ and $P = 7$ according to (3).

Table 2: 3D object detection results and computational costs of different methods on the TO-SCENE-down benchmark. DSPDet3D with the best pruning threshold is highlighted in gray. Best scores in bold, runner-ups underlined.

| Method | Decoder | mAP@0.25 | mAP@0.5 | mAP_S@0.25 | mAP_S@0.5 | Speed (FPS) | Memory (MB) |
|---|---|---|---|---|---|---|---|
| VoteNet | Voting | 26.72 | 14.01 | 14.51 | 4.78 | 12.8 | 1300 |
| VoteNet_S | Voting | 31.87 | 14.89 | 21.75 | 7.40 | 7.6 | 1650 |
| H3DNet | Hybrid | 27.69 | 17.38 | 14.83 | 7.39 | 5.1 | 1650 |
| GroupFree3D | Transformer | 32.41 | 20.43 | 20.17 | 10.13 | 7.7 | 1700 |
| GroupFree3D_S | Transformer | 40.14 | 23.55 | 33.33 | 16.15 | 2.4 | 2200 |
| RBGNet | Voting | 40.42 | 30.27 | 29.69 | 21.61 | 5.0 | 1850 |
| FCAF3D | Multi-level | 45.13 | 37.21 | 37.18 | 31.65 | 11.9 | 1000 |
| CAGroup3D | Voting | 54.28 | 47.58 | 48.49 | 43.85 | 2.2 | 3500 |
| TR3D | Multi-level | 55.58 | 45.95 | 52.72 | 44.01 | 9.9 | 1400 |
| FCAF3D-higher | Multi-level | 57.23 | 50.39 | 53.07 | 48.76 | 6.3 | 4250 |
| TR3D-higher | Multi-level | 63.96 | 56.06 | 62.84 | 57.14 | 4.1 | 4600 |
| Ours (τ=0) | Multi-level | 66.81 | 59.41 | 66.53 | 61.57 | 4.1 | 5300 |
| Ours (τ=0.5) | Multi-level | 66.12 | 58.55 | 65.82 | 60.73 | 13.9 | 800 |

4.2 Comparison with State-of-the-art

We compare our method with popular and state-of-the-art 3D object detection methods, including VoteNet [33], H3DNet [54], GroupFree3D [26], RBGNet [47], CAGroup3D [46], FCAF3D [38] and TR3D [39]. We also follow [50] in reducing the radius of the ball query in the PointNet++ backbone for VoteNet and GroupFree3D; the modified models are distinguished by the subscript $S$. Note that the original TR3D only uses two detection heads, at levels 2/3, and removes the last generative upsampling. However, detecting small objects relies heavily on a high-resolution feature map, so we add the upsampling back. This makes it slightly slower but much more accurate on the 3D small object detection benchmarks.

For all methods, we use their official code and the same training strategy and hyperparameters to train them on ScanNet-md40 and TO-SCENE-down.
Fig. 6: Visualization of the pruning process on ScanNet. We show the kept voxels in each level under different thresholds. The memory footprint of each level is also listed at the bottom.

Tables 1 and 2 show the experimental results on ScanNet-md40 and TO-SCENE-down respectively. Consistent with the observation of [51], we find that point-based (VoteNet, H3DNet, RBGNet) and transformer-based (GroupFree3D) methods almost completely fail to detect small objects on ScanNet-md40. This is because the PointNet++ backbone used by these methods adopts the set abstraction (SA) operation to aggressively downsample the point clouds when extracting the scene representation. Since the number of small objects in ScanNet is limited, furthest point sampling has a low probability of sampling points on small objects, which leads to an inaccurate representation of them. Methods with a sparse convolutional backbone (CAGroup3D, FCAF3D, TR3D) achieve relatively much higher $\mathrm{mAP}_S$ because sparse convolution [7, 13] can extract fine-grained scene representations with high efficiency. However, a two-stage method like CAGroup3D is both slow and memory-consuming. Multi-level methods like FCAF3D and TR3D are efficient and perform well on small object detection thanks to their FPN-like architecture, but they are still limited by resolution.
On the contrary, our DSPDet3D with a proper threshold takes advantage of the high-resolution scene representation to achieve much higher performance. Furthermore, DSPDet3D is the most memory-efficient model among all mainstream methods.

4.3 Ablation Study

We conduct ablation studies on ScanNet-md40 to study the effects of the hyperparameters and different design choices.

Pruning process: We visualize the pruning process under different thresholds in Figure 6, which shows the voxels remaining in each level after pruning together with the memory footprint of each level. Our method significantly reduces the memory footprint by pruning most of the uninformative voxels: the pruning module only keeps the regions where objects smaller than the current level may appear.
Table 3: Ablation studies on several design choices. We control the speed of each method to 10 FPS and report the accuracy in mAP@0.25 and mAP_S@0.25.

| Method | mAP | mAP_S |
|---|---|---|
| Remove partial addition | 55.3 | 35.5 |
| Addition by taking union | 57.9 | 36.4 |
| Addition by interpolation | 62.1 | 40.9 |
| Spherical keeping mask | 63.0 | 41.1 |
| Remove training-time pruning | - | - |
| Positive proposals inside bounding box | 62.4 | 40.7 |
| Full design of the DSP module | 65.1 | 44.1 |

Fig. 7: Ablation studies on the values of $r$ and $N_{pos}$. For each value we report performance under different pruning thresholds $\tau$.

Hyperparameters: We study two hyperparameters, $r$ and $N_{pos}$, which are highly relevant to 3D small object detection. Note that $r = \left\lceil \frac{P + 9 - 2}{2} \right\rceil$, so $r$ and $P$ must be changed simultaneously; solving for odd $P$ gives $P = 2r - 7$. As shown in Figure 7 (left), setting $r = 7$ achieves the best performance. If $r$ is smaller than 7, then $P = 2r - 7 < r$, which conflicts with Equation (5), and the features are affected by pruning. A larger $r$ makes the pruning less aggressive, leaving a large number of redundant voxel features. Figure 7 (right) shows that the number of positive object proposals should be set properly, which is important for balancing the ratio of positive to negative samples during classification.
Design choices: We also study the design choices of DSPDet3D in Table 3. Comparing the second, third and fourth rows, we conclude that partial addition is important for efficient feature fusion: although taking the union can preserve more information, it reduces the sparsity of the voxels and thus makes our pruning less efficient. The fifth row shows that generating the keeping mask according to the shape of the affecting field is better than using a spherical shape. According to the sixth row, removing training-time pruning significantly increases the memory footprint during training, which makes the network untrainable. The seventh row validates the effectiveness of our assignment method for positive object proposals.

4.4 Transferring to Larger Scenes

We further validate the efficiency and generalization ability of different 3D detectors by transferring them to scenes of much larger scale. We first train the 3D detectors on rooms from the ScanNet training set in a category-agnostic manner, which is done by regarding every labeled object as the same category.
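As an illustration of this category-agnostic setup (a sketch under our own assumptions about the label format, not the actual training code), the relabeling amounts to collapsing all category IDs before training:

```python
import numpy as np

def make_category_agnostic(labels):
    """Map every annotated object to a single 'object' class so the
    detector learns class-agnostic objectness that can transfer."""
    return np.zeros_like(labels)  # all boxes share category id 0
```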
Fig. 8: Visualization of the transfer results of different 3D object detectors. The 3D detector is trained on rooms from ScanNet and directly adopted to process a whole building-level 3D scene from Matterport3D.

Then we directly adopt the trained detectors to process the building-level scenes in Matterport3D [4]. We find that previous methods almost all fail to process these extremely large scenes due to unaffordable memory footprints, so we only compare DSPDet3D with FCAF3D, as shown in Figure 8. FCAF3D cannot detect any small object and even struggles on relatively large objects like chairs when the scene is too large. On the contrary, DSPDet3D is able to accurately detect small objects like cups and thin pictures.

5 Conclusion

In this paper, we have presented an efficient feature pruning strategy for 3D small object detection. Inspired by the fact that small objects occupy only a small proportion of the space, we adopt a multi-level detection framework that detects objects of different sizes at different levels. We then present a dynamic spatial pruning strategy that prunes the voxel features after the objects at each level have been detected. Specifically, we first design the pruning strategy through a theoretical analysis of how to prune voxels without affecting the features of object proposals. We then propose the dynamic spatial pruning (DSP) module according to this strategy and use it to construct DSPDet3D. Extensive experiments on the ScanNet and TO-SCENE datasets show that DSPDet3D achieves leading detection accuracy and speed. We also conduct a transfer experiment on Matterport3D to show that DSPDet3D generalizes well to extremely large scenes.

Acknowledgements

This work was supported in part by the National Natural Science Foundation of China under Grant 62125603, Grant 62321005, and Grant 62336004.
References

1. Armeni, I., Sener, O., Zamir, A.R., Jiang, H., Brilakis, I., Fischer, M., Savarese, S.: 3d semantic parsing of large-scale indoor spaces. In: ICCV. pp. 1534-1543 (2016) 2
2. Bansal, M., Krizhevsky, A., Ogale, A.: Chauffeurnet: Learning to drive by imitating the best and synthesizing the worst. arXiv preprint arXiv:1812.03079 (2018) 1
3. Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: ECCV. pp. 213-229. Springer (2020) 3
4. Chang, A., Dai, A., Funkhouser, T., Halber, M., Niessner, M., Savva, M., Song, S., Zeng, A., Zhang, Y.: Matterport3d: Learning from rgb-d data in indoor environments. In: 3DV (2017) 2, 14
5. Chen, C., Liu, M.Y., Tuzel, O., Xiao, J.: R-cnn for small object detection. In: ACCV. pp. 214-230. Springer (2017) 4
6. Cheng, B., Sheng, L., Shi, S., Yang, M., Xu, D.: Back-tracing representative points for voting-based 3d object detection in point clouds. In: CVPR. pp. 8963-8972 (2021) 3
7. Choy, C., Gwak, J., Savarese, S.: 4d spatio-temporal convnets: Minkowski convolutional neural networks. In: CVPR. pp. 3075-3084 (2019) 1, 3, 4, 10, 12
8. Contributors, M.: Mmdetection3d: Openmmlab next-generation platform for general 3d object detection (2020) 10
9. Dai, A., Chang, A.X., Savva, M., Halber, M., Funkhouser, T., Nießner, M.: Scannet: Richly-annotated 3d reconstructions of indoor scenes. In: CVPR. pp. 5828-5839 (2017) 2, 3, 9
10. Deng, C., Wang, M., Liu, L., Liu, Y., Jiang, Y.: Extended feature pyramid network for small object detection. TMM 24, 1968-1979 (2021) 4
11. Gao, M., Yu, R., Li, A., Morariu, V.I., Davis, L.S.: Dynamic zoom-in network for fast object detection in large images. In: CVPR. pp. 6926-6935 (2018) 4
12. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? the kitti vision benchmark suite. In: CVPR. pp. 3354-3361 (2012) 2
13. Graham, B., Engelcke, M., Van Der Maaten, L.: 3d semantic segmentation with submanifold sparse convolutional networks. In: CVPR. pp. 9224-9232 (2018) 1, 3, 4, 12
14. Gwak, J., Choy, C., Savarese, S.: Generative sparse detection networks for 3d single-shot object detection. In: ECCV. pp. 297-313. Springer (2020) 3, 4, 6
15. Han, S., Pool, J., Tran, J., Dally, W.: Learning both weights and connections for efficient neural network. NeurIPS 28 (2015) 4
16. Huang, Z., Wang, N.: Data-driven sparse structure selection for deep neural networks. In: ECCV. pp. 304-320 (2018) 4
17. Kisantal, M., Wojna, Z., Murawski, J., Naruniec, J., Cho, K.: Augmentation for small object detection. arXiv preprint arXiv:1902.07296 (2019) 3
18. LeCun, Y., Denker, J., Solla, S.: Optimal brain damage. NeurIPS 2 (1989) 4
19. Lee, J., Choy, C., Park, J.: Putting 3d spatially sparse networks on a diet. arXiv preprint arXiv:2112.01316 (2021) 3
20. Li, H., Kadav, A., Durdanovic, I., Samet, H., Graf, H.P.: Pruning filters for efficient convnets. arXiv preprint arXiv:1608.08710 (2016) 4
21. Li, J., Liang, X., Wei, Y., Xu, T., Feng, J., Yan, S.: Perceptual generative adversarial networks for small object detection. In: CVPR. pp. 1222-1230 (2017) 4
22. Lin, T.Y., Dollár, P., Girshick, R., He, K., Hariharan, B., Belongie, S.: Feature pyramid networks for object detection. In: CVPR. pp. 2117-2125 (2017) 4
23. Lin, T.Y., Goyal, P., Girshick, R., He, K., Dollár, P.: Focal loss for dense object detection. In: ICCV. pp. 2980-2988 (2017) 8
24. Liu, J., Chen, Y., Ye, X., Tian, Z., Tan, X., Qi, X.: Spatial pruned sparse convolution for efficient 3d object detection. In: NeurIPS (2022) 4
25. Liu, W., Anguelov, D., Erhan, D., Szegedy, C., Reed, S., Fu, C.Y., Berg, A.C.: Ssd: Single shot multibox detector. In: ECCV. pp. 21-37 (2016) 3
26. Liu, Z., Zhang, Z., Cao, Y., Hu, H., Tong, X.: Group-free 3d object detection via transformers. arXiv preprint arXiv:2104.00678 (2021) 3, 11
27. Liu, Z., Li, J., Shen, Z., Huang, G., Yan, S., Zhang, C.: Learning efficient convolutional networks through network slimming. In: ICCV. pp. 2736-2744 (2017) 4
28. Misra, I., Girdhar, R., Joulin, A.: An end-to-end transformer model for 3d object detection. In: ICCV. pp. 2906-2917 (2021) 3
29. Molchanov, P., Tyree, S., Karras, T., Aila, T., Kautz, J.: Pruning convolutional neural networks for resource efficient inference. arXiv preprint arXiv:1611.06440 (2016) 4
30. Mousavian, A., Eppner, C., Fox, D.: 6-dof graspnet: Variational grasp generation for object manipulation. In: ICCV. pp. 2901-2910 (2019) 1
31. Najibi, M., Singh, B., Davis, L.S.: Autofocus: Efficient multi-scale inference. In: ICCV. pp. 9745-9755 (2019) 4
32. Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., et al.: Pytorch: An imperative style, high-performance deep learning library. NeurIPS 32 (2019) 10
33. Qi, C.R., Litany, O., He, K., Guibas, L.J.: Deep hough voting for 3d object detection in point clouds. In: ICCV. pp. 9277-9286 (2019) 3, 11
34. Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: Deep learning on point sets for 3d classification and segmentation. In: CVPR. pp. 652-660 (2017) 1, 3
35. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In: NeurIPS. pp. 5099-5108 (2017) 1, 3
36. Rao, Y., Zhao, W., Liu, B., Lu, J., Zhou, J., Hsieh, C.J.: Dynamicvit: Efficient vision transformers with dynamic token sparsification. NeurIPS 34, 13937-13949 (2021) 4
37. Rozenberszki, D., Litany, O., Dai, A.: Language-grounded indoor 3d semantic segmentation in the wild. In: ECCV. pp. 125-141. Springer (2022) 2
38. Rukhovich, D., Vorontsova, A., Konushin, A.: Fcaf3d: Fully convolutional anchor-free 3d object detection. In: ECCV. pp. 477-493. Springer (2022) 3, 4, 9, 11
39. Rukhovich, D., Vorontsova, A., Konushin, A.: Tr3d: Towards real-time indoor 3d object detection. arXiv preprint arXiv:2302.02858 (2023) 1, 3, 4, 5, 9, 10, 11
40. Shi, S., Guo, C., Jiang, L., Wang, Z., Shi, J., Wang, X., Li, H.: Pv-rcnn: Point-voxel feature set abstraction for 3d object detection. In: CVPR. pp. 10529-10538 (2020) 1
41. Singh, B., Davis, L.S.: An analysis of scale invariance in object detection - snip. In: CVPR. pp. 3578-3587 (2018) 4
42. Singh, B., Najibi, M., Davis, L.S.: Sniper: Efficient multi-scale training. NeurIPS 31 (2018) 4
43. Song, S., Lichtenberg, S.P., Xiao, J.: Sun rgb-d: A rgb-d scene understanding benchmark suite. In: CVPR. pp. 567-576 (2015) 2
44. Tian, Z., Shen, C., Chen, H., He, T.: Fcos: Fully convolutional one-stage object detection. In: ICCV. pp.
9627-9636 (2019) 4, 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 132, + 590, + 481, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 590, + 481, + 610 + ], + "spans": [ + { + "bbox": [ + 132, + 590, + 481, + 610 + ], + "type": "text", + "content": "45. Tong, K., Wu, Y., Zhou, F.: Recent advances in small object detection based on deep learning: A review. IVC 97, 103910 (2020) 3" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 132, + 611, + 481, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 611, + 481, + 643 + ], + "spans": [ + { + "bbox": [ + 132, + 611, + 481, + 643 + ], + "type": "text", + "content": "46. Wang, H., Ding, L., Dong, S., Shi, S., Li, A., Li, J., Li, Z., Wang, L.: Cagroup3d: Class-aware grouping for 3d object detection on point clouds. arXiv preprint arXiv:2210.04264 (2022) 1, 3, 11" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 132, + 643, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 643, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 643, + 481, + 665 + ], + "type": "text", + "content": "47. Wang, H., Shi, S., Yang, Z., Fang, R., Qian, Q., Li, H., Schiele, B., Wang, L.: Rbgnet: Ray-based grouping for 3d object detection. In: CVPR. pp. 1110-1119 (2022) 3, 11" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 332, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 332, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 332, + 102 + ], + "type": "text", + "content": "Authors Suppressed Due to Excessive Length" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 403 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 148 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 148 + ], + "type": "text", + "content": "48. Wang, J., Sun, K., Cheng, T., Jiang, B., Deng, C., Zhao, Y., Liu, D., Mu, Y., Tan, M., Wang, X., et al.: Deep high-resolution representation learning for visual recognition. TPAMI 43(10), 3349-3364 (2020) 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 149, + 482, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 149, + 482, + 171 + ], + "spans": [ + { + "bbox": [ + 130, + 149, + 482, + 171 + ], + "type": "text", + "content": "49. Xie, Q., Lai, Y.K., Wu, J., Wang, Z., Zhang, Y., Xu, K., Wang, J.: Mlcvnet: Multi-level context votenet for 3d object detection. In: CVPR. pp. 10447-10456 (2020) 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 172, + 482, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 172, + 482, + 193 + ], + "spans": [ + { + "bbox": [ + 130, + 172, + 482, + 193 + ], + "type": "text", + "content": "50. Xu, M., Chen, P., Liu, H., Han, X.: To-scene: A large-scale dataset for understanding 3d tabletop scenes. In: ECCV. pp. 340-356. 
Springer (2022) 2, 3, 4, 9, 11" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 194, + 482, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 194, + 482, + 225 + ], + "spans": [ + { + "bbox": [ + 130, + 194, + 482, + 225 + ], + "type": "text", + "content": "51. Xu, X., Wang, Y., Zheng, Y., Rao, Y., Zhou, J., Lu, J.: Back to reality: Weakly-supervised 3d object detection with shape-guided label enhancement. In: CVPR. pp. 8438-8447 (2022) 2, 4, 10, 11" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 227, + 482, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 227, + 482, + 248 + ], + "spans": [ + { + "bbox": [ + 130, + 227, + 482, + 248 + ], + "type": "text", + "content": "52. Xu, X., Wang, Z., Zhou, J., Lu, J.: Binarizing sparse convolutional networks for efficient point cloud analysis. arXiv preprint arXiv:2303.15493 (2023) 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 249, + 482, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 249, + 482, + 270 + ], + "spans": [ + { + "bbox": [ + 130, + 249, + 482, + 270 + ], + "type": "text", + "content": "53. Yang, C., Huang, Z., Wang, N.: Querydet: Cascaded sparse query for accelerating high-resolution small object detection. In: CVPR. pp. 13668-13677 (2022) 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 271, + 482, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 271, + 482, + 293 + ], + "spans": [ + { + "bbox": [ + 130, + 271, + 482, + 293 + ], + "type": "text", + "content": "54. Zhang, Z., Sun, B., Yang, H., Huang, Q.: H3dnet: 3d object detection using hybrid geometric primitives. In: ECCV. pp. 311-329 (2020) 3, 11" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 293, + 482, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 293, + 482, + 325 + ], + "spans": [ + { + "bbox": [ + 130, + 293, + 482, + 325 + ], + "type": "text", + "content": "55. Zhao, T., Ning, X., Hong, K., Qiu, Z., Lu, P., Zhao, Y., Zhang, L., Zhou, L., Dai, G., Yang, H., et al.: Ada3d: Exploiting the spatial redundancy with adaptive inference for efficient 3d object detection. arXiv preprint arXiv:2307.08209 (2023) 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 326, + 482, + 347 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 326, + 482, + 347 + ], + "spans": [ + { + "bbox": [ + 130, + 326, + 482, + 347 + ], + "type": "text", + "content": "56. Zheng, W., Tang, W., Jiang, L., Fu, C.W.: Se-ssd: Self-ensembling single-stage object detector from point cloud. In: CVPR. pp. 14494–14503 (2021) 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 348, + 482, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 348, + 482, + 379 + ], + "spans": [ + { + "bbox": [ + 130, + 348, + 482, + 379 + ], + "type": "text", + "content": "57. Zhu, Y., Mottaghi, R., Kolve, E., Lim, J.J., Gupta, A., Fei-Fei, L., Farhadi, A.: Target-driven visual navigation in indoor scenes using deep reinforcement learning. In: ICRA. pp. 3357-3364 (2017) 1" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 380, + 482, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 380, + 482, + 403 + ], + "spans": [ + { + "bbox": [ + 130, + 380, + 482, + 403 + ], + "type": "text", + "content": "58. 
Zoph, B., Cubuk, E.D., Ghiasi, G., Lin, T.Y., Shlens, J., Le, Q.V.: Learning data augmentation strategies for object detection. In: ECCV. pp. 566-583. Springer (2020) 3" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 192, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 192, + 91, + 448, + 102 + ], + "type": "text", + "content": "DSPDet3D: 3D Small Object Detection with Dynamic Spatial Pruning" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 471, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 471, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 471, + 91, + 480, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/b6e9bc2f-30dd-47bb-97e3-5a3cad4d6faf_content_list.json b/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/b6e9bc2f-30dd-47bb-97e3-5a3cad4d6faf_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..95249dbfd19818d982fb5f0fe3ed1c794c41b4d9 --- /dev/null +++ b/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/b6e9bc2f-30dd-47bb-97e3-5a3cad4d6faf_content_list.json @@ -0,0 +1,1782 @@ +[ + { + "type": "text", + "text": "3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance", + "text_level": 1, + "bbox": [ + 233, + 140, + 769, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiaoxu Xu $^{1}$ , Yitian Yuan $^{2}$ , Jinlong Li $^{3}$ , Qiudan Zhang $^{1}$ , Zequn Jie $^{2}$ , Lin Ma $^{2}$ , Hao Tang $^{4,5}$ , Nicu Sebe $^{3}$ , and Xu Wang $^{1*}$", + "bbox": [ + 217, + 210, + 785, + 244 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 College of Computer Science and Software Engineering, Shenzhen University, Shenzhen, 518060, China.", + "bbox": [ + 236, + 253, + 764, + 281 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2 Meituan Inc., China.", + "bbox": [ + 421, + 282, + 580, + 296 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3 University of Trento, Italy.", + "bbox": [ + 403, + 296, + 596, + 310 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "4 Peking University, China.", + "bbox": [ + 406, + 310, + 593, + 323 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "5 Carnegie Mellon University, USA.", + "bbox": [ + 380, + 323, + 620, + 337 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. In this paper, we propose 3DSS-VLG, a weakly supervised approach for 3D Semantic Segmentation with 2D Vision-Language Guidance, an alternative approach that a 3D model predicts dense-embedding for each point which is co-embedded with both the aligned image and text spaces from the 2D vision-language model. Specifically, our method exploits the superior generalization ability of the 2D vision-language models and proposes the Embeddings Soft-Guidance Stage to utilize it to implicitly align 3D embeddings and text embeddings. 
Moreover, we introduce the Embeddings Specialization Stage to purify the feature representation with the help of a given scene-level label, specifying a better feature supervised by the corresponding text embedding. Thus, the 3D model is able to gain informative supervisions both from the image embedding and text embedding, leading to competitive segmentation performances. To the best of our knowledge, this is the first work to investigate 3D weakly supervised semantic segmentation by using the textual semantic information of text category labels. Moreover, with extensive quantitative and qualitative experiments, we present that our 3DSS-VLG is able not only to achieve the state-of-the-art performance on both S3DIS and ScanNet datasets, but also to maintain strong generalization capability. The code will be available at https://github.com/xuxiaoxxxx/3DSS-VLG/.", + "bbox": [ + 259, + 363, + 743, + 656 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: 3D Weakly Supervised Semantic Segmentation $\\cdot$ Vision-Language Model", + "bbox": [ + 259, + 667, + 740, + 694 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 715, + 375, + 729 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D point cloud semantic segmentation [13, 16, 27-29, 43] can provide valuable geometric and semantic data about the 3D environment and has gained considerable attention over the past few years. Learning-based semantic segmentation methods have achieved remarkable performance recently, but they need per-point annotations, which is time consuming and labor intensive.", + "bbox": [ + 212, + 742, + 787, + 818 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Corresponding author: wangxu@szu.edu.cn", + "bbox": [ + 217, + 825, + 519, + 839 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1e25a6fa566557f0fd497ec39b3550371babc60a255ebec09c428e50ac2016c6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 330, + 145, + 656, + 209 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/a385dd5c806a4d4dd2a4364120e940854498cd0695d3483cb40fe228acc50b59.jpg", + "image_caption": [ + "(a) Conventional 3D weakly supervised semantic segmentation solution", + "(b) Ours (3DSS-VLG)", + "Fig. 1: Comparison of different approaches. (a) The conventional 3D WSSS approach adopts the coarse-grained CAM method in a global manner and is supervised by scene-level annotations or subcloud-level annotations. (b) Our proposed 3DSS-VLG approach leverages natural 3D-2D correspondence from geometric camera calibration and 2D-text correspondence from vision-language models, to implicitly align texts and 3D point clouds." + ], + "image_footnote": [], + "bbox": [ + 251, + 237, + 750, + 385 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address this issue, existing weakly supervised methods derive the segmentation model with different weak supervisory signals, such as subcloud-level annotations [36], scene-level annotations [18, 31, 45] and so on. As shown in Fig. 1 (a), the 3D Weakly Supervised Semantic Segmentation (3D WSSS) approaches typically adopt a Class Activation Map (CAM) [50] solution. 
Point clouds are first processed by several Multi-Layer Perceptron (MLP) layers to obtain a point cloud feature map, and then this point cloud feature map is processed by a Global Average Pooling (GAP) to get a global classification prediction, which is trained with subcloud-level or scene-level annotations. Given the simple GAP connectivity structure, these methods can easily identify the importance of each point by projecting back the output classification weight onto the point cloud feature maps, a technique we call CAM. In this way, the semantic segmentation for each category is back-derived from the global prediction. Recently, with the remarkable success of 2D vision, some methods [18, 44] also use the 2D module to enhance the 3D WSSS.", + "bbox": [ + 212, + 537, + 787, + 762 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Although leveraging 2D-3D fusion in 3D WSSS seems to be promising, there also exist some problems. Kweon et al. [18] need extra detailed annotations of 2D images. As for MIT [44], although it avoids additional per-point/pixel annotations or per-image class labels, its performance does not meet expectations. Therefore, how to design a network that achieves good performance despite the lack of 2D anno", + "bbox": [ + 212, + 763, + 787, + 839 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 352, + 126 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "tations still remains a big challenge. Meanwhile, we notice that the conventional methods for 3D WSSS only use the scene-level labels or subcloud-level labels to supervise the model, but ignore that the textual category labels such as \"chair, table\" also have semantic meanings and could be embedded to help the model learning. At the same time, we also find that some methods [12, 25, 46] like Openseg [12], which leverage the pretrained vision-language models such as CLIP [30] to establish precise semantic matching relationships between natural languages and 2D images, have achieved good results in 2D open vocabulary semantic segmentation (2D OVSS). The above two points inspire us to consider whether we can use the well-pretrained 2D OVSS model to help the 3D WSSS. As shown in Fig. 1 (b), the point cloud and 2D images could be mutually mapped with geometric projections, and the 2D images and textual categories could be compared with pretrained vision-language models. Therefore, why not take the 2D images as a bridge, leveraging the correspondences between point clouds and images, and between images and natural languages, to implicitly build matching relationships between point clouds and natural languages?", + "bbox": [ + 212, + 145, + 787, + 388 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To this end, we propose a simple yet effective method, namely 3DSS-VLG, i.e., a weakly supervised approach for 3D Semantic Segmentation with 2D Vision-Language Guidance. Our 3DSS-VLG only needs to use 2D images, and does not need their 2D image-level annotations during training. Specifically, for the input 3D point cloud, the dataset also provides a set of multi-view images corresponding to it. We first process these multi-view images using the image encoder of the pretrained off-the-shelf 2D OVSS model such as Openseg [12] to get the 2D embeddings. 
Then, for each point in the 3D point cloud, we project it to the multi-view images with geometric projections, and integrate the corresponding 2D embeddings to get the 2D-projected embeddings for the point. Next, we utilize the text module of the 2D OVSS model to obtain the textual embeddings of each semantic category label. Since, in the embedding space of the 2D OVSS model, the textual category labels and 2D images could be directly compared, we only need to learn a 3D backbone which could generate 3D embeddings aligned with the 2D embeddings; thus, the category labels and the 3D point cloud could be implicitly compared.", + "bbox": [ + 212, + 393, + 787, + 636 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Actually, if the 3D embedding is learned well enough, it can be directly compared with the text embedding by a similarity measurement to classify. However, we find that only relying on pulling the 2D-projected embeddings and 3D embeddings closer is not reliable, since the pretrained 2D OVSS model is designed to learn general knowledge and does not have specialized knowledge of the indoor point cloud scene. Therefore, we propose to alleviate this problem by three stages. (1) First, as shown in Fig. 2, we perform matrix multiplication on projected 2D embeddings and text embeddings of category labels and get the classification logits. Then, we use the scene-level labels as a mask to filter out some confusing and unreliable predictions in the classification logits and thus get a more reliable pseudo label vector. (2) Second, as shown in Fig. 3 (a), we propose the Embeddings Specialization Stage, which transfers the 2D-projected embeddings with an adapter module to obtain adapted 3D embeddings, and the", + "bbox": [ + 212, + 643, + 787, + 840 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "3DSS with 2D Vision-Language Guidance", + "bbox": [ + 450, + 114, + 730, + 128 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "training of this adapter module will be supervised with the pseudo label vector. This stage is designed to induce more reliable target 3D embeddings suited for the indoor point cloud scene from the 2D-projected embeddings. (3) Finally, as shown in Fig. 3 (b), we design the Embeddings Soft-Guidance Stage, which freezes the adapter module introduced in the second stage and leverages cosine similarity to align the adapted 3D embeddings and the MinkowskiNet [9] 3D embeddings. Combining the above three stages, we can learn a more reliable 3D embedding space for semantic segmentation in indoor point cloud scenes. In the inference procedure, we only need to compare the MinkowskiNet 3D embeddings of the point cloud and the text embeddings of the semantic category labels, thus accomplishing the 3D semantic segmentation. 
Note that we do not need 2D images to participate in the inference process of our model.", + "bbox": [ + 212, + 146, + 787, + 325 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In summary, the main contributions of this paper are as follows:", + "bbox": [ + 238, + 328, + 702, + 343 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a weakly supervised method 3DSS-VLG for 3D WSSS, which takes 2D images as a bridge, and leverages natural 3D-2D correspondence from geometric camera calibration and 2D-text correspondence from vision-language models to implicitly establish the semantic relationships between texts and 3D point clouds.", + "- We design a three-stage training procedure to learn a reliable 3D embedding space in 3DSS-VLG for 3D semantic segmentation. The Embeddings Soft-Guidance Stage is designed to utilize the pretrained 2D vision-language model to provide an embedding space for 3D point cloud representation with the MinkowskiNet 3D backbone. Moreover, we propose the Embeddings Specialization Stage to make the embedding space more robust based on pseudo label filtering with indoor point cloud scene knowledge.", + "- Extensive experiments on the ScanNet and S3DIS datasets show that the proposed 3DSS-VLG significantly outperforms the previous state-of-the-art methods, even Kweon et al., which uses extra 2D image-level annotations. Moreover, our further experiments show our 3DSS-VLG has strong generalization capability and can be extended to handle unobserved general datasets." + ], + "bbox": [ + 225, + 352, + 785, + 609 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 215, + 632, + 387, + 648 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.1 2D Open-Vocabulary Semantic Segmentation", + "text_level": 1, + "bbox": [ + 215, + 664, + 633, + 679 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The recent advances of large vision-language models have enabled a remarkable level of robustness in open-vocabulary semantic segmentation [6,25,39-41]. Open-vocabulary semantic segmentation aims to segment target categories that cannot be accessed during the training procedure. Pioneering work ZS3Net [4] uses generative models to synthesize pixel-level features by word embeddings of unseen classes. SPNet [37] encodes visual features into the semantic embedding space to align with text embeddings. More recently, researchers propose to leverage the pretrained CLIP [30] for open-vocabulary semantic segmentation. ZSSeg [41] leverages the visual module to generate class-agnostic masks and uses the pretrained text encoder to retrieve the unseen class masks. OpenSeg [12]", + "bbox": [ + 212, + 689, + 787, + 840 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 352, + 127 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "proposes to align the segment-level visual embeddings with text embeddings via region-word grounding. In this work, we solely rely on pretrained 2D open-vocabulary models and perform 3D weakly supervised semantic segmentation tasks. 
We pull the 3D embeddings closer to the 2D embeddings, whose features are extracted from the pretrained model and back-projected onto the point cloud, to implicitly align 3D embeddings and text embeddings.", + "bbox": [ + 212, + 146, + 787, + 238 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.2 3D Weakly Supervised Semantic Segmentation", + "text_level": 1, + "bbox": [ + 214, + 258, + 648, + 273 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This task aims to learn point cloud semantic segmentation using weakly annotated data, such as sparsely labeled points [15, 48], box-level labels [8], subcloud-level labels [36] and scene-level labels [9, 18, 31, 44]. Though the state-of-the-art methods based on sparsely labeled points show performance comparable to that of supervised ones, they require at least partial point-wise annotation in a scene, which is still expensive compared to subcloud-level labels and scene-level labels. The pipeline of the conventional CAM solution has been used in the majority of previous 3D WSSS works and only treats the scene-level labels as one-hot vectors. MPRM [36] proposes the subcloud-level annotation method that samples subclouds from the full scene and annotates them, which can alleviate the class imbalance issue commonly appearing in almost every scene. However, subcloud-level annotation requires dividing the point cloud into small subclouds and annotating more than one of them per scene, which is troublesome and time-consuming. Therefore, some methods that use scene-level annotations have been proposed for 3D WSSS. Kweon et al. [18] utilize 2D and 3D data for semantic segmentation and achieve good performance, but require extra 2D image-level labels. MIT [44] proposes the interlaced transformer structure to fuse 2D-3D information with only scene-level labels. However, its performance is not as good as expected.", + "bbox": [ + 212, + 281, + 787, + 568 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Therefore, in this work, we explore a 3D WSSS method with only scene-level labels. Unlike those previous works, we use the semantic meanings of textual category labels to assist in model learning. Moreover, the performance of our 3DSS-VLG surpasses that of Kweon et al., which uses extra 2D image-level labels.", + "bbox": [ + 212, + 569, + 787, + 630 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.3 2D Semantic in 3D task", + "text_level": 1, + "bbox": [ + 214, + 650, + 460, + 664 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Studies on 3D object detection and semantic segmentation [1,2,5,11,14,20-24] have explored the use of 2D image semantics to assist 3D tasks. There are mainly two approaches: concatenating the image embeddings with each point in the 3D scene as extra information [7,17,42,47] or projecting image semantic results into a 3D space to assist 3D semantic segmentation [19,26,38]. Previous studies usually used 2D image semantics as extra inputs in both training and inference. Although performance has improved, the extra 2D inputs have the potential to constrain the range of application scenarios. This is due to the fact that 2D information may be absent during inference or necessitate laborious pre-processing. 
In this paper, we aim to investigate the potential of using 2D semantics exclusively during training to assist in the 3D WSSS task.", + "bbox": [ + 212, + 672, + 787, + 840 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "3DSS with 2D Vision-Language Guidance", + "bbox": [ + 450, + 114, + 730, + 128 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/ec7a5e1c73ffcde778c6f22944ef831adb8bf8e68fea90a74161e849726952de.jpg", + "image_caption": [ + "Fig. 2: The proposed pseudo label generation procedure. We first leverage the text encoder $\\varepsilon^{text}$ of Openseg to get embeddings of the full category labels $\\mathbf{F}^C$ , and leverage the 2D image encoder $\\varepsilon^{2D}$ of Openseg to get embeddings of the 2D image $\\mathbf{F}^{2D}$ . It is important to note that we freeze the whole Openseg model during the procedure of pseudo label generation. Then we back-project the 2D embeddings $\\mathbf{F}^{2D}$ to integrate the 2D-projected embeddings $\\mathbf{P}^{2D}$ . Specifically, for each point in the point cloud $(x^{3D}, y^{3D}, z^{3D})$ , we use geometric camera calibration matrixes $GCCM^{img}$ to calculate the corresponding positions $(x^{2D}, y^{2D})$ on the multi-view images $S$ . Then we integrate these corresponding 2D embeddings in $\\mathbf{F}^{2D}$ and average them to get the 2D-projected embeddings $\\mathbf{P}^{2D}$ . We perform matrix multiplication on $\\mathbf{F}^C$ and $\\mathbf{P}^{2D}$ , and get the 3D point cloud semantic segmentation prediction logits $\\mathbf{L}^{2D}$ . Finally we utilize the scene-level labels as mask $M$ to filter out some confusing and unreliable predictions in the classification and get the more accurate predicted logits $\\mathbf{L}^f$ and pseudo labels $\\mathbf{Y}$ ." + ], + "image_footnote": [], + "bbox": [ + 243, + 142, + 767, + 342 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3 The Proposed Method", + "text_level": 1, + "bbox": [ + 215, + 564, + 472, + 583 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we will first introduce the procedure of pseudo label generation in Sec. 3.1. Then, we will demonstrate the training procedure of our 3DSS-VLG in Sec. 3.2 and Sec. 3.3. Finally, we will describe the 3DSS-VLG inference procedure in Sec. 3.4.", + "bbox": [ + 212, + 595, + 787, + 657 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1 Pseudo Label Generation Stage", + "text_level": 1, + "bbox": [ + 215, + 679, + 522, + 695 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "This stage aims to utilize the pretrained vision-language model and scene-level labels to generate more precise pseudo label. Given an input point cloud with multi-view images as shown in Fig. 2, we first implement dense 2D embeddings extraction for each RGB image via the frozen visual encoder of Openseg [12], and back-project them onto the 3D surface points of a scene to integrate the 2D-projected embeddings. Afterward, more accurate pseudo labels are generated based on 2D-projected embeddings, text embeddings and scene-level labels.", + "bbox": [ + 212, + 704, + 787, + 809 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "2D Embeddings Extraction. 
The inputs of 3DSS-VLG comprise a scene with a 3D point cloud, scene-level labels and the associated multi-view RGB image", + "bbox": [ + 212, + 809, + 787, + 839 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 352, + 126 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "set. The RGB image set $S$ consists of $T$ images with a resolution of $H\\times W$. The point cloud $\\mathbf{X}\\in \\mathbb{R}^{N\\times 6}$ contains $N$ points in the scene, and each point is represented with six dimensions of RGBXYZ. We leverage the pretrained image encoder of OpenSeg [12] to get per-pixel embeddings, denoted as $\\mathbf{F}^{2D}\\in \\mathbb{R}^{T\\times H\\times W\\times d}$, where $d$ is the 2D embedding dimension. For each point in the 3D point cloud, we project it onto multi-view images through geometric camera calibration matrices and get the corresponding 2D positions. Then we can extract the corresponding projected 2D embeddings from $\\mathbf{F}^{2D}$ according to the calculated 2D image positions. Since each point may have multiple correspondences in different images, the final 2D-projected embedding $\\mathbf{P}^{2D}\\in \\mathbb{R}^{N\\times d}$ is obtained by averaging all the corresponding projected 2D embeddings of each point.", + "bbox": [ + 212, + 146, + 787, + 325 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Text Embeddings Extraction. We take the text encoder of Openseg to extract text embeddings $\\mathbf{F}^C\\in \\mathbb{R}^{K\\times d}$ of the full category labels, where $K$ denotes the number of categories. Similarly, we also freeze the text encoder and directly load the pretrained Openseg parameters.", + "bbox": [ + 212, + 328, + 787, + 388 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Filtering Strategy. After getting the 2D-projected embeddings $\\mathbf{P}^{2D}$ and the text embeddings $\\mathbf{F}^C$, we perform matrix multiplication on them and obtain the classification logits $\\mathbf{L}^{2D} \\in \\mathbb{R}^{N \\times K}$. To make the classification logits more reliable, the filtering strategy is employed to filter out confusing and unreliable predictions. For instance, as shown in Fig. 2, we create a boolean scene-level label mask $\\mathbf{M} \\in \\mathbb{R}^{1 \\times K}$, where the element value in the mask indicates whether the corresponding category exists. Finally, we perform a matrix inner product on the classification logits $\\mathbf{L}^{2D}$ and the scene-level label mask $\\mathbf{M}$ and obtain the filtered classification logits $\\mathbf{L}^f \\in \\mathbb{R}^{N \\times K}$. After ranking the filtered classification logits $\\mathbf{L}^f$, we can get the more precise pseudo labels $\\mathbf{Y} \\in \\mathbb{R}^N$ of the input point cloud.", + "bbox": [ + 212, + 388, + 787, + 542 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.2 Embeddings Specialization Stage", + "text_level": 1, + "bbox": [ + 215, + 568, + 532, + 584 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As we know, the 2D OVSS model is designed to learn general knowledge and does not have any specialized knowledge of the indoor point cloud scene. Therefore, only relying on the 2D embeddings to build the 3D-text correlation will make the 3D WSSS process unreliable. 
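To make the Sec. 3.1 pseudo label generation above concrete, the following is a minimal PyTorch-style sketch. It assumes the OpenSeg embeddings $\mathbf{F}^{2D}$ and $\mathbf{F}^C$ are precomputed tensors; the function name, the `world2cam`/`intrinsics` arguments, and the omission of depth-based occlusion tests are illustrative assumptions, not the paper's exact implementation.

```python
import torch

def generate_pseudo_labels(points, feat_2d, world2cam, intrinsics,
                           text_emb, scene_mask):
    """points: (N, 3) world coords; feat_2d: (T, H, W, d) per-pixel F^2D;
    world2cam: (T, 4, 4); intrinsics: (T, 3, 3);
    text_emb: (K, d) F^C; scene_mask: (K,) bool scene-level label mask M."""
    T, H, W, d = feat_2d.shape
    N = points.shape[0]
    feat_sum = torch.zeros(N, d)
    hits = torch.zeros(N, 1)
    homo = torch.cat([points, torch.ones(N, 1)], dim=1)        # (N, 4)
    for t in range(T):
        cam = (world2cam[t] @ homo.T).T[:, :3]                 # camera coords
        uvw = (intrinsics[t] @ cam.T).T                        # pixel coords
        w = uvw[:, 2]
        u = (uvw[:, 0] / w.clamp(min=1e-6)).long()
        v = (uvw[:, 1] / w.clamp(min=1e-6)).long()
        ok = (w > 0) & (u >= 0) & (u < W) & (v >= 0) & (v < H)
        feat_sum[ok] += feat_2d[t, v[ok], u[ok]]               # gather pixels
        hits[ok] += 1
    p2d = feat_sum / hits.clamp(min=1)                         # P^2D, (N, d)
    logits = p2d @ text_emb.T                                  # L^2D, (N, K)
    logits = logits.masked_fill(~scene_mask, float("-inf"))    # filtered L^f
    return p2d, logits.argmax(dim=1)                           # pseudo labels Y
```

Only $\mathbf{P}^{2D}$ and the pseudo labels are reused by the later stages; consistent with the paper, the images themselves are not needed at inference.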
To mitigate this issue, the Embeddings Specialization Stage is proposed to further improve the perception of indoor knowledge in the 3D embeddings.", + "bbox": [ + 212, + 598, + 787, + 688 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Specifically, the 2D-projected embeddings $\\mathbf{P}^{2D}$ of the input are transferred into another space through the adapter module, which simply contains two fully-connected layers. Besides, to keep both the source and adapted semantics, we employ residual connections to get the adapted 3D embeddings $\\mathbf{A}^{3D} \\in \\mathbb{R}^{N \\times d}$:", + "bbox": [ + 212, + 689, + 787, + 750 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf{A}^{3D} = \\alpha \\cdot \\mathrm{MLP}(\\mathbf{P}^{2D}) + (1 - \\alpha) \\cdot \\mathbf{P}^{2D}, \\tag{1}\n$$\n", + "text_format": "latex", + "bbox": [ + 361, + 767, + 785, + 785 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\alpha$ is the ratio of the residual connection. Next, we perform matrix multiplication on the text embeddings $\\mathbf{F}^C$ and the adapted 3D embeddings $\\mathbf{A}^{3D}$ and obtain the classification logits $\\mathbf{L}^a\\in \\mathbb{R}^{N\\times K}$. The softmax layer is applied on $\\mathbf{L}^a$ and", + "bbox": [ + 212, + 794, + 787, + 840 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "3DSS with 2D Vision-Language Guidance", + "bbox": [ + 450, + 114, + 732, + 128 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9600d967b6292825c681f07e3860caad03c21a1e646f944b7dd688b370e2d117.jpg", + "image_caption": [ + "Fig. 3: The proposed training procedure of our 3DSS-VLG. It is mainly divided into two stages: (a) Embeddings Specialization Stage and (b) Embeddings Soft-Guidance Stage. For (a), we first utilize the text encoder $\\varepsilon^{text}$ of Openseg to obtain embeddings of the category labels $\\mathbf{F}^C$, which are frozen during the training procedure of (a). Meanwhile, we get the initial 2D-projected embeddings $\\mathbf{P}^{2D}$ from the 2D module and leverage the adapter module to transfer $\\mathbf{P}^{2D}$ to a new embedding space to obtain the adapted 3D embeddings $\\mathbf{A}^{3D}$. We perform matrix multiplication on $\\mathbf{A}^{3D}$ and $\\mathbf{F}^C$ and get the predicted probability $\\mathbf{L}^a$. Finally, we use the pseudo labels $\\mathbf{Y}$ to supervise the model, and the green dashed lines denote back-propagation of the loss $\\mathcal{L}_a$. For (b), we first utilize the adapter module and obtain the adapted 3D embeddings $\\mathbf{A}^{3D}$. It is important to note that we freeze the adapter module during the training procedure of (b). Meanwhile, we use the 3D module $\\varepsilon^{3D}$ to obtain the 3D embeddings $\\mathbf{F}^{3D}$. The cosine similarity loss $\\mathcal{L}_s$ will be integrated to train the model. The red dashed lines denote back-propagation of the loss $\\mathcal{L}_s$." + ], + "image_footnote": [], + "bbox": [ + 238, + 143, + 764, + 406 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "a classification cross-entropy loss $\\mathcal{L}_a$ is introduced to supervise the procedure. 
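As a companion to Eq. (1), here is a hypothetical PyTorch sketch of this stage: an adapter of two fully-connected layers with the residual connection, trained with a cross-entropy loss against the pseudo labels $\mathbf{Y}$. The hidden width and the value of $\alpha$ are assumptions for illustration; the paper does not fix them here.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class Adapter(nn.Module):
    """Two FC layers plus a residual connection, as in Eq. (1)."""
    def __init__(self, d, alpha=0.5):
        super().__init__()
        self.alpha = alpha  # illustrative residual ratio
        self.mlp = nn.Sequential(nn.Linear(d, d), nn.ReLU(), nn.Linear(d, d))

    def forward(self, p2d):
        # A^3D = alpha * MLP(P^2D) + (1 - alpha) * P^2D
        return self.alpha * self.mlp(p2d) + (1 - self.alpha) * p2d

def ess_step(adapter, p2d, text_emb, pseudo_labels):
    a3d = adapter(p2d)            # adapted 3D embeddings A^3D, (N, d)
    logits = a3d @ text_emb.T     # classification logits L^a, (N, K)
    return F.cross_entropy(logits, pseudo_labels)  # softmax + CE gives L_a
```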
Here we leverage the pseudo labels $\\mathbf{Y}$ of the point cloud to supervise the model.", + "bbox": [ + 212, + 638, + 785, + 670 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Just by introducing the above simple adapter module, we can make the learned adapted embeddings have better semantic awareness of the point clouds of indoor scenes, thus assisting the 3D WSSS task.", + "bbox": [ + 212, + 670, + 785, + 714 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.3 Embeddings Soft-Guidance Stage", + "text_level": 1, + "bbox": [ + 214, + 738, + 537, + 753 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Since Openseg has established a high level of semantic alignment between 2D embeddings and text embeddings, we propose the Embeddings Soft-Guidance Stage, which can naturally take the 2D embeddings as a bridge to implicitly align the 3D embeddings and text embeddings via cosine similarity. Specifically, as shown in Fig. 3 (b), we take the point cloud $\\mathbf{X}$ as input, and use Minkowsk-", + "bbox": [ + 212, + 763, + 785, + 840 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 352, + 126 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "iNet18A UNet [9] as our 3D module; meanwhile, we change the dimension of the outputs to $d$. Therefore, we can get the learned 3D embeddings $\\mathbf{F}^{3D} \\in \\mathbb{R}^{N \\times d}$. Then we take the corresponding 2D-projected embeddings $\\mathbf{P}^{2D}$ as input, process them with the adapter module, and get the adapted 3D embeddings $\\mathbf{A}^{3D}$. We follow the typical cosine similarity loss by pulling the paired 3D embeddings $\\mathbf{F}^{3D}$ and adapted 3D embeddings $\\mathbf{A}^{3D}$ closer. We need to note that we freeze the adapter module and directly load the parameters provided by Sec. 3.2 during training. Therefore, we define the 3DSS-VLG loss as:", + "bbox": [ + 212, + 146, + 787, + 268 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{s} = 1 - \\cos(\\mathbf{F}^{3D}, \\mathbf{A}^{3D}). \\tag{2}\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 280, + 785, + 299 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "3.4 Inference", + "text_level": 1, + "bbox": [ + 214, + 316, + 339, + 329 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "During inference, we only retain the 3D and text modules and remove the 2D module. Specifically, we take the 3D embeddings $\\mathbf{F}^{3D}$ from the 3D module, as well as the category embeddings $\\mathbf{F}^C$ from the text module, perform matrix multiplication on them, and get the classification logits. Finally, we rank the logits and obtain the final per-point segmentation for the input point cloud $\\mathbf{X}$.", + "bbox": [ + 212, + 335, + 787, + 412 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 214, + 431, + 375, + 448 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this section, we first present our experimental settings, including datasets, evaluation metrics, and implementation details. The competing methods are then presented and compared. 
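Before turning to the experiments, a brief hypothetical sketch of the Eq. (2) objective and the Sec. 3.4 inference step may help; `f3d` stands for the MinkowskiNet embeddings $\mathbf{F}^{3D}$, and the frozen adapter output $\mathbf{A}^{3D}$ serves as the distillation target.

```python
import torch
import torch.nn.functional as F

def esgs_loss(f3d, a3d):
    # L_s = 1 - cos(F^3D, A^3D), averaged over the N points (Eq. 2);
    # a3d comes from the frozen adapter, so it is detached from the graph.
    return (1.0 - F.cosine_similarity(f3d, a3d.detach(), dim=-1)).mean()

@torch.no_grad()
def segment(f3d, text_emb):
    # Inference: per-point logits F^3D @ (F^C)^T, then rank over K classes.
    return (f3d @ text_emb.T).argmax(dim=-1)
```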
Finally, ablation studies are provided to further demonstrate the necessity and effectiveness of each component of our framework.", + "bbox": [ + 212, + 459, + 787, + 521 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.1 Datasets and Evaluation Metrics", + "text_level": 1, + "bbox": [ + 214, + 537, + 532, + 551 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We evaluate our 3DSS-VLG on two publicly and widely-used large-scale point cloud datasets with multi-view images, S3DIS [3] and ScanNet [10]. S3DIS is proposed for indoor scene understanding. It consists of 6 areas including 271 rooms with 13 classes. Each room is scanned via RGBD sensors and is represented by a point cloud with 3D coordinates and RGB values. Following previous works, we take area 5 as the test scene. ScanNet [10] has 1513 training scenes and 100 test scenes with 20 classes. We adopt the default train-val split setting, where there are 1201 training scenes and 312 validation scenes. The mean intersection over Union (mIoU) is employed as the evaluation metric for datasets.", + "bbox": [ + 212, + 559, + 787, + 695 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.2 Implementations Details", + "text_level": 1, + "bbox": [ + 214, + 713, + 465, + 728 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "3DSS-VLG is implemented by PyTorch. For the training procedure of Sec. 3.2, we use Adam optimizer with batch size of 16 and set an initial learning rate of 0.003 for the model. We reduce the learning rate by a multiplying factor of 0.7 every 20 epochs for a total of 80 epochs. For the training procedure of Sec. 3.3, the model optimization is conducted using Adam optimizer with a batch size of 8. We set an initial learning rate of 0.0001 for the model and use the poly learning rate policy to adjust the learning rate.", + "bbox": [ + 212, + 733, + 787, + 840 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "3DSS with 2D Vision-Language Guidance", + "bbox": [ + 450, + 114, + 730, + 128 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/ef32bdc2fee69d13f977185257f28a76eb90f8c4098e84c0a15b520af25b988b.jpg", + "table_caption": [ + "Table 1: Performance comparison on the S3DIS dataset. \"Sup.\" indicates the type of supervision. \"100%\" represents full annotation. \"scene.\" denotes scene-level annotation." + ], + "table_footnote": [], + "table_body": "
Method | Label Effort | Sup. | Test
PointNet [27] |  | 100% | 41.1
TangentConv [33] |  | 100% | 52.8
MinkowskiNet [9] |  | 100% | 65.8
KPConv [34] | >20 min | 100% | 67.1
PointTransformer [49] |  | 100% | 70.4
PointNeXt [29] |  | 100% | 70.5
DeepViewAgg [32] |  | 100% | 67.2
SemAffiNet [35] |  | 100% | 71.6
MPRM [36] |  | scene. | 10.3
MIL-Trans [45] |  | scene. | 12.9
WYPR [31] | <1 min | scene. | 22.3
MIT [44] |  | scene. | 27.7
Ours |  | scene. | 45.3
", + "bbox": [ + 341, + 184, + 658, + 383 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.3 3D Semantic Segmentation Results", + "text_level": 1, + "bbox": [ + 215, + 422, + 552, + 439 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We evaluate our proposed approach against state-of-the-art techniques for 3D weakly supervised semantic segmentation with scene-level labels. Firstly, we demonstrate some full supervised point cloud semantic segmentation methods to compare the gap between the performances of ours and full supervised methods. Subsequently, we introduce semantic segmentation methods supervised by scene-level labels or subcloud-level labels and compare them with our method. Meanwhile, we indicate the average annotation time per scene.", + "bbox": [ + 212, + 446, + 787, + 551 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Evaluation on S3DIS. Tab. 1 Show the performance of each type of 3D point cloud semantic segmentation methods evaluated on the S3DIS dataset. We can find that in the scene-level annotations setting, our method greatly surpasses the existing state-of-the-art method MIT [44] by $17.6\\%$ . This shows that using textual semantic information ignored by previous 3D weakly supervised semantic segmentation can significantly improve segmentation performance. The textual semantic information of each category is unique; then the 2D embeddings and 3D embeddings are aligned so that the 3D embeddings can be implicitly aligned to the corresponding unique category semantic information, which allows the model to achieve greater performance improvements.", + "bbox": [ + 212, + 553, + 787, + 704 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Meanwhile, we compare our method with some full supervised methods. It can be observed that our 3DSS-VLG can outperform some fully supervised methods, i.e., PointNet [27]. Moreover, we notice that the annotations cost time of different types of supervision and find that the scene-level annotation is the most efficient compared to other types annotations. Such results demonstrate the effectiveness and potential of our weakly supervised method.", + "bbox": [ + 212, + 704, + 787, + 794 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Evaluation on ScanNet. We also evaluate our 3DSS-VLG on the ScanNet online test set and the validation set and presented the performance results of 3DSS-VLG in Tab. 2. For the test set, it can be observed that our 3DSS-VLG", + "bbox": [ + 212, + 794, + 787, + 839 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 352, + 126 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/7b8dc03c7c4f20306c4427839c62faed6b9581781800cd2cfc7eb81c899f36dc.jpg", + "table_caption": [ + "Table 2: Performance comparison on the ScanNet test set and validation set. \"Sup.\" indicates the type of supervision. \"100%\" represents full annotation. \"subcloud.\" and \"scene.\" imply subcloud-level annotation and scene-level annotation respectively. \"image.\" denotes image-level annotation." + ], + "table_footnote": [], + "table_body": "
Method | Label Effort | Sup. | Test | Val
PointNet++ [28] |  | 100% | 33.9 | -
TangentConv [33] |  | 100% | 43.8 | -
MinkowskiNet [9] |  | 100% | 73.6 | 72.2
KPConv [34] | >20 min | 100% | 68.6 | 69.2
PointTransformer [49] |  | 100% | - | 70.6
PointNeXt [29] |  | 100% | 71.2 | 71.5
DeepViewAgg [32] |  | 100% | - | 71.0
SemAffiNet [35] |  | 100% | 74.9 | -
MPRM [36] | 3 min | subcloud. | 41.1 | 43.2
Kweon et al. [18] | 5 min | scene. + image. | 47.4 | 49.6
MIL-Trans [45] |  | scene. | - | 26.2
WYPR [31] | <1 min | scene. | 24.0 | 29.6
MIT [44] |  | scene. | 31.7 | 35.8
Ours |  | scene. | 48.9 | 49.7
", + "bbox": [ + 290, + 212, + 707, + 426 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "achieves the best performance under only scene-level label supervision and even surpasses the performance of MPRM [36] which is supervised by subcloud-level annotations. Moreover, we are surprised to find that our method also outperforms Kweon et al. [18] by $1.5\\%$ , which uses not only scene-level labels, but also extra image-level labels. Our method can achieve stronger performance with less annotations, further illustrating the superiority of our method. Meanwhile, our 3DSS-VLG can outperform some fully supervised methods. In addition for the validation set, our method also achieves the state-of-the-art during those 3D WSSS approaches. Those results demonstrate the superiority of 3DSS-VLG.", + "bbox": [ + 212, + 468, + 787, + 604 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.4 Ablation Studies", + "text_level": 1, + "bbox": [ + 215, + 630, + 401, + 643 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Effectiveness of Each Components. To demonstrate the advantage of each component in our 3DSS-VLG, we conduct comprehensive ablation studies on the S3DIS dataset, as shown in Tab. 3. The ablation model (a) only retains the MinkowskiNet18A UNet [9] and trains directly with the pseudo labels which are generated without using scene-level labels filtering. The cross-entropy loss is introduced to supervised this procedure. We set model (a) as the baseline of our experiment. Compared to model (a), model (b) is not directly supervised by pseudo labels. It adopts the Embeddings Soft-Guidance Stage (ESGS) and is soft-guided by the 2D-projected embeddings $\\mathbf{P}^{2D}$ . We can find that the performance of mIoU is improved from $37.7\\%$ to $38.2\\%$ . This observation proves that the soft-guidance strategy can guide 3D embeddings to align with the text embeddings and achieve better performance compared to directly using the pseudo", + "bbox": [ + 212, + 657, + 787, + 840 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "3DSS with 2D Vision-Language Guidance", + "bbox": [ + 450, + 114, + 730, + 128 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 116, + 782, + 126 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/25077e9f2bc5257b5e926be0f08ef7c52f52267af0beafd7a9cf6702ae9fa686.jpg", + "table_caption": [ + "Table 3: Ablation studies of the 3DSS-VLG components on S3DIS dataset." + ], + "table_footnote": [], + "table_body": "
Model | ESGS | Filtering | ESS | mIoU
(a) |  |  |  | 37.7
(b) | ✓ |  |  | 38.2
(c) |  | ✓ |  | 42.6
(d) | ✓ | ✓ | ✓ | 45.3
", + "bbox": [ + 395, + 170, + 602, + 246 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/1f89be49054d1ccd0950d54c69db70cb98e12721cb750c971d571a9764023edd.jpg", + "table_caption": [ + "Table 4: Performance comparisons of the generalization capability." + ], + "table_footnote": [], + "table_body": "
Domain | mIoU | mAcc
S3DIS -> ScanNet | 13.4 | 23.0
ScanNet -> S3DIS | 33.3 | 50.9
", + "bbox": [ + 393, + 297, + 602, + 344 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "labels to supervised 3D model. Meanwhile, when we introduce the filtering strategy to model (a), as shown in model (c), we can find that the model performance increases greatly from $37.7\\%$ to $42.6\\%$ . Finally, by adding the filtering strategy to model (b) and utilizing the Embeddings Specialization Stage (ESS), model (d) is supervised by adapted 3D embeddings $\\mathbf{A}^{3D}$ at this time. It can be observed the performance improves from $38.2\\%$ to $45.3\\%$ . Such results prove that our 3DSS-VLG can help the model to get a better, indoor point cloud specific embedding space to align 3D point clouds and text.", + "bbox": [ + 212, + 386, + 784, + 507 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Generalization Capability. Due to the domain gap among different datasets, a model trained on one dataset is not applicable to another dataset. Also, this situation occurs in the 3D WSSS task. Nevertheless, we notice that, compared to previous works, our 3DSS-VLG uses textual semantic information as a guide rather than CAM, which means our model has a good relationship between 3D point cloud and the text of category labels and indicates that the model may have generalization ability. Therefore, we further explore our framework to the novel data of the unobserved scene domains.", + "bbox": [ + 212, + 507, + 785, + 627 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "As shown in Tab. 4, we experimentally verify the generalizability of the proposed method on the S3DIS and ScanNet dataset, respectively. The first row is the performance of model that we first train our model on the S3DIS dataset and then test the trained model on validation set of the ScanNet dataset. The second row is the performance of model that we first train our model on the ScanNet dataset and then test the trained model on the test set of the S3DIS dataset. Compared to those weakly supervised methods with scene-level labels, it can be observed that our 3DSS-VLG has a certain gap with those methods in the first row. However, for the second row, we are supervised to find that our method can outperform all the weakly supervised methods and achieve state-of-the-art performance. The ScanNet dataset provides six times more training scenes than the S3DIS dataset. Therefore, when a model is pretrained on the ScanNet dataset, the model will be more robust than a model pretrained on the S3DIS dataset. Our experimental results also prove this phenomenon.", + "bbox": [ + 212, + 628, + 787, + 840 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 352, + 126 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/f720567dea407a08200f5aa330d44149db4719e90d6f0a1b772122b74289b7f2.jpg", + "table_caption": [ + "Table 5: Performance comparisons with different 3D backbones and ESS module backbones on the S3DIS dataset." + ], + "table_footnote": [], + "table_body": "
ModuleBackbonemIoU
3DMinkowskiNet14A44.5
MinkowskiNet18A45.3
MinkowskiNet34A44.7
ESSTransformers45.0
MLP45.3
", + "bbox": [ + 385, + 184, + 612, + 273 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The results also strongly support the complementary advantages of using text semantic information, even without any further fine-tuning or domain-specific adaptation. Our 3DSS-VLG can be extended to handle unobserved general data and has strong generalization capability, which is promising for the field of 3D WSSS.", + "bbox": [ + 212, + 314, + 782, + 387 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Experiments with Different Backbones. Tab. 5 shows the performances of our method on S3DIS with different 3D backbones and ESS module backbones. Finally, we use the MinkowskiNet18A as our 3D backbone and the FC-layer as the backbone of our ESS.", + "bbox": [ + 212, + 388, + 784, + 448 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.5 Qualitative Results", + "text_level": 1, + "bbox": [ + 215, + 470, + 419, + 486 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Fig. 4 visualizes the qualitative comparison of the proposed framework and baseline. Here the baseline is model (a) which is mentioned in Sec. 4.4. Compared with the result of baseline, our 3DSS-VLG shows significantly better results in the terms of accuracy of semantics and preciseness of segmentation. With the ESGS, ESS and filtering strategies, our 3DSS-VLG can learn a more better indoor point cloud specific embedding space to align 3D point clouds and text and achieve substantial semantic segmentation results compared to the baseline.", + "bbox": [ + 212, + 496, + 784, + 602 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.6 Limitations", + "text_level": 1, + "bbox": [ + 215, + 625, + 356, + 638 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Our work relies on vision-language alignment and does not address how to align visual embeddings with some abstract category text embeddings (e.g. \"other\" class in the S3DIS dataset). It is difficult for the model to understand what the difference is between the \"other\" class and other categories, thus making the wrong segmentation. This limitation is a direct avenue for future work.", + "bbox": [ + 212, + 648, + 784, + 724 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 215, + 747, + 359, + 763 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In this paper, we propose 3DSS-VLG to address the shortage of point-level annotations. 
Specifically, our 3DSS-VLG exploits the superior ability of current vision-language models in aligning the semantics between texts and 2D images, as well as the naturally existing correspondences between 2D images and 3D", + "bbox": [ + 212, + 779, + 784, + 839 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "3DSS with 2D Vision-Language Guidance", + "bbox": [ + 450, + 114, + 730, + 128 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/7711442115bf0b7100fc52c012e0cbb31e8c0b6a0f35b6e29a7b2c7b6f85bda8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 218, + 143, + 359, + 232 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/d9aec7f4432e4773f889b7534d14005610e8123d2fb060365f46b379b3c2685c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 218, + 233, + 359, + 320 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/fce4d96ba0cf08ca7b95010740b5c59f3df176c796a5fb184c6e031f00b00d1c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 220, + 321, + 357, + 407 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/79f24e6eee301eb1cd812bf0ad0f4732a99d275e7d7ccc5f62b0de21671965da.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 220, + 409, + 357, + 500 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/3979f86eb1ddb19df89086a146ebf811d19805ac3efee289ca3770fcc5c7df74.jpg", + "image_caption": [ + "Fig. 4: Qualitative results on the S3DIS dataset of baseline and our 3DSS-VLG. From left to right: input point clouds, ground truth, baseline results, and our 3DSS-VLG results. Color legend: ceiling, floor, beam, window, column, table, sofa, board." + ], + "image_footnote": [], + "bbox": [ + 223, + 516, + 248, + 526 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "point clouds to implicitly co-embed text embeddings with 3D point cloud embeddings using only scene-level labels. With extensive experiments, we verify that the textual semantic information of category labels is beneficial for 3DSS-VLG, which achieves the state-of-the-art on both S3DIS and ScanNet datasets. 
Further, with an experiment that applies our framework to unobserved scene domains, we demonstrate the generalization capability of our method, which supports its practicality.", + "bbox": [ + 212, + 613, + 787, + 720 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 215, + 744, + 401, + 763 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This work was supported in part by the National Natural Science Foundation of China under Grants 62371310 and 62032015, in part by the Guangdong Basic and Applied Basic Research Foundation under Grant 2023A1515011236, in part by the Stable Support Project of Shenzhen (Project No. 20231122122722001), in", + "bbox": [ + 212, + 779, + 787, + 840 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 352, + 126 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "part by the third phase of the high-level university construction interdisciplinary innovation team project of Shenzhen University (24JCXK03). We also acknowledge the CINECA award under the ISCRA initiative for the availability of partial HPC resources, and partial support from the Fundamental Research Funds for the Central Universities, Peking University.", + "bbox": [ + 212, + 146, + 787, + 223 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 217, + 241, + 321, + 258 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Alonso, I., Riazuelo, L., Montesano, L., Murillo, A.C.: 3d-mininet: Learning a 2d representation from point clouds for fast and efficient 3d lidar semantic segmentation. IEEE Robotics and Automation Letters 5(4), 5432-5439 (2020) 5", + "2. Ando, A., Gidaris, S., Bursuc, A., Puy, G., Boulch, A., Marlet, R.: Rangevit: Towards vision transformers for 3d semantic segmentation in autonomous driving. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5240-5250 (2023) 5", + "3. Armeni, I., Sener, O., Zamir, A.R., Jiang, H., Brilakis, I., Fischer, M., Savarese, S.: 3d semantic parsing of large-scale indoor spaces. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1534-1543 (2016) 9", + "4. Bucher, M., Vu, T.H., Cord, M., Pérez, P.: Zero-shot semantic segmentation. Advances in Neural Information Processing Systems 32 (2019) 4", + "5. Cardace, A., Ramirez, P.Z., Salti, S., Di Stefano, L.: Exploiting the complementarity of 2d and 3d networks to address domain-shift in 3d semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 98-109 (2023) 5", + "6. Chen, J., Zhu, D., Qian, G., Ghanem, B., Yan, Z., Zhu, C., Xiao, F., Culatana, S.C., Elhoseiny, M.: Exploring open-vocabulary semantic segmentation from clip vision encoder distillation only. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 699-710 (2023) 4", + "7. Chen, R., Liu, Y., Kong, L., Chen, N., Zhu, X., Ma, Y., Liu, T., Wang, W.: Towards label-free scene understanding by vision foundation models. Advances in Neural Information Processing Systems 36 (2024) 5", + "8. Chibane, J., Engelmann, F., Anh Tran, T., Pons-Moll, G.: Box2mask: Weakly supervised 3d semantic instance segmentation using bounding boxes. 
In: European Conference on Computer Vision. pp. 681-699. Springer (2022) 5", + "9. Choy, C., Gwak, J., Savarese, S.: 4d spatio-temporal convnets: Minkowski convolutional neural networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3075-3084 (2019) 4, 5, 9, 10, 11", + "10. Dai, A., Chang, A.X., Savva, M., Halber, M., Funkhouser, T., Nießner, M.: Scannet: Richly-annotated 3d reconstructions of indoor scenes. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 5828-5839 (2017) 9", + "11. Genova, K., Yin, X., Kundu, A., Pantofaru, C., Cole, F., Sud, A., Brewington, B., Shucker, B., Funkhouser, T.: Learning 3d semantic segmentation with only 2d image supervision. In: 2021 International Conference on 3D Vision (3DV). pp. 361-372 (2021) 5", + "12. Ghiasi, G., Gu, X., Cui, Y., Lin, T.Y.: Scaling open-vocabulary image segmentation with image-level labels. In: European Conference on Computer Vision. pp. 540-557. Springer (2022) 3, 4, 6, 7", + "13. Hegde, D., Valanarasu, J.M.J., Patel, V.: Clip goes 3d: Leveraging prompt tuning for language grounded 3d recognition. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 2028-2038 (2023) 1" + ], + "bbox": [ + 225, + 267, + 785, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "3DSS with 2D Vision-Language Guidance", + "bbox": [ + 450, + 114, + 730, + 128 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "14. Hou, J., Xie, S., Graham, B., Dai, A., Nießner, M.: Pri3d: Can 3d priors help 2d representation learning? In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5693-5702 (2021) 5", + "15. Hu, Q., Yang, B., Fang, G., Guo, Y., Leonardis, A., Trigoni, N., Markham, A.: Sqn: Weakly-supervised semantic segmentation of large-scale 3d point clouds. In: European Conference on Computer Vision. pp. 600-619. Springer (2022) 5", + "16. Hu, Q., Yang, B., Xie, L., Rosa, S., Guo, Y., Wang, Z., Trigoni, N., Markham, A.: Randla-net: Efficient semantic segmentation of large-scale point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11108-11117 (2020) 1", + "17. Hu, W., Zhao, H., Jiang, L., Jia, J., Wong, T.T.: Bidirectional projection network for cross dimension scene understanding. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 14373-14382 (2021) 5", + "18. Kweon, H., Yoon, K.J.: Joint learning of 2d-3d weakly supervised semantic segmentation. Advances in Neural Information Processing Systems 35, 30499-30511 (2022) 2, 5, 11", + "19. Lahoud, J., Ghanem, B.: 2d-driven 3d object detection in rgb-d images. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4622-4630 (2017) 5", + "20. Li, J., Dai, H., Han, H., Ding, Y.: Mseg3d: Multi-modal 3d semantic segmentation for autonomous driving. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21694-21704 (2023) 5", + "21. Li, J., Jie, Z., Ricci, E., Ma, L., Sebe, N.: Enhancing robustness of vision-language models through orthogonality learning and cross-regularization (2024), https://arxiv.org/abs/2407.08374 5", + "22. Li, J., Jie, Z., Wang, X., Wei, X., Ma, L.: Expansion and shrinkage of localization for weakly-supervised semantic segmentation. 
In: Advances in Neural Information Processing Systems. vol. 35, pp. 16037-16051 (2022) 5", + "23. Li, J., Jie, Z., Wang, X., Zhou, Y., Ma, L., Jiang, J.: Weakly supervised semantic segmentation via self-supervised destruction learning. Neurocomputing 561, 126821 (2023) 5", + "24. Li, J., Jie, Z., Wang, X., Zhou, Y., Wei, X., Ma, L.: Weakly supervised semantic segmentation via progressive patch learning. IEEE Transactions on multimedia 25, 1686-1699 (2022) 5", + "25. Liang, F., Wu, B., Dai, X., Li, K., Zhao, Y., Zhang, H., Zhang, P., Vajda, P., Marculescu, D.: Open-vocabulary semantic segmentation with mask-adapted clip. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7061-7070 (2023) 3, 4", + "26. Qi, C.R., Liu, W., Wu, C., Su, H., Guibas, L.J.: Frustum pointnets for 3d object detection from rgb-d data. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 918-927 (2018) 5", + "27. Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: Deep learning on point sets for 3d classification and segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 652-660 (2017) 1, 10", + "28. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems 30 (2017) 1, 11", + "29. Qian, G., Li, Y., Peng, H., Mai, J., Hammoud, H., Elhoseiny, M., Ghanem, B.: Pointnext: Revisiting pointnet++ with improved training and scaling strategies." + ], + "bbox": [ + 215, + 146, + 785, + 840 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 351, + 126 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Advances in Neural Information Processing Systems 35, 23192-23204 (2022) 1, 10, 11", + "30. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning. pp. 8748-8763 (2021) 3, 4", + "31. Ren, Z., Misra, I., Schwing, A.G., Girdhar, R.: 3d spatial recognition without spatially labeled 3d. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13204-13213 (2021) 2, 5, 10, 11", + "32. Robert, D., Vallet, B., Landrieu, L.: Learning multi-view aggregation in the wild for large-scale 3d semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5575-5584 (2022) 10, 11", + "33. Tatarchenko, M., Park, J., Koltun, V., Zhou, Q.Y.: Tangent convolutions for dense prediction in 3d. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3887-3896 (2018) 10, 11", + "34. Thomas, H., Qi, C.R., Deschaud, J.E., Marcotegui, B., Goulette, F., Guibas, L.J.: Kpconv: Flexible and deformable convolution for point clouds. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 6411-6420 (2019) 10, 11", + "35. Wang, Z., Rao, Y., Yu, X., Zhou, J., Lu, J.: Semaffinet: Semantic-affine transformation for point cloud segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11819-11829 (2022) 10, 11", + "36. 
Wei, J., Lin, G., Yap, K.H., Hung, T.Y., Xie, L.: Multi-path region mining for weakly supervised 3d semantic segmentation on point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4384-4393 (2020) 2, 5, 10, 11", + "37. Xian, Y., Choudhury, S., He, Y., Schiele, B., Akata, Z.: Semantic projection network for zero-and few-label semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8256-8265 (2019) 4", + "38. Xu, D., Anguelov, D., Jain, A.: Pointfusion: Deep sensor fusion for 3d bounding box estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 244-253 (2018) 5", + "39. Xu, J., Hou, J., Zhang, Y., Feng, R., Wang, Y., Qiao, Y., Xie, W.: Learning open-vocabulary semantic segmentation models from natural language supervision. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2935-2944 (2023) 4", + "40. Xu, M., Zhang, Z., Wei, F., Hu, H., Bai, X.: Side adapter network for open-vocabulary semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2945-2954 (2023) 4", + "41. Xu, M., Zhang, Z., Wei, F., Lin, Y., Cao, Y., Hu, H., Bai, X.: A simple baseline for open-vocabulary semantic segmentation with pre-trained vision-language model. In: European Conference on Computer Vision. pp. 736-753. Springer (2022) 4", + "42. Xu, X., Yuan, Y., Zhang, Q., Wu, W., Jie, Z., Ma, L., Wang, X.: Weakly-supervised 3d visual grounding based on visual linguistic alignment. arXiv preprint arXiv:2312.09625 (2023) 5", + "43. Yan, X., Gao, J., Zheng, C., Zheng, C., Zhang, R., Cui, S., Li, Z.: 2dpass: 2d priors assisted semantic segmentation on lidar point clouds. In: European Conference on Computer Vision. pp. 677-695. Springer (2022) 1" + ], + "bbox": [ + 215, + 146, + 785, + 840 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "3DSS with 2D Vision-Language Guidance", + "bbox": [ + 450, + 114, + 730, + 128 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "44. Yang, C.K., Chen, M.H., Chuang, Y.Y., Lin, Y.Y.: 2d-3d interlaced transformer for point cloud segmentation with scene-level supervision. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 977–987 (2023) 2, 5, 10, 11", + "45. Yang, C.K., Wu, J.J., Chen, K.S., Chuang, Y.Y., Lin, Y.Y.: An mil-derived transformer for weakly supervised point cloud segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11830-11839 (2022) 2, 10, 11", + "46. Yun, S., Park, S.H., Seo, P.H., Shin, J.: Ifseg: Image-free semantic segmentation via vision-language model. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2967-2977 (2023) 3", + "47. Zhang, R., Wang, L., Qiao, Y., Gao, P., Li, H.: Learning 3d representations from 2d pre-trained models via image-to-point masked autoencoders. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21769-21780 (2023) 5", + "48. Zhang, Y., Hu, Q., Xu, G., Ma, Y., Wan, J., Guo, Y.: Not all points are equal: Learning highly efficient point-based detectors for 3d lidar point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
18953-18962 (2022) 5", + "49. Zhao, H., Jiang, L., Jia, J., Torr, P.H., Koltun, V.: Point transformer. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 16259-16268 (2021) 10, 11", + "50. Zhou, B., Khosla, A., Lapedriza, A., Oliva, A., Torralba, A.: Learning deep features for discriminative localization. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2921-2929 (2016) 2" + ], + "bbox": [ + 215, + 146, + 785, + 494 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 352, + 126 + ], + "page_idx": 17 + } +] \ No newline at end of file diff --git a/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/b6e9bc2f-30dd-47bb-97e3-5a3cad4d6faf_model.json b/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/b6e9bc2f-30dd-47bb-97e3-5a3cad4d6faf_model.json new file mode 100644 index 0000000000000000000000000000000000000000..69ef2a6468d80c96778e1a374e03745e6905e95b --- /dev/null +++ b/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/b6e9bc2f-30dd-47bb-97e3-5a3cad4d6faf_model.json @@ -0,0 +1,2480 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.235, + 0.141, + 0.77, + 0.187 + ], + "angle": 0, + "content": "3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.212, + 0.786, + 0.245 + ], + "angle": 0, + "content": "Xiaoxu Xu\\(^{1}\\), Yitian Yuan\\(^{2}\\), Jinlong Li\\(^{3}\\), Qiudan Zhang\\(^{1}\\), Zequn Jie\\(^{2}\\), Lin Ma\\(^{2}\\), Hao Tang\\(^{4,5}\\), Nicu Sebe\\(^{3}\\), and Xu Wang\\(^{1*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.255, + 0.765, + 0.282 + ], + "angle": 0, + "content": "1 College of Computer Science and Software Engineering, Shenzhen University, Shenzhen, 518060, China." + }, + { + "type": "text", + "bbox": [ + 0.423, + 0.283, + 0.581, + 0.297 + ], + "angle": 0, + "content": "2 Meituan Inc., China." + }, + { + "type": "text", + "bbox": [ + 0.405, + 0.297, + 0.597, + 0.311 + ], + "angle": 0, + "content": "3 University of Trento, Italy." + }, + { + "type": "text", + "bbox": [ + 0.408, + 0.311, + 0.594, + 0.324 + ], + "angle": 0, + "content": "4 Peking University, China." + }, + { + "type": "text", + "bbox": [ + 0.382, + 0.324, + 0.621, + 0.338 + ], + "angle": 0, + "content": "5 Carnegie Mellon University, USA." + }, + { + "type": "text", + "bbox": [ + 0.261, + 0.364, + 0.744, + 0.657 + ], + "angle": 0, + "content": "Abstract. In this paper, we propose 3DSS-VLG, a weakly supervised approach for 3D Semantic Segmentation with 2D Vision-Language Guidance, an alternative approach that a 3D model predicts dense-embedding for each point which is co-embedded with both the aligned image and text spaces from the 2D vision-language model. Specifically, our method exploits the superior generalization ability of the 2D vision-language models and proposes the Embeddings Soft-Guidance Stage to utilize it to implicitly align 3D embeddings and text embeddings. Moreover, we introduce the Embeddings Specialization Stage to purify the feature representation with the help of a given scene-level label, specifying a better feature supervised by the corresponding text embedding. 
Thus, the 3D model is able to gain informative supervision from both the image embeddings and text embeddings, leading to competitive segmentation performance. To the best of our knowledge, this is the first work to investigate 3D weakly supervised semantic segmentation by using the textual semantic information of text category labels. Moreover, with extensive quantitative and qualitative experiments, we show that our 3DSS-VLG not only achieves state-of-the-art performance on both the S3DIS and ScanNet datasets, but also maintains strong generalization capability. The code will be available at https://github.com/xuxiaoxxxx/3DSS-VLG/." + }, + { + "type": "text", + "bbox": [ + 0.261, + 0.668, + 0.741, + 0.695 + ], + "angle": 0, + "content": "Keywords: 3D Weakly Supervised Semantic Segmentation \\(\\cdot\\) Vision-Language Model" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.716, + 0.376, + 0.731 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.743, + 0.788, + 0.819 + ], + "angle": 0, + "content": "3D point cloud semantic segmentation [13, 16, 27-29, 43] can provide valuable geometric and semantic data about the 3D environment and has gained considerable attention over the past few years. Learning-based semantic segmentation methods have achieved remarkable performance recently, but they need per-point annotations, which are time-consuming and labor-intensive." + }, + { + "type": "page_footnote", + "bbox": [ + 0.218, + 0.826, + 0.521, + 0.84 + ], + "angle": 0, + "content": "* Corresponding author: wangxu@szu.edu.cn" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.353, + 0.127 + ], + "angle": 0, + "content": "X. Xu et al." + }, + { + "type": "image", + "bbox": [ + 0.331, + 0.146, + 0.658, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.258, + 0.218, + 0.746, + 0.234 + ], + "angle": 0, + "content": "(a) Conventional 3D weakly supervised semantic segmentation solution" + }, + { + "type": "image", + "bbox": [ + 0.252, + 0.238, + 0.75, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.432, + 0.389, + 0.586, + 0.404 + ], + "angle": 0, + "content": "(b) Ours (3DSS-VLG)" + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.422, + 0.788, + 0.504 + ], + "angle": 0, + "content": "Fig. 1: Comparison of different approaches. (a) The conventional 3D WSSS approach adopts the coarse-grained CAM method in a global manner and is supervised by scene-level annotations or subcloud-level annotations. (b) Our proposed 3DSS-VLG approach leverages natural 3D-2D correspondence from geometric camera calibration and 2D-text correspondence from vision-language models, to implicitly align texts and 3D point clouds." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.538, + 0.788, + 0.763 + ], + "angle": 0, + "content": "To address this issue, existing weakly supervised methods derive the segmentation model with different weak supervisory signals, such as subcloud-level annotations [36], scene-level annotations [18, 31, 45] and so on. As shown in Fig. 1 (a), the 3D Weakly Supervised Semantic Segmentation (3D WSSS) approaches typically adopt a Class Activation Map (CAM) [50] solution. 
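As a concrete reference for the CAM pipeline spelled out in the next few sentences, here is a minimal PyTorch sketch; the layer widths, class count, and names are illustrative assumptions, not the configuration of any cited method.

```python
import torch
import torch.nn as nn

class PointCAM(nn.Module):
    """Minimal CAM baseline trained with scene-level labels (illustrative sketch)."""
    def __init__(self, in_dim=6, feat_dim=64, num_classes=13):
        super().__init__()
        # Per-point MLP that produces the point cloud feature map.
        self.mlp = nn.Sequential(
            nn.Linear(in_dim, feat_dim), nn.ReLU(),
            nn.Linear(feat_dim, feat_dim), nn.ReLU(),
        )
        # One shared classifier: applied globally for training,
        # and per point to read off the class activation maps.
        self.cls = nn.Linear(feat_dim, num_classes)

    def forward(self, points):                       # points: (N, in_dim)
        feats = self.mlp(points)                     # (N, feat_dim) feature map
        scene_logits = self.cls(feats.mean(dim=0))   # GAP, scene-level prediction
        cam = self.cls(feats)                        # (N, K) per-point activations
        return scene_logits, cam
```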
Point clouds are first processed by several Multi-Layer Perceptron (MLP) layers to obtain a point cloud feature map; this feature map is then processed by Global Average Pooling (GAP) to get a global classification prediction, which is trained with subcloud-level or scene-level annotations. Given the simple GAP connectivity structure, these methods can easily identify the importance of each point by projecting the output classification weights back onto the point cloud feature maps, a technique we call CAM. In this way, the semantic segmentation for each category is back-derived from the global prediction. Recently, with the remarkable success of 2D vision, some methods [18, 44] also use the 2D module to enhance 3D WSSS." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.788, + 0.84 + ], + "angle": 0, + "content": "Although leveraging 2D-3D fusion in 3D WSSS seems to be promising, there also exist some problems. Kweon et al. [18] need extra detailed annotations of 2D images. As for MIT [44], although it avoids additional per-point/pixel annotations or per-image class labels, its performance is not as good as expected. Therefore, how to design a network that achieves good performance despite the lack of 2D anno" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.452, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3DSS with 2D Vision-Language Guidance" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.146, + 0.788, + 0.389 + ], + "angle": 0, + "content": "tations still remains a big challenge. Meanwhile, we notice that the conventional methods for 3D WSSS only use the scene-level or subcloud-level labels to supervise the model, but ignore that the textual category labels such as \"chair, table\" also have semantic meanings and could be embedded to help the model learn. At the same time, we also find that some methods [12, 25, 46] like Openseg [12], which leverage the pretrained vision-language models such as CLIP [30] to establish precise semantic matching relationships between natural languages and 2D images, have achieved good results in 2D open vocabulary semantic segmentation (2D OVSS). The above two points inspire us to consider whether we can use the well-pretrained 2D OVSS model to help the 3D WSSS. As shown in Fig. 1 (b), the point cloud and 2D images could be mutually mapped with geometric projections, and the 2D images and textual categories could be compared with pretrained vision-language models. Therefore, why not take the 2D images as a bridge, leveraging the correspondences between point clouds and images, and between images and natural languages, to implicitly build matching relationships between point clouds and natural languages?" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.395, + 0.788, + 0.637 + ], + "angle": 0, + "content": "To this end, we propose a simple yet effective method, namely 3DSS-VLG, i.e., a weakly supervised approach for 3D Semantic Segmentation with 2D Vision-Language Guidance. Our 3DSS-VLG only needs 2D images, without their 2D image-level annotations, during training. Specifically, for the input 3D point cloud, the dataset also provides a set of multi-view images corresponding to it. We first process these multi-view images using the image encoder of the pretrained off-the-shelf 2D OVSS model such as Openseg [12] to get the 2D embeddings. 
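The projection-and-averaging step described next can be sketched as follows, assuming a standard pinhole model with per-view 3x3 intrinsics and 4x4 world-to-camera extrinsics; the function and argument names are our own placeholders rather than the paper's exact interface.

```python
import torch

def backproject_features(points, feats_2d, intrinsics, world2cam):
    """Average per-pixel 2D embeddings over every view that sees each point.

    points:     (N, 3) world coordinates
    feats_2d:   list of T tensors (H, W, d) from the frozen 2D encoder
    intrinsics: list of T (3, 3) pinhole camera matrices
    world2cam:  list of T (4, 4) world-to-camera extrinsics
    """
    N, d = points.shape[0], feats_2d[0].shape[-1]
    acc = torch.zeros(N, d)
    cnt = torch.zeros(N, 1)
    homo = torch.cat([points, torch.ones(N, 1)], dim=1)   # (N, 4) homogeneous
    for F, K, E in zip(feats_2d, intrinsics, world2cam):
        cam = (homo @ E.T)[:, :3]                         # camera-frame coords
        z = cam[:, 2].clamp(min=1e-6)
        uv = cam @ K.T                                    # (N, 3) before division
        u = (uv[:, 0] / z).round().long()
        v = (uv[:, 1] / z).round().long()
        H, W = F.shape[:2]
        ok = (cam[:, 2] > 0) & (u >= 0) & (u < W) & (v >= 0) & (v < H)
        acc[ok] += F[v[ok], u[ok]]                        # gather hit embeddings
        cnt[ok] += 1
    return acc / cnt.clamp(min=1)                         # (N, d) 2D-projected embeddings
```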
Then, for each point in the 3D point cloud, we project it onto the multi-view images with geometric projections, and integrate the corresponding 2D embeddings to get the 2D-projected embeddings for the point. Next, we utilize the text module of the 2D OVSS model to obtain the textual embeddings of each semantic category label. Since the textual category labels and 2D images could be directly compared in the embedding space of the 2D OVSS model, we only need to learn a 3D backbone which generates 3D embeddings aligned with the 2D embeddings; thus, the category labels and the 3D point cloud could be implicitly compared." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.644, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Actually, if the 3D embeddings are learned well enough, they can be directly compared with the text embeddings via similarity measurement for classification. However, we find that only relying on pulling the 2D-projected embeddings and 3D embeddings close is not reliable, since the pretrained 2D OVSS model is designed to learn general knowledge and does not have specialized knowledge of the indoor point cloud scene. Therefore, we propose to alleviate this problem with three stages. (1) First, as shown in Fig. 2, we perform matrix multiplication on the projected 2D embeddings and the text embeddings of category labels and get the classification logits. Then, we use the scene-level labels as a mask to filter out some confusing and unreliable predictions in the classification logits and thus get a more reliable pseudo label vector. (2) Second, as shown in Fig. 3 (a), we propose the Embeddings Specialization Stage, which transfers the 2D-projected embeddings with an adapter module to obtain adapted 3D embeddings, and the" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.353, + 0.128 + ], + "angle": 0, + "content": "X. Xu et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.327 + ], + "angle": 0, + "content": "training of this adapter module will be supervised with the pseudo label vector. This stage is designed to induce more reliable target 3D embeddings, suited for the indoor point cloud scene, from the 2D-projected embeddings. (3) Finally, as shown in Fig. 3 (b), we design the Embeddings Soft-Guidance Stage, which freezes the adapter module introduced in the second stage and leverages cosine similarity to align the adapted 3D embeddings and the MinkowskiNet [9] 3D embeddings. Combining the above three stages, we can learn a more reliable 3D embedding space for semantic segmentation in indoor point cloud scenes. In the inference procedure, we only need to compare the MinkowskiNet 3D embeddings of the point cloud and the text embeddings of the semantic category labels, thus accomplishing the 3D semantic segmentation. Note that we do not need 2D images to participate in the inference process of our model." + }, + { + "type": "text", + "bbox": [ + 0.239, + 0.329, + 0.704, + 0.344 + ], + "angle": 0, + "content": "In summary, the main contributions of this paper are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.353, + 0.786, + 0.429 + ], + "angle": 0, + "content": "- We propose a weakly supervised method 3DSS-VLG for 3D WSSS, which takes 2D images as a bridge, and leverages natural 3D-2D correspondence from geometric camera calibration and 2D-text correspondence from vision-language models to implicitly establish the semantic relationships between texts and 3D point clouds." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.429, + 0.787, + 0.534 + ], + "angle": 0, + "content": "- We design a three-stage training procedure to learn a reliable 3D embedding space in 3DSS-VLG for 3D semantic segmentation. The Embeddings Soft-Guidance Stage is designed to utilize the pretrained 2D vision-language model to provide an embedding space for 3D point cloud representation with the MinkowskiNet 3D backbone. Moreover, we propose the Embeddings Specialization Stage to make the embedding space more robust based on pseudo label filtering with indoor point cloud scene knowledge." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.535, + 0.787, + 0.61 + ], + "angle": 0, + "content": "- Extensive experiments on the ScanNet and S3DIS datasets show that the proposed 3DSS-VLG significantly outperforms the previous state-of-the-art methods, even Kweon et al., which uses extra 2D image-level annotations. Moreover, our further experiments show that our 3DSS-VLG has strong generalization capability and can be extended to handle unobserved general datasets." + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.353, + 0.787, + 0.61 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.633, + 0.388, + 0.649 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.665, + 0.634, + 0.68 + ], + "angle": 0, + "content": "2.1 2D Open-Vocabulary Semantic Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.69, + 0.788, + 0.842 + ], + "angle": 0, + "content": "The recent advances of large vision-language models have enabled a remarkable level of robustness in open-vocabulary semantic segmentation [6,25,39-41]. Open vocabulary semantic segmentation aims to segment the target categories that cannot be accessed during the training procedure. Pioneering work ZS3Net [4] uses generative models to synthesize pixel-level features by word embeddings of unseen classes. SPNet [37] encodes visual features into the semantic embedding space to align with text embeddings. More recently, researchers propose to leverage the pretrained CLIP [30] for open-vocabulary semantic segmentation. ZSSeg [41] leverages the visual module to generate class-agnostic masks and uses the pretrained text encoder to retrieve the unseen class masks. OpenSeg [12]" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.452, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3DSS with 2D Vision-Language Guidance" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.239 + ], + "angle": 0, + "content": "proposes to align the segment-level visual embeddings with text embeddings via region-word grounding. 
In this work, we solely rely on pretrained 2D open-vocabulary models to perform the 3D weakly supervised semantic segmentation task. We pull the 3D embeddings close to the 2D embeddings, i.e., features extracted by the pretrained model and back-projected onto the point cloud, so as to implicitly align the 3D embeddings and text embeddings." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.259, + 0.649, + 0.274 + ], + "angle": 0, + "content": "2.2 3D Weakly Supervised Semantic Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.282, + 0.789, + 0.569 + ], + "angle": 0, + "content": "This task aims to learn point cloud semantic segmentation using weakly annotated data, such as sparsely labeled points [15, 48], box-level labels [8], subcloud-level labels [36] and scene-level labels [9, 18, 31, 44]. Though the state-of-the-art methods based on sparsely labeled points show performance comparable to that of supervised ones, they require at least partial point-wise annotation in a scene, which is still expensive compared to subcloud-level labels and scene-level labels. The pipeline of the conventional CAM solution has been used in the majority of previous 3D WSSS works and only treats the scene-level labels as one-hot digits. MPRM [36] proposes the subcloud-level annotation method that samples subclouds from the full scene and annotates them, which can alleviate the class imbalance issue commonly appearing in almost every scene. However, subcloud-level annotation needs to divide the point cloud into small subclouds, so more than one annotation is required per scene, which is troublesome and time-consuming. Therefore, some methods that use scene-level annotations are proposed for 3D WSSS. Kweon et al. [18] utilize 2D and 3D data for semantic segmentation and achieve good performance; however, they require extra 2D image-level labels. MIT [44] proposes the interlaced transformer structure to fuse 2D-3D information with only scene-level labels. However, its performance is not as good as expected." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.57, + 0.788, + 0.631 + ], + "angle": 0, + "content": "Therefore, in this work, we explore a 3D WSSS method with only scene-level labels. Unlike those previous works, we use the semantic meanings of textual category labels to assist in model learning. Moreover, the performance of our 3DSS-VLG surpasses that of Kweon et al., which uses extra 2D image-level labels." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.651, + 0.461, + 0.665 + ], + "angle": 0, + "content": "2.3 2D Semantic in 3D task" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.674, + 0.789, + 0.841 + ], + "angle": 0, + "content": "Studies on 3D object detection and semantic segmentation [1,2,5,11,14,20-24] have explored the use of 2D image semantics to assist 3D tasks. There are mainly two approaches: concatenating the image embeddings with each point in the 3D scene as extra information [7,17,42,47] or projecting image semantic results into a 3D space to assist 3D semantic segmentation [19,26,38]. Previous studies usually used 2D image semantics as extra inputs in both training and inference. Although performance has improved, the extra 2D inputs have the potential to constrain the range of application scenarios. This is due to the fact that 2D information may be absent during inference or necessitate laborious pre-processing. In this paper, we aim to investigate the potential of using 2D semantics exclusively during training to assist in the 3D WSSS task." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.353, + 0.127 + ], + "angle": 0, + "content": "X. Xu et al." + }, + { + "type": "image", + "bbox": [ + 0.245, + 0.143, + 0.768, + 0.343 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.357, + 0.788, + 0.538 + ], + "angle": 0, + "content": "Fig. 2: The proposed pseudo label generation procedure. We first leverage the text encoder \(\varepsilon^{text}\) of Openseg to get embeddings of the full category labels \(\mathbf{F}^C\), and leverage the 2D image encoder \(\varepsilon^{2D}\) of Openseg to get embeddings of the 2D image \(\mathbf{F}^{2D}\). It is important to note that we freeze the whole Openseg model during the procedure of pseudo label generation. Then we back-project the 2D embeddings \(\mathbf{F}^{2D}\) to integrate the 2D-projected embeddings \(\mathbf{P}^{2D}\). Specifically, for each point in the point cloud \((x^{3D}, y^{3D}, z^{3D})\), we use geometric camera calibration matrices \(GCCM^{img}\) to calculate the corresponding positions \((x^{2D}, y^{2D})\) on the multi-view images \(S\). Then we integrate these corresponding 2D embeddings in \(\mathbf{F}^{2D}\) and average them to get the 2D-projected embeddings \(\mathbf{P}^{2D}\). We perform matrix multiplication on \(\mathbf{F}^C\) and \(\mathbf{P}^{2D}\), and get the 3D point cloud semantic segmentation prediction logits \(\mathbf{L}^{2D}\). Finally we utilize the scene-level labels as mask \(M\) to filter out some confusing and unreliable predictions in the classification and get the more accurate predicted logits \(\mathbf{L}^f\) and pseudo labels \(\mathbf{Y}\)." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.565, + 0.473, + 0.584 + ], + "angle": 0, + "content": "3 The Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.597, + 0.788, + 0.658 + ], + "angle": 0, + "content": "In this section, we will first introduce the procedure of pseudo label generation in Sec. 3.1. Then, we will demonstrate the training procedure of our 3DSS-VLG in Sec. 3.2 and Sec. 3.3. Finally, we will describe the 3DSS-VLG inference procedure in Sec. 3.4." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.68, + 0.524, + 0.696 + ], + "angle": 0, + "content": "3.1 Pseudo Label Generation Stage" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.705, + 0.788, + 0.81 + ], + "angle": 0, + "content": "This stage aims to utilize the pretrained vision-language model and scene-level labels to generate more precise pseudo labels. Given an input point cloud with multi-view images as shown in Fig. 2, we first implement dense 2D embeddings extraction for each RGB image via the frozen visual encoder of Openseg [12], and back-project them onto the 3D surface points of a scene to integrate the 2D-projected embeddings. Afterward, more accurate pseudo labels are generated based on 2D-projected embeddings, text embeddings and scene-level labels." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.788, + 0.84 + ], + "angle": 0, + "content": "2D Embeddings Extraction. 
The inputs of 3DSS-VLG comprise a scene with a 3D point cloud, scene-level labels and the associated multi-view RGB images" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.452, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "3DSS with 2D Vision-Language Guidance" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.327 + ], + "angle": 0, + "content": "set. The RGB image set \(S\) consists of \(T\) images with a resolution of \(H\times W\). The point cloud \(\mathbf{X}\in \mathbb{R}^{N\times 6}\) contains \(N\) points in the scene, and each point is represented with six dimensions of RGBXYZ. We leverage the pretrained image encoder of OpenSeg [12] to get per-pixel embeddings, denoted as \(\mathbf{F}^{2D}\in \mathbb{R}^{T\times H\times W\times d}\), where \(d\) is the 2D embedding dimension. For each point in the 3D point cloud, we project it onto the multi-view images through geometric camera calibration matrices and get the corresponding 2D positions. Then we can extract the corresponding projected 2D embeddings from \(\mathbf{F}^{2D}\) according to the calculated 2D image positions. Since each point may have multiple correspondences in different images, the final 2D-projected embeddings \(\mathbf{P}^{2D}\in \mathbb{R}^{N\times d}\) are obtained by averaging all the corresponding projected 2D embeddings of each point." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.329, + 0.788, + 0.39 + ], + "angle": 0, + "content": "Text Embeddings Extraction. We take the text encoder of Openseg to extract the text embeddings \(\mathbf{F}^C\in \mathbb{R}^{K\times d}\) of the full category labels, where \(K\) denotes the number of categories. Similarly, we also freeze the text encoder and directly load the pretrained Openseg parameters." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.39, + 0.788, + 0.543 + ], + "angle": 0, + "content": "Filtering Strategy. After getting the 2D-projected embeddings \(\mathbf{P}^{2D}\) and the text embeddings \(\mathbf{F}^C\), we perform matrix multiplication on them and obtain the classification logits \(\mathbf{L}^{2D} \in \mathbb{R}^{N \times K}\). To make the classification logits more reliable, the filtering strategy is employed to filter out confusing and unreliable predictions. For instance, as shown in Fig. 2, we create a boolean scene-level label mask \(\mathbf{M} \in \mathbb{R}^{1 \times K}\), where the element values in the mask indicate whether the corresponding categories exist. Finally, we perform a matrix inner product on the classification logits \(\mathbf{L}^{2D}\) and the scene-level label mask \(\mathbf{M}\) and obtain the filtered classification logits \(\mathbf{L}^f \in \mathbb{R}^{N \times K}\). After ranking the filtered classification logits \(\mathbf{L}^f\), we can get the more precise pseudo labels \(\mathbf{Y} \in \mathbb{R}^N\) of the input point cloud." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.569, + 0.534, + 0.585 + ], + "angle": 0, + "content": "3.2 Embeddings Specialization Stage" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.599, + 0.788, + 0.689 + ], + "angle": 0, + "content": "As we know, the 2D OVSS model is designed to learn general knowledge and does not have any specialized knowledge of the indoor point cloud scene. 
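Before turning to the fix, the pseudo-label generation of Sec. 3.1 can be condensed into a few lines; `p2d`, `fc`, and `scene_mask` stand for \(\mathbf{P}^{2D}\), \(\mathbf{F}^C\), and \(\mathbf{M}\), and these names are ours, not the authors'.

```python
import torch

def filtered_pseudo_labels(p2d, fc, scene_mask):
    """p2d: (N, d) 2D-projected embeddings; fc: (K, d) text embeddings;
    scene_mask: (K,) bool, True for categories present in the scene."""
    logits = p2d @ fc.t()                                     # (N, K) logits L^2D
    logits = logits.masked_fill(~scene_mask, float('-inf'))   # filtered logits L^f
    return logits.argmax(dim=1)                               # (N,) pseudo labels Y
```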
Therefore, only relying on the 2D embeddings to build the 3D-text correlation will make the 3D WSSS process not reliable. To mitigate this issue, the Embeddings Specialization Stage is proposed to further improve the perception of indoor knowledge of 3D embeddings." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.69, + 0.788, + 0.751 + ], + "angle": 0, + "content": "Specifically, the 2D-projected embeddings \\(\\mathbf{P}^{2D}\\) of input are transferred into another space through the adapter module, which simply contains two fully-connected layers. Besides, to keep both the source and adapted semantics, we employ the residual connections to get the adapted 3D embeddings \\(\\mathbf{A}^{3D} \\in \\mathbb{R}^{N \\times d}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.362, + 0.768, + 0.786, + 0.786 + ], + "angle": 0, + "content": "\\[\n\\mathbf {A} ^ {3 D} = \\alpha \\cdot M L P (\\mathbf {P} ^ {2 D}) + (1 - \\alpha) \\cdot \\mathbf {P} ^ {2 D}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.788, + 0.842 + ], + "angle": 0, + "content": "where \\(\\alpha\\) is the ratio of residual connections. Next, we perform matrix multiplication on text embeddings \\(\\mathbf{F}^C\\) and adapted 3D embeddings \\(\\mathbf{A}^{3D}\\) and obtain the classification logits \\(\\mathbf{L}^a\\in \\mathbb{R}^{N\\times K}\\). The softmax layer is applied on \\(\\mathbf{L}^a\\) and" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.353, + 0.127 + ], + "angle": 0, + "content": "X. Xu et al." + }, + { + "type": "image", + "bbox": [ + 0.239, + 0.144, + 0.765, + 0.407 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.416, + 0.789, + 0.611 + ], + "angle": 0, + "content": "Fig. 3: The proposed training procedure of our proposed 3DSS-VLG. Here, it is mainly divided into two stages: (a) Embeddings Specialization Stage and (b) Embeddings Soft-Guidance Stage. For (a), we first utilize the text encoder \\(\\varepsilon^{text}\\) of Openseg to obtain embeddings of the category labels \\(\\mathbf{F}^C\\), which are frozen during the training procedure of (a). Meanwhile, we get the initial 2D-projected embeddings \\(\\mathbf{P}^{2D}\\) from the 2D module and leverage the adapter module to transfer the \\(\\mathbf{P}^{2D}\\) to a new embedding spaces to obtain the adapted 3D embeddings \\(\\mathbf{A}^{3D}\\). We perform matrix multiplication on \\(\\mathbf{A}^{3D}\\) and \\(\\mathbf{F}^C\\) and get the predicted probability \\(\\mathbf{L}^a\\). Finally, we use the pseudo labels \\(\\mathbf{Y}\\) to supervise the model, and the green dashed lines denote back-propagation of the loss \\(\\mathcal{L}_a\\). For (b), we first utilize the adapter module and obtain the adapted 3D embeddings \\(\\mathbf{A}^{3D}\\). It is important to note that we freeze the adapter module during the training procedure of (b). Meanwhile, we use the 3D module \\(\\varepsilon^{3D}\\) to obtain the 3D embeddings \\(\\mathbf{F}^{3D}\\). The cosine similarity loss \\(\\mathcal{L}_s\\) will be integrated to train the model. The red dashed lines denote back-propagation of the loss \\(\\mathcal{L}_s\\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.64, + 0.787, + 0.671 + ], + "angle": 0, + "content": "a classification cross-entropy loss \\(\\mathcal{L}_a\\) is introduced to supervise the procedure. 
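A minimal sketch of Eq. (1) and the cross-entropy supervision just described, assuming an illustrative hidden width and alpha value; this is a sketch under those assumptions, not the authors' exact implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class Adapter(nn.Module):
    """Residual two-layer adapter implementing Eq. (1) (sketch)."""
    def __init__(self, d=768, alpha=0.5):
        super().__init__()
        self.alpha = alpha
        self.mlp = nn.Sequential(nn.Linear(d, d), nn.ReLU(), nn.Linear(d, d))

    def forward(self, p2d):                                   # p2d: (N, d)
        return self.alpha * self.mlp(p2d) + (1 - self.alpha) * p2d   # A^3D

def ess_loss(adapter, p2d, fc, pseudo_y):
    # Classify adapted embeddings against the text embeddings F^C,
    # supervised by the pseudo labels Y from the previous stage.
    logits = adapter(p2d) @ fc.t()                            # (N, K) logits L^a
    return F.cross_entropy(logits, pseudo_y)                  # loss L_a
```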
Here we leverage the pseudo labels \(\mathbf{Y}\) of the point cloud to supervise the model." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.671, + 0.787, + 0.715 + ], + "angle": 0, + "content": "Just by introducing the above simple adapter module, we can make the learned adapted embeddings have better semantic awareness of the point clouds of indoor scenes, thus assisting the 3D WSSS task." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.739, + 0.538, + 0.755 + ], + "angle": 0, + "content": "3.3 Embeddings Soft-Guidance Stage" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.787, + 0.841 + ], + "angle": 0, + "content": "Since Openseg has established a high level of semantic alignment between 2D embeddings and text embeddings, we propose the Embeddings Soft-Guidance Stage, which can naturally take the 2D embeddings as a bridge to implicitly align the 3D embeddings and text embeddings via cosine similarity. Specifically, as shown in Fig. 3 (b), we take the point cloud \(\mathbf{X}\) as input, and use Minkowsk-" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.452, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3DSS with 2D Vision-Language Guidance" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.269 + ], + "angle": 0, + "content": "iNet18A UNet [9] as our 3D module; meanwhile, we change the dimension of the outputs to \(d\). Therefore, we can get the learned 3D embeddings \(\mathbf{F}^{3D} \in \mathbb{R}^{N \times d}\). Then we take the corresponding 2D-projected embeddings \(\mathbf{P}^{2D}\) as input, processed by the adapter module, and get the adapted 3D embeddings \(\mathbf{A}^{3D}\). We follow the typical cosine similarity loss by pulling the paired 3D embeddings \(\mathbf{F}^{3D}\) and adapted 3D embeddings \(\mathbf{A}^{3D}\) closer. We need to note that we freeze the adapter module and directly load the parameters provided by Sec. 3.2 during training. Therefore, we define the 3DSS-VLG loss as:" + }, + { + "type": "equation", + "bbox": [ + 0.408, + 0.281, + 0.786, + 0.3 + ], + "angle": 0, + "content": "\[\n\mathcal{L}_{s} = 1 - \cos(\mathbf{F}^{3D}, \mathbf{A}^{3D}). \tag{2}\n\]" + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.317, + 0.341, + 0.33 + ], + "angle": 0, + "content": "3.4 Inference" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.337, + 0.788, + 0.414 + ], + "angle": 0, + "content": "During inference, we only retain the 3D and text modules and remove the 2D module. Specifically, we take the 3D embeddings \(\mathbf{F}^{3D}\) from the 3D module, as well as the category embeddings \(\mathbf{F}^C\) from the text module, to perform matrix multiplication on them and get the classification logits. Finally, we rank the logits and obtain the final per-point segmentation for the input point cloud \(\mathbf{X}\)." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.433, + 0.377, + 0.449 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.46, + 0.788, + 0.522 + ], + "angle": 0, + "content": "In this section, we first present our experimental settings, including datasets, evaluation metrics, and implementation details. The competing methods are then presented and compared. Finally, ablation studies are provided to further demonstrate the necessity and effectiveness of each component of our framework." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.539, + 0.533, + 0.552 + ], + "angle": 0, + "content": "4.1 Datasets and Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.56, + 0.788, + 0.696 + ], + "angle": 0, + "content": "We evaluate our 3DSS-VLG on two public and widely-used large-scale point cloud datasets with multi-view images, S3DIS [3] and ScanNet [10]. S3DIS is proposed for indoor scene understanding. It consists of 6 areas including 271 rooms with 13 classes. Each room is scanned via RGBD sensors and is represented by a point cloud with 3D coordinates and RGB values. Following previous works, we take area 5 as the test scene. ScanNet [10] has 1513 training scenes and 100 test scenes with 20 classes. We adopt the default train-val split setting, where there are 1201 training scenes and 312 validation scenes. The mean Intersection over Union (mIoU) is employed as the evaluation metric for both datasets." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.714, + 0.466, + 0.729 + ], + "angle": 0, + "content": "4.2 Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.734, + 0.788, + 0.841 + ], + "angle": 0, + "content": "3DSS-VLG is implemented in PyTorch. For the training procedure of Sec. 3.2, we use the Adam optimizer with a batch size of 16 and set an initial learning rate of 0.003 for the model. We reduce the learning rate by a multiplying factor of 0.7 every 20 epochs for a total of 80 epochs. For the training procedure of Sec. 3.3, the model optimization is conducted using the Adam optimizer with a batch size of 8. We set an initial learning rate of 0.0001 for the model and use the poly learning rate policy to adjust the learning rate." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.353, + 0.127 + ], + "angle": 0, + "content": "X. Xu et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.788, + 0.174 + ], + "angle": 0, + "content": "Table 1: Performance comparison on the S3DIS dataset. \"Sup.\" indicates the type of supervision. \"100%\" represents full annotation. \"scene.\" denotes scene-level annotation." + }, + { + "type": "table", + "bbox": [ + 0.342, + 0.185, + 0.659, + 0.385 + ], + "angle": 0, + "content": "<table><tr><td>
Method | Label Effort | Sup. | Test
PointNet [27] | | 100% | 41.1
TangentConv [33] | | 100% | 52.8
MinkowskiNet [9] | | 100% | 65.8
KPConv [34] | >20 min | 100% | 67.1
PointTransformer [49] | | 100% | 70.4
PointNeXt [29] | | 100% | 70.5
DeepViewAgg [32] | | 100% | 67.2
SemAffiNet [35] | | 100% | 71.6
MPRM [36] | | scene. | 10.3
MIL-Trans [45] | | scene. | 12.9
WYPR [31] | <1 min | scene. | 22.3
MIT [44] | | scene. | 27.7
Ours | | scene. | 45.3
" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.424, + 0.553, + 0.44 + ], + "angle": 0, + "content": "4.3 3D Semantic Segmentation Results" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.448, + 0.788, + 0.553 + ], + "angle": 0, + "content": "We evaluate our proposed approach against state-of-the-art techniques for 3D weakly supervised semantic segmentation with scene-level labels. Firstly, we demonstrate some full supervised point cloud semantic segmentation methods to compare the gap between the performances of ours and full supervised methods. Subsequently, we introduce semantic segmentation methods supervised by scene-level labels or subcloud-level labels and compare them with our method. Meanwhile, we indicate the average annotation time per scene." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.554, + 0.788, + 0.705 + ], + "angle": 0, + "content": "Evaluation on S3DIS. Tab. 1 Show the performance of each type of 3D point cloud semantic segmentation methods evaluated on the S3DIS dataset. We can find that in the scene-level annotations setting, our method greatly surpasses the existing state-of-the-art method MIT [44] by \\(17.6\\%\\). This shows that using textual semantic information ignored by previous 3D weakly supervised semantic segmentation can significantly improve segmentation performance. The textual semantic information of each category is unique; then the 2D embeddings and 3D embeddings are aligned so that the 3D embeddings can be implicitly aligned to the corresponding unique category semantic information, which allows the model to achieve greater performance improvements." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.705, + 0.788, + 0.795 + ], + "angle": 0, + "content": "Meanwhile, we compare our method with some full supervised methods. It can be observed that our 3DSS-VLG can outperform some fully supervised methods, i.e., PointNet [27]. Moreover, we notice that the annotations cost time of different types of supervision and find that the scene-level annotation is the most efficient compared to other types annotations. Such results demonstrate the effectiveness and potential of our weakly supervised method." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.788, + 0.84 + ], + "angle": 0, + "content": "Evaluation on ScanNet. We also evaluate our 3DSS-VLG on the ScanNet online test set and the validation set and presented the performance results of 3DSS-VLG in Tab. 2. For the test set, it can be observed that our 3DSS-VLG" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.452, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3DSS with 2D Vision-Language Guidance" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.145, + 0.788, + 0.201 + ], + "angle": 0, + "content": "Table 2: Performance comparison on the ScanNet test set and validation set. \"Sup.\" indicates the type of supervision. \"100%\" represents full annotation. \"subcloud.\" and \"scene.\" imply subcloud-level annotation and scene-level annotation respectively. \"image.\" denotes image-level annotation." + }, + { + "type": "table", + "bbox": [ + 0.292, + 0.213, + 0.709, + 0.428 + ], + "angle": 0, + "content": "
Method | Label Effort | Sup. | Test | Val
PointNet++ [28] | | 100% | 33.9 | -
TangentConv [33] | | 100% | 43.8 | -
MinkowskiNet [9] | | 100% | 73.6 | 72.2
KPConv [34] | >20 min | 100% | 68.6 | 69.2
PointTransformer [49] | | 100% | - | 70.6
PointNeXt [29] | | 100% | 71.2 | 71.5
DeepViewAgg [32] | | 100% | - | 71.0
SemAffiNet [35] | | 100% | 74.9 | -
MPRM [36] | 3 min | subcloud. | 41.1 | 43.2
Kweon et al. [18] | 5 min | scene. + image. | 47.4 | 49.6
MIL-Trans [45] | | scene. | - | 26.2
WYPR [31] | <1 min | scene. | 24.0 | 29.6
MIT [44] | | scene. | 31.7 | 35.8
Ours | | scene. | 48.9 | 49.7
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.469, + 0.788, + 0.606 + ], + "angle": 0, + "content": "achieves the best performance under only scene-level label supervision and even surpasses the performance of MPRM [36] which is supervised by subcloud-level annotations. Moreover, we are surprised to find that our method also outperforms Kweon et al. [18] by \\(1.5\\%\\), which uses not only scene-level labels, but also extra image-level labels. Our method can achieve stronger performance with less annotations, further illustrating the superiority of our method. Meanwhile, our 3DSS-VLG can outperform some fully supervised methods. In addition for the validation set, our method also achieves the state-of-the-art during those 3D WSSS approaches. Those results demonstrate the superiority of 3DSS-VLG." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.631, + 0.403, + 0.645 + ], + "angle": 0, + "content": "4.4 Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.659, + 0.789, + 0.841 + ], + "angle": 0, + "content": "Effectiveness of Each Components. To demonstrate the advantage of each component in our 3DSS-VLG, we conduct comprehensive ablation studies on the S3DIS dataset, as shown in Tab. 3. The ablation model (a) only retains the MinkowskiNet18A UNet [9] and trains directly with the pseudo labels which are generated without using scene-level labels filtering. The cross-entropy loss is introduced to supervised this procedure. We set model (a) as the baseline of our experiment. Compared to model (a), model (b) is not directly supervised by pseudo labels. It adopts the Embeddings Soft-Guidance Stage (ESGS) and is soft-guided by the 2D-projected embeddings \\(\\mathbf{P}^{2D}\\). We can find that the performance of mIoU is improved from \\(37.7\\%\\) to \\(38.2\\%\\). This observation proves that the soft-guidance strategy can guide 3D embeddings to align with the text embeddings and achieve better performance compared to directly using the pseudo" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.353, + 0.127 + ], + "angle": 0, + "content": "X. Xu et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.248, + 0.145, + 0.754, + 0.159 + ], + "angle": 0, + "content": "Table 3: Ablation studies of the 3DSS-VLG components on S3DIS dataset." + }, + { + "type": "table", + "bbox": [ + 0.396, + 0.171, + 0.603, + 0.247 + ], + "angle": 0, + "content": "
 | ESGS | Filtering | ESS | mIoU
(a) | | | | 37.7
(b) | ✓ | | | 38.2
(c) | | ✓ | | 42.6
(d) | ✓ | ✓ | ✓ | 45.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.276, + 0.272, + 0.726, + 0.286 + ], + "angle": 0, + "content": "Table 4: Performance comparisons of the generalization capability." + }, + { + "type": "table", + "bbox": [ + 0.395, + 0.298, + 0.603, + 0.345 + ], + "angle": 0, + "content": "
Domain | mIoU | mAcc
S3DIS -> ScanNet | 13.4 | 23.0
ScanNet -> S3DIS | 33.3 | 50.9
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.387, + 0.785, + 0.508 + ], + "angle": 0, + "content": "labels to supervised 3D model. Meanwhile, when we introduce the filtering strategy to model (a), as shown in model (c), we can find that the model performance increases greatly from \\(37.7\\%\\) to \\(42.6\\%\\). Finally, by adding the filtering strategy to model (b) and utilizing the Embeddings Specialization Stage (ESS), model (d) is supervised by adapted 3D embeddings \\(\\mathbf{A}^{3D}\\) at this time. It can be observed the performance improves from \\(38.2\\%\\) to \\(45.3\\%\\). Such results prove that our 3DSS-VLG can help the model to get a better, indoor point cloud specific embedding space to align 3D point clouds and text." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.508, + 0.787, + 0.628 + ], + "angle": 0, + "content": "Generalization Capability. Due to the domain gap among different datasets, a model trained on one dataset is not applicable to another dataset. Also, this situation occurs in the 3D WSSS task. Nevertheless, we notice that, compared to previous works, our 3DSS-VLG uses textual semantic information as a guide rather than CAM, which means our model has a good relationship between 3D point cloud and the text of category labels and indicates that the model may have generalization ability. Therefore, we further explore our framework to the novel data of the unobserved scene domains." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.63, + 0.788, + 0.841 + ], + "angle": 0, + "content": "As shown in Tab. 4, we experimentally verify the generalizability of the proposed method on the S3DIS and ScanNet dataset, respectively. The first row is the performance of model that we first train our model on the S3DIS dataset and then test the trained model on validation set of the ScanNet dataset. The second row is the performance of model that we first train our model on the ScanNet dataset and then test the trained model on the test set of the S3DIS dataset. Compared to those weakly supervised methods with scene-level labels, it can be observed that our 3DSS-VLG has a certain gap with those methods in the first row. However, for the second row, we are supervised to find that our method can outperform all the weakly supervised methods and achieve state-of-the-art performance. The ScanNet dataset provides six times more training scenes than the S3DIS dataset. Therefore, when a model is pretrained on the ScanNet dataset, the model will be more robust than a model pretrained on the S3DIS dataset. Our experimental results also prove this phenomenon." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.452, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3DSS with 2D Vision-Language Guidance" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.785, + 0.173 + ], + "angle": 0, + "content": "Table 5: Performance comparisons with different 3D backbones and ESS module backbones on the S3DIS dataset." + }, + { + "type": "table", + "bbox": [ + 0.387, + 0.185, + 0.613, + 0.274 + ], + "angle": 0, + "content": "
Module | Backbone | mIoU
3D | MinkowskiNet14A | 44.5
 | MinkowskiNet18A | 45.3
 | MinkowskiNet34A | 44.7
ESS | Transformers | 45.0
 | MLP | 45.3
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.315, + 0.784, + 0.388 + ], + "angle": 0, + "content": "The results also strongly support the complementary advantages of using text semantic information, even without any further fine-tuning or domain-specific adaptation. Our 3DSS-VLG can be extended to handle unobserved general data and has strong generalization capability, which is promising for the field of 3D WSSS." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.39, + 0.785, + 0.449 + ], + "angle": 0, + "content": "Experiments with Different Backbones. Tab. 5 shows the performances of our method on S3DIS with different 3D backbones and ESS module backbones. Finally, we use the MinkowskiNet18A as our 3D backbone and the FC-layer as the backbone of our ESS." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.472, + 0.421, + 0.487 + ], + "angle": 0, + "content": "4.5 Qualitative Results" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.497, + 0.785, + 0.603 + ], + "angle": 0, + "content": "Fig. 4 visualizes the qualitative comparison of the proposed framework and baseline. Here the baseline is model (a) which is mentioned in Sec. 4.4. Compared with the result of baseline, our 3DSS-VLG shows significantly better results in the terms of accuracy of semantics and preciseness of segmentation. With the ESGS, ESS and filtering strategies, our 3DSS-VLG can learn a more better indoor point cloud specific embedding space to align 3D point clouds and text and achieve substantial semantic segmentation results compared to the baseline." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.625, + 0.357, + 0.639 + ], + "angle": 0, + "content": "4.6 Limitations" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.65, + 0.785, + 0.726 + ], + "angle": 0, + "content": "Our work relies on vision-language alignment and does not address how to align visual embeddings with some abstract category text embeddings (e.g. \"other\" class in the S3DIS dataset). It is difficult for the model to understand what the difference is between the \"other\" class and other categories, thus making the wrong segmentation. This limitation is a direct avenue for future work." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.748, + 0.36, + 0.765 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.78, + 0.785, + 0.84 + ], + "angle": 0, + "content": "In this paper, we propose 3DSS-VLG to address the shortage of point-level annotations. Specifically, our 3DSS-VLG exploits the superior ability of current vision-language models on aligning the semantics between texts and 2D images, as well as the naturally existing correspondences between 2D images and 3D" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.353, + 0.127 + ], + "angle": 0, + "content": "X. Xu et al." 
+ }, + { + "type": "image", + "bbox": [ + 0.219, + 0.145, + 0.361, + 0.233 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.22, + 0.234, + 0.36, + 0.321 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.221, + 0.322, + 0.359, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.221, + 0.41, + 0.359, + 0.5 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.224, + 0.503, + 0.776, + 0.527 + ], + "angle": 0, + "content": "Legend: ceiling, floor, beam, window, column, table, sofa, board" + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.542, + 
0.788, + 0.582 + ], + "angle": 0, + "content": "Fig. 4: Qualitative results on the S3DIS dataset of the baseline and our 3DSS-VLG. From left to right: input point clouds, ground truth, baseline results, and our 3DSS-VLG results." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.614, + 0.788, + 0.721 + ], + "angle": 0, + "content": "point clouds to implicitly co-embed text embeddings with 3D point cloud embeddings using only scene-level labels. With extensive experiments, we verify that the textual semantic information of category labels is beneficial for 3DSS-VLG, which achieves the state-of-the-art on both the S3DIS and ScanNet datasets. Further, with an experiment extending our framework to unobserved scene domains, we demonstrate the generalization capability of our method, which supports its practicality." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.746, + 0.403, + 0.765 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.78, + 0.789, + 0.842 + ], + "angle": 0, + "content": "This work was supported in part by the National Natural Science Foundation of China under Grants 62371310 and 62032015, in part by the Guangdong Basic and Applied Basic Research Foundation under Grant 2023A1515011236, in part by the Stable Support Project of Shenzhen (Project No.20231122122722001), in" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.452, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3DSS with 2D Vision-Language Guidance" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.224 + ], + "angle": 0, + "content": "part by the third phase of high-level university construction of interdisciplinary innovation team project of Shenzhen University (24JCXK03). We also acknowledge the CINECA award under the ISCRA initiative for the availability of partial HPC resource support; this work was also partially supported by the Fundamental Research Funds for the Central Universities, Peking University." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.242, + 0.323, + 0.259 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.268, + 0.787, + 0.311 + ], + "angle": 0, + "content": "1. Alonso, I., Riazuelo, L., Montesano, L., Murillo, A.C.: 3d-mininet: Learning a 2d representation from point clouds for fast and efficient 3d lidar semantic segmentation. IEEE Robotics and Automation Letters 5(4), 5432-5439 (2020) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.311, + 0.787, + 0.365 + ], + "angle": 0, + "content": "2. Ando, A., Gidaris, S., Bursuc, A., Puy, G., Boulch, A., Marlet, R.: Rangevit: Towards vision transformers for 3d semantic segmentation in autonomous driving. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5240-5250 (2023) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.365, + 0.787, + 0.406 + ], + "angle": 0, + "content": "3. Armeni, I., Sener, O., Zamir, A.R., Jiang, H., Brilakis, I., Fischer, M., Savarese, S.: 3d semantic parsing of large-scale indoor spaces. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1534-1543 (2016) 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.406, + 0.787, + 0.433 + ], + "angle": 0, + "content": "4. Bucher, M., Vu, T.H., Cord, M., Pérez, P.: Zero-shot semantic segmentation. 
Advances in Neural Information Processing Systems 32 (2019) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.433, + 0.787, + 0.488 + ], + "angle": 0, + "content": "5. Cardace, A., Ramirez, P.Z., Salti, S., Di Stefano, L.: Exploiting the complementarity of 2d and 3d networks to address domain-shift in 3d semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 98-109 (2023) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.488, + 0.787, + 0.542 + ], + "angle": 0, + "content": "6. Chen, J., Zhu, D., Qian, G., Ghanem, B., Yan, Z., Zhu, C., Xiao, F., Culatana, S.C., Elhoseiny, M.: Exploring open-vocabulary semantic segmentation from clip vision encoder distillation only. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 699-710 (2023) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.542, + 0.787, + 0.582 + ], + "angle": 0, + "content": "7. Chen, R., Liu, Y., Kong, L., Chen, N., Zhu, X., Ma, Y., Liu, T., Wang, W.: Towards label-free scene understanding by vision foundation models. Advances in Neural Information Processing Systems 36 (2024) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.582, + 0.787, + 0.624 + ], + "angle": 0, + "content": "8. Chibane, J., Engelmann, F., Anh Tran, T., Pons-Moll, G.: Box2mask: Weakly supervised 3d semantic instance segmentation using bounding boxes. In: European Conference on Computer Vision. pp. 681-699. Springer (2022) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.624, + 0.787, + 0.664 + ], + "angle": 0, + "content": "9. Choy, C., Gwak, J., Savarese, S.: 4d spatio-temporal convnets: Minkowski convolutional neural networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3075-3084 (2019) 4, 5, 9, 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.664, + 0.787, + 0.704 + ], + "angle": 0, + "content": "10. Dai, A., Chang, A.X., Savva, M., Halber, M., Funkhouser, T., Nießner, M.: Scannet: Richly-annotated 3d reconstructions of indoor scenes. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 5828-5839 (2017) 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.704, + 0.787, + 0.759 + ], + "angle": 0, + "content": "11. Genova, K., Yin, X., Kundu, A., Pantofaru, C., Cole, F., Sud, A., Brewington, B., Shucker, B., Funkhouser, T.: Learning 3d semantic segmentation with only 2d image supervision. In: 2021 International Conference on 3D Vision (3DV). pp. 361-372 (2021) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.759, + 0.787, + 0.8 + ], + "angle": 0, + "content": "12. Ghiasi, G., Gu, X., Cui, Y., Lin, T.Y.: Scaling open-vocabulary image segmentation with image-level labels. In: European Conference on Computer Vision. pp. 540-557. Springer (2022) 3, 4, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.8, + 0.787, + 0.84 + ], + "angle": 0, + "content": "13. Hegde, D., Valanarasu, J.M.J., Patel, V.: Clip goes 3d: Leveraging prompt tuning for language grounded 3d recognition. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 2028-2038 (2023) 1" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.268, + 0.787, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.352, + 0.127 + ], + "angle": 0, + "content": "X. 
Xu et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.147, + 0.787, + 0.189 + ], + "angle": 0, + "content": "14. Hou, J., Xie, S., Graham, B., Dai, A., Nießner, M.: Pri3d: Can 3d priors help 2d representation learning? In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5693-5702 (2021) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.19, + 0.786, + 0.232 + ], + "angle": 0, + "content": "15. Hu, Q., Yang, B., Fang, G., Guo, Y., Leonardis, A., Trigoni, N., Markham, A.: Sqn: Weakly-supervised semantic segmentation of large-scale 3d point clouds. In: European Conference on Computer Vision. pp. 600-619. Springer (2022) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.232, + 0.787, + 0.286 + ], + "angle": 0, + "content": "16. Hu, Q., Yang, B., Xie, L., Rosa, S., Guo, Y., Wang, Z., Trigoni, N., Markham, A.: Randla-net: Efficient semantic segmentation of large-scale point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11108-11117 (2020) 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.287, + 0.787, + 0.34 + ], + "angle": 0, + "content": "17. Hu, W., Zhao, H., Jiang, L., Jia, J., Wong, T.T.: Bidirectional projection network for cross dimension scene understanding. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 14373-14382 (2021) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.342, + 0.786, + 0.383 + ], + "angle": 0, + "content": "18. Kweon, H., Yoon, K.J.: Joint learning of 2d-3d weakly supervised semantic segmentation. Advances in Neural Information Processing Systems 35, 30499-30511 (2022) 2, 5, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.383, + 0.786, + 0.424 + ], + "angle": 0, + "content": "19. Lahoud, J., Ghanem, B.: 2d-driven 3d object detection in rgb-d images. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4622-4630 (2017) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.425, + 0.786, + 0.467 + ], + "angle": 0, + "content": "20. Li, J., Dai, H., Han, H., Ding, Y.: Mseg3d: Multi-modal 3d semantic segmentation for autonomous driving. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21694-21704 (2023) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.467, + 0.786, + 0.508 + ], + "angle": 0, + "content": "21. Li, J., Jie, Z., Ricci, E., Ma, L., Sebe, N.: Enhancing robustness of vision-language models through orthogonality learning and cross-regularization (2024), https://arxiv.org/abs/2407.08374 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.508, + 0.786, + 0.55 + ], + "angle": 0, + "content": "22. Li, J., Jie, Z., Wang, X., Wei, X., Ma, L.: Expansion and shrinkage of localization for weakly-supervised semantic segmentation. In: Advances in Neural Information Processing Systems. vol. 35, pp. 16037-16051 (2022) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.55, + 0.786, + 0.591 + ], + "angle": 0, + "content": "23. Li, J., Jie, Z., Wang, X., Zhou, Y., Ma, L., Jiang, J.: Weakly supervised semantic segmentation via self-supervised destruction learning. Neurocomputing 561, 126821 (2023) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.591, + 0.786, + 0.632 + ], + "angle": 0, + "content": "24. Li, J., Jie, Z., Wang, X., Zhou, Y., Wei, X., Ma, L.: Weakly supervised semantic segmentation via progressive patch learning. 
IEEE Transactions on Multimedia 25, 1686-1699 (2022) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.632, + 0.786, + 0.688 + ], + "angle": 0, + "content": "25. Liang, F., Wu, B., Dai, X., Li, K., Zhao, Y., Zhang, H., Zhang, P., Vajda, P., Marculescu, D.: Open-vocabulary semantic segmentation with mask-adapted clip. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7061-7070 (2023) 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.688, + 0.786, + 0.73 + ], + "angle": 0, + "content": "26. Qi, C.R., Liu, W., Wu, C., Su, H., Guibas, L.J.: Frustum pointnets for 3d object detection from rgb-d data. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 918-927 (2018) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.73, + 0.786, + 0.772 + ], + "angle": 0, + "content": "27. Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: Deep learning on point sets for 3d classification and segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 652-660 (2017) 1, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.772, + 0.786, + 0.813 + ], + "angle": 0, + "content": "28. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in Neural Information Processing Systems 30 (2017) 1, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.813, + 0.786, + 0.841 + ], + "angle": 0, + "content": "29. Qian, G., Li, Y., Peng, H., Mai, J., Hammoud, H., Elhoseiny, M., Ghanem, B.: Pointnext: Revisiting pointnet++ with improved training and scaling strategies." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.841 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.452, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3DSS with 2D Vision-Language Guidance" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.245, + 0.147, + 0.786, + 0.175 + ], + "angle": 0, + "content": "Advances in Neural Information Processing Systems 35, 23192-23204 (2022) 1, 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.176, + 0.787, + 0.232 + ], + "angle": 0, + "content": "30. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning. pp. 8748-8763 (2021) 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.232, + 0.787, + 0.273 + ], + "angle": 0, + "content": "31. Ren, Z., Misra, I., Schwing, A.G., Girdhar, R.: 3d spatial recognition without spatially labeled 3d. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13204-13213 (2021) 2, 5, 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.273, + 0.787, + 0.327 + ], + "angle": 0, + "content": "32. Robert, D., Vallet, B., Landrieu, L.: Learning multi-view aggregation in the wild for large-scale 3d semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5575-5584 (2022) 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.328, + 0.787, + 0.37 + ], + "angle": 0, + "content": "33. 
Tatarchenko, M., Park, J., Koltun, V., Zhou, Q.Y.: Tangent convolutions for dense prediction in 3d. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3887-3896 (2018) 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.371, + 0.787, + 0.424 + ], + "angle": 0, + "content": "34. Thomas, H., Qi, C.R., Deschaud, J.E., Marcotegui, B., Goulette, F., Guibas, L.J.: Kpconv: Flexible and deformable convolution for point clouds. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 6411-6420 (2019) 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.425, + 0.787, + 0.467 + ], + "angle": 0, + "content": "35. Wang, Z., Rao, Y., Yu, X., Zhou, J., Lu, J.: Semaffinet: Semantic-affine transformation for point cloud segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11819-11829 (2022) 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.467, + 0.787, + 0.522 + ], + "angle": 0, + "content": "36. Wei, J., Lin, G., Yap, K.H., Hung, T.Y., Xie, L.: Multi-path region mining for weakly supervised 3d semantic segmentation on point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4384-4393 (2020) 2, 5, 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.522, + 0.787, + 0.577 + ], + "angle": 0, + "content": "37. Xian, Y., Choudhury, S., He, Y., Schiele, B., Akata, Z.: Semantic projection network for zero- and few-label semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8256-8265 (2019) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.577, + 0.787, + 0.619 + ], + "angle": 0, + "content": "38. Xu, D., Anguelov, D., Jain, A.: Pointfusion: Deep sensor fusion for 3d bounding box estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 244-253 (2018) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.62, + 0.787, + 0.675 + ], + "angle": 0, + "content": "39. Xu, J., Hou, J., Zhang, Y., Feng, R., Wang, Y., Qiao, Y., Xie, W.: Learning open-vocabulary semantic segmentation models from natural language supervision. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2935-2944 (2023) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.675, + 0.787, + 0.716 + ], + "angle": 0, + "content": "40. Xu, M., Zhang, Z., Wei, F., Hu, H., Bai, X.: Side adapter network for open-vocabulary semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2945-2954 (2023) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.716, + 0.787, + 0.758 + ], + "angle": 0, + "content": "41. Xu, M., Zhang, Z., Wei, F., Lin, Y., Cao, Y., Hu, H., Bai, X.: A simple baseline for open-vocabulary semantic segmentation with pre-trained vision-language model. In: European Conference on Computer Vision. pp. 736-753. Springer (2022) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.758, + 0.787, + 0.799 + ], + "angle": 0, + "content": "42. Xu, X., Yuan, Y., Zhang, Q., Wu, W., Jie, Z., Ma, L., Wang, X.: Weakly-supervised 3d visual grounding based on visual linguistic alignment. arXiv preprint arXiv:2312.09625 (2023) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.799, + 0.787, + 0.841 + ], + "angle": 0, + "content": "43. 
Yan, X., Gao, J., Zheng, C., Zheng, C., Zhang, R., Cui, S., Li, Z.: 2dpass: 2d priors assisted semantic segmentation on lidar point clouds. In: European Conference on Computer Vision. pp. 677-695. Springer (2022) 1" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.841 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "18" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.353, + 0.127 + ], + "angle": 0, + "content": "X. Xu et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.147, + 0.787, + 0.202 + ], + "angle": 0, + "content": "44. Yang, C.K., Chen, M.H., Chuang, Y.Y., Lin, Y.Y.: 2d-3d interlaced transformer for point cloud segmentation with scene-level supervision. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 977-987 (2023) 2, 5, 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.203, + 0.787, + 0.258 + ], + "angle": 0, + "content": "45. Yang, C.K., Wu, J.J., Chen, K.S., Chuang, Y.Y., Lin, Y.Y.: An mil-derived transformer for weakly supervised point cloud segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11830-11839 (2022) 2, 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.259, + 0.787, + 0.3 + ], + "angle": 0, + "content": "46. Yun, S., Park, S.H., Seo, P.H., Shin, J.: Ifseg: Image-free semantic segmentation via vision-language model. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2967-2977 (2023) 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.3, + 0.787, + 0.355 + ], + "angle": 0, + "content": "47. Zhang, R., Wang, L., Qiao, Y., Gao, P., Li, H.: Learning 3d representations from 2d pre-trained models via image-to-point masked autoencoders. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21769-21780 (2023) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.356, + 0.787, + 0.412 + ], + "angle": 0, + "content": "48. Zhang, Y., Hu, Q., Xu, G., Ma, Y., Wan, J., Guo, Y.: Not all points are equal: Learning highly efficient point-based detectors for 3d lidar point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 18953-18962 (2022) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.412, + 0.787, + 0.452 + ], + "angle": 0, + "content": "49. Zhao, H., Jiang, L., Jia, J., Torr, P.H., Koltun, V.: Point transformer. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 16259-16268 (2021) 10, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.453, + 0.787, + 0.495 + ], + "angle": 0, + "content": "50. Zhou, B., Khosla, A., Lapedriza, A., Oliva, A., Torralba, A.: Learning deep features for discriminative localization. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
2921-2929 (2016) 2" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.495 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/b6e9bc2f-30dd-47bb-97e3-5a3cad4d6faf_origin.pdf b/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/b6e9bc2f-30dd-47bb-97e3-5a3cad4d6faf_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..5c56d2db79300769c9f7aeb6062a94743941d1ad --- /dev/null +++ b/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/b6e9bc2f-30dd-47bb-97e3-5a3cad4d6faf_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f87e7ec472ff8c698e81d4520bce3f4c237343edd8874c7180c7093197f1aed +size 3957792 diff --git a/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/full.md b/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/full.md new file mode 100644 index 0000000000000000000000000000000000000000..b9fcfe7a5d28c03d562d1627ad42b5b2b10e9e55 --- /dev/null +++ b/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/full.md @@ -0,0 +1,299 @@ +# 3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance + +Xiaoxu Xu $^{1}$ , Yitian Yuan $^{2}$ , Jinlong Li $^{3}$ , Qiudan Zhang $^{1}$ , Zequn Jie $^{2}$ , Lin Ma $^{2}$ , Hao Tang $^{4,5}$ , Nicu Sebe $^{3}$ , and Xu Wang $^{1*}$ + +1 College of Computer Science and Software Engineering, Shenzhen University, Shenzhen, 518060, China. + +2 Meituan Inc., China. + +3 University of Trento, Italy. + +4 Peking University, China. + +5 Carnegie Mellon University, USA. + +Abstract. In this paper, we propose 3DSS-VLG, a weakly supervised approach for 3D Semantic Segmentation with 2D Vision-Language Guidance, an alternative approach in which a 3D model predicts a dense embedding for each point that is co-embedded with both the aligned image and text spaces from the 2D vision-language model. Specifically, our method exploits the superior generalization ability of 2D vision-language models and proposes the Embeddings Soft-Guidance Stage to utilize it to implicitly align 3D embeddings and text embeddings. Moreover, we introduce the Embeddings Specialization Stage to purify the feature representation with the help of a given scene-level label, specifying a better feature supervised by the corresponding text embedding. Thus, the 3D model is able to gain informative supervision both from the image embedding and the text embedding, leading to competitive segmentation performance. To the best of our knowledge, this is the first work to investigate 3D weakly supervised semantic segmentation by using the textual semantic information of text category labels. Moreover, with extensive quantitative and qualitative experiments, we show that our 3DSS-VLG is able not only to achieve state-of-the-art performance on both the S3DIS and ScanNet datasets, but also to maintain strong generalization capability. The code will be available at https://github.com/xuxiaoxxxx/3DSS-VLG/. + +Keywords: 3D Weakly Supervised Semantic Segmentation $\cdot$ Vision-Language Model + +# 1 Introduction + +3D point cloud semantic segmentation [13, 16, 27-29, 43] can provide valuable geometric and semantic data about the 3D environment and has gained considerable attention over the past few years. 
Learning-based semantic segmentation methods have achieved remarkable performance recently, but they need per-point annotations, which is time-consuming and labor-intensive. + +![](images/1e25a6fa566557f0fd497ec39b3550371babc60a255ebec09c428e50ac2016c6.jpg) + +![](images/a385dd5c806a4d4dd2a4364120e940854498cd0695d3483cb40fe228acc50b59.jpg) +(a) Conventional 3D weakly supervised semantic segmentation solution +(b) Ours (3DSS-VLG) +Fig. 1: Comparison of different approaches. (a) The conventional 3D WSSS approach adopts the coarse-grained CAM method in a global manner and is supervised by scene-level annotations or subcloud-level annotations. (b) Our proposed 3DSS-VLG approach leverages natural 3D-2D correspondence from geometric camera calibration and 2D-text correspondence from vision-language models, to implicitly align texts and 3D point clouds. + +To address this issue, existing weakly supervised methods derive the segmentation model with different weak supervisory signals, such as subcloud-level annotations [36], scene-level annotations [18, 31, 45] and so on. As shown in Fig. 1 (a), the 3D Weakly Supervised Semantic Segmentation (3D WSSS) approaches typically adopt a Class Activation Map (CAM) [50] solution. Point clouds are first processed by several Multi-Layer Perceptron (MLP) layers and thus get a point cloud feature map, and then this point cloud feature map is processed by a Global Average Pooling (GAP) to get a global classification prediction, which is trained with subcloud-level or scene-level annotations. Given the simple GAP connectivity structure, these methods can easily identify the importance of each point by projecting back the output classification weight onto the point cloud feature maps, a technique we call CAM. In this way, the semantic segmentation for each category is back-derived from the global prediction. Recently, with the remarkable success of 2D vision, some methods [18, 44] also use the 2D module to enhance the 3D WSSS. + +Although leveraging 2D-3D fusion in 3D WSSS seems to be promising, there also exist some problems. Kweon et al. [18] need extra detailed annotations of 2D images. As for MIT [44], although it avoids additional per-point/pixel annotations or per-image class labels, its performance is not as good as expected. Therefore, how to design a network that achieves good performance despite the lack of 2D annotations still remains a big challenge. Meanwhile, we notice that the conventional methods for 3D WSSS only use the scene-level or subcloud-level labels to supervise the model, but ignore that the textual category labels such as "chair, table" also have semantic meanings and could be embedded to help the model learn. At the same time, we also find that some methods [12, 25, 46] like Openseg [12], which leverage the pretrained vision-language models such as CLIP [30] to establish precise semantic matching relationships between natural languages and 2D images, have achieved good results in 2D open vocabulary semantic segmentation (2D OVSS). The above two points inspire us to consider whether we can use the well-pretrained 2D OVSS model to help the 3D WSSS. As shown in Fig. 1 (b), the point cloud and 2D images could be mutually mapped with geometric projections, and the 2D images and textual categories could be compared with pretrained vision-language models. 
Therefore, why not take the 2D images as a bridge, leveraging the correspondences between point clouds and images, and between images and natural languages, to implicitly build matching relationships between point clouds and natural languages? + +To this end, we propose a simple yet effective method, namely 3DSS-VLG, i.e., a weakly supervised approach for 3D Semantic Segmentation with 2D Vision-Language Guidance. Our 3DSS-VLG only needs 2D images, without their 2D image-level annotations, during training. Specifically, for the input 3D point cloud, the dataset also provides a set of multi-view images corresponding to it. We first process these multi-view images using the image encoder of a pretrained off-the-shelf 2D OVSS model such as Openseg [12] to get the 2D embeddings. Then, for each point in the 3D point cloud, we project it onto the multi-view images with geometric projections, and integrate these corresponding 2D embeddings to get the 2D-projected embeddings for the point. Next, we utilize the text module of the 2D OVSS model to obtain the textual embeddings of each semantic category label. Since in the embedding space of 2D OVSS, the textual category labels and 2D images could be directly compared, we only need to learn a 3D backbone which could generate 3D embeddings aligned with 2D embeddings; thus, the category labels and the 3D point cloud could be implicitly compared. + +Actually, if the 3D embedding is learned well enough, it can be directly compared with the text embedding by the similarity measurement to classify. However, we find that only relying on pulling the 2D-projected embeddings and 3D embeddings close is not reliable, since the pretrained 2D OVSS model is designed to learn general knowledge and does not have specialized knowledge of the indoor point cloud scene. Therefore, we propose to alleviate this problem with three stages. (1) First, as shown in Fig. 2, we perform matrix multiplication on the projected 2D embeddings and the text embeddings of category labels and get the classification logits. Then, we use the scene-level labels as a mask to filter out some confusing and unreliable predictions in the classification logits and thus get a more reliable pseudo label vector. (2) Second, as shown in Fig. 3 (a), we propose the Embeddings Specialization Stage, which transfers the 2D-projected embeddings with an adapter module to obtain adapted 3D embeddings, and the training of this adapter module will be supervised with the pseudo label vector. This stage is designed to induce a more reliable target 3D embedding suited for the indoor point cloud scene from the 2D-projected embeddings. (3) Finally, as shown in Fig. 3 (b), we design the Embeddings Soft-Guidance Stage, which freezes the adapter module introduced in the second stage and leverages cosine similarity to align the adapted 3D embeddings and the MinkowskiNet [9] 3D embeddings. Combining the above three stages, we can learn a more reliable 3D embedding space for semantic segmentation in the indoor point cloud scene. In the inference procedure, we only need to compare the MinkowskiNet 3D embeddings of the point cloud and the text embeddings of the semantic category labels, thus accomplishing the 3D semantic segmentation. Note that we do not need 2D images to participate in the inference process of our model. 
+ 

In summary, the main contributions of this paper are as follows: + +- We propose a weakly supervised method, 3DSS-VLG, for 3D WSSS, which takes 2D images as a bridge, and leverages natural 3D-2D correspondence from geometric camera calibration and 2D-text correspondence from vision-language models to implicitly establish the semantic relationships between texts and 3D point clouds. +- We design a three-stage training procedure to learn a reliable 3D embedding space in 3DSS-VLG for 3D semantic segmentation. The Embeddings Soft-Guidance Stage is designed to utilize the pretrained 2D vision-language model to provide an embedding space for 3D point cloud representation with the MinkowskiNet 3D backbone. Moreover, we propose the Embeddings Specialization Stage to make the embedding space more robust based on pseudo label filtering with indoor point cloud scene knowledge. +- Extensive experiments on the ScanNet and S3DIS datasets show that the proposed 3DSS-VLG significantly outperforms the previous state-of-the-art methods, even Kweon et al. [18], which uses extra 2D image-level annotations. Moreover, our further experiments show that our 3DSS-VLG has strong generalization capability and can be extended to handle unobserved general datasets. + +# 2 Related Work + +# 2.1 2D Open-Vocabulary Semantic Segmentation + +Recent advances in large vision-language models have enabled a remarkable level of robustness in open-vocabulary semantic segmentation [6,25,39-41]. Open-vocabulary semantic segmentation aims to segment target categories that cannot be accessed during the training procedure. The pioneering work ZS3Net [4] uses generative models to synthesize pixel-level features from word embeddings of unseen classes. SPNet [37] encodes visual features into the semantic embedding space to align with text embeddings. More recently, researchers propose to leverage the pretrained CLIP [30] for open-vocabulary semantic segmentation. ZSSeg [41] leverages the visual module to generate class-agnostic masks and uses the pretrained text encoder to retrieve the unseen class masks. OpenSeg [12] proposes to align the segment-level visual embeddings with text embeddings via region-word grounding. In this work, we solely rely on pretrained 2D open-vocabulary models and perform the 3D weakly supervised semantic segmentation task. We pull the 3D embeddings close to the 2D embeddings, i.e., features extracted from the pretrained model and back-projected onto the point cloud, to implicitly align 3D embeddings and text embeddings. + +# 2.2 3D Weakly Supervised Semantic Segmentation + +This task aims to learn point cloud semantic segmentation using weakly annotated data, such as sparsely labeled points [15, 48], box-level labels [8], subcloud-level labels [36] and scene-level labels [9, 18, 31, 44]. Though the state-of-the-art methods based on sparsely labeled points show performance comparable to that of supervised ones, they require at least partial point-wise annotation in a scene, which is still expensive compared to subcloud-level labels and scene-level labels. The pipeline of the conventional CAM solution has been used in the majority of previous 3D WSSS works and only treats the scene-level labels as one-hot digits. MPRM [36] proposes the subcloud-level annotation method that samples subclouds from the full scene and annotates them, which can alleviate the class imbalance issue commonly appearing in almost every scene. 
However, subcloud-level annotation needs to divide the point cloud into small subclouds, so more than one annotation is required for a scene, which is troublesome and time-consuming. Therefore, some methods that use scene-level annotations are proposed for 3D WSSS. Kweon et al. [18] utilizes 2D and 3D data for semantic segmentation and achieves good performance; however, it requires extra 2D image-level labels. MIT [44] proposes the interlaced transformer structure to fuse 2D-3D information with only scene-level labels. However, its performance is not as good as expected. + +Therefore, in this work, we explore a 3D WSSS method with only scene-level labels. Unlike those previous works, we use the semantic meanings of textual category labels to assist in model learning. Moreover, the performance of our 3DSS-VLG surpasses that of Kweon et al. [18], which uses extra 2D image-level labels. + +# 2.3 2D Semantics in 3D Tasks + +Studies on 3D object detection and semantic segmentation [1,2,5,11,14,20-24] have explored the use of 2D image semantics to assist 3D tasks. There are mainly two approaches: concatenating the image embeddings with each point in the 3D scene as extra information [7,17,42,47] or projecting image semantic results into a 3D space to assist 3D semantic segmentation [19,26,38]. Previous studies usually used 2D image semantics as extra inputs in both training and inference. Although performance has improved, the extra 2D inputs have the potential to constrain the range of application scenarios. This is due to the fact that 2D information may be absent during inference or necessitate laborious pre-processing. In this paper, we aim to investigate the potential of using 2D semantics exclusively during training to assist in the 3D WSSS task. + +![](images/ec7a5e1c73ffcde778c6f22944ef831adb8bf8e68fea90a74161e849726952de.jpg) +Fig. 2: The proposed pseudo label generation procedure. We first leverage the text encoder $\varepsilon^{text}$ of Openseg to get embeddings of the full category labels $\mathbf{F}^C$ , and leverage the 2D image encoder $\varepsilon^{2D}$ of Openseg to get embeddings of the 2D image $\mathbf{F}^{2D}$ . It is important to note that we freeze the whole Openseg model during the procedure of pseudo label generation. Then we back-project the 2D embeddings $\mathbf{F}^{2D}$ to integrate the 2D-projected embeddings $\mathbf{P}^{2D}$ . Specifically, for each point in the point cloud $(x^{3D}, y^{3D}, z^{3D})$ , we use geometric camera calibration matrices $GCCM^{img}$ to calculate the corresponding positions $(x^{2D}, y^{2D})$ on the multi-view images $S$ . Then we integrate these corresponding 2D embeddings in $\mathbf{F}^{2D}$ and average them to get the 2D-projected embeddings $\mathbf{P}^{2D}$ . We perform matrix multiplication on $\mathbf{F}^C$ and $\mathbf{P}^{2D}$ , and get the 3D point cloud semantic segmentation prediction logits $\mathbf{L}^{2D}$ . Finally, we utilize the scene-level labels as the mask $M$ to filter out some confusing and unreliable predictions in the classification and get the more accurate predicted logits $\mathbf{L}^f$ and pseudo labels $\mathbf{Y}$ . + +# 3 The Proposed Method + +In this section, we will first introduce the procedure of pseudo label generation in Sec. 3.1. Then, we will demonstrate the training procedure of our 3DSS-VLG in Sec. 3.2 and Sec. 3.3. Finally, we will describe the 3DSS-VLG inference procedure in Sec. 3.4. + +# 3.1 Pseudo Label Generation Stage + +This stage aims to utilize the pretrained vision-language model and scene-level labels to generate more precise pseudo labels. Given an input point cloud with multi-view images as shown in Fig. 2, we first implement dense 2D embedding extraction for each RGB image via the frozen visual encoder of Openseg [12], and back-project them onto the 3D surface points of a scene to integrate the 2D-projected embeddings. Afterward, more accurate pseudo labels are generated based on the 2D-projected embeddings, text embeddings and scene-level labels. + +2D Embeddings Extraction. The inputs of 3DSS-VLG comprise a scene with a 3D point cloud, scene-level labels and the associated multi-view RGB images
+ +# 3.1 Pseudo Label Generation Stage + +This stage aims to utilize the pretrained vision-language model and scene-level labels to generate more precise pseudo label. Given an input point cloud with multi-view images as shown in Fig. 2, we first implement dense 2D embeddings extraction for each RGB image via the frozen visual encoder of Openseg [12], and back-project them onto the 3D surface points of a scene to integrate the 2D-projected embeddings. Afterward, more accurate pseudo labels are generated based on 2D-projected embeddings, text embeddings and scene-level labels. + +2D Embeddings Extraction. The inputs of 3DSS-VLG comprise a scene with 3D point cloud, scene-level labels and the associated multi-view RGB images + +set. Given the RGB images set $S$ consists of $T$ images with a resolution of $H\times W$ . The point cloud $\mathbf{X}\in \mathbb{R}^{N\times 6}$ contains $N$ points in the scene, and each point is represented with six dimensions of RGBXYZ. We leverage the pretrained image encoder of OpenSeg [12] to get per-pixel embedding, denoted as $\mathbf{F}^{2D}\in \mathbb{R}^{T\times H\times W\times d}$ , where $d$ is the 2D embedding dimension. For each point in the 3D point cloud, we project it onto multi-view images through geometric camera calibration matrixes and get the corresponding 2D positions. Then we can exact the corresponding projected 2D embeddings from $\mathbf{F}^{2D}$ according to the calculated 2D image positions. Since each point may have multiple correspondences in different images, the final 2D-projected embeddings $\mathbf{P}^{2D}\in \mathbb{R}^{N\times d}$ is obtained via average all the corresponding projected 2D embeddings of each point. + +Text Embeddings Extraction. We take the text encoder of Openseg to exact text embeddings $\mathbf{F}^C\in \mathbb{R}^{K\times d}$ of full category labels, where $K$ denoted the number of categories. Similarly, we also freeze the text encoder and directly load the pretrained Openseg parameters. + +Filtering Strategy. After getting the 2D-projected embeddings $\mathbf{P}^{2D}$ and the text embeddings $\mathbf{F}^C$ , we perform matrix multiplication on them and obtain the classification logits $\mathbf{L}^{2D} \in \mathbb{R}^{N \times K}$ . To make classification logits more reliable, the filtering strategy is employed to filter out confusing and unreliable predictions. For instance, as shown in Fig. 2, we create a boolean scene-level label mask $\mathbf{M} \in \mathbb{R}^{1 \times K}$ , where the element value in the mask indicated whether the corresponding category existed. Finally, we perform matrix inner product on classification logits $\mathbf{L}^{2D}$ and scene-level label mask $\mathbf{M}$ and obtain filtered classification logits $\mathbf{L}^f \in \mathbb{R}^{N \times K}$ . After ranking the filtered classification logits $\mathbf{L}^f$ , we can get the more precise pseudo label $\mathbf{Y} \in \mathbb{R}^N$ of the input point cloud. + +# 3.2 Embeddings Specialization Stage + +As we know, the 2D OVSS model is designed to learn general knowledge and do not have any specialized knowledge of the indoor point cloud scene. Therefore, only relying on the 2D embeddings to build the 3D-text correlation will make the 3D WSSS process not reliable. To mitigate this issue, the Embeddings Specialization Stage is proposed to further improve the perception of indoor knowledge of 3D embeddings. 
# 3.2 Embeddings Specialization Stage

The 2D OVSS model is designed to learn general knowledge and does not have any specialized knowledge of indoor point cloud scenes. Therefore, relying only on the 2D embeddings to build the 3D-text correlation makes the 3D WSSS process unreliable. To mitigate this issue, the Embeddings Specialization Stage is proposed to further improve the indoor-scene awareness of the 3D embeddings.

Specifically, the 2D-projected embeddings $\mathbf{P}^{2D}$ of the input are transferred into another space through an adapter module, which simply contains two fully-connected layers. Besides, to keep both the source and adapted semantics, we employ a residual connection to get the adapted 3D embeddings $\mathbf{A}^{3D} \in \mathbb{R}^{N \times d}$:

$$
\mathbf{A}^{3D} = \alpha \cdot \mathrm{MLP}(\mathbf{P}^{2D}) + (1 - \alpha) \cdot \mathbf{P}^{2D}, \tag{1}
$$

where $\alpha$ is the ratio of the residual connection. Next, we perform matrix multiplication on the text embeddings $\mathbf{F}^C$ and the adapted 3D embeddings $\mathbf{A}^{3D}$ and obtain the classification logits $\mathbf{L}^a\in \mathbb{R}^{N\times K}$. A softmax layer is applied on $\mathbf{L}^a$, and a classification cross-entropy loss $\mathcal{L}_a$ is introduced to supervise the procedure, using the pseudo labels $\mathbf{Y}$ of the point cloud as supervision.

![](images/9600d967b6292825c681f07e3860caad03c21a1e646f944b7dd688b370e2d117.jpg)
Fig. 3: The training procedure of our proposed 3DSS-VLG, which is mainly divided into two stages: (a) the Embeddings Specialization Stage and (b) the Embeddings Soft-Guidance Stage. For (a), we first utilize the text encoder $\varepsilon^{text}$ of Openseg to obtain the embeddings of the category labels $\mathbf{F}^C$, which are frozen during the training of (a). Meanwhile, we get the initial 2D-projected embeddings $\mathbf{P}^{2D}$ from the 2D module and leverage the adapter module to transfer $\mathbf{P}^{2D}$ to a new embedding space, obtaining the adapted 3D embeddings $\mathbf{A}^{3D}$. We perform matrix multiplication on $\mathbf{A}^{3D}$ and $\mathbf{F}^C$ and get the predictions $\mathbf{L}^a$. Finally, we use the pseudo labels $\mathbf{Y}$ to supervise the model; the green dashed lines denote back-propagation of the loss $\mathcal{L}_a$. For (b), we first utilize the adapter module to obtain the adapted 3D embeddings $\mathbf{A}^{3D}$. It is important to note that we freeze the adapter module during the training of (b). Meanwhile, we use the 3D module $\varepsilon^{3D}$ to obtain the 3D embeddings $\mathbf{F}^{3D}$. The cosine similarity loss $\mathcal{L}_s$ is used to train the model; the red dashed lines denote back-propagation of the loss $\mathcal{L}_s$.

Just by introducing this simple adapter module, we can make the learned adapted embeddings more semantically aware of indoor point cloud scenes, thus assisting the 3D WSSS task.
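Eq. 1 translates directly into a small residual module. The following is a minimal PyTorch sketch; the hidden width, the ReLU activation and the default $\alpha$ are our assumptions, since the paper only states that the adapter contains two fully-connected layers:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class Adapter(nn.Module):
    """Two FC layers plus a residual connection, as in Eq. 1."""
    def __init__(self, d: int, alpha: float = 0.5):
        super().__init__()
        self.alpha = alpha
        self.mlp = nn.Sequential(nn.Linear(d, d),
                                 nn.ReLU(inplace=True),
                                 nn.Linear(d, d))

    def forward(self, p2d: torch.Tensor) -> torch.Tensor:
        # A^{3D} = alpha * MLP(P^{2D}) + (1 - alpha) * P^{2D}
        return self.alpha * self.mlp(p2d) + (1.0 - self.alpha) * p2d

# Specialization-stage step (sketch): logits L^a = A^{3D} F^{C,T}, supervised by Y.
# adapter = Adapter(d); loss = F.cross_entropy(adapter(p2d) @ fc.t(), y)
```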
# 3.3 Embeddings Soft-Guidance Stage

Since Openseg has established a high level of semantic alignment between 2D embeddings and text embeddings, we propose the Embeddings Soft-Guidance Stage, which naturally takes the 2D embeddings as a bridge to implicitly align the 3D embeddings and text embeddings via cosine similarity. Specifically, as shown in Fig. 3 (b), we take the point cloud $\mathbf{X}$ as input and use a MinkowskiNet18A UNet [9] as our 3D module; meanwhile, we change the dimension of its outputs to $d$. We thus get the learned 3D embeddings $\mathbf{F}^{3D} \in \mathbb{R}^{N \times d}$. Then we take the corresponding 2D-projected embeddings $\mathbf{P}^{2D}$ as input, process them with the adapter module, and get the adapted 3D embeddings $\mathbf{A}^{3D}$. We follow the typical cosine similarity loss by pulling the paired 3D embeddings $\mathbf{F}^{3D}$ and adapted 3D embeddings $\mathbf{A}^{3D}$ closer. Note that we freeze the adapter module and directly load the parameters obtained in Sec. 3.2 during training. Therefore, we define the 3DSS-VLG loss as:

$$
\mathcal{L}_s = 1 - \cos(\mathbf{F}^{3D}, \mathbf{A}^{3D}). \tag{2}
$$

# 3.4 Inference

During inference, we only retain the 3D and text modules and remove the 2D module. Specifically, we take the 3D embeddings $\mathbf{F}^{3D}$ from the 3D module and the category embeddings $\mathbf{F}^C$ from the text module, perform matrix multiplication on them, and get the classification logits. Finally, we rank the logits and obtain the final per-point segmentation for the input point cloud $\mathbf{X}$.
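Both the soft-guidance loss of Eq. 2 and the 2D-free inference are essentially one-liners. A minimal PyTorch sketch follows; averaging $\mathcal{L}_s$ over all points is our assumption about the reduction:

```python
import torch
import torch.nn.functional as F

def esgs_loss(f3d: torch.Tensor, a3d: torch.Tensor) -> torch.Tensor:
    # Eq. 2: pull each point's F^{3D} toward its (frozen) adapted target A^{3D}.
    return (1.0 - F.cosine_similarity(f3d, a3d, dim=1)).mean()

@torch.no_grad()
def segment(f3d: torch.Tensor, fc: torch.Tensor) -> torch.Tensor:
    # Sec. 3.4: per-point logits F^{3D} F^{C,T}; the top-ranked category wins.
    return (f3d @ fc.t()).argmax(dim=1)   # (N,) predicted classes
```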
# 4 Experiments

In this section, we first present our experimental settings, including datasets, evaluation metrics, and implementation details. The competing methods are then presented and compared. Finally, ablation studies are provided to further demonstrate the necessity and effectiveness of each component of our framework.

# 4.1 Datasets and Evaluation Metrics

We evaluate our 3DSS-VLG on two public and widely-used large-scale point cloud datasets with multi-view images, S3DIS [3] and ScanNet [10]. S3DIS is proposed for indoor scene understanding. It consists of 6 areas including 271 rooms with 13 classes. Each room is scanned via RGBD sensors and is represented by a point cloud with 3D coordinates and RGB values. Following previous works, we take Area 5 as the test scene. ScanNet [10] has 1513 training scenes and 100 test scenes with 20 classes. We adopt the default train-val split, with 1201 training scenes and 312 validation scenes. The mean Intersection over Union (mIoU) is employed as the evaluation metric on both datasets.
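For reference, mIoU averages the per-class IoU computed from the point-wise confusion matrix; below is a minimal NumPy sketch of our own (ignore-label handling omitted):

```python
import numpy as np

def mean_iou(pred: np.ndarray, gt: np.ndarray, num_classes: int) -> float:
    """pred, gt: (N,) integer class ids in [0, num_classes)."""
    idx = num_classes * gt.astype(np.int64) + pred.astype(np.int64)
    cm = np.bincount(idx, minlength=num_classes ** 2).reshape(num_classes,
                                                              num_classes)
    inter = np.diag(cm)                    # per-class true positives
    union = cm.sum(0) + cm.sum(1) - inter  # predicted + actual - overlap
    valid = union > 0                      # skip classes absent from both
    return float((inter[valid] / union[valid]).mean())
```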
# 4.2 Implementation Details

3DSS-VLG is implemented in PyTorch. For the training procedure of Sec. 3.2, we use the Adam optimizer with a batch size of 16 and an initial learning rate of 0.003, reduced by a factor of 0.7 every 20 epochs for a total of 80 epochs. For the training procedure of Sec. 3.3, the model is optimized with Adam and a batch size of 8, with an initial learning rate of 0.0001 adjusted by the poly learning rate policy (both schedules are sketched right after Tab. 1).

Table 1: Performance comparison on the S3DIS dataset. "Sup." indicates the type of supervision. "100%" represents full annotation. "scene." denotes scene-level annotation.

| Method | Label Effort | Sup. | Test |
| --- | --- | --- | --- |
| PointNet [27] | | 100% | 41.1 |
| TangentConv [33] | | 100% | 52.8 |
| MinkowskiNet [9] | | 100% | 65.8 |
| KPConv [34] | >20 min | 100% | 67.1 |
| PointTransformer [49] | | 100% | 70.4 |
| PointNeXt [29] | | 100% | 70.5 |
| DeepViewAgg [32] | | 100% | 67.2 |
| SemAffiNet [35] | | 100% | 71.6 |
| MPRM [36] | | scene. | 10.3 |
| MIL-Trans [45] | | scene. | 12.9 |
| WYPR [31] | <1 min | scene. | 22.3 |
| MIT [44] | | scene. | 27.7 |
| Ours | | scene. | 45.3 |
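As referenced in Sec. 4.2, the two schedules map onto stock PyTorch schedulers. This is a sketch with placeholder modules; the poly power of 0.9 is our assumption, since the paper does not state it:

```python
import torch
import torch.nn as nn

# Placeholders standing in for the adapter (Sec. 3.2) and 3D backbone (Sec. 3.3).
adapter, backbone3d = nn.Linear(768, 768), nn.Linear(768, 768)
total_epochs = 80

# Sec. 3.2: Adam, lr 0.003, multiplied by 0.7 every 20 epochs (80 epochs total).
opt_a = torch.optim.Adam(adapter.parameters(), lr=3e-3)
sched_a = torch.optim.lr_scheduler.StepLR(opt_a, step_size=20, gamma=0.7)

# Sec. 3.3: Adam, lr 0.0001, poly decay (power 0.9 assumed).
opt_s = torch.optim.Adam(backbone3d.parameters(), lr=1e-4)
sched_s = torch.optim.lr_scheduler.LambdaLR(
    opt_s, lr_lambda=lambda epoch: (1.0 - epoch / total_epochs) ** 0.9)
```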
# 4.3 3D Semantic Segmentation Results

We evaluate our proposed approach against state-of-the-art techniques for 3D weakly supervised semantic segmentation with scene-level labels. First, we list some fully supervised point cloud semantic segmentation methods to gauge the gap between their performance and ours. Then, we compare our method with semantic segmentation methods supervised by scene-level or subcloud-level labels. We also indicate the average annotation time per scene.

Evaluation on S3DIS. Tab. 1 shows the performance of each type of 3D point cloud semantic segmentation method evaluated on the S3DIS dataset. Under the scene-level annotation setting, our method greatly surpasses the existing state-of-the-art method MIT [44] by $17.6\%$. This shows that using the textual semantic information ignored by previous 3D weakly supervised semantic segmentation methods can significantly improve segmentation performance. The textual semantic information of each category is unique; once the 2D and 3D embeddings are aligned, the 3D embeddings become implicitly aligned with the corresponding unique category semantics, which allows the model to achieve greater performance improvements.

Meanwhile, we compare our method with some fully supervised methods. It can be observed that our 3DSS-VLG can outperform some of them, e.g., PointNet [27]. Moreover, comparing the annotation time of different types of supervision, scene-level annotation is by far the most efficient. Such results demonstrate the effectiveness and potential of our weakly supervised method.

Evaluation on ScanNet. We also evaluate our 3DSS-VLG on the ScanNet online test set and validation set and present the results in Tab. 2.

Table 2: Performance comparison on the ScanNet test set and validation set. "Sup." indicates the type of supervision. "100%" represents full annotation. "subcloud." and "scene." imply subcloud-level and scene-level annotation, respectively. "image." denotes image-level annotation.
| Method | Label Effort | Sup. | Test | Val |
| --- | --- | --- | --- | --- |
| PointNet++ [28] | | 100% | 33.9 | - |
| TangentConv [33] | | 100% | 43.8 | - |
| MinkowskiNet [9] | | 100% | 73.6 | 72.2 |
| KPConv [34] | >20 min | 100% | 68.6 | 69.2 |
| PointTransformer [49] | | 100% | - | 70.6 |
| PointNeXt [29] | | 100% | 71.2 | 71.5 |
| DeepViewAgg [32] | | 100% | - | 71.0 |
| SemAffiNet [35] | | 100% | 74.9 | - |
| MPRM [36] | 3 min | subcloud. | 41.1 | 43.2 |
| Kweon et al. [18] | 5 min | scene. + image. | 47.4 | 49.6 |
| MIL-Trans [45] | | scene. | - | 26.2 |
| WYPR [31] | <1 min | scene. | 24.0 | 29.6 |
| MIT [44] | | scene. | 31.7 | 35.8 |
| Ours | | scene. | 48.9 | 49.7 |
For the test set, it can be observed that our 3DSS-VLG achieves the best performance under only scene-level label supervision and even surpasses MPRM [36], which is supervised by subcloud-level annotations. Moreover, we are surprised to find that our method also outperforms Kweon et al. [18] by $1.5\%$, which uses not only scene-level labels but also extra image-level labels. Our method achieves stronger performance with fewer annotations, further illustrating its superiority. Meanwhile, our 3DSS-VLG can outperform some fully supervised methods. On the validation set, our method also achieves the state-of-the-art among 3D WSSS approaches. These results demonstrate the superiority of 3DSS-VLG.

# 4.4 Ablation Studies

Effectiveness of Each Component. To demonstrate the advantage of each component in our 3DSS-VLG, we conduct comprehensive ablation studies on the S3DIS dataset, as shown in Tab. 3. The ablation model (a) only retains the MinkowskiNet18A UNet [9] and is trained directly with the pseudo labels generated without scene-level label filtering, supervised by a cross-entropy loss. We set model (a) as the baseline of our experiments. Compared to model (a), model (b) is not directly supervised by pseudo labels; it adopts the Embeddings Soft-Guidance Stage (ESGS) and is soft-guided by the 2D-projected embeddings $\mathbf{P}^{2D}$. The mIoU improves from $37.7\%$ to $38.2\%$. This observation proves that the soft-guidance strategy can guide the 3D embeddings to align with the text embeddings and achieves better performance than directly using the pseudo labels to supervise the 3D model.

Table 3: Ablation studies of the 3DSS-VLG components on the S3DIS dataset.
| | ESGS | Filtering | ESS | mIoU |
| --- | --- | --- | --- | --- |
| (a) | | | | 37.7 |
| (b) | ✓ | | | 38.2 |
| (c) | | ✓ | | 42.6 |
| (d) | ✓ | ✓ | ✓ | 45.3 |
Table 4: Performance comparisons of the generalization capability.
| Domain | mIoU | mAcc |
| --- | --- | --- |
| S3DIS → ScanNet | 13.4 | 23.0 |
| ScanNet → S3DIS | 33.3 | 50.9 |
Meanwhile, when we introduce the filtering strategy to model (a), as shown in model (c), the performance increases greatly from $37.7\%$ to $42.6\%$. Finally, by adding the filtering strategy to model (b) and utilizing the Embeddings Specialization Stage (ESS), model (d) is supervised by the adapted 3D embeddings $\mathbf{A}^{3D}$, and the performance improves from $38.2\%$ to $45.3\%$. Such results prove that our 3DSS-VLG helps the model obtain a better, indoor-point-cloud-specific embedding space to align 3D point clouds and text.

Generalization Capability. Due to the domain gap among different datasets, a model trained on one dataset is usually not applicable to another, and this also occurs in the 3D WSSS task. Nevertheless, compared to previous works, our 3DSS-VLG uses textual semantic information as guidance rather than CAM, which means our model learns a strong correspondence between the 3D point cloud and the text of category labels and suggests that it may generalize. Therefore, we further apply our framework to novel data from unobserved scene domains.

As shown in Tab. 4, we experimentally verify the generalizability of the proposed method on the S3DIS and ScanNet datasets. The first row reports a model trained on the S3DIS dataset and tested on the validation set of the ScanNet dataset; the second row reports a model trained on the ScanNet dataset and tested on the test set of the S3DIS dataset. Compared to the weakly supervised methods with scene-level labels, our 3DSS-VLG still shows a certain gap in the first row. However, in the second row, we are surprised to find that our method outperforms all the weakly supervised methods and achieves state-of-the-art performance. The ScanNet dataset provides six times more training scenes than the S3DIS dataset; therefore, a model pretrained on ScanNet is more robust than one pretrained on S3DIS, and our experimental results confirm this.

Table 5: Performance comparisons with different 3D backbones and ESS module backbones on the S3DIS dataset.
| Module | Backbone | mIoU |
| --- | --- | --- |
| 3D | MinkowskiNet14A | 44.5 |
| 3D | MinkowskiNet18A | 45.3 |
| 3D | MinkowskiNet34A | 44.7 |
| ESS | Transformer | 45.0 |
| ESS | MLP | 45.3 |
The results also strongly support the complementary advantage of using textual semantic information, even without any further fine-tuning or domain-specific adaptation. Our 3DSS-VLG can thus be extended to handle unobserved general data and has strong generalization capability, which is promising for the field of 3D WSSS.

Experiments with Different Backbones. Tab. 5 shows the performance of our method on S3DIS with different 3D backbones and ESS module backbones. Based on these results, we use MinkowskiNet18A as our 3D backbone and the MLP (two FC layers) as the backbone of our ESS.

# 4.5 Qualitative Results

Fig. 4 visualizes the qualitative comparison between the proposed framework and the baseline. Here the baseline is model (a) of Sec. 4.4. Compared with the baseline, our 3DSS-VLG shows significantly better results in terms of semantic accuracy and segmentation precision. With the ESGS, ESS and filtering strategies, our 3DSS-VLG learns a better indoor-point-cloud-specific embedding space to align 3D point clouds and text, and achieves substantially better semantic segmentation results than the baseline.

![](images/7711442115bf0b7100fc52c012e0cbb31e8c0b6a0f35b6e29a7b2c7b6f85bda8.jpg)
Fig. 4: Qualitative results on the S3DIS dataset of the baseline and our 3DSS-VLG. From left to right: input point clouds, ground truth, baseline results, and our 3DSS-VLG results. (Color legend: ceiling, floor, beam, window, column, table, sofa, board.)

# 4.6 Limitations

Our work relies on vision-language alignment and does not address how to align visual embeddings with abstract category text embeddings (e.g., the "other" class in the S3DIS dataset). It is difficult for the model to understand the difference between the "other" class and the remaining categories, which leads to wrong segmentations. This limitation is a direct avenue for future work.

# 5 Conclusion

In this paper, we propose 3DSS-VLG to address the shortage of point-level annotations. Specifically, our 3DSS-VLG exploits the superior ability of current vision-language models in aligning the semantics between texts and 2D images, as well as the naturally existing correspondences between 2D images and 3D
point clouds to implicitly co-embed text embeddings with 3D point cloud embeddings using only scene-level labels. With extensive experiments, we verify that the textual semantic information of category labels is beneficial for 3DSS-VLG, which achieves the state-of-the-art on both the S3DIS and ScanNet datasets. Furthermore, with an experiment extending our framework to unobserved scene domains, we demonstrate the generalization capability of our method, which supports its practicality.

# Acknowledgements

This work was supported in part by the National Natural Science Foundation of China under Grants 62371310 and 62032015, in part by the Guangdong Basic and Applied Basic Research Foundation under Grant 2023A1515011236, in part by the Stable Support Project of Shenzhen (Project No. 20231122122722001), and in part by the third phase of the high-level university construction interdisciplinary innovation team project of Shenzhen University (24JCXK03). We also acknowledge the CINECA award under the ISCRA initiative for the availability of partial HPC resources, and partial support from the Fundamental Research Funds for the Central Universities, Peking University.

# References

1. Alonso, I., Riazuelo, L., Montesano, L., Murillo, A.C.: 3d-mininet: Learning a 2d representation from point clouds for fast and efficient 3d lidar semantic segmentation. IEEE Robotics and Automation Letters 5(4), 5432-5439 (2020) 5
2. Ando, A., Gidaris, S., Bursuc, A., Puy, G., Boulch, A., Marlet, R.: Rangevit: Towards vision transformers for 3d semantic segmentation in autonomous driving. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5240-5250 (2023) 5
3. Armeni, I., Sener, O., Zamir, A.R., Jiang, H., Brilakis, I., Fischer, M., Savarese, S.: 3d semantic parsing of large-scale indoor spaces. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1534-1543 (2016) 9
4. Bucher, M., Vu, T.H., Cord, M., Pérez, P.: Zero-shot semantic segmentation. Advances in Neural Information Processing Systems 32 (2019) 4
5. Cardace, A., Ramirez, P.Z., Salti, S., Di Stefano, L.: Exploiting the complementarity of 2d and 3d networks to address domain-shift in 3d semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 98-109 (2023) 5
6. Chen, J., Zhu, D., Qian, G., Ghanem, B., Yan, Z., Zhu, C., Xiao, F., Culatana, S.C., Elhoseiny, M.: Exploring open-vocabulary semantic segmentation from clip vision encoder distillation only. In: Proceedings of the IEEE/CVF International Conference on Computer Vision.
pp. 699-710 (2023) 4
7. Chen, R., Liu, Y., Kong, L., Chen, N., Zhu, X., Ma, Y., Liu, T., Wang, W.: Towards label-free scene understanding by vision foundation models. Advances in Neural Information Processing Systems 36 (2024) 5
8. Chibane, J., Engelmann, F., Anh Tran, T., Pons-Moll, G.: Box2mask: Weakly supervised 3d semantic instance segmentation using bounding boxes. In: European Conference on Computer Vision. pp. 681-699. Springer (2022) 5
9. Choy, C., Gwak, J., Savarese, S.: 4d spatio-temporal convnets: Minkowski convolutional neural networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3075-3084 (2019) 4, 5, 9, 10, 11
10. Dai, A., Chang, A.X., Savva, M., Halber, M., Funkhouser, T., Nießner, M.: Scannet: Richly-annotated 3d reconstructions of indoor scenes. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 5828-5839 (2017) 9
11. Genova, K., Yin, X., Kundu, A., Pantofaru, C., Cole, F., Sud, A., Brewington, B., Shucker, B., Funkhouser, T.: Learning 3d semantic segmentation with only 2d image supervision. In: 2021 International Conference on 3D Vision (3DV). pp. 361-372 (2021) 5
12. Ghiasi, G., Gu, X., Cui, Y., Lin, T.Y.: Scaling open-vocabulary image segmentation with image-level labels. In: European Conference on Computer Vision. pp. 540-557. Springer (2022) 3, 4, 6, 7
13. Hegde, D., Valanarasu, J.M.J., Patel, V.: Clip goes 3d: Leveraging prompt tuning for language grounded 3d recognition. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 2028-2038 (2023) 1
14. Hou, J., Xie, S., Graham, B., Dai, A., Nießner, M.: Pri3d: Can 3d priors help 2d representation learning? In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5693-5702 (2021) 5
15. Hu, Q., Yang, B., Fang, G., Guo, Y., Leonardis, A., Trigoni, N., Markham, A.: Sqn: Weakly-supervised semantic segmentation of large-scale 3d point clouds. In: European Conference on Computer Vision. pp. 600-619. Springer (2022) 5
16. Hu, Q., Yang, B., Xie, L., Rosa, S., Guo, Y., Wang, Z., Trigoni, N., Markham, A.: Randla-net: Efficient semantic segmentation of large-scale point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11108-11117 (2020) 1
17. Hu, W., Zhao, H., Jiang, L., Jia, J., Wong, T.T.: Bidirectional projection network for cross dimension scene understanding. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 14373-14382 (2021) 5
18. Kweon, H., Yoon, K.J.: Joint learning of 2d-3d weakly supervised semantic segmentation. Advances in Neural Information Processing Systems 35, 30499-30511 (2022) 2, 5, 11
19. Lahoud, J., Ghanem, B.: 2d-driven 3d object detection in rgb-d images. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4622-4630 (2017) 5
20. Li, J., Dai, H., Han, H., Ding, Y.: Mseg3d: Multi-modal 3d semantic segmentation for autonomous driving. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21694-21704 (2023) 5
21. Li, J., Jie, Z., Ricci, E., Ma, L., Sebe, N.: Enhancing robustness of vision-language models through orthogonality learning and cross-regularization (2024), https://arxiv.org/abs/2407.08374 5
22. Li, J., Jie, Z., Wang, X., Wei, X., Ma, L.: Expansion and shrinkage of localization for weakly-supervised semantic segmentation. In: Advances in Neural Information Processing Systems. vol. 35, pp.
16037-16051 (2022) 5
23. Li, J., Jie, Z., Wang, X., Zhou, Y., Ma, L., Jiang, J.: Weakly supervised semantic segmentation via self-supervised destruction learning. Neurocomputing 561, 126821 (2023) 5
24. Li, J., Jie, Z., Wang, X., Zhou, Y., Wei, X., Ma, L.: Weakly supervised semantic segmentation via progressive patch learning. IEEE Transactions on Multimedia 25, 1686-1699 (2022) 5
25. Liang, F., Wu, B., Dai, X., Li, K., Zhao, Y., Zhang, H., Zhang, P., Vajda, P., Marculescu, D.: Open-vocabulary semantic segmentation with mask-adapted clip. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7061-7070 (2023) 3, 4
26. Qi, C.R., Liu, W., Wu, C., Su, H., Guibas, L.J.: Frustum pointnets for 3d object detection from rgb-d data. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 918-927 (2018) 5
27. Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: Deep learning on point sets for 3d classification and segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 652-660 (2017) 1, 10
28. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in Neural Information Processing Systems 30 (2017) 1, 11
29. Qian, G., Li, Y., Peng, H., Mai, J., Hammoud, H., Elhoseiny, M., Ghanem, B.: Pointnext: Revisiting pointnet++ with improved training and scaling strategies. Advances in Neural Information Processing Systems 35, 23192-23204 (2022) 1, 10, 11
30. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning. pp. 8748-8763 (2021) 3, 4
31. Ren, Z., Misra, I., Schwing, A.G., Girdhar, R.: 3d spatial recognition without spatially labeled 3d. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13204-13213 (2021) 2, 5, 10, 11
32. Robert, D., Vallet, B., Landrieu, L.: Learning multi-view aggregation in the wild for large-scale 3d semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5575-5584 (2022) 10, 11
33. Tatarchenko, M., Park, J., Koltun, V., Zhou, Q.Y.: Tangent convolutions for dense prediction in 3d. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3887-3896 (2018) 10, 11
34. Thomas, H., Qi, C.R., Deschaud, J.E., Marcotegui, B., Goulette, F., Guibas, L.J.: Kpconv: Flexible and deformable convolution for point clouds. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 6411-6420 (2019) 10, 11
35. Wang, Z., Rao, Y., Yu, X., Zhou, J., Lu, J.: Semaffinet: Semantic-affine transformation for point cloud segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11819-11829 (2022) 10, 11
36. Wei, J., Lin, G., Yap, K.H., Hung, T.Y., Xie, L.: Multi-path region mining for weakly supervised 3d semantic segmentation on point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4384-4393 (2020) 2, 5, 10, 11
37. Xian, Y., Choudhury, S., He, Y., Schiele, B., Akata, Z.: Semantic projection network for zero- and few-label semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8256-8265 (2019) 4
38. Xu, D., Anguelov, D., Jain, A.: Pointfusion: Deep sensor fusion for 3d bounding box estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 244-253 (2018) 5
39. Xu, J., Hou, J., Zhang, Y., Feng, R., Wang, Y., Qiao, Y., Xie, W.: Learning open-vocabulary semantic segmentation models from natural language supervision. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2935-2944 (2023) 4
40. Xu, M., Zhang, Z., Wei, F., Hu, H., Bai, X.: Side adapter network for open-vocabulary semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2945-2954 (2023) 4
41. Xu, M., Zhang, Z., Wei, F., Lin, Y., Cao, Y., Hu, H., Bai, X.: A simple baseline for open-vocabulary semantic segmentation with pre-trained vision-language model. In: European Conference on Computer Vision. pp. 736-753. Springer (2022) 4
42. Xu, X., Yuan, Y., Zhang, Q., Wu, W., Jie, Z., Ma, L., Wang, X.: Weakly-supervised 3d visual grounding based on visual linguistic alignment. arXiv preprint arXiv:2312.09625 (2023) 5
43. Yan, X., Gao, J., Zheng, C., Zheng, C., Zhang, R., Cui, S., Li, Z.: 2dpass: 2d priors assisted semantic segmentation on lidar point clouds. In: European Conference on Computer Vision. pp. 677-695. Springer (2022) 1
44. Yang, C.K., Chen, M.H., Chuang, Y.Y., Lin, Y.Y.: 2d-3d interlaced transformer for point cloud segmentation with scene-level supervision. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 977-987 (2023) 2, 5, 10, 11
45. Yang, C.K., Wu, J.J., Chen, K.S., Chuang, Y.Y., Lin, Y.Y.: An mil-derived transformer for weakly supervised point cloud segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11830-11839 (2022) 2, 10, 11
46. Yun, S., Park, S.H., Seo, P.H., Shin, J.: Ifseg: Image-free semantic segmentation via vision-language model. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2967-2977 (2023) 3
47. Zhang, R., Wang, L., Qiao, Y., Gao, P., Li, H.: Learning 3d representations from 2d pre-trained models via image-to-point masked autoencoders. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21769-21780 (2023) 5
48. Zhang, Y., Hu, Q., Xu, G., Ma, Y., Wan, J., Guo, Y.: Not all points are equal: Learning highly efficient point-based detectors for 3d lidar point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 18953-18962 (2022) 5
49. Zhao, H., Jiang, L., Jia, J., Torr, P.H., Koltun, V.: Point transformer. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 16259-16268 (2021) 10, 11
50. Zhou, B., Khosla, A., Lapedriza, A., Oliva, A., Torralba, A.: Learning deep features for discriminative localization. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp.
2921-2929 (2016) 2 \ No newline at end of file diff --git a/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/images.zip b/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..a9cdaea0958681ca92ee7bafd8bb6f92df4300b7 --- /dev/null +++ b/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ba6189b94d4be7b54e3e48685e79dda68ac1e61bb8e8bd42dab7d5bf01be28e +size 355702 diff --git a/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/layout.json b/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..6718e4a951f03c97287f4b6e2c4ee154e71dc40f --- /dev/null +++ b/2024/3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance/layout.json @@ -0,0 +1,9572 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 143, + 111, + 471, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 111, + 471, + 148 + ], + "spans": [ + { + "bbox": [ + 143, + 111, + 471, + 148 + ], + "type": "text", + "content": "3D Weakly Supervised Semantic Segmentation with 2D Vision-Language Guidance" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "spans": [ + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "text", + "content": "Xiaoxu Xu" + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "text", + "content": ", Yitian Yuan" + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "text", + "content": ", Jinlong Li" + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "text", + "content": ", Qiudan Zhang" + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "text", + "content": ", Zequn Jie" + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "text", + "content": ", Lin Ma" + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "text", + "content": ", Hao Tang" + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "inline_equation", + "content": "^{4,5}" + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "text", + "content": ", Nicu Sebe" + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "text", + "content": ", and Xu Wang" + }, + { + "bbox": [ + 133, + 167, + 481, + 194 + ], + "type": "inline_equation", + "content": "^{1*}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 145, + 201, + 468, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, 
+ 201, + 468, + 223 + ], + "spans": [ + { + "bbox": [ + 145, + 201, + 468, + 223 + ], + "type": "text", + "content": "1 College of Computer Science and Software Engineering, Shenzhen University, Shenzhen, 518060, China." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 258, + 224, + 355, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 224, + 355, + 235 + ], + "spans": [ + { + "bbox": [ + 258, + 224, + 355, + 235 + ], + "type": "text", + "content": "2 Meituan Inc., China." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 247, + 235, + 365, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 235, + 365, + 246 + ], + "spans": [ + { + "bbox": [ + 247, + 235, + 365, + 246 + ], + "type": "text", + "content": "3 University of Trento, Italy." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 249, + 246, + 363, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 246, + 363, + 256 + ], + "spans": [ + { + "bbox": [ + 249, + 246, + 363, + 256 + ], + "type": "text", + "content": "4 Peking University, China." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 233, + 256, + 380, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 256, + 380, + 267 + ], + "spans": [ + { + "bbox": [ + 233, + 256, + 380, + 267 + ], + "type": "text", + "content": "5 Carnegie Mellon University, USA." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 159, + 288, + 455, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 288, + 455, + 520 + ], + "spans": [ + { + "bbox": [ + 159, + 288, + 455, + 520 + ], + "type": "text", + "content": "Abstract. In this paper, we propose 3DSS-VLG, a weakly supervised approach for 3D Semantic Segmentation with 2D Vision-Language Guidance, an alternative approach that a 3D model predicts dense-embedding for each point which is co-embedded with both the aligned image and text spaces from the 2D vision-language model. Specifically, our method exploits the superior generalization ability of the 2D vision-language models and proposes the Embeddings Soft-Guidance Stage to utilize it to implicitly align 3D embeddings and text embeddings. Moreover, we introduce the Embeddings Specialization Stage to purify the feature representation with the help of a given scene-level label, specifying a better feature supervised by the corresponding text embedding. Thus, the 3D model is able to gain informative supervisions both from the image embedding and text embedding, leading to competitive segmentation performances. To the best of our knowledge, this is the first work to investigate 3D weakly supervised semantic segmentation by using the textual semantic information of text category labels. Moreover, with extensive quantitative and qualitative experiments, we present that our 3DSS-VLG is able not only to achieve the state-of-the-art performance on both S3DIS and ScanNet datasets, but also to maintain strong generalization capability. The code will be available at https://github.com/xuxiaoxxxx/3DSS-VLG/." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 159, + 529, + 453, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 529, + 453, + 550 + ], + "spans": [ + { + "bbox": [ + 159, + 529, + 453, + 550 + ], + "type": "text", + "content": "Keywords: 3D Weakly Supervised Semantic Segmentation " + }, + { + "bbox": [ + 159, + 529, + 453, + 550 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 159, + 529, + 453, + 550 + ], + "type": "text", + "content": " Vision-Language Model" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 567, + 230, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 567, + 230, + 578 + ], + "spans": [ + { + "bbox": [ + 132, + 567, + 230, + 578 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 588, + 482, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 588, + 482, + 648 + ], + "spans": [ + { + "bbox": [ + 130, + 588, + 482, + 648 + ], + "type": "text", + "content": "3D point cloud semantic segmentation [13, 16, 27-29, 43] can provide valuable geometric and semantic data about the 3D environment and has gained considerable attention over the past few years. Learning-based semantic segmentation methods have achieved remarkable performance recently, but they need per-point annotations, which is time consuming and labor intensive." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 654, + 318, + 665 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 654, + 318, + 665 + ], + "spans": [ + { + "bbox": [ + 133, + 654, + 318, + 665 + ], + "type": "text", + "content": "* Corresponding author: wangxu@szu.edu.cn" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 202, + 115, + 402, + 166 + ], + "blocks": [ + { + "bbox": [ + 202, + 115, + 402, + 166 + ], + "lines": [ + { + "bbox": [ + 202, + 115, + 402, + 166 + ], + "spans": [ + { + "bbox": [ + 202, + 115, + 402, + 166 + ], + "type": "image", + "image_path": "1e25a6fa566557f0fd497ec39b3550371babc60a255ebec09c428e50ac2016c6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 154, + 188, + 459, + 305 + ], + "blocks": [ + { + "bbox": [ + 157, + 172, + 456, + 185 + ], + "lines": [ + { + "bbox": [ + 157, + 172, + 456, + 185 + ], + "spans": [ + { + "bbox": [ + 157, + 172, + 456, + 185 + ], + "type": "text", + "content": "(a) Conventional 3D weakly supervised semantic segmentation solution" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 154, + 188, + 459, + 305 + ], + "lines": [ + { + "bbox": [ + 154, + 188, + 459, + 305 + ], + "spans": [ + { + "bbox": [ + 154, + 188, + 459, + 305 + ], + "type": "image", + "image_path": "a385dd5c806a4d4dd2a4364120e940854498cd0695d3483cb40fe228acc50b59.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 264, + 308, + 358, + 319 + ], + "lines": [ + { + "bbox": [ + 264, + 308, + 358, + 319 + ], + "spans": [ + { + "bbox": [ + 264, + 308, + 358, + 319 + ], + "type": "text", + "content": "(b) Ours (3DSS-VLG)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 130, + 334, + 482, + 399 + ], + "lines": [ + { + "bbox": [ + 130, + 334, + 
482, + 399 + ], + "spans": [ + { + "bbox": [ + 130, + 334, + 482, + 399 + ], + "type": "text", + "content": "Fig. 1: Comparison of different approaches. (a) The conventional 3D WSSS approach adopts the coarse-grained CAM method in a global manner and is supervised by scene-level annotations or subcloud-level annotations. (b) Our proposed 3DSS-VLG approach leverages natural 3D-2D correspondence from geometric camera calibration and 2D-text correspondence from vision-language models, to implicitly align texts and 3D point clouds." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 426, + 482, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 426, + 482, + 604 + ], + "spans": [ + { + "bbox": [ + 130, + 426, + 482, + 604 + ], + "type": "text", + "content": "To address this issue, existing weakly supervised methods derive the segmentation model with different weak supervisory signals, such as subcloud-level annotations [36], scene-level annotations [18, 31, 45] and so on. As shown in Fig. 1 (a), the 3D Weakly Supervised Semantic Segmentation (3D WSSS) approaches typically adopt a Class Activation Map (CAM) [50] solution. Point clouds are first processed by several Multi-Layer Perception (MLP) layers and thus get a point cloud feature map, and then this point cloud feature map is processed by a Global Average Pooling (GAP) to get a global classification prediction, which is trained with subcloud-level or scene-level annotations. Given the simple GAP connectivity structure, these methods can easily identify the importance of each point by projecting back the output classification weight onto the point cloud feature maps, a technique we call CAM. In this way, the semantic segmentation for each category is back-derived from the global prediction. Recently, with the remarkable success of 2D vision, some methods [18, 44] also use the 2D module to enhance the 3D WSSS." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 605, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 482, + 665 + ], + "type": "text", + "content": "Although leveraging 2D-3D fusion in 3D WSSS seems to be promising, there also exist some problems. Kweon et al. [18] need extra detailed annotations of 2D images. As for MIT [44], although it avoids additional per-point/pixel annotations or per-image class labels, its performance is not expected. Therefore, how to design a network that achieves good performance despite the lack of 2D anno" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 216, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 216, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 216, + 100 + ], + "type": "text", + "content": "X. Xu et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 115, + 482, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 115, + 482, + 308 + ], + "spans": [ + { + "bbox": [ + 130, + 115, + 482, + 308 + ], + "type": "text", + "content": "tations still remains a big challenge. Meanwhile, we notice that the conventional methods for 3D WSSS only use the scene-labels or subcloud-labels to supervise the model, but ignore that the textual category labels such as \"chair, table\" also have semantic meanings and could be embedded to help the model learning. At the same time, we also find that some methods [12, 25, 46] like Openseg [12], which leverage the pretrained vision-language models such as CLIP [30] to establish precise semantic matching relationships between natural languages and 2D images, have achieved good results in 2D open vocabulary semantic segmentation (2D OVSS). The above two points inspire us to consider whether we can use the well-pretrained 2D OVSS model to help the 3D WSSS. As shown in Fig. 1 (b), the point cloud and 2D images could be mutually mapped with geometric projections, and the 2D images and textual categories could be compared with pretrained vision-language models. Therefore, why do not we take the 2D images as a bridge, leveraging the correspondences between point clouds and images, images and natural languages, to implicitly build matching relationships between point clouds and natural languages?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 312, + 482, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 312, + 482, + 504 + ], + "spans": [ + { + "bbox": [ + 130, + 312, + 482, + 504 + ], + "type": "text", + "content": "To this end, we propose a simple yet effective method, namely 3DSS-VLG, i.e., a weakly supervised approach for 3D Semantic Segmentation with 2D Vision-Language Guidance. Our 3DSS-VLG only needs to use 2D images, but no need for their 2D image-level annotations during training. Specifically, for the input 3D point cloud, the dataset also provides a set of multi-view images corresponding to it. We first process these multi-view images using the image encoder of the pretrained off-the-shelf 2D OVSS model such as Openseg [12] to get the 2D embeddings. Then, for each point in the 3D point cloud, we project it to the multi-view images with geometric projections, and integrate these corresponding 2D embeddings to get the 2D-projected embeddings for the point. Next, we utilize the text module of the 2D OVSS model to obtain the textual embeddings of each semantic category label. Since in the embedding space of 2D OVSS, the textual category labels and 2D images could be directly compared, we only need to learn a 3D backbone which could generate 3D embeddings aligned with 2D embeddings; thus, the category labels and the 3D point cloud could be implicitly compared." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": "Actually, if the 3D embedding is learned well enough, it can be directly compared with the text embedding by the similarity measurement to classify. 
However, we find that only relying on pulling the 2D-projected embeddings and 3D embeddings closely is not reliable since the pretrained 2D OVSS model are designed to learn the general knowledge and do not have specialized knowledge to the indoor point cloud scene. Therefore, we propose to alleviate this problem by three stages. (1) First, as shown in Fig. 2, we perform matrix multiplication on projected 2D embeddings and text embeddings of category labels and get the classification logits. Then, we use the scene-level labels as mask to filter out some confusing and unreliable predictions in the classification logits and thus get a more reliable pseudo label vector. (2) Second, as shown in Fig. 3 (a), we propose the Embeddings Specialization Stage, which transfers the 2D-projected embeddings with an adapter module to obtain adapted 3D embeddings, and the" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 276, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 276, + 91, + 447, + 102 + ], + "type": "text", + "content": "3DSS with 2D Vision-Language Guidance" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 258 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 258 + ], + "type": "text", + "content": "training of this adapter module will be supervised with the pseudo label vector. This stage is designed to induce a more reliable target 3D embeddings suited for the indoor point cloud scene from the 2D-projected embeddings. (3) Finally, as shown in Fig. 3 (b), we design Embeddings Soft-Guidance Stage, which freezes the adapter module introduced in the second stage and leverages cosine similarity to align the adapted 3D embeddings and the MinkowskiNet [9] 3D embeddings. Combining the above three stages, we can learn a more reliable 3D embedding space for semantic segmentation in indoor point cloud scene. In the inference procedure, we only need to compare the MinkowskiNet 3D embeddings of the point cloud and the text embeddings of the semantic category labels, thus accomplishing the 3D semantic segmentation. Note that we do not need 2D images to participate in the inference process of our model." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 146, + 260, + 430, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 260, + 430, + 272 + ], + "spans": [ + { + "bbox": [ + 146, + 260, + 430, + 272 + ], + "type": "text", + "content": "In summary, the main contributions of this paper are as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 279, + 481, + 483 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 138, + 279, + 481, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 279, + 481, + 339 + ], + "spans": [ + { + "bbox": [ + 138, + 279, + 481, + 339 + ], + "type": "text", + "content": "- We propose a weakly supervised method 3DSS-VLG for 3D WSSS, which takes 2D images as a bridge, and leverages natural 3D-2D correspondence from geometric camera calibration and 2D-text correspondence from vision-language models to implicitly establish the semantic relationships between texts and 3D point clouds." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 339, + 481, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 339, + 481, + 422 + ], + "spans": [ + { + "bbox": [ + 138, + 339, + 481, + 422 + ], + "type": "text", + "content": "- We design a three-stage training procedure to learn a reliable 3D embedding space in 3DSS-VLG for 3D semantic segmentation. Embeddings Specialization Stage is designed to utilize the pretrained 2D vision-language model to provide a embedding space for 3D point cloud representation with MinkowskiNet 3D backbone. Moreover, we propose Embeddings Specialization Stage to make the embedding space to be more robust based on the pseudo label filtering with indoor point cloud scene knowledge." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 423, + 481, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 423, + 481, + 483 + ], + "spans": [ + { + "bbox": [ + 138, + 423, + 481, + 483 + ], + "type": "text", + "content": "- Extensive experiments on the ScanNet and S3DIS dataset show that the proposed 3DSS-VLG significantly outperforms the previous state-of-the-art methods, even Kweon et al. which use extra 2D image-level annotations. Moreover, our further experiments show our 3DSS-VLG has strong generalization capability and can be extended to handle unobserved general datasets." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 501, + 237, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 501, + 237, + 514 + ], + "spans": [ + { + "bbox": [ + 132, + 501, + 237, + 514 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 526, + 388, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 526, + 388, + 538 + ], + "spans": [ + { + "bbox": [ + 132, + 526, + 388, + 538 + ], + "type": "text", + "content": "2.1 2D Open-Vocabulary Semantic Segmentation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "type": "text", + "content": "The recent advances of large vision-language models have enabled a remarkable level of robustness in open-vocabulary semantic segmentation [6,25,39-41]. 
Open vocabulary semantic segmentation aims to segment the target categories that cannot be access during the training procedure. Pioneering work ZS3Net [4] uses generative models to synthesize pixel-level features by word embeddings of unseen classes. SPNet [37] encodes visual features into the semantic embeddings space to align with text embeddings. More recently, researchers propose to leverage the pretrained CLIP [30] for open-vocabulary semantic segmentation. ZSSeg [41] leverages the visual module to generate class-agnostic masks and uses the pretrained text encoder to retrieve the unseen class masks. OpenSeg [12]" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 216, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 216, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 216, + 101 + ], + "type": "text", + "content": "X. Xu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 189 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 189 + ], + "type": "text", + "content": "proposes to align the segment-level visual embeddings with text embeddings via region-word grounding. In this work, we solely rely on pretrained 2D open-vocabulary models and perform 3D weakly supervised semantic segmentation understarnding tasks. We pull the 3D embeddings and 2D embeddings which features exacted from pretrained model back-project onto point cloud closed to implicitly align 3D embeddings and text embeddings." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 131, + 205, + 397, + 217 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 205, + 397, + 217 + ], + "spans": [ + { + "bbox": [ + 131, + 205, + 397, + 217 + ], + "type": "text", + "content": "2.2 3D Weakly Supervised Semantic Segmentation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 223, + 482, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 223, + 482, + 450 + ], + "spans": [ + { + "bbox": [ + 130, + 223, + 482, + 450 + ], + "type": "text", + "content": "This task aims to learn point cloud semantic segmentation using weakly annotated data, such as sparsely labeled points [15, 48], box-level labels [8], subcloud-level labels [36] and scene-level labels [9, 18, 31, 44]. Though the state-of-the-art methods based on sparsely labeled points show performance comparable to that of supervised ones, they require at least partial point-wise annotation in a scene, which is still expensive compared to subcloud-level labels and scene-level labels. The pipeline of the conventional CAM solution has been used in the majority of previous 3D WSSS works and only treats the scene-level labels as one-hot digit. MPRM [36] proposes the subcloud-level annotations method that samples subclouds from the full scene and annotates them, which can alleviate the class imbalance issue commonly appearing in almost scene. 
However, the subcloud-level annotations need to divide the point cloud into small that we need to annotations more than one for a scene, which is too much trouble and time-consuming. Therefore, some methods that use scene-level annotations are proposals for the 3D WSSS. Kweon et al. [18] utilizes 2D and 3D data for semantic segmentation and gets good performance, however, requiring extra 2D image-level labels. MIT [44] proposes the interlaced transformer structure to fuse 2D-3D information with only scene-level labels. However, its performance is not as good as expected." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 451, + 482, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 451, + 482, + 499 + ], + "spans": [ + { + "bbox": [ + 130, + 451, + 482, + 499 + ], + "type": "text", + "content": "Therefore, in this work, we explore a 3D WSSS method with only scene-level labels. Unlike those previous works, we use the semantic meanings of textual category labels to assist in model learning. Moreover, the performance of our 3DSS-VLG is over the Kweon et al., which uses extra 2D image-level labels." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 131, + 515, + 282, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 515, + 282, + 526 + ], + "spans": [ + { + "bbox": [ + 131, + 515, + 282, + 526 + ], + "type": "text", + "content": "2.3 2D Semantic in 3D task" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "type": "text", + "content": "Studies on 3D object detection and semantic segmentation [1,2,5,11,14,20-24] have explored the use of 2D image semantics to assist 3D tasks. There are almost two approaches: concatenating the image embeddings with each point in the 3D scene as extra information [7,17,42,47] or projecting image semantic results into a 3D space to assist 3D semantic segmentation [19,26,38]. Previous studies usually used 2D image semantics as extra inputs in both training and inference. Although performance has improved, the extra 2D inputs have the potential to constrain the range of application scenarios. This is due to the fact that 2D information may be absent during inference or necessitate laborious pre-processing. In this paper, we aim to investigate the potential of using 2D semantics exclusively during training to assist in the 3D WSSS task." 
Fig. 2: The proposed pseudo label generation procedure. We first leverage the text encoder $\varepsilon^{text}$ of OpenSeg to obtain embeddings of the full category labels, $\mathbf{F}^C$, and the 2D image encoder $\varepsilon^{2D}$ of OpenSeg to obtain embeddings of the 2D images, $\mathbf{F}^{2D}$. Note that the whole OpenSeg model is frozen during pseudo label generation. We then back-project the 2D embeddings $\mathbf{F}^{2D}$ to integrate the 2D-projected embeddings $\mathbf{P}^{2D}$.
Specifically, for each point $(x^{3D}, y^{3D}, z^{3D})$ in the point cloud, we use the geometric camera calibration matrices $GCCM^{img}$ to compute the corresponding positions $(x^{2D}, y^{2D})$ on the multi-view images $S$. We then gather the corresponding 2D embeddings from $\mathbf{F}^{2D}$ and average them to obtain the 2D-projected embeddings $\mathbf{P}^{2D}$. We perform matrix multiplication on $\mathbf{F}^C$ and $\mathbf{P}^{2D}$ to obtain the 3D point cloud semantic segmentation prediction logits $\mathbf{L}^{2D}$. Finally, we use the scene-level labels as a mask $\mathbf{M}$ to filter out confusing and unreliable predictions, yielding the more accurate filtered logits $\mathbf{L}^f$ and pseudo labels $\mathbf{Y}$.
3 The Proposed Method

In this section, we first introduce the pseudo label generation procedure in Sec. 3.1. We then describe the training procedure of our 3DSS-VLG in Sec. 3.2 and Sec. 3.3. Finally, we describe the inference procedure in Sec. 3.4.

3.1 Pseudo Label Generation Stage

This stage utilizes the pretrained vision-language model and the scene-level labels to generate more precise pseudo labels. Given an input point cloud with multi-view images, as shown in Fig. 2, we first extract dense 2D embeddings for each RGB image via the frozen visual encoder of OpenSeg [12] and back-project them onto the 3D surface points of the scene to integrate the 2D-projected embeddings. Afterward, more accurate pseudo labels are generated based on the 2D-projected embeddings, the text embeddings and the scene-level labels.
2D Embeddings Extraction. The inputs of 3DSS-VLG comprise a scene with a 3D point cloud, scene-level labels and the associated multi-view RGB image set. The RGB image set $S$ consists of $T$ images with a resolution of $H\times W$. The point cloud $\mathbf{X}\in \mathbb{R}^{N\times 6}$ contains $N$ points, each represented by six dimensions (RGB and XYZ). We leverage the pretrained image encoder of OpenSeg [12] to obtain per-pixel embeddings, denoted as $\mathbf{F}^{2D}\in \mathbb{R}^{T\times H\times W\times d}$, where $d$ is the 2D embedding dimension. For each point in the 3D point cloud, we project it onto the multi-view images through the geometric camera calibration matrices and obtain the corresponding 2D positions. We then extract the corresponding projected 2D embeddings from $\mathbf{F}^{2D}$ according to the calculated 2D image positions. Since each point may have multiple correspondences in different images, the final 2D-projected embedding $\mathbf{P}^{2D}\in \mathbb{R}^{N\times d}$ is obtained by averaging all corresponding projected 2D embeddings of each point.
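To make the back-projection concrete, here is a minimal PyTorch sketch (not the authors' released code) of how per-point 2D-projected embeddings could be accumulated and averaged over views; the pinhole projection, the tensor names, and the visibility check are illustrative assumptions.

```python
import torch

def project_points(points_xyz, intrinsics, world2cam, H, W):
    """Project N world-space points into one image; return integer pixel coords and a validity mask.
    A simple pinhole model is assumed here purely for illustration."""
    N = points_xyz.shape[0]
    homo = torch.cat([points_xyz, torch.ones(N, 1)], dim=1)        # (N, 4) homogeneous coords
    cam = (world2cam @ homo.T).T[:, :3]                            # (N, 3) camera-space coords
    uv = (intrinsics @ cam.T).T                                    # (N, 3)
    uv = uv[:, :2] / uv[:, 2:3].clamp(min=1e-6)                    # perspective divide
    valid = (cam[:, 2] > 0) & (uv[:, 0] >= 0) & (uv[:, 0] < W) & (uv[:, 1] >= 0) & (uv[:, 1] < H)
    return uv.long(), valid

def lift_2d_embeddings(points_xyz, feats_2d, intrinsics, world2cams):
    """feats_2d: (T, H, W, d) frozen OpenSeg per-pixel embeddings; returns P2D of shape (N, d)."""
    T, H, W, d = feats_2d.shape
    acc = torch.zeros(points_xyz.shape[0], d)
    cnt = torch.zeros(points_xyz.shape[0], 1)
    for t in range(T):                                             # loop over the multi-view images
        uv, valid = project_points(points_xyz, intrinsics[t], world2cams[t], H, W)
        acc[valid] += feats_2d[t, uv[valid, 1], uv[valid, 0]]      # gather the hit pixels' embeddings
        cnt[valid] += 1
    return acc / cnt.clamp(min=1)                                  # average over all correspondences
```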
Text Embeddings Extraction. We use the text encoder of OpenSeg to extract the text embeddings $\mathbf{F}^C\in \mathbb{R}^{K\times d}$ of the full category labels, where $K$ denotes the number of categories. Similarly, we freeze the text encoder and directly load the pretrained OpenSeg parameters.

Filtering Strategy. After obtaining the 2D-projected embeddings $\mathbf{P}^{2D}$ and the text embeddings $\mathbf{F}^C$, we perform matrix multiplication on them and obtain the classification logits $\mathbf{L}^{2D} \in \mathbb{R}^{N \times K}$. To make these logits more reliable, a filtering strategy is employed to remove confusing and unreliable predictions. As shown in Fig. 2, we create a boolean scene-level label mask $\mathbf{M} \in \mathbb{R}^{1 \times K}$, where each element indicates whether the corresponding category exists in the scene. We then apply the scene-level label mask $\mathbf{M}$ to the classification logits $\mathbf{L}^{2D}$ and obtain the filtered classification logits $\mathbf{L}^f \in \mathbb{R}^{N \times K}$. After ranking the filtered logits $\mathbf{L}^f$, we obtain the more precise pseudo labels $\mathbf{Y} \in \mathbb{R}^N$ of the input point cloud.
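As a rough PyTorch sketch of this filtering strategy (toy shapes; the `-inf` masking is an extra safeguard we add so that absent categories can never be selected, not something stated in the text):

```python
import torch

def generate_pseudo_labels(P2D, FC, scene_mask):
    """P2D: (N, d) 2D-projected embeddings; FC: (K, d) category text embeddings;
    scene_mask: (K,) 0/1 vector marking the categories present in the scene-level label."""
    L2D = P2D @ FC.T                                       # (N, K) classification logits
    Lf = L2D * scene_mask                                  # filter with the scene-level label mask
    Lf = Lf.masked_fill(scene_mask == 0, float("-inf"))    # safeguard: absent classes cannot win
    Y = Lf.argmax(dim=1)                                   # (N,) pseudo labels by ranking the logits
    return Lf, Y

# Toy usage with made-up sizes.
N, K, d = 1000, 20, 768
P2D, FC = torch.randn(N, d), torch.randn(K, d)
scene_mask = torch.zeros(K)
scene_mask[[0, 3, 5]] = 1.0                                # e.g. only three categories in this scene
Lf, Y = generate_pseudo_labels(P2D, FC, scene_mask)
```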
3.2 Embeddings Specialization Stage

The 2D OVSS model is designed to learn general knowledge and has no specialized knowledge of indoor point cloud scenes. Therefore, relying only on the 2D embeddings to build the 3D-text correlation makes the 3D WSSS process unreliable. To mitigate this issue, the Embeddings Specialization Stage is proposed to improve the indoor-scene awareness of the 3D embeddings.

Specifically, the 2D-projected embeddings $\mathbf{P}^{2D}$ are transferred into another space through an adapter module, which simply contains two fully-connected layers. To keep both the source and the adapted semantics, we employ a residual connection to obtain the adapted 3D embeddings $\mathbf{A}^{3D} \in \mathbb{R}^{N \times d}$:

$$\mathbf{A}^{3D} = \alpha \cdot MLP(\mathbf{P}^{2D}) + (1 - \alpha) \cdot \mathbf{P}^{2D}, \tag{1}$$

where $\alpha$ is the ratio of the residual connection.
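A minimal PyTorch sketch of the adapter in Eq. (1); the hidden width, the ReLU activation and the default $\alpha$ are assumptions, since the text only specifies two fully-connected layers and a residual ratio.

```python
import torch
import torch.nn as nn

class Adapter(nn.Module):
    """Maps 2D-projected embeddings P2D (N, d) to adapted 3D embeddings A3D (N, d) as in Eq. (1)."""
    def __init__(self, d, hidden=256, alpha=0.5):
        super().__init__()
        self.alpha = alpha                                  # residual ratio alpha in Eq. (1)
        self.mlp = nn.Sequential(                           # the "two fully-connected layers"
            nn.Linear(d, hidden),
            nn.ReLU(inplace=True),
            nn.Linear(hidden, d),
        )

    def forward(self, P2D):
        return self.alpha * self.mlp(P2D) + (1.0 - self.alpha) * P2D

A3D = Adapter(d=768)(torch.randn(4096, 768))                # toy (N, d) adapted embeddings
```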
Next, we perform matrix multiplication on the text embeddings $\mathbf{F}^C$ and the adapted 3D embeddings $\mathbf{A}^{3D}$ and obtain the classification logits $\mathbf{L}^a\in \mathbb{R}^{N\times K}$.

Fig. 3: The training procedure of our proposed 3DSS-VLG, which is divided into two stages: (a) the Embeddings Specialization Stage and (b) the Embeddings Soft-Guidance Stage. For (a), we first utilize the text encoder $\varepsilon^{text}$ of OpenSeg to obtain embeddings of the category labels, $\mathbf{F}^C$, which are frozen during the training of (a).
Meanwhile, we obtain the initial 2D-projected embeddings $\mathbf{P}^{2D}$ from the 2D module and leverage the adapter module to transfer $\mathbf{P}^{2D}$ into a new embedding space, yielding the adapted 3D embeddings $\mathbf{A}^{3D}$. We perform matrix multiplication on $\mathbf{A}^{3D}$ and $\mathbf{F}^C$ and get the predicted probability $\mathbf{L}^a$. Finally, we use the pseudo labels $\mathbf{Y}$ to supervise the model; the green dashed lines denote back-propagation of the loss $\mathcal{L}_a$. For (b), we first utilize the adapter module to obtain the adapted 3D embeddings $\mathbf{A}^{3D}$. Note that the adapter module is frozen during the training of (b). Meanwhile, we use the 3D module $\varepsilon^{3D}$ to obtain the 3D embeddings $\mathbf{F}^{3D}$. The cosine similarity loss $\mathcal{L}_s$ is used to train the model; the red dashed lines denote back-propagation of the loss $\mathcal{L}_s$.
The softmax layer is applied to $\mathbf{L}^a$, and a classification cross-entropy loss $\mathcal{L}_a$ is introduced to supervise this procedure. Here we leverage the pseudo labels $\mathbf{Y}$ of the point cloud to supervise the model.

By introducing this simple adapter module, the learned adapted embeddings gain better semantic awareness of indoor point clouds, which assists the 3D WSSS task.
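Putting the pieces of this stage together, one training step could look like the following sketch (the `Adapter` is the module sketched after Eq. (1); the optimizer choice and tensor names are assumptions):

```python
import torch
import torch.nn.functional as F

def specialization_step(adapter, optimizer, P2D, FC, Y):
    """One Embeddings Specialization step.
    P2D: (N, d) 2D-projected embeddings; FC: (K, d) frozen text embeddings;
    Y:   (N,) pseudo labels from the Pseudo Label Generation Stage."""
    A3D = adapter(P2D)                      # adapted 3D embeddings, Eq. (1)
    La = A3D @ FC.T                         # (N, K) classification logits
    loss = F.cross_entropy(La, Y)           # softmax + cross-entropy against the pseudo labels
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
```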
3.3 Embeddings Soft-Guidance Stage

Since OpenSeg has established a high level of semantic alignment between 2D embeddings and text embeddings, we propose the Embeddings Soft-Guidance Stage, which naturally takes the 2D embeddings as a bridge to implicitly align the 3D embeddings and the text embeddings via cosine similarity. Specifically, as shown in Fig. 3 (b), we take the point cloud $\mathbf{X}$ as input and use a MinkowskiNet18A UNet [9] as our 3D module, changing the dimension of its outputs to $d$. We thus obtain the learned 3D embeddings $\mathbf{F}^{3D} \in \mathbb{R}^{N \times d}$. We then take the corresponding 2D-projected embeddings $\mathbf{P}^{2D}$ as input, process them with the adapter module, and get the adapted 3D embeddings $\mathbf{A}^{3D}$. Following the typical cosine similarity loss, we pull the paired 3D embeddings $\mathbf{F}^{3D}$ and adapted 3D embeddings $\mathbf{A}^{3D}$ closer. Note that we freeze the adapter module and directly load the parameters obtained in Sec. 3.2 during training. The 3DSS-VLG loss is therefore defined as:

$$\mathcal{L}_{s} = 1 - \cos(\mathbf{F}^{3D}, \mathbf{A}^{3D}). \tag{2}$$
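Eq. (2) is a per-point cosine distance between the 3D backbone's embeddings and the frozen adapted embeddings; averaging over the points, as in the sketch below, is our assumption about how it is reduced to a scalar.

```python
import torch
import torch.nn.functional as F

def soft_guidance_loss(F3D, A3D):
    """F3D: (N, d) embeddings from the 3D module; A3D: (N, d) frozen adapted 3D embeddings."""
    return (1.0 - F.cosine_similarity(F3D, A3D, dim=-1)).mean()   # Eq. (2), averaged over points

loss = soft_guidance_loss(torch.randn(4096, 768), torch.randn(4096, 768))
```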
3.4 Inference

During inference, we only retain the 3D and text modules and remove the 2D module. Specifically, we take the 3D embeddings $\mathbf{F}^{3D}$ from the 3D module and the category embeddings $\mathbf{F}^C$ from the text module, perform matrix multiplication on them, and get the classification logits. Finally, we rank the logits and obtain the final per-point segmentation of the input point cloud $\mathbf{X}$.
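Inference therefore reduces to a similarity lookup between the learned 3D embeddings and the category text embeddings; a sketch, assuming both are already computed:

```python
import torch

@torch.no_grad()
def segment(F3D, FC):
    """F3D: (N, d) 3D embeddings from the 3D module; FC: (K, d) category text embeddings.
    Returns per-point class indices; no 2D module is involved at inference time."""
    logits = F3D @ FC.T            # (N, K) classification logits
    return logits.argmax(dim=1)    # rank the logits and keep the best category per point
```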
4 Experiments

In this section, we first present our experimental settings, including datasets, evaluation metrics, and implementation details. The competing methods are then presented and compared. Finally, ablation studies further demonstrate the necessity and effectiveness of each component of our framework.

4.1 Datasets and Evaluation Metrics

We evaluate our 3DSS-VLG on two public and widely used large-scale point cloud datasets with multi-view images, S3DIS [3] and ScanNet [10]. S3DIS is proposed for indoor scene understanding. It consists of 6 areas comprising 271 rooms with 13 classes. Each room is scanned with RGB-D sensors and is represented by a point cloud with 3D coordinates and RGB values. Following previous works, we take Area 5 as the test scene. ScanNet [10] has 1513 training scenes and 100 test scenes with 20 classes. We adopt the default train-val split, with 1201 training scenes and 312 validation scenes. The mean Intersection over Union (mIoU) is used as the evaluation metric for both datasets.

4.2 Implementation Details

3DSS-VLG is implemented in PyTorch. For the training procedure of Sec. 3.2, we use the Adam optimizer with a batch size of 16 and an initial learning rate of 0.003. We reduce the learning rate by a factor of 0.7 every 20 epochs for a total of 80 epochs. For the training procedure of Sec. 3.3, the model is optimized with the Adam optimizer with a batch size of 8. We set an initial learning rate of 0.0001 and use the poly learning rate policy to adjust it.
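The two schedules could be configured roughly as follows (a sketch only: the stand-in modules, the number of epochs for the second stage, and the poly power are assumptions not given in the text):

```python
import torch.nn as nn
from torch.optim import Adam
from torch.optim.lr_scheduler import StepLR, LambdaLR

adapter = nn.Linear(768, 768)       # stand-in for the adapter module (Sec. 3.2)
model_3d = nn.Linear(768, 768)      # stand-in for the MinkowskiNet18A UNet 3D module (Sec. 3.3)

# Sec. 3.2: Adam, batch size 16, lr 0.003, multiplied by 0.7 every 20 epochs, 80 epochs in total.
adapter_opt = Adam(adapter.parameters(), lr=0.003)
adapter_sched = StepLR(adapter_opt, step_size=20, gamma=0.7)

# Sec. 3.3: Adam, batch size 8, lr 0.0001, poly learning-rate policy.
total_epochs, power = 80, 0.9       # both values are assumptions; the text does not specify them
model_opt = Adam(model_3d.parameters(), lr=0.0001)
model_sched = LambdaLR(model_opt, lambda e: (1.0 - e / total_epochs) ** power)
```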
Table 1: Performance comparison on the S3DIS dataset. "Sup." indicates the type of supervision. "100%" represents full annotation. "scene." denotes scene-level annotation.

| Method | Label Effort | Sup. | Test |
|---|---|---|---|
| PointNet [27] | | 100% | 41.1 |
| TangentConv [33] | | 100% | 52.8 |
| MinkowskiNet [9] | | 100% | 65.8 |
| KPConv [34] | >20 min | 100% | 67.1 |
| PointTransformer [49] | | 100% | 70.4 |
| PointNeXt [29] | | 100% | 70.5 |
| DeepViewAgg [32] | | 100% | 67.2 |
| SemAffiNet [35] | | 100% | 71.6 |
| MPRM [36] | | scene. | 10.3 |
| MIL-Trans [45] | | scene. | 12.9 |
| WYPR [31] | <1 min | scene. | 22.3 |
| MIT [44] | | scene. | 27.7 |
| Ours | | scene. | 45.3 |
", + "image_path": "ef32bdc2fee69d13f977185257f28a76eb90f8c4098e84c0a15b520af25b988b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 335, + 338, + 348 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 335, + 338, + 348 + ], + "spans": [ + { + "bbox": [ + 132, + 335, + 338, + 348 + ], + "type": "text", + "content": "4.3 3D Semantic Segmentation Results" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 354, + 482, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 354, + 482, + 437 + ], + "spans": [ + { + "bbox": [ + 130, + 354, + 482, + 437 + ], + "type": "text", + "content": "We evaluate our proposed approach against state-of-the-art techniques for 3D weakly supervised semantic segmentation with scene-level labels. Firstly, we demonstrate some full supervised point cloud semantic segmentation methods to compare the gap between the performances of ours and full supervised methods. Subsequently, we introduce semantic segmentation methods supervised by scene-level labels or subcloud-level labels and compare them with our method. Meanwhile, we indicate the average annotation time per scene." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 438, + 482, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 438, + 482, + 558 + ], + "spans": [ + { + "bbox": [ + 130, + 438, + 482, + 558 + ], + "type": "text", + "content": "Evaluation on S3DIS. Tab. 1 Show the performance of each type of 3D point cloud semantic segmentation methods evaluated on the S3DIS dataset. We can find that in the scene-level annotations setting, our method greatly surpasses the existing state-of-the-art method MIT [44] by " + }, + { + "bbox": [ + 130, + 438, + 482, + 558 + ], + "type": "inline_equation", + "content": "17.6\\%" + }, + { + "bbox": [ + 130, + 438, + 482, + 558 + ], + "type": "text", + "content": ". This shows that using textual semantic information ignored by previous 3D weakly supervised semantic segmentation can significantly improve segmentation performance. The textual semantic information of each category is unique; then the 2D embeddings and 3D embeddings are aligned so that the 3D embeddings can be implicitly aligned to the corresponding unique category semantic information, which allows the model to achieve greater performance improvements." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 558, + 482, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 558, + 482, + 629 + ], + "spans": [ + { + "bbox": [ + 130, + 558, + 482, + 629 + ], + "type": "text", + "content": "Meanwhile, we compare our method with some full supervised methods. It can be observed that our 3DSS-VLG can outperform some fully supervised methods, i.e., PointNet [27]. Moreover, we notice that the annotations cost time of different types of supervision and find that the scene-level annotation is the most efficient compared to other types annotations. Such results demonstrate the effectiveness and potential of our weakly supervised method." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "type": "text", + "content": "Evaluation on ScanNet. 
Evaluation on ScanNet. We also evaluate our 3DSS-VLG on the ScanNet online test set and validation set and present the results in Tab. 2.

Table 2: Performance comparison on the ScanNet test set and validation set. "Sup." indicates the type of supervision. "100%" represents full annotation. "subcloud." and "scene." denote subcloud-level and scene-level annotation, respectively. "image." denotes image-level annotation.
| Method | Label Effort | Sup. | Test | Val |
|---|---|---|---|---|
| PointNet++ [28] | | 100% | 33.9 | - |
| TangentConv [33] | | 100% | 43.8 | - |
| MinkowskiNet [9] | | 100% | 73.6 | 72.2 |
| KPConv [34] | >20 min | 100% | 68.6 | 69.2 |
| PointTransformer [49] | | 100% | - | 70.6 |
| PointNeXt [29] | | 100% | 71.2 | 71.5 |
| DeepViewAgg [32] | | 100% | - | 71.0 |
| SemAffiNet [35] | | 100% | 74.9 | - |
| MPRM [36] | 3 min | subcloud. | 41.1 | 43.2 |
| Kweon et al. [18] | 5 min | scene. + image. | 47.4 | 49.6 |
| MIL-Trans [45] | | scene. | - | 26.2 |
| WYPR [31] | <1 min | scene. | 24.0 | 29.6 |
| MIT [44] | | scene. | 31.7 | 35.8 |
| Ours | | scene. | 48.9 | 49.7 |
", + "image_path": "7b8dc03c7c4f20306c4427839c62faed6b9581781800cd2cfc7eb81c899f36dc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "spans": [ + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "text", + "content": "achieves the best performance under only scene-level label supervision and even surpasses the performance of MPRM [36] which is supervised by subcloud-level annotations. Moreover, we are surprised to find that our method also outperforms Kweon et al. [18] by " + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "inline_equation", + "content": "1.5\\%" + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "text", + "content": ", which uses not only scene-level labels, but also extra image-level labels. Our method can achieve stronger performance with less annotations, further illustrating the superiority of our method. Meanwhile, our 3DSS-VLG can outperform some fully supervised methods. In addition for the validation set, our method also achieves the state-of-the-art during those 3D WSSS approaches. Those results demonstrate the superiority of 3DSS-VLG." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 499, + 246, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 499, + 246, + 510 + ], + "spans": [ + { + "bbox": [ + 132, + 499, + 246, + 510 + ], + "type": "text", + "content": "4.4 Ablation Studies" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 521, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 521, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 521, + 482, + 666 + ], + "type": "text", + "content": "Effectiveness of Each Components. To demonstrate the advantage of each component in our 3DSS-VLG, we conduct comprehensive ablation studies on the S3DIS dataset, as shown in Tab. 3. The ablation model (a) only retains the MinkowskiNet18A UNet [9] and trains directly with the pseudo labels which are generated without using scene-level labels filtering. The cross-entropy loss is introduced to supervised this procedure. We set model (a) as the baseline of our experiment. Compared to model (a), model (b) is not directly supervised by pseudo labels. It adopts the Embeddings Soft-Guidance Stage (ESGS) and is soft-guided by the 2D-projected embeddings " + }, + { + "bbox": [ + 130, + 521, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{P}^{2D}" + }, + { + "bbox": [ + 130, + 521, + 482, + 666 + ], + "type": "text", + "content": ". We can find that the performance of mIoU is improved from " + }, + { + "bbox": [ + 130, + 521, + 482, + 666 + ], + "type": "inline_equation", + "content": "37.7\\%" + }, + { + "bbox": [ + 130, + 521, + 482, + 666 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 130, + 521, + 482, + 666 + ], + "type": "inline_equation", + "content": "38.2\\%" + }, + { + "bbox": [ + 130, + 521, + 482, + 666 + ], + "type": "text", + "content": ". 
This observation shows that the soft-guidance strategy can guide the 3D embeddings to align with the text embeddings and achieves better performance than directly supervising the 3D model with the pseudo labels.

Table 3: Ablation studies of the 3DSS-VLG components on the S3DIS dataset.
| | ESGS | Filtering | ESS | mIoU |
|---|---|---|---|---|
| (a) | | | | 37.7 |
| (b) | ✓ | | | 38.2 |
| (c) | | ✓ | | 42.6 |
| (d) | ✓ | ✓ | ✓ | 45.3 |
", + "image_path": "25077e9f2bc5257b5e926be0f08ef7c52f52267af0beafd7a9cf6702ae9fa686.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 241, + 236, + 369, + 273 + ], + "blocks": [ + { + "bbox": [ + 168, + 215, + 444, + 226 + ], + "lines": [ + { + "bbox": [ + 168, + 215, + 444, + 226 + ], + "spans": [ + { + "bbox": [ + 168, + 215, + 444, + 226 + ], + "type": "text", + "content": "Table 4: Performance comparisons of the generalization capability." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 241, + 236, + 369, + 273 + ], + "lines": [ + { + "bbox": [ + 241, + 236, + 369, + 273 + ], + "spans": [ + { + "bbox": [ + 241, + 236, + 369, + 273 + ], + "type": "table", + "html": "
| Domain | mIoU | mAcc |
|---|---|---|
| S3DIS → ScanNet | 13.4 | 23.0 |
| ScanNet → S3DIS | 33.3 | 50.9 |
", + "image_path": "1f89be49054d1ccd0950d54c69db70cb98e12721cb750c971d571a9764023edd.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 306, + 480, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 306, + 480, + 402 + ], + "spans": [ + { + "bbox": [ + 130, + 306, + 480, + 402 + ], + "type": "text", + "content": "labels to supervised 3D model. Meanwhile, when we introduce the filtering strategy to model (a), as shown in model (c), we can find that the model performance increases greatly from " + }, + { + "bbox": [ + 130, + 306, + 480, + 402 + ], + "type": "inline_equation", + "content": "37.7\\%" + }, + { + "bbox": [ + 130, + 306, + 480, + 402 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 130, + 306, + 480, + 402 + ], + "type": "inline_equation", + "content": "42.6\\%" + }, + { + "bbox": [ + 130, + 306, + 480, + 402 + ], + "type": "text", + "content": ". Finally, by adding the filtering strategy to model (b) and utilizing the Embeddings Specialization Stage (ESS), model (d) is supervised by adapted 3D embeddings " + }, + { + "bbox": [ + 130, + 306, + 480, + 402 + ], + "type": "inline_equation", + "content": "\\mathbf{A}^{3D}" + }, + { + "bbox": [ + 130, + 306, + 480, + 402 + ], + "type": "text", + "content": " at this time. It can be observed the performance improves from " + }, + { + "bbox": [ + 130, + 306, + 480, + 402 + ], + "type": "inline_equation", + "content": "38.2\\%" + }, + { + "bbox": [ + 130, + 306, + 480, + 402 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 130, + 306, + 480, + 402 + ], + "type": "inline_equation", + "content": "45.3\\%" + }, + { + "bbox": [ + 130, + 306, + 480, + 402 + ], + "type": "text", + "content": ". Such results prove that our 3DSS-VLG can help the model to get a better, indoor point cloud specific embedding space to align 3D point clouds and text." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 402, + 481, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 402, + 481, + 497 + ], + "spans": [ + { + "bbox": [ + 130, + 402, + 481, + 497 + ], + "type": "text", + "content": "Generalization Capability. Due to the domain gap among different datasets, a model trained on one dataset is not applicable to another dataset. Also, this situation occurs in the 3D WSSS task. Nevertheless, we notice that, compared to previous works, our 3DSS-VLG uses textual semantic information as a guide rather than CAM, which means our model has a good relationship between 3D point cloud and the text of category labels and indicates that the model may have generalization ability. Therefore, we further explore our framework to the novel data of the unobserved scene domains." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 498, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 498, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 498, + 482, + 666 + ], + "type": "text", + "content": "As shown in Tab. 4, we experimentally verify the generalizability of the proposed method on the S3DIS and ScanNet dataset, respectively. The first row is the performance of model that we first train our model on the S3DIS dataset and then test the trained model on validation set of the ScanNet dataset. The second row is the performance of model that we first train our model on the ScanNet dataset and then test the trained model on the test set of the S3DIS dataset. 
Compared to the weakly supervised methods with scene-level labels, our 3DSS-VLG still shows a certain gap in the first row. However, in the second row, we are surprised to find that our method outperforms all the weakly supervised methods and achieves state-of-the-art performance. The ScanNet dataset provides six times more training scenes than the S3DIS dataset; therefore, a model pretrained on ScanNet is more robust than one pretrained on S3DIS. Our experimental results confirm this.

Table 5: Performance comparison with different 3D backbones and ESS module backbones on the S3DIS dataset.
| Module | Backbone | mIoU |
|---|---|---|
| 3D | MinkowskiNet14A | 44.5 |
| | MinkowskiNet18A | 45.3 |
| | MinkowskiNet34A | 44.7 |
| ESS | Transformer | 45.0 |
| | MLP | 45.3 |
", + "image_path": "f720567dea407a08200f5aa330d44149db4719e90d6f0a1b772122b74289b7f2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 249, + 479, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 249, + 479, + 307 + ], + "spans": [ + { + "bbox": [ + 130, + 249, + 479, + 307 + ], + "type": "text", + "content": "The results also strongly support the complementary advantages of using text semantic information, even without any further fine-tuning or domain-specific adaptation. Our 3DSS-VLG can be extended to handle unobserved general data and has strong generalization capability, which is promising for the field of 3D WSSS." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 308, + 480, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 308, + 480, + 355 + ], + "spans": [ + { + "bbox": [ + 130, + 308, + 480, + 355 + ], + "type": "text", + "content": "Experiments with Different Backbones. Tab. 5 shows the performances of our method on S3DIS with different 3D backbones and ESS module backbones. Finally, we use the MinkowskiNet18A as our 3D backbone and the FC-layer as the backbone of our ESS." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 373, + 257, + 385 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 373, + 257, + 385 + ], + "spans": [ + { + "bbox": [ + 132, + 373, + 257, + 385 + ], + "type": "text", + "content": "4.5 Qualitative Results" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 393, + 480, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 393, + 480, + 477 + ], + "spans": [ + { + "bbox": [ + 130, + 393, + 480, + 477 + ], + "type": "text", + "content": "Fig. 4 visualizes the qualitative comparison of the proposed framework and baseline. Here the baseline is model (a) which is mentioned in Sec. 4.4. Compared with the result of baseline, our 3DSS-VLG shows significantly better results in the terms of accuracy of semantics and preciseness of segmentation. With the ESGS, ESS and filtering strategies, our 3DSS-VLG can learn a more better indoor point cloud specific embedding space to align 3D point clouds and text and achieve substantial semantic segmentation results compared to the baseline." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 495, + 218, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 495, + 218, + 506 + ], + "spans": [ + { + "bbox": [ + 132, + 495, + 218, + 506 + ], + "type": "text", + "content": "4.6 Limitations" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 514, + 480, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 514, + 480, + 574 + ], + "spans": [ + { + "bbox": [ + 130, + 514, + 480, + 574 + ], + "type": "text", + "content": "Our work relies on vision-language alignment and does not address how to align visual embeddings with some abstract category text embeddings (e.g. \"other\" class in the S3DIS dataset). It is difficult for the model to understand what the difference is between the \"other\" class and other categories, thus making the wrong segmentation. This limitation is a direct avenue for future work." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 592, + 220, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 592, + 220, + 605 + ], + "spans": [ + { + "bbox": [ + 132, + 592, + 220, + 605 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 617, + 480, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 617, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 617, + 480, + 665 + ], + "type": "text", + "content": "In this paper, we propose 3DSS-VLG to address the shortage of point-level annotations. Specifically, our 3DSS-VLG exploits the superior ability of current vision-language models in aligning the semantics between text and 2D images, as well as the naturally existing correspondences between 2D images and 3D" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 276, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 276, + 91, + 447, + 102 + ], + "type": "text", + "content": "3DSS with 2D Vision-Language Guidance" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 134, + 114, + 220, + 184 + ], + "blocks": [ + { + "bbox": [ + 134, + 114, + 220, + 184 + ], + "lines": [ + { + "bbox": [ + 134, + 114, + 220, + 184 + ], + "spans": [ + { + "bbox": [ + 134, + 114, + 220, + 184 + ], + "type": "image", + "image_path": "7711442115bf0b7100fc52c012e0cbb31e8c0b6a0f35b6e29a7b2c7b6f85bda8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 134, + 185, + 220, + 254 + ], + "blocks": [ + { + "bbox": [ + 134, + 185, + 220, + 254 + ], + "lines": [ + { + "bbox": [ + 134, + 185, + 220, + 254 + ], + "spans": [ + { + "bbox": [ + 134, + 185, + 220, + 254 + ], + "type": "image", + "image_path": "d9aec7f4432e4773f889b7534d14005610e8123d2fb060365f46b379b3c2685c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 135, + 255, + 219, + 323 + ], + "blocks": [ + { + "bbox": [ + 135, + 255, + 219, + 323 + ], + "lines": [ + { + "bbox": [ + 135, + 255, + 219, + 323 + ], + "spans": [ + { + "bbox": [ + 135, + 255, + 219, + 323 + ], + "type": "image", + "image_path": "fce4d96ba0cf08ca7b95010740b5c59f3df176c796a5fb184c6e031f00b00d1c.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 135, + 324, + 219, + 396 + ], + "blocks": [ + { + "bbox": [ + 135, + 324, + 219, + 396 + ], + "lines": [ + { + "bbox": [ + 135, + 324, + 219, + 396 + ], + "spans": [ + { + "bbox": [ + 135, + 324, + 219, + 396 + ], + "type": "image", + "image_path": "79f24e6eee301eb1cd812bf0ad0f4732a99d275e7d7ccc5f62b0de21671965da.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 154, + 399, + 176, + 407 + ], + "lines": [ + { + "bbox": [ + 154, + 399, + 176, + 407 + ], + "spans": [ + { + "bbox": [ + 154, + 399, + 176, + 407 + ],
"type": "text", + "content": "ceiling" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 137, + 398, + 152, + 408 + ], + "blocks": [ + { + "bbox": [ + 137, + 398, + 152, + 408 + ], + "lines": [ + { + "bbox": [ + 137, + 398, + 152, + 408 + ], + "spans": [ + { + "bbox": [ + 137, + 398, + 152, + 408 + ], + "type": "image", + "image_path": "46e6857063ccdc2fb62eadfbbb68375749eb1823c4e77169d9ba5fce75b3b2c8.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 154, + 410, + 170, + 417 + ], + "lines": [ + { + "bbox": [ + 154, + 410, + 170, + 417 + ], + "spans": [ + { + "bbox": [ + 154, + 410, + 170, + 417 + ], + "type": "text", + "content": "floor" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 137, + 409, + 152, + 417 + ], + "blocks": [ + { + "bbox": [ + 137, + 409, + 152, + 417 + ], + "lines": [ + { + "bbox": [ + 137, + 409, + 152, + 417 + ], + "spans": [ + { + "bbox": [ + 137, + 409, + 152, + 417 + ], + "type": "image", + "image_path": "3979f86eb1ddb19df89086a146ebf811d19805ac3efee289ca3770fcc5c7df74.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 429, + 482, + 460 + ], + "lines": [ + { + "bbox": [ + 131, + 429, + 482, + 460 + ], + "spans": [ + { + "bbox": [ + 131, + 429, + 482, + 460 + ], + "type": "text", + "content": "Fig. 4: Qualitative results on the S3DIS dataset of baseline and our 3DSS-VLG. From left to right: input point clouds, ground truth, baseline results, and our 3DSS-VLG results." + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 187, + 399, + 202, + 408 + ], + "blocks": [ + { + "bbox": [ + 187, + 399, + 202, + 408 + ], + "lines": [ + { + "bbox": [ + 187, + 399, + 202, + 408 + ], + "spans": [ + { + "bbox": [ + 187, + 399, + 202, + 408 + ], + "type": "image", + "image_path": "08274ca5e25499cc99dc785e9c49a7f7777e280a92c6b527dfe8c374d3ba202b.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 187, + 410, + 207, + 417 + ], + "lines": [ + { + "bbox": [ + 187, + 410, + 207, + 417 + ], + "spans": [ + { + "bbox": [ + 187, + 410, + 207, + 417 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 204, + 399, + 218, + 408 + ], + "blocks": [ + { + "bbox": [ + 204, + 399, + 218, + 408 + ], + "lines": [ + { + "bbox": [ + 204, + 399, + 218, + 408 + ], + "spans": [ + { + "bbox": [ + 204, + 399, + 218, + 408 + ], + "type": "image", + "image_path": "be711ce0b4db8bf93e4ad957edcba20f8405c53aa30f1a87ee90ffd847ff4886.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 203, + 410, + 223, + 417 + ], + "lines": [ + { + "bbox": [ + 203, + 410, + 223, + 417 + ], + "spans": [ + { + "bbox": [ + 203, + 410, + 223, + 417 + ], + "type": "text", + "content": "beam" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 235, + 399, + 244, + 407 + ], + "blocks": [ + { + "bbox": [ + 235, + 399, + 244, + 407 + ], + "lines": [ + { + "bbox": [ + 235, + 399, + 244, + 407 + ], + "spans": [ + { + "bbox": [ + 235, + 399, + 244, + 407 + ], + "type": "image", + "image_path": 
"78f5fdbd80c29db14024c80ad273e46940226d7df2a95d446389ced3a83268e9.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 235, + 409, + 277, + 416 + ], + "lines": [ + { + "bbox": [ + 235, + 409, + 277, + 416 + ], + "spans": [ + { + "bbox": [ + 235, + 409, + 277, + 416 + ], + "type": "text", + "content": "window" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 251, + 409, + 277, + 416 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 251, + 409, + 277, + 416 + ], + "spans": [ + { + "bbox": [ + 251, + 409, + 277, + 416 + ], + "type": "text", + "content": "window" + } + ] + } + ], + "index": 17, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 289, + 399, + 304, + 407 + ], + "blocks": [ + { + "bbox": [ + 251, + 399, + 276, + 407 + ], + "lines": [ + { + "bbox": [ + 251, + 399, + 276, + 407 + ], + "spans": [ + { + "bbox": [ + 251, + 399, + 276, + 407 + ], + "type": "text", + "content": "column" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 289, + 399, + 304, + 407 + ], + "lines": [ + { + "bbox": [ + 289, + 399, + 304, + 407 + ], + "spans": [ + { + "bbox": [ + 289, + 399, + 304, + 407 + ], + "type": "image", + "image_path": "af54b758ef067c52125d556afcbcc2a2ad3c7c83fed744cb67afb8f5b6d88d27.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 289, + 409, + 323, + 416 + ], + "lines": [ + { + "bbox": [ + 289, + 409, + 323, + 416 + ], + "spans": [ + { + "bbox": [ + 289, + 409, + 323, + 416 + ], + "type": "text", + "content": "table" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 334, + 399, + 343, + 407 + ], + "blocks": [ + { + "bbox": [ + 334, + 399, + 343, + 407 + ], + "lines": [ + { + "bbox": [ + 334, + 399, + 343, + 407 + ], + "spans": [ + { + "bbox": [ + 334, + 399, + 343, + 407 + ], + "type": "image", + "image_path": "a99844856f66bed3dc950b7b3a8ac5971646ef7297c317c8b51972a64965bb2f.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 335, + 409, + 354, + 416 + ], + "lines": [ + { + "bbox": [ + 335, + 409, + 354, + 416 + ], + "spans": [ + { + "bbox": [ + 335, + 409, + 354, + 416 + ], + "type": "text", + "content": "s" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 351, + 399, + 365, + 407 + ], + "blocks": [ + { + "bbox": [ + 351, + 399, + 365, + 407 + ], + "lines": [ + { + "bbox": [ + 351, + 399, + 365, + 407 + ], + "spans": [ + { + "bbox": [ + 351, + 399, + 365, + 407 + ], + "type": "image", + "image_path": "e3da1ed463fe48be1c30465c737efaf5b5758023c9014de039ef2291ce41a014.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 351, + 409, + 367, + 416 + ], + "lines": [ + { + "bbox": [ + 351, + 409, + 367, + 416 + ], + "spans": [ + { + "bbox": [ + 351, + 409, + 367, + 416 + ], + "type": "text", + "content": "sofa" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 379, + 399, + 388, + 407 + ], + "blocks": [ + { + "bbox": [ + 379, + 399, + 388, + 407 + ], + "lines": [ + { + "bbox": [ + 379, + 399, + 388, + 407 + ], + "spans": [ + { + "bbox": [ + 379, + 399, + 388, + 407 + ], + "type": "image", + "image_path": 
"ccfd1138d4fa59158c8f42f83bf1a4e5fe437be2a06e803569a07a1056a8ec79.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 380, + 409, + 400, + 416 + ], + "lines": [ + { + "bbox": [ + 380, + 409, + 400, + 416 + ], + "spans": [ + { + "bbox": [ + 380, + 409, + 400, + 416 + ], + "type": "text", + "content": "b" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 394, + 399, + 409, + 407 + ], + "blocks": [ + { + "bbox": [ + 394, + 399, + 409, + 407 + ], + "lines": [ + { + "bbox": [ + 394, + 399, + 409, + 407 + ], + "spans": [ + { + "bbox": [ + 394, + 399, + 409, + 407 + ], + "type": "image", + "image_path": "11b11729c49847242eff190e3cdb3007f76c83e0c9c130f716b3e47a696118bc.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 395, + 409, + 416, + 416 + ], + "lines": [ + { + "bbox": [ + 395, + 409, + 416, + 416 + ], + "spans": [ + { + "bbox": [ + 395, + 409, + 416, + 416 + ], + "type": "text", + "content": "board" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 410, + 399, + 428, + 407 + ], + "blocks": [ + { + "bbox": [ + 410, + 399, + 428, + 407 + ], + "lines": [ + { + "bbox": [ + 410, + 399, + 428, + 407 + ], + "spans": [ + { + "bbox": [ + 410, + 399, + 428, + 407 + ], + "type": "image", + "image_path": "9a2cf3bf14753d72e4bf33cd3d5d59a100d4337e0c2057f672071cca71e7a65b.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "bbox": [ + 410, + 409, + 416, + 416 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 410, + 409, + 416, + 416 + ], + "spans": [ + { + "bbox": [ + 410, + 409, + 416, + 416 + ], + "type": "text", + "content": "rd" + } + ] + } + ], + "index": 29, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 440, + 399, + 455, + 407 + ], + "blocks": [ + { + "bbox": [ + 440, + 399, + 455, + 407 + ], + "lines": [ + { + "bbox": [ + 440, + 399, + 455, + 407 + ], + "spans": [ + { + "bbox": [ + 440, + 399, + 455, + 407 + ], + "type": "image", + "image_path": "d7bfb6f69617c227607bb8b14f8bc5c02695e3d5ec03df656633b3ec1588bfbe.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 440, + 409, + 474, + 416 + ], + "lines": [ + { + "bbox": [ + 440, + 409, + 474, + 416 + ], + "spans": [ + { + "bbox": [ + 440, + 409, + 474, + 416 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 30 + }, + { + "bbox": [ + 130, + 486, + 482, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 486, + 482, + 571 + ], + "spans": [ + { + "bbox": [ + 130, + 486, + 482, + 571 + ], + "type": "text", + "content": "point clouds to implicitly co-embed texts embeddings with 3D point clouds embeddings using only scene-level labels. With extensive experiments, we verify that the textual semantic information of category labels is beneficial for 3DSS-VLG which achieves the state-of-the-art on both S3DIS and ScanNet datasets. Further, with an experiment to explore our framework to unobserved scene domains, we demonstrate the generalization capability of our method, which supports its practicality." 
+ } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 132, + 590, + 246, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 590, + 246, + 605 + ], + "spans": [ + { + "bbox": [ + 132, + 590, + 246, + 605 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "type": "text", + "content": "This work was supported in part by the National Natural Science Foundation of China under Grants 62371310 and 62032015, in part by the Guangdong Basic and Applied Basic Research Foundation under Grant 2023A1515011236, in part by the Stable Support Project of Shenzhen (Project No. 20231122122722001), in" + } + ] + } + ], + "index": 35 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 216, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 216, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 216, + 100 + ], + "type": "text", + "content": "X. Xu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "text", + "content": "part by the third phase of the high-level university construction interdisciplinary innovation team project of Shenzhen University (24JCXK03). We also acknowledge the CINECA award under the ISCRA initiative for the availability of partial HPC resources and support, as well as the partial support of the Fundamental Research Funds for the Central Universities, Peking University." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 191, + 197, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 191, + 197, + 205 + ], + "spans": [ + { + "bbox": [ + 133, + 191, + 197, + 205 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 212, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 138, + 212, + 481, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 212, + 481, + 246 + ], + "spans": [ + { + "bbox": [ + 138, + 212, + 481, + 246 + ], + "type": "text", + "content": "1. Alonso, I., Riazuelo, L., Montesano, L., Murillo, A.C.: 3d-mininet: Learning a 2d representation from point clouds for fast and efficient 3d lidar semantic segmentation. IEEE Robotics and Automation Letters 5(4), 5432-5439 (2020) 5" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 246, + 481, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 246, + 481, + 289 + ], + "spans": [ + { + "bbox": [ + 138, + 246, + 481, + 289 + ], + "type": "text", + "content": "2. Ando, A., Gidaris, S., Bursuc, A., Puy, G., Boulch, A., Marlet, R.: Rangevit: Towards vision transformers for 3d semantic segmentation in autonomous driving.
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5240-5250 (2023) 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 289, + 481, + 321 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 289, + 481, + 321 + ], + "spans": [ + { + "bbox": [ + 138, + 289, + 481, + 321 + ], + "type": "text", + "content": "3. Armeni, I., Sener, O., Zamir, A.R., Jiang, H., Brilakis, I., Fischer, M., Savarese, S.: 3d semantic parsing of large-scale indoor spaces. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1534-1543 (2016) 9" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 321, + 481, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 321, + 481, + 342 + ], + "spans": [ + { + "bbox": [ + 138, + 321, + 481, + 342 + ], + "type": "text", + "content": "4. Bucher, M., Vu, T.H., Cord, M., Pérez, P.: Zero-shot semantic segmentation. Advances in Neural Information Processing Systems 32 (2019) 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 342, + 481, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 342, + 481, + 386 + ], + "spans": [ + { + "bbox": [ + 138, + 342, + 481, + 386 + ], + "type": "text", + "content": "5. Cardace, A., Ramirez, P.Z., Salti, S., Di Stefano, L.: Exploiting the complementarity of 2d and 3d networks to address domain-shift in 3d semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 98-109 (2023) 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 386, + 481, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 386, + 481, + 429 + ], + "spans": [ + { + "bbox": [ + 138, + 386, + 481, + 429 + ], + "type": "text", + "content": "6. Chen, J., Zhu, D., Qian, G., Ghanem, B., Yan, Z., Zhu, C., Xiao, F., Culatana, S.C., Elhoseiny, M.: Exploring open-vocabulary semantic segmentation from clip vision encoder distillation only. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 699-710 (2023) 4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 429, + 481, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 429, + 481, + 460 + ], + "spans": [ + { + "bbox": [ + 138, + 429, + 481, + 460 + ], + "type": "text", + "content": "7. Chen, R., Liu, Y., Kong, L., Chen, N., Zhu, X., Ma, Y., Liu, T., Wang, W.: Towards label-free scene understanding by vision foundation models. Advances in Neural Information Processing Systems 36 (2024) 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 460, + 481, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 460, + 481, + 494 + ], + "spans": [ + { + "bbox": [ + 138, + 460, + 481, + 494 + ], + "type": "text", + "content": "8. Chibane, J., Engelmann, F., Anh Tran, T., Pons-Moll, G.: Box2mask: Weakly supervised 3d semantic instance segmentation using bounding boxes. In: European Conference on Computer Vision. pp. 681-699. Springer (2022) 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 494, + 481, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 494, + 481, + 525 + ], + "spans": [ + { + "bbox": [ + 138, + 494, + 481, + 525 + ], + "type": "text", + "content": "9. Choy, C., Gwak, J., Savarese, S.: 4d spatio-temporal convnets: Minkowski convolutional neural networks. 
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3075-3084 (2019) 4, 5, 9, 10, 11" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 525, + 481, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 525, + 481, + 557 + ], + "spans": [ + { + "bbox": [ + 138, + 525, + 481, + 557 + ], + "type": "text", + "content": "10. Dai, A., Chang, A.X., Savva, M., Halber, M., Funkhouser, T., Nießner, M.: Scannet: Richly-annotated 3d reconstructions of indoor scenes. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 5828-5839 (2017) 9" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 557, + 481, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 557, + 481, + 601 + ], + "spans": [ + { + "bbox": [ + 138, + 557, + 481, + 601 + ], + "type": "text", + "content": "11. Genova, K., Yin, X., Kundu, A., Pantofaru, C., Cole, F., Sud, A., Brewington, B., Shucker, B., Funkhouser, T.: Learning 3d semantic segmentation with only 2d image supervision. In: 2021 International Conference on 3D Vision (3DV). pp. 361-372 (2021) 5" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 601, + 481, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 601, + 481, + 633 + ], + "spans": [ + { + "bbox": [ + 138, + 601, + 481, + 633 + ], + "type": "text", + "content": "12. Ghiasi, G., Gu, X., Cui, Y., Lin, T.Y.: Scaling open-vocabulary image segmentation with image-level labels. In: European Conference on Computer Vision. pp. 540-557. Springer (2022) 3, 4, 6, 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 633, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 633, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 633, + 481, + 665 + ], + "type": "text", + "content": "13. Hegde, D., Valanarasu, J.M.J., Patel, V.: Clip goes 3d: Leveraging prompt tuning for language grounded 3d recognition. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 2028-2038 (2023) 1" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 276, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 276, + 91, + 447, + 102 + ], + "type": "text", + "content": "3DSS with 2D Vision-Language Guidance" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 666 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 133, + 116, + 481, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 116, + 481, + 149 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 481, + 149 + ], + "type": "text", + "content": "14. Hou, J., Xie, S., Graham, B., Dai, A., Nießner, M.: Pri3d: Can 3d priors help 2d representation learning? In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp.
5693-5702 (2021) 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 150, + 481, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 150, + 481, + 183 + ], + "spans": [ + { + "bbox": [ + 133, + 150, + 481, + 183 + ], + "type": "text", + "content": "15. Hu, Q., Yang, B., Fang, G., Guo, Y., Leonardis, A., Trigoni, N., Markham, A.: Sqn: Weakly-supervised semantic segmentation of large-scale 3d point clouds. In: European Conference on Computer Vision. pp. 600-619. Springer (2022) 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 183, + 481, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 183, + 481, + 226 + ], + "spans": [ + { + "bbox": [ + 132, + 183, + 481, + 226 + ], + "type": "text", + "content": "16. Hu, Q., Yang, B., Xie, L., Rosa, S., Guo, Y., Wang, Z., Trigoni, N., Markham, A.: Randla-net: Efficient semantic segmentation of large-scale point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11108-11117 (2020) 1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 227, + 481, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 227, + 481, + 269 + ], + "spans": [ + { + "bbox": [ + 132, + 227, + 481, + 269 + ], + "type": "text", + "content": "17. Hu, W., Zhao, H., Jiang, L., Jia, J., Wong, T.T.: Bidirectional projection network for cross dimension scene understanding. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 14373-14382 (2021) 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 270, + 481, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 270, + 481, + 303 + ], + "spans": [ + { + "bbox": [ + 132, + 270, + 481, + 303 + ], + "type": "text", + "content": "18. Kweon, H., Yoon, K.J.: Joint learning of 2d-3d weakly supervised semantic segmentation. Advances in Neural Information Processing Systems 35, 30499-30511 (2022) 2, 5, 11" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 303, + 481, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 303, + 481, + 335 + ], + "spans": [ + { + "bbox": [ + 132, + 303, + 481, + 335 + ], + "type": "text", + "content": "19. Lahoud, J., Ghanem, B.: 2d-driven 3d object detection in rgb-d images. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4622-4630 (2017) 5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 336, + 481, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 336, + 481, + 369 + ], + "spans": [ + { + "bbox": [ + 132, + 336, + 481, + 369 + ], + "type": "text", + "content": "20. Li, J., Dai, H., Han, H., Ding, Y.: Mseg3d: Multi-modal 3d semantic segmentation for autonomous driving. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21694-21704 (2023) 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 369, + 481, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 369, + 481, + 402 + ], + "spans": [ + { + "bbox": [ + 132, + 369, + 481, + 402 + ], + "type": "text", + "content": "21. 
Li, J., Jie, Z., Ricci, E., Ma, L., Sebe, N.: Enhancing robustness of vision-language models through orthogonality learning and cross-regularization (2024), https://arxiv.org/abs/2407.08374 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 402, + 481, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 402, + 481, + 435 + ], + "spans": [ + { + "bbox": [ + 132, + 402, + 481, + 435 + ], + "type": "text", + "content": "22. Li, J., Jie, Z., Wang, X., Wei, X., Ma, L.: Expansion and shrinkage of localization for weakly-supervised semantic segmentation. In: Advances in Neural Information Processing Systems. vol. 35, pp. 16037-16051 (2022) 5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 435, + 481, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 435, + 481, + 468 + ], + "spans": [ + { + "bbox": [ + 132, + 435, + 481, + 468 + ], + "type": "text", + "content": "23. Li, J., Jie, Z., Wang, X., Zhou, Y., Ma, L., Jiang, J.: Weakly supervised semantic segmentation via self-supervised destruction learning. Neurocomputing 561, 126821 (2023) 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 468, + 481, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 468, + 481, + 500 + ], + "spans": [ + { + "bbox": [ + 132, + 468, + 481, + 500 + ], + "type": "text", + "content": "24. Li, J., Jie, Z., Wang, X., Zhou, Y., Wei, X., Ma, L.: Weakly supervised semantic segmentation via progressive patch learning. IEEE Transactions on Multimedia 25, 1686-1699 (2022) 5" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 500, + 481, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 500, + 481, + 544 + ], + "spans": [ + { + "bbox": [ + 132, + 500, + 481, + 544 + ], + "type": "text", + "content": "25. Liang, F., Wu, B., Dai, X., Li, K., Zhao, Y., Zhang, H., Zhang, P., Vajda, P., Marculescu, D.: Open-vocabulary semantic segmentation with mask-adapted clip. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7061-7070 (2023) 3, 4" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 544, + 481, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 544, + 481, + 578 + ], + "spans": [ + { + "bbox": [ + 132, + 544, + 481, + 578 + ], + "type": "text", + "content": "26. Qi, C.R., Liu, W., Wu, C., Su, H., Guibas, L.J.: Frustum pointnets for 3d object detection from rgb-d data. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 918-927 (2018) 5" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 578, + 481, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 578, + 481, + 611 + ], + "spans": [ + { + "bbox": [ + 132, + 578, + 481, + 611 + ], + "type": "text", + "content": "27. Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: Deep learning on point sets for 3d classification and segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 652-660 (2017) 1, 10" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 611, + 481, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 611, + 481, + 643 + ], + "spans": [ + { + "bbox": [ + 132, + 611, + 481, + 643 + ], + "type": "text", + "content": "28. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space.
Advances in neural information processing systems 30 (2017) 1, 11" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 643, + 481, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 643, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 132, + 643, + 481, + 666 + ], + "type": "text", + "content": "29. Qian, G., Li, Y., Peng, H., Mai, J., Hammoud, H., Elhoseiny, M., Ghanem, B.: Pointnext: Revisiting pointnet++ with improved training and scaling strategies." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 215, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 215, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 215, + 100 + ], + "type": "text", + "content": "X. Xu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 666 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 149, + 116, + 481, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 116, + 481, + 138 + ], + "spans": [ + { + "bbox": [ + 149, + 116, + 481, + 138 + ], + "type": "text", + "content": "Advances in Neural Information Processing Systems 35, 23192-23204 (2022) 1, 10, 11" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 139, + 481, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 139, + 481, + 183 + ], + "spans": [ + { + "bbox": [ + 133, + 139, + 481, + 183 + ], + "type": "text", + "content": "30. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning. pp. 8748-8763 (2021) 3, 4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 183, + 481, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 183, + 481, + 216 + ], + "spans": [ + { + "bbox": [ + 132, + 183, + 481, + 216 + ], + "type": "text", + "content": "31. Ren, Z., Misra, I., Schwing, A.G., Girdhar, R.: 3d spatial recognition without spatially labeled 3d. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13204-13213 (2021) 2, 5, 10, 11" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 216, + 481, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 216, + 481, + 258 + ], + "spans": [ + { + "bbox": [ + 132, + 216, + 481, + 258 + ], + "type": "text", + "content": "32. Robert, D., Vallet, B., Landrieu, L.: Learning multi-view aggregation in the wild for large-scale 3d semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5575-5584 (2022) 10, 11" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 259, + 481, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 259, + 481, + 293 + ], + "spans": [ + { + "bbox": [ + 132, + 259, + 481, + 293 + ], + "type": "text", + "content": "33. 
Tatarchenko, M., Park, J., Koltun, V., Zhou, Q.Y.: Tangent convolutions for dense prediction in 3d. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3887-3896 (2018) 10, 11" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 293, + 481, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 293, + 481, + 335 + ], + "spans": [ + { + "bbox": [ + 132, + 293, + 481, + 335 + ], + "type": "text", + "content": "34. Thomas, H., Qi, C.R., Deschaud, J.E., Marcotegui, B., Goulette, F., Guibas, L.J.: Kpconv: Flexible and deformable convolution for point clouds. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 6411-6420 (2019) 10, 11" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 336, + 481, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 336, + 481, + 369 + ], + "spans": [ + { + "bbox": [ + 132, + 336, + 481, + 369 + ], + "type": "text", + "content": "35. Wang, Z., Rao, Y., Yu, X., Zhou, J., Lu, J.: Semaffinet: Semantic-affine transformation for point cloud segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11819-11829 (2022) 10, 11" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 369, + 481, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 369, + 481, + 413 + ], + "spans": [ + { + "bbox": [ + 132, + 369, + 481, + 413 + ], + "type": "text", + "content": "36. Wei, J., Lin, G., Yap, K.H., Hung, T.Y., Xie, L.: Multi-path region mining for weakly supervised 3d semantic segmentation on point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4384-4393 (2020) 2, 5, 10, 11" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 413, + 481, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 413, + 481, + 456 + ], + "spans": [ + { + "bbox": [ + 132, + 413, + 481, + 456 + ], + "type": "text", + "content": "37. Xian, Y., Choudhury, S., He, Y., Schiele, B., Akata, Z.: Semantic projection network for zero-and few-label semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8256-8265 (2019) 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 456, + 481, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 456, + 481, + 490 + ], + "spans": [ + { + "bbox": [ + 132, + 456, + 481, + 490 + ], + "type": "text", + "content": "38. Xu, D., Anguelov, D., Jain, A.: Pointfusion: Deep sensor fusion for 3d bounding box estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 244-253 (2018) 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 491, + 481, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 491, + 481, + 534 + ], + "spans": [ + { + "bbox": [ + 132, + 491, + 481, + 534 + ], + "type": "text", + "content": "39. Xu, J., Hou, J., Zhang, Y., Feng, R., Wang, Y., Qiao, Y., Xie, W.: Learning open-vocabulary semantic segmentation models from natural language supervision. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
2935-2944 (2023) 4" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 534, + 481, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 534, + 481, + 567 + ], + "spans": [ + { + "bbox": [ + 132, + 534, + 481, + 567 + ], + "type": "text", + "content": "40. Xu, M., Zhang, Z., Wei, F., Hu, H., Bai, X.: Side adapter network for open-vocabulary semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2945-2954 (2023) 4" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 567, + 481, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 567, + 481, + 600 + ], + "spans": [ + { + "bbox": [ + 132, + 567, + 481, + 600 + ], + "type": "text", + "content": "41. Xu, M., Zhang, Z., Wei, F., Lin, Y., Cao, Y., Hu, H., Bai, X.: A simple baseline for open-vocabulary semantic segmentation with pre-trained vision-language model. In: European Conference on Computer Vision. pp. 736-753. Springer (2022) 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 600, + 481, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 600, + 481, + 632 + ], + "spans": [ + { + "bbox": [ + 132, + 600, + 481, + 632 + ], + "type": "text", + "content": "42. Xu, X., Yuan, Y., Zhang, Q., Wu, W., Jie, Z., Ma, L., Wang, X.: Weakly-supervised 3d visual grounding based on visual linguistic alignment. arXiv preprint arXiv:2312.09625 (2023) 5" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 632, + 481, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 632, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 132, + 632, + 481, + 666 + ], + "type": "text", + "content": "43. Yan, X., Gao, J., Zheng, C., Zheng, C., Zhang, R., Cui, S., Li, Z.: 2dpass: 2d priors assisted semantic segmentation on lidar point clouds. In: European Conference on Computer Vision. pp. 677-695. Springer (2022) 1" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 276, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 276, + 91, + 447, + 102 + ], + "type": "text", + "content": "3DSS with 2D Vision-Language Guidance" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 392 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 133, + 116, + 481, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 116, + 481, + 159 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 481, + 159 + ], + "type": "text", + "content": "44. Yang, C.K., Chen, M.H., Chuang, Y.Y., Lin, Y.Y.: 2d-3d interlaced transformer for point cloud segmentation with scene-level supervision. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 
977–987 (2023) 2, 5, 10, 11" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 160, + 481, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 160, + 481, + 204 + ], + "spans": [ + { + "bbox": [ + 132, + 160, + 481, + 204 + ], + "type": "text", + "content": "45. Yang, C.K., Wu, J.J., Chen, K.S., Chuang, Y.Y., Lin, Y.Y.: An mil-derived transformer for weakly supervised point cloud segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11830-11839 (2022) 2, 10, 11" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 205, + 481, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 205, + 481, + 237 + ], + "spans": [ + { + "bbox": [ + 132, + 205, + 481, + 237 + ], + "type": "text", + "content": "46. Yun, S., Park, S.H., Seo, P.H., Shin, J.: Ifseg: Image-free semantic segmentation via vision-language model. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2967-2977 (2023) 3" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 237, + 481, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 237, + 481, + 281 + ], + "spans": [ + { + "bbox": [ + 132, + 237, + 481, + 281 + ], + "type": "text", + "content": "47. Zhang, R., Wang, L., Qiao, Y., Gao, P., Li, H.: Learning 3d representations from 2d pre-trained models via image-to-point masked autoencoders. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21769-21780 (2023) 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 281, + 481, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 281, + 481, + 326 + ], + "spans": [ + { + "bbox": [ + 132, + 281, + 481, + 326 + ], + "type": "text", + "content": "48. Zhang, Y., Hu, Q., Xu, G., Ma, Y., Wan, J., Guo, Y.: Not all points are equal: Learning highly efficient point-based detectors for 3d lidar point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 18953-18962 (2022) 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 326, + 481, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 326, + 481, + 357 + ], + "spans": [ + { + "bbox": [ + 132, + 326, + 481, + 357 + ], + "type": "text", + "content": "49. Zhao, H., Jiang, L., Jia, J., Torr, P.H., Koltun, V.: Point transformer. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 16259-16268 (2021) 10, 11" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 358, + 481, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 358, + 481, + 392 + ], + "spans": [ + { + "bbox": [ + 132, + 358, + 481, + 392 + ], + "type": "text", + "content": "50. Zhou, B., Khosla, A., Lapedriza, A., Oliva, A., Torralba, A.: Learning deep features for discriminative localization. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
2921-2929 (2016) 2" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 216, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 216, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 216, + 100 + ], + "type": "text", + "content": "X. Xu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/c22d5df4-9f40-4c3a-8e7a-40f3a1d6dbe5_content_list.json b/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/c22d5df4-9f40-4c3a-8e7a-40f3a1d6dbe5_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..47b13d3acd865286e8bb99c40df0f749da5b2dcb --- /dev/null +++ b/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/c22d5df4-9f40-4c3a-8e7a-40f3a1d6dbe5_content_list.json @@ -0,0 +1,2143 @@ +[ + { + "type": "text", + "text": "3D-GOI: 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing", + "text_level": 1, + "bbox": [ + 282, + 140, + 722, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Haoran Li $^{1,2}$ , Long Ma $^{1,2}$ , Haolin Shi $^{1,2}$ , Yanbin Hao $^{1,2}$ , Yong Liao $^{1,2*}$ , Lechao Cheng $^{3}$ , and Peng Yuan Zhou $^{4*}$", + "bbox": [ + 218, + 210, + 782, + 244 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 University of Science and Technology of China", + "bbox": [ + 336, + 253, + 665, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2 CCCD Key Lab of Ministry of Culture and Tourism {1hr123, longm, mar}@mail.ustc.edu.cn, haoyanbin@hotmail.com, yliao@ustc.edu.cn", + "bbox": [ + 264, + 268, + 736, + 309 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3 Hefei University of Technology", + "bbox": [ + 390, + 309, + 611, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "chenglc@hfut.edu.cn", + "bbox": [ + 426, + 325, + 575, + 338 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Aarhus University", + "bbox": [ + 433, + 338, + 568, + 352 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "pengyuan.zhou@ece.au.dk", + "bbox": [ + 411, + 353, + 591, + 366 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. The current GAN inversion methods typically can only edit the appearance and shape of a single object and background while overlooking spatial information. In this work, we propose a 3D editing framework, 3D-GOI to enable multifaceted editing of affine information (scale, translation, and rotation) on multiple objects. 3D-GOI realizes the complex editing function by inverting the abundance of attribute codes (object shape/ appearance/ scale/ rotation/ translation, background shape/ appearance, and camera pose) controlled by GIRAFFE, a renowned 3D GAN. Accurately inverting all the codes is challenging, 3D-GOI solves this challenge following three main steps. First, we segment the objects and the background in a multi-object image. 
Second, we use a custom Neural Inversion Encoder to obtain coarse codes of each object. Finally, we use a round-robin optimization algorithm to get precise codes to reconstruct the image. To the best of our knowledge, 3D-GOI is the first framework to enable multifaceted editing on multiple objects. Both qualitative and quantitative experiments demonstrate that 3D-GOI holds immense potential for flexible, multifaceted editing in complex multi-object scenes. Our project and code are released at https://3d-goi.github.io.", + "bbox": [ + 259, + 402, + 743, + 667 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 693, + 375, + 709 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The development of generative 3D models has attracted increasing attention to automatic 3D object and scene generation and editing. Most existing works are limited to a single object, such as 3D face generation [7] and synthesis of facial viewpoints [40]. There are few methods for generating multi-object 3D scenes, and editing such scenes remains unexplored. In this paper, we propose 3D-GOI to edit images containing multiple objects with complex spatial geometric", + "bbox": [ + 212, + 724, + 787, + 816 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Corresponding authors", + "bbox": [ + 230, + 825, + 401, + 840 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d9ae7d3da607da8e33995c2a1218ca96f1aa5b7eb57b8e91e023402c9b26b44e.jpg", + "image_caption": [ + "Fig. 1: The first row shows the editing results of traditional 2D/3D GAN inversion methods on multi-object images. The second row showcases 3D-GOI, which can perform multifaceted editing on complex images with multiple objects. 'bg' stands for background. The red crosses in the upper right figures indicate features that cannot be edited with current 2D/3D GAN inversion methods." + ], + "image_footnote": [], + "bbox": [ + 218, + 147, + 781, + 262 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "relationships. 3D-GOI can not only change the appearance and shape of each object and the background, but also edit the spatial position of each object and the camera pose of the image, as shown in Figure 1.", + "bbox": [ + 212, + 380, + 784, + 426 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Existing 3D multi-object scene generation methods can be mainly classified into two categories: those [28] based on Generative Adversarial Networks (GANs) [10] and those [22] based on Diffusion models [13], besides a few based on VAE or Transformer [3,38]. GAN-based methods, primarily represented by GIRAFFE [28] and its derivatives, depict complex scene images as results of multiple foreground objects, controlled by shape and appearance, subjected to affine transformations (scaling, translation, and rotation), and rendered together with a background, which is also controlled by shape and appearance, from a specific camera viewpoint. Diffusion-based methods [23] perceive scene images as results of multiple latent NeRFs [24], which can be represented as 3D models, undergoing affine transformations, optimized with SDS [30], and rendered from a specific camera viewpoint. Both categories represent scenes as combinations of multiple codes. To realize editing based on these generative methods, it's imperative to invert the complex multi-object scene images to retrieve their representative codes.
After modifying these codes, regeneration can achieve diversified editing of complex images. Most inversion methods study the inversion of a single code based on its generation method. However, each multi-object image is the entangled result of multiple codes, so inverting all codes from an image requires precise disentangling of the codes, which is extremely difficult and largely overlooked. Moreover, the prevailing inversion algorithms primarily employ optimization approaches. Attempting to optimize all codes simultaneously often leads to chaotic optimization directions and less accurate inversion outcomes.", + "bbox": [ + 212, + 429, + 787, + 760 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Therefore, we propose 3D-GOI, a framework capable of inverting multiple codes to achieve a comprehensive inversion of multi-object images. Given current open-source 3D multi-object scene generation methods, we have chosen GIRAFFE [28] as our generative model. In theory, our framework can be applied to other generative approaches as well. We address these challenges as follows.", + "bbox": [ + 212, + 763, + 787, + 840 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "H. Li et al.", + "bbox": [ + 271, + 114, + 346, + 127 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "First, we categorize different codes based on object attributes, background attributes, and pose attributes. Through qualitative verification, we found that segmentation methods can roughly separate the codes pertaining to different objects. For example, the codes controlling an object's shape, appearance, scale, translation, and rotation predominantly relate to the object itself. Thus, during the inversion process, we use only the segmented image of this object to reduce the impact of the background and other objects on its codes.", + "bbox": [ + 212, + 146, + 782, + 251 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Second, we get the attributes' codes from the segmented image. Inspired by the Neural Rendering Block in GIRAFFE, we design a custom Neural Inversion Encoder network to coarsely disentangle and estimate the code values.", + "bbox": [ + 212, + 252, + 782, + 296 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Finally, we obtain precise values for each code through optimization. We observed that optimizing all codes simultaneously tends to get stuck in local minima. Therefore, we propose a round-robin optimization algorithm that employs a ranking function to determine the optimization order for different codes. The algorithm enables a stable and efficient optimization process for accurate image reconstruction. Our contributions can be summarized as follows.", + "bbox": [ + 212, + 297, + 782, + 387 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- To the best of our knowledge, 3D-GOI is the first multi-code inversion framework in generative models, achieving multifaceted editing of multi-object images.", + "- We introduce a three-stage inversion process: 1) separate the attribute codes of different objects via segmentation; 2) obtain coarse codes using a custom Neural Inversion Encoder; 3) optimize the reconstruction using a round-robin optimization strategy.", + "
+ ], + "bbox": [ + 225, + 393, + 782, + 496 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 215, + 516, + 385, + 531 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2D/3D GANs. 2D GAN maps a distribution from the latent space to the image space using a generator and a discriminator and has been widely explored. For example, BigGAN [6] increases the batch size and uses a simple truncation trick to finely control the trade-off between sample fidelity and variety. CycleGAN [44] feeds an input image into the generator and loops the output back to the generator. It achieves style transfer by minimizing the consistency loss between the input and its result. StyleGAN [17] maps a latent code into multiple style codes, allowing for detailed style control of images. 3D GANs usually combine 2D GANs with some 3D representation, such as NeRF [25], and have demonstrated excellent abilities to generate complex scenes with multi-view consistency. Broadly, 3D GANs can be classified into explicit and implicit models. Explicit models like HoloGAN [26] enable explicit control over the object pose through rigid body transformations of the learned 3D features. BlockGAN [27] generates foreground and background 3D features separately, combining them into a complete 3D scene representation. On the other hand, implicit models generally perform better. Many of these models take inspiration from NeRF [25], representing images as neural radiance fields and using volume rendering to generate photorealistic images in a continuous view. EG3D [7] introduces an explicit-implicit hybrid network architecture that produces high-quality 3D geometries.", + "bbox": [ + 212, + 551, + 784, + 839 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "3D-GOI", + "bbox": [ + 674, + 114, + 730, + 126 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 114, + 785, + 126 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/95b140fad3eba4fd0c9ef37e1d7254c76e91c7db998f5dfcb3b1d382d6dc3489.jpg", + "image_caption": [ + "Fig. 2: Different GANs and GAN Inversion methods utilize codes differently. $\\omega$ represents the latent code and $c$ represents the camera pose." + ], + "image_footnote": [], + "bbox": [ + 225, + 143, + 357, + 203 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/74a2f6e85bbd65c440ad6f8c69a023b312a35cc1d757f096c45926c731e0d946.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 419, + 143, + 555, + 203 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/b736a5a1d20e8d30b35cde4467de5078a10d8de84d305eeaa9fff26dc883425a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 616, + 143, + 753, + 203 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "GRAF [33] integrates shape and appearance coding within the generation process, which facilitates independent manipulation of the shape and appearance of the generated vehicle and furniture images. Moreover, the presence of 3D information provides additional control over the camera pose, contributing to the flexibility of the generated outputs. GIRAFFE [28] extends GRAF to multi-object scenes by considering an image as the composition of multiple objects in the foreground through affine transformation and the background rendered at a specific camera viewpoint. 
In this work, we select GIRAFFE as the 3D GAN model to be inverted.", + "bbox": [ + 212, + 272, + 787, + 409 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2D/3D GAN Inversion. GAN inversion obtains the latent code of an input image under a certain generator and modifies the latent code to perform image editing operations. Current 2D GAN inversion methods can be divided into optimization-based, encoder-based, and hybrid methods. Optimization-based methods [1, 14, 43] directly optimize the initial code, requiring very accurate initial values. Encoder-based methods [29, 31, 36] can map images directly to latent code but generally cannot achieve full reconstruction. Hybrid-based methods [4, 42] combine these two approaches: first employ an encoder to map the image to a suitable latent code, then perform optimization. Currently, most 2D GANs only have one latent code to generate an image $^5$ . Therefore, the 2D GAN inversion task can be represented as:", + "bbox": [ + 212, + 417, + 803, + 584 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\omega^ {*} = \\arg \\min _ {\\omega} \\mathcal {L} (G (\\omega , \\theta), I), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 398, + 595, + 785, + 619 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\omega$ is the latent component, $G$ denotes the generator, $\\theta$ denotes the parameters of the generator, $I$ is the input image, and $\\mathcal{L}$ is the loss function measuring the difference between the generated and input image.", + "bbox": [ + 212, + 628, + 787, + 674 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Typically, 3D GANs have an additional camera pose parameter compared to 2D GANs, making it more challenging to obtain latent codes during inversion. Current methods like SPI [40] use a symmetric prior for faces to generate images with different perspectives, while [19] employs a pre-trained estimator to achieve better initialization and utilizes pixel-level depth calculated from the NeRF parameters for improved image reconstruction.", + "bbox": [ + 212, + 674, + 787, + 763 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Currently, there are only limited works on 3D GAN inversion [9,21,37] which primarily focus on creating novel perspectives of human faces using specialized", + "bbox": [ + 212, + 765, + 785, + 796 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "H. Li et al.", + "bbox": [ + 271, + 114, + 346, + 126 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "5 Although StyleGAN can be controlled by multiple style codes, these codes are all generated from a single initial latent code, indicating their interrelations. Hence only one encoder is needed to predict all the codes during inversion.", + "bbox": [ + 217, + 805, + 787, + 839 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "face datasets considering generally only two codes: camera pose code $\\pmb{c}$ and the latent code $\\pmb{\\omega}$ . Hence its inversion task can be represented as:", + "bbox": [ + 212, + 146, + 782, + 176 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\omega} ^ {*}, \\boldsymbol {c} ^ {*} = \\arg \\min _ {\\boldsymbol {\\omega}, \\boldsymbol {c}} \\mathcal {L} (G (\\boldsymbol {\\omega}, \\boldsymbol {c}, \\theta), I). 
\\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 382, + 185, + 785, + 205 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A major advancement of 3D-GOI is the capability to invert more independent codes compared with other inversion methods, as Figure 2 shows, in order to perform multifaceted edits on multi-object images.", + "bbox": [ + 212, + 212, + 782, + 258 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 Preliminary", + "text_level": 1, + "bbox": [ + 214, + 277, + 367, + 296 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "GIRAFFE [28] represents individual objects as a combination of feature field and volume density. Through scene compositions, the feature fields of multiple objects and the background are combined. Finally, the combined feature field is rendered into an image using volume rendering and neural rendering.", + "bbox": [ + 212, + 305, + 782, + 364 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For a coordinate $\\mathbf{x}$ and a viewing direction $\\mathbf{d}$ in scene space, the affine transformation $T(s,t,r)$ (scale, translation, rotation) is used to transform them back into the object space of each individual object. Following the implicit shape representations used in NeRF, a multi-layer perceptron (MLP) $h_{\\theta}$ is used to map the transformed $\\mathbf{x}$ and $\\mathbf{d}$ , along with the shape-controlling code $z_{s}$ and appearance-controlling code $z_{a}$ , to the feature field $\\mathbf{f}$ and volume density $\\sigma$ :", + "bbox": [ + 212, + 366, + 784, + 455 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left. \\left(T (s, t, r; \\boldsymbol {x})\\right), T (s, t, r; \\boldsymbol {d})\\right), \\left. \\boldsymbol {z} _ {\\boldsymbol {s}}, \\boldsymbol {z} _ {\\boldsymbol {a}}\\right) \\xrightarrow {h _ {\\theta}} (\\sigma , \\boldsymbol {f}). \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 344, + 465, + 785, + 484 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Then, GIRAFFE defines a Scene Composite Operator: at a given $\\pmb{x}$ and $\\pmb{d}$ , the overall density is the sum of the individual densities (including the background). The overall feature field is represented as the density-weighted average of the feature field of each object:", + "bbox": [ + 212, + 489, + 782, + 551 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nC (\\boldsymbol {x}, \\boldsymbol {d}) = \\left(\\sigma , \\frac {1}{\\sigma} \\sum_ {i = 1} ^ {N} \\sigma_ {i} \\boldsymbol {f} _ {\\boldsymbol {i}}\\right), w h e r e \\quad \\sigma = \\sum_ {i = 1} ^ {N} \\sigma_ {i}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 558, + 785, + 598 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathrm{N}$ denotes the background plus (N-1) objects.", + "bbox": [ + 212, + 604, + 591, + 619 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The rendering phase is divided into two stages. Similar to volume rendering in NeRF, given a pixel point, the rendering formula is used to calculate the feature field of this pixel point from the feature fields and the volume density of all sample points in a camera ray direction. After calculating all pixel points, a feature map is obtained. Neural rendering (Upsampling) is then applied to get the rendered image. 
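To make the Scene Composite Operator concrete, the snippet below sketches the density-weighted composition of Equations 3-4 in PyTorch. It is an illustration, not GIRAFFE's actual code: the tensor shapes, the batching over sample points, and the small epsilon guard against a zero total density are assumptions.

```python
import torch

def compose_fields(sigmas, feats, eps=1e-8):
    """Scene Composite Operator (cf. Eq. 4): the overall density is the sum of the
    per-entity densities, and the overall feature is their density-weighted mean.

    sigmas: (N, P) densities for N entities (background + N-1 objects) at P sample points.
    feats:  (N, P, F) per-entity feature vectors produced by h_theta (cf. Eq. 3).
    """
    sigma = sigmas.sum(dim=0)                          # sigma = sum_i sigma_i
    weights = sigmas / (sigma.unsqueeze(0) + eps)      # sigma_i / sigma
    f = (weights.unsqueeze(-1) * feats).sum(dim=0)     # (1/sigma) * sum_i sigma_i * f_i
    return sigma, f

# Toy usage: a background plus two objects evaluated at 1024 ray samples.
sigma, f = compose_fields(torch.rand(3, 1024), torch.rand(3, 1024, 128))
```

The composed feature field is what the two-stage rendering described above (volume rendering followed by neural upsampling) turns into the final image.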
Please refer to the Supplementary Material 1 for the detailed preliminary and formulas.", + "bbox": [ + 212, + 619, + 784, + 724 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 3D-GOI", + "text_level": 1, + "bbox": [ + 214, + 744, + 331, + 760 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 Problem Definition", + "text_level": 1, + "bbox": [ + 214, + 773, + 419, + 787 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The problem we target is similar to the general definition of GAN inversion, with the difference being that we need to invert many more codes than existing methods (1 or 2) shown in Figure 2. The parameter $W$ in GIRAFFE, which controls", + "bbox": [ + 212, + 794, + 782, + 840 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "3D-GOI", + "bbox": [ + 674, + 114, + 730, + 126 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 785, + 126 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/6b599194f2d727cfbe2094a6fe49f6c4ac4fba1ba15e0f2c7c3c4fd36a44ee08.jpg", + "image_caption": [ + "Fig. 3: The overall framework of 3D-GOI. As shown in the upper half, the encoders are trained on single-object scenes, each time using $L_{enc}$ to predict one $w, w \\in W$ , while other codes use real values. The lower half depicts the inversion process for the multi-object scene. We first decompose objects and background from the scene, then use the trained encoder to extract coarse codes, and finally use the round-robin optimization algorithm to obtain precise codes. The green blocks indicate required training and the yellow blocks indicate fixed parameters." + ], + "image_footnote": [], + "bbox": [ + 274, + 142, + 759, + 297 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "the generation, can be divided into object attributes, background attributes, and pose attributes, denoted by $O$ , $B$ , and $C$ . Then, $W$ can be expressed as follows:", + "bbox": [ + 212, + 439, + 784, + 470 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nW = \\{O _ {i} ^ {s h a p e}, O _ {i} ^ {a p p}, O _ {i} ^ {s}, O _ {i} ^ {t}, O _ {i} ^ {r}, B ^ {s h a p e}, B ^ {a p p}, C \\}, \\quad i = 1, \\dots , n, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 272, + 481, + 785, + 501 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $O_{i}^{shape}$ is the object shape latent code, $O_{i}^{app}$ is the object appearance latent code, $O_{i}^{s}$ is the object scale code, $O_{i}^{t}$ is the object translation code, $O_{i}^{r}$ is the object rotation code, $B^{shape}$ is the background shape latent code, $B^{app}$ is the background appearance latent code and $C$ is the camera pose matrix. $n$ denotes the $n$ objects. The reconstruction part can be expressed as:", + "bbox": [ + 212, + 513, + 787, + 590 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nW ^ {*} = \\arg \\min _ {W} \\mathcal {L} (G (W, \\theta), I). \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 603, + 785, + 625 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "According to Equation 5, we need to invert a total of $(5n + 3)$ codes. 
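Spelling out Equation 5, a scene with n objects is described by the following (5n + 3) codes. The container below is purely illustrative (the class and field names are not part of GIRAFFE); the dimensionalities in the comments follow the Implementation section later in the paper.

```python
from dataclasses import dataclass
from typing import List, Sequence

@dataclass
class ObjectCodes:
    shape: Sequence[float]   # O_i^shape, 256-d latent
    app: Sequence[float]     # O_i^app,   256-d latent
    scale: Sequence[float]   # O_i^s,     3-d
    trans: Sequence[float]   # O_i^t,     3-d
    rot: float               # O_i^r,     1-d rotation angle

@dataclass
class SceneCodes:
    objects: List[ObjectCodes]   # n objects -> 5n codes
    bg_shape: Sequence[float]    # B^shape, 128-d latent
    bg_app: Sequence[float]      # B^app,   128-d latent
    camera: float                # C, camera pose (1-d in the setup of Section 5)

    def num_codes(self) -> int:
        return 5 * len(self.objects) + 3   # Eq. 5: (5n + 3) codes in total
```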
Then, we are able to replace or interpolate any inverted code(s) to achieve multifaceted editing of multiple objects.", + "bbox": [ + 212, + 637, + 787, + 683 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 Scene Decomposition", + "text_level": 1, + "bbox": [ + 214, + 707, + 437, + 722 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As mentioned, the GIRAFFE generator differs from typical GAN generators in that a large number of codes are involved and not a single code controls all the generated parts. Therefore, it is challenging to transform all codes using just one encoder or optimizer as in typical GAN Inversion methods. While a human can easily distinguish each object and some of its features (appearance, shape), a machine algorithm requires a large number of high-precision annotated samples to understand what code is expressed at what position in the image.", + "bbox": [ + 212, + 733, + 787, + 840 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "H. Li et al.", + "bbox": [ + 271, + 114, + 346, + 126 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/32f752b839e368774c8e064f48d9a7f86df4150af26172f6875e4ba55247d341.jpg", + "image_caption": [ + "(a) Input", + "Fig. 4: Scene decomposition. (a) The input image. (b) The feature weight map of car A, where the redder regions indicate a higher opacity and the bluer regions lower opacity. (c) The feature weight map of car B. (d) The feature weight map of the background. By integrating these maps, it becomes apparent that the region corresponding to car A predominantly consists of the feature representation of cars A and B. The background's visible area solely contains the background's feature representation." + ], + "image_footnote": [], + "bbox": [ + 246, + 143, + 328, + 210 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/73ecd522f757a980eb426abbc10f4f10e12b01aefe644b592aea5d6dcf959342.jpg", + "image_caption": [ + "(b) Car A" + ], + "image_footnote": [], + "bbox": [ + 387, + 143, + 473, + 209 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/de237f2656a16860d64607a8884433dbfa714d679556bec6eb876f4a75b524ec.jpg", + "image_caption": [ + "(c) Car B" + ], + "image_footnote": [], + "bbox": [ + 531, + 143, + 614, + 209 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/34a97b6ab961fa6c051b449acca725226c85ad0215374b22dba85d90a3230fcd.jpg", + "image_caption": [ + "(d) Background" + ], + "image_footnote": [], + "bbox": [ + 674, + 143, + 756, + 209 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "A straightforward idea is that the attribute codes of an object will map to the corresponding position of the object in the image. For example, translation $(O^t)$ and rotation $(O^r)$ codes control the relative position of an object in the scene, scaling $(O^s)$ and shape $(O^{shape})$ codes determine the contour and shape of the object, and appearance $(O^{app})$ codes control the appearance representation at the position of the object. The image obtained from segmentation precisely encompasses these three types of information, allowing us to invert it and obtain the five attribute codes for the corresponding object. Similarly, for codes $(B^{shape}, B^{app})$ that generate the background, we can invert them using the segmented image of the background. 
Note that obtaining camera pose code $(C)$ requires information from the entire rendered image.", + "bbox": [ + 212, + 383, + 787, + 551 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We can qualitatively validate this idea. In Equation 3, we can see that an object's five attribute codes are mapped to the object's feature field and volume density through $h_{\\theta}$ . As inferred from Equation 4, the scene's feature field is synthesized by weighting the feature fields of each object by density. Therefore, an object appears at its position because its feature field has a high-density weight at the corresponding location. Figure 4 displays the density of different objects at different positions during GIRAFFE's feature field composition process. The redder the higher the density, while the bluer the lower the density. As discussed, car A exhibits a high-density value within its area and near-zero density elsewhere - a similar pattern is seen with car B. The background, however, presents a non-uniform density distribution across the scene. We can consider that both car A and B and the background mainly manifest their feature fields within their visible areas. Hence, we apply a straightforward segmentation method to separate each object's feature field and get the codes. Segmenting each object also allows our encoder to pay more attention to each input object or background. As such, we can train the encoder on single-object scenes and then generalize it to multi-object scenes instead of directly training in multi-object scenes that involve more codes, to reduce computation cost.", + "bbox": [ + 212, + 568, + 787, + 840 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "3D-GOI", + "bbox": [ + 674, + 114, + 730, + 126 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 114, + 784, + 126 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6a7668ad2dba3d3163f106f104fd8323f34ebb1a6ef168cfea5376fcefec75ab.jpg", + "image_caption": [ + "(a) Neural Rendering Block" + ], + "image_footnote": [], + "bbox": [ + 241, + 176, + 450, + 271 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d5bdb0e0efa9b7789efd91c1aec0a513a81f832a4806004bc0e37116528e341b.jpg", + "image_caption": [ + "(b) Neural Inversion Encoder", + "Fig. 5: Neural Inversion Encoder. (a) The Neural Rendering Block in GIRAFFE [28], an upsampling process to generate image $\\hat{I}$ . (b) The Neural Inversion Encoder opposes (a), which is a downsampling process. $I$ is the input image, $H, W$ are image height and width. $I_v$ is the heatmap of the image, $H_v, W_v$ and $M_f$ are the dimensions of $I_v$ , $w$ is the code to be predicted, and $w_f$ is the dimension of $w$ . Up/Down means upsampling/downsampling." + ], + "image_footnote": [], + "bbox": [ + 545, + 143, + 754, + 268 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3 Coarse Estimation", + "text_level": 1, + "bbox": [ + 215, + 407, + 413, + 422 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The previous segmentation step roughly disentangles the codes. Unlike typical encoder-based methods, it's difficult to predict all codes using just one encoder. Therefore, we assign an encoder to each code, allowing each encoder to focus solely on predicting one code. Hence, we need a total of eight encoders. 
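This decomposition maps directly onto the coarse-estimation stage: eight per-attribute encoders, five fed with an object's segmented image (and shared across all objects), two with the background segment, and one with the full image for the camera pose. The dispatch below is a sketch; the dictionary keys and function name are assumptions, and `encoders` stands for the trained Neural Inversion Encoders introduced next.

```python
OBJ_ATTRS = ["shape", "app", "scale", "trans", "rot"]   # predicted from the object's segment
BG_ATTRS = ["bg_shape", "bg_app"]                       # predicted from the background segment
CAM_ATTR = "camera"                                     # predicted from the full image

def coarse_estimate(encoders, image, object_segments, background_segment):
    """Coarse code estimation: every object reuses the same five per-attribute encoders."""
    objects = [{a: encoders[a](seg) for a in OBJ_ATTRS} for seg in object_segments]
    background = {a: encoders[a](background_segment) for a in BG_ATTRS}
    camera = encoders[CAM_ATTR](image)
    return {"objects": objects, "background": background, "camera": camera}
```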
As shown in Figure 3, we input the object segmentation for the object attribute codes $(O^{shape}, O^{app}, O^s, O^t, O^r)$ , the background segmentation for the background attribute codes $(B^{shape}, B^{app})$ , and the original image for pose attribute code $(C)$ . Different objects share the same encoder for the same attribute code.", + "bbox": [ + 212, + 431, + 787, + 551 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We allocate an encoder called Neural Inversion Encoder with a similar structure to each code. Neural Inversion Encoder consists of three parts as Figure 5(b) shows. The first part employs a standard feature pyramid over a ResNet [12] backbone like in pSp [31] to extract the image features. The second part, in which we designed a structure opposite to GIRAFFE's Neural rendering Block based on its architecture as Figure 5(a) shows, downsamples the images layer by layer using a CNN and then uses skip connections [12] to combine the layers, yielding a one-dimensional feature. The third layer employs an MLP structure to acquire the corresponding dimension of different codes.", + "bbox": [ + 212, + 553, + 787, + 688 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Training multiple encoders simultaneously is difficult to converge due to the large number of parameters. Hence, we use the dataset generated by GIRAFFE to retain the true values of each code and train an encoder for one code at a time, to keep the other codes at their true values, greatly smoothing the training.", + "bbox": [ + 212, + 688, + 787, + 750 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "During encoder training, we use the Mean Squared Error (MSE) loss, perceptual loss (LPIPS) [41], and identity loss (ID) [11] between the reconstructed image and the original image, to be consistent with most 2D and 3D GAN inversion training methodologies. When training the affine codes (scale $O^s$ , translation $O^t$ , rotation $O^r$ ), we find that different combinations of values produce very similar images, e.g., moving an object forward and increasing its scale yield", + "bbox": [ + 212, + 750, + 787, + 840 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "H. Li et al.", + "bbox": [ + 271, + 114, + 346, + 126 + ], + "page_idx": 7 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1: Round-robin Optimization" + ], + "code_body": "Data: all codes $w\\in W$ predicted by encoders, fixed GIRAFFE generator $G$ input image $I$ 1 Initialize $lr\\_ w = 10^{-3},w\\in W$ . \n2 while any $lr\\_ w > 10^{-5}$ do \n3 foreach $w\\in W$ do \n4 Sample $\\delta w$ . \n5 Compute $\\delta \\mathcal{L}(w)$ using Eq.8; \n6 end \n7 Compute rank_list using Eq.9; \n8 foreach $w\\in$ rank_list and lr_w>10-5 do \n9 Optimization w with $\\mathcal{L}_{opt}$ in Eq. 10 of I and G(W;0); \n10 if the $\\mathcal{L}_{opt}$ ceases to decrease for five consecutive iterations then \n11 | lr_w=lr_w/2; \n12 end \n13 end \n14 end", + "bbox": [ + 217, + 166, + 754, + 393 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "similar results. 
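A simplified PyTorch sketch of the three-part Neural Inversion Encoder described above is given below: a ResNet backbone for feature extraction, stride-2 convolutional blocks with pooled skip connections that downsample in the opposite direction of GIRAFFE's neural rendering block, and an MLP head sized for one attribute code. It omits the feature-pyramid details and the parallel RGB branch described in Section 5, and all class and argument names are assumptions rather than the authors' implementation.

```python
import torch
import torch.nn as nn
from torchvision.models import resnet50

class DownBlock(nn.Module):
    """3x3 stride-2 convolution + LeakyReLU, combined with a pooled skip path."""
    def __init__(self, dim):
        super().__init__()
        self.conv = nn.Conv2d(dim, dim, kernel_size=3, stride=2, padding=1)
        self.act = nn.LeakyReLU(0.2)
        self.skip = nn.AvgPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        return self.act(self.conv(x)) + self.skip(x)

class NeuralInversionEncoder(nn.Module):
    def __init__(self, code_dim=256, feat_dim=256, n_down=3):
        super().__init__()
        backbone = resnet50(weights=None)
        self.backbone = nn.Sequential(*list(backbone.children())[:-2])  # -> (B, 2048, H/32, W/32)
        self.reduce = nn.Conv2d(2048, feat_dim, kernel_size=1)
        self.down = nn.Sequential(*[DownBlock(feat_dim) for _ in range(n_down)])
        self.head = nn.Sequential(
            nn.Flatten(),
            nn.LazyLinear(feat_dim), nn.LeakyReLU(0.2),
            nn.Linear(feat_dim, code_dim),  # e.g. 256 for O^shape/O^app, 3 for O^s/O^t, 1 for O^r/C
        )

    def forward(self, img):
        h = self.reduce(self.backbone(img))
        return self.head(self.down(h))

# One encoder instance per attribute code, e.g. a 256-d appearance code from a 256x256 crop.
enc = NeuralInversionEncoder(code_dim=256)
code = enc(torch.randn(1, 3, 256, 256))
```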
However, the encoder can only predict one value at a time, hence we add the MSE loss of the predicted $O^s$ , $O^t$ , $O^r$ values, and their true values, to compel the encoder to predict the true value.", + "bbox": [ + 212, + 428, + 782, + 472 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {e n c} = \\lambda_ {1} L _ {2} + \\lambda_ {2} L _ {l p i p s} + \\lambda_ {3} L _ {i d}, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 382, + 488, + 784, + 505 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "where $\\lambda_{i}, i = 1,2,3$ represent the ratio coefficient between various losses. When training $O^s$ , $O^t$ , $O^r$ code, the $L_2$ loss includes the MSE loss between the real values of $O^s$ , $O^t$ , $O^r$ and their predicted values.", + "bbox": [ + 212, + 508, + 782, + 555 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.4 Precise Optimization", + "text_level": 1, + "bbox": [ + 215, + 575, + 434, + 590 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Pre-trained segmentation models have some segmentation errors and all encoder-based GAN inversion networks [31,34,35] usually cannot accurately obtain codes, necessitating refinements. Next, we optimize the coarse codes. Through experiments, we have found that using a single optimizer to optimize all latent codes tends to converge to local minima. Hence, we employ multiple optimizers, each handling a single code. The optimization order is crucial due to the variance of the disparity between the predicted and actual values across different encoders, and the different impact of code changes on the image, e.g., changes to $B^{shape}$ and $B^{app}$ codes controlling background generation mostly would have a larger impact on overall pixel values. Prioritizing the optimization of codes with significant disparity and a high potential for changing pixel values tends to yield superior results in our experiments. Hence, we propose an automated round-robin optimization algorithm (Algorithm 1) to sequentially optimize each code based on the image reconstructed in each round.", + "bbox": [ + 212, + 598, + 785, + 809 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Algorithm 1 aims to add multiple minor disturbances to each code, and calculate the loss between the images reconstructed before and after the disturbance", + "bbox": [ + 212, + 809, + 785, + 839 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "3D-GOI", + "bbox": [ + 674, + 114, + 730, + 126 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 785, + 126 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "and the original image. A loss increase indicates that the current code value is relatively accurate, hence its optimization order can be postponed, and vice versa. For multiple codes that demand prioritized optimization, we compute their priorities using the partial derivatives of the loss variation and perturbation. 
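The sketch below mirrors Algorithm 1 and the perturb-and-rank idea just described (formalized in Equations 8-10 that follow). Several simplifications are assumed: the generator is wrapped as a differentiable callable G(W) returning an image, the perturbation is a small Gaussian nudge, the ranking uses only the loss change rather than the full ranking function of Equation 9, and a cap on the number of rounds is added so the loop always terminates.

```python
import torch

def delta_loss(G, W, name, dw, I, loss_fn):
    """delta_L(w) (cf. Eq. 8): loss change after nudging one code, computed by
    re-rendering rather than autodiff so the current value of w stays untouched."""
    with torch.no_grad():
        base = loss_fn(G(W), I)
        W2 = dict(W)
        W2[name] = W[name] + dw
        return (loss_fn(G(W2), I) - base).item()

def round_robin(G, W, I, loss_fn, lr0=1e-3, lr_min=1e-5, inner=20, eps=1e-2, max_rounds=50):
    lrs = {k: lr0 for k in W}
    for _ in range(max_rounds):
        if all(lr <= lr_min for lr in lrs.values()):
            break
        # Codes whose perturbation lowers the loss the most are optimized first.
        order = sorted(W, key=lambda k: delta_loss(G, W, k, eps * torch.randn_like(W[k]), I, loss_fn))
        for k in order:
            if lrs[k] <= lr_min:
                continue
            W[k].requires_grad_(True)
            opt = torch.optim.Adam([W[k]], lr=lrs[k])
            best, stall = float("inf"), 0
            for _ in range(inner):
                opt.zero_grad()
                loss = loss_fn(G(W), I)   # stands in for L_opt of Eq. 10
                loss.backward()
                opt.step()
                stall = 0 if loss.item() < best else stall + 1
                best = min(best, loss.item())
                if stall >= 5:            # loss plateaus for five steps -> halve this code's lr
                    lrs[k] /= 2
                    break
            W[k].requires_grad_(False)
            W[k].grad = None
    return W
```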
We do not use backpropagation automatic differentiation here to ensure the current code value remains unchanged.", + "bbox": [ + 212, + 146, + 787, + 238 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\delta \\mathcal {L} (w) = \\mathcal {L} (G (W - \\{w \\}, w + \\delta w, \\theta), I) - \\mathcal {L} (G (W, \\theta), I), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 250, + 785, + 268 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {r a n k} _ {-} \\operatorname {l i s t} = F _ {\\operatorname {r a n k}} (\\delta \\mathcal {L} (w), \\frac {\\delta \\mathcal {L} (w)}{\\delta w}), \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 375, + 290, + 785, + 321 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "where $w \\in W$ is one of the codes and $\\delta w$ represents the minor disturbance of $w$ . For the rotation angle $r$ , we have found that adding a depth loss can accelerate its optimization. Thus, the loss $\\mathcal{L}$ during optimization can be expressed as:", + "bbox": [ + 214, + 324, + 784, + 371 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {o p t}} = \\lambda_ {1} L _ {2} + \\lambda_ {2} L _ {\\text {l p i p s}} + \\lambda_ {3} L _ {\\text {i d}} + \\lambda_ {4} L _ {\\text {d e e p}}. \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 349, + 383, + 785, + 400 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This optimization method allows for more precise tuning of the codes for more accurate reconstruction and editing of the images.", + "bbox": [ + 214, + 411, + 784, + 441 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5 Implementation", + "text_level": 1, + "bbox": [ + 215, + 465, + 405, + 483 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Neural Inversion Encoder. The first part of our encoder uses ResNet50 to extract features. In the second part, we downsample the extracted features (512-dimensional) and the input RGB image (3-dimensional) together. The two features are added together through skip connections, as shown in Figure 5. In the downsampling module, we use a 2D convolution with a kernel of 3 and a stride of 1, and the LeakyReLU activation function, to obtain a 256-dimensional intermediate feature. For object shape/appearance attributes, the output dimension is 256, and we use four Fully Connected Layers $\\{4\\times FCL(256,256)\\}$ to get the codes. For background shape/appearance attributes, the output dimension is 128, we use $\\{FCL(256,128) + 3\\times FCL(128,128)\\}$ to get the codes. For object scale/translation attributes, the output dimension is 3, and we use the network $\\{FCL(2^i,2^{i - 1}) + FCL(8,3),i = 8,\\dots ,4\\}$ to get the codes. For camera pose and rotation attributes, the output dimension is 1, and we use a similar network $\\{FCL(2^i,2^{i - 1}) + FCL(8,1),i = 8,\\dots ,4\\}$ to get the codes.", + "bbox": [ + 214, + 500, + 787, + 712 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Training and Optimization are carried out on a single NVIDIA A100 SXM GPU with 40GB of memory, using the Adam optimizer. The initial learning rate is set to $10^{-4}$ and $10^{-3}$ , respectively. Encoder training employs a batch size of 50. Each encoder took about 12 hours to train, and optimizing a single image of a complex multi-object scene took about 1 minute. For rotation features, it is difficult for the encoder to make accurate predictions for some images. 
Therefore, we uniformly sampled 20 values in the range of $[0, 360^{\\circ}]$ for the", + "bbox": [ + 214, + 733, + 787, + 842 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "H. Li et al.", + "bbox": [ + 271, + 114, + 346, + 127 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "rotation parameters with large deviations. We selected the value that minimizes the loss in Equation 7 as the initial value for the optimization stage.", + "bbox": [ + 212, + 146, + 782, + 176 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "For LPIPS loss, we employ a pre-trained AlexNet [20]. For ID calculation, we employ a pre-trained Arcface [8] model in human face datasets and a pre-trained ResNet-50 [32] model in the car dataset. For depth loss, we use the pre-trained Dense Prediction Transformer model. We set $\\lambda_1 = 1$ , $\\lambda_2 = 0.8$ , and $\\lambda_3 = 0.2$ in Equation 7, as well as in Equation 10, in which $\\lambda_4 = 1$ .", + "bbox": [ + 212, + 176, + 784, + 252 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "6 Experiment", + "text_level": 1, + "bbox": [ + 214, + 275, + 366, + 292 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Datasets. To obtain the true values of the 3D information in GIRAFFE for stable training performance, we use the pre-trained model of GIRAFFE on CompCars [39] and Clevr [15] dataset to generate training datasets. For testing datasets, we also use GIRAFFE to generate images for multi-car datasets denoted as $G$ -CompCars (CompCars is a single car image dataset) and use the original Clevr dataset for multi-geometry dataset (Clevr is a dataset that can be simulated to generate images of multiple geometries). We follow the codes setup in GIRAFFE. For CompCars, we use all the codes from Equation 5. For Clevr, we fixed the rotation, scale, and camera pose codes of the objects. 
For experiments on facial data, we utilized the FFHQ [17] dataset for training and the CelebA-HQ [16] dataset for testing.", + "bbox": [ + 212, + 306, + 784, + 473 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/6f4059d17e2a50ecd260ff620931b98b5225119e028eda9ef6e7d56ee1724817.jpg", + "image_caption": [ + "(a) Input, Co-R, Pre-R" + ], + "image_footnote": [], + "bbox": [ + 217, + 500, + 352, + 536 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/99cedf4108539a0b8425f9c827c841ba85f659e75a0135cdbebade4b06e78bcf.jpg", + "image_caption": [ + "(b) Edit Shape" + ], + "image_footnote": [], + "bbox": [ + 361, + 500, + 495, + 534 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/2275654dea41191fe4c6f5b09c9caf68899b269a5ad0f36e65b38deaf7be23c9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 500, + 638, + 534 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/991a6fe46aa4683eb0fc5677e2cb36c4256517c60149ed10c0964379cb3e60d4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 651, + 500, + 784, + 534 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/1520efaca4395ba6c7de916da78ec3f9597ecc58c0f54a579f92b0e2fe7c4ea2.jpg", + "image_caption": [ + "(e) Edit Bg Appearance" + ], + "image_footnote": [], + "bbox": [ + 217, + 549, + 349, + 583 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/854b3f8d8dc6fb483a3dc03416093069dad5fba978b55fdfe9443411d6019faa.jpg", + "image_caption": [ + "(f) Edit Scale", + "Fig. 6: Single-object editing on G-CompCars dataset. Co-R: coarse reconstruction. Pre-R: precise reconstruction." + ], + "image_footnote": [], + "bbox": [ + 359, + 549, + 493, + 582 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/51d3b0a65b9e7a72938464344889d7e345b16f5ad7796e65afef93d5677635ff.jpg", + "image_caption": [ + "(c) Edit Appearance", + "(g) Edit Translation" + ], + "image_footnote": [], + "bbox": [ + 504, + 549, + 635, + 582 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/205f9aba677d97b4083e3d4808322dcf56d9c73bef73c32dd668c810580242f3.jpg", + "image_caption": [ + "(d) Edit Bg Shape", + "(h) Edit Rotation" + ], + "image_footnote": [], + "bbox": [ + 648, + 549, + 779, + 582 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/3cc160d66d58ca39b5f80d2bfb80544b37ef9d4662e2bb7e6a8edf439e79d762.jpg", + "image_caption": [ + "(a) Input, Co-R, Pre-R", + "Fig. 7: Single-object editing on Clevr dataset." + ], + "image_footnote": [], + "bbox": [ + 217, + 691, + 349, + 726 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/68531de72840eeea6c399798738f89dc129896dba3dad36f2ae4d8f928278637.jpg", + "image_caption": [ + "(b) Edit Appearance" + ], + "image_footnote": [], + "bbox": [ + 361, + 691, + 493, + 726 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/eabe5ff5c158e1947d8b1c338ba50b7a1cb724d2803459bd876c224c7fde9cbe.jpg", + "image_caption": [ + "(c) Edit Translation" + ], + "image_footnote": [], + "bbox": [ + 504, + 691, + 635, + 726 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/b996cbc5a8c079f6534fb10408582ad709d37d8611b2d01b7fd2aba80f8bdb99.jpg", + "image_caption": [ + "(d) Add Object" + ], + "image_footnote": [], + "bbox": [ + 648, + 691, + 779, + 726 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Baselines. 
In the comparative experiments for our Neural Inversion Encoder, we benchmarked encoder-based inversion methods such as e4e [34] and pSp [31], which use the 2D GAN StyleGAN2 [18] as the generator, and E3DGE [21] and", + "bbox": [ + 214, + 794, + 782, + 840 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "3D-GOI", + "bbox": [ + 674, + 114, + 730, + 126 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 114, + 784, + 126 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/60418f3e4c648b49e1839e33d12ec2215c8f6326c07842f1b820aa250aeeb2dc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 217, + 143, + 349, + 179 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/8f49759684059e939ed6d6e095d79b4d02d30b2e94163562e85e1596b75ffa22.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 361, + 143, + 495, + 176 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/7c83539bfa72b0b4fdb6826c942e43d6fe0154ee19229562f323f0583a8cf073.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 143, + 640, + 176 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/fd6d779db6c870b4d676f86dfc5e4ead06a0388acf5301039204102ff65f96bf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 651, + 143, + 784, + 176 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/dbf24119eba1f34bb7ef704e4d4da88ca7727f295b605a11be74ed20ec2084e5.jpg", + "image_caption": [ + "(a) Input, Co-R, Pre-R", + "(e) Edit Bg Appearance" + ], + "image_footnote": [], + "bbox": [ + 217, + 191, + 349, + 224 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/6645c84379197c8d8e3416110783a0b32f58bf6519325d6af307a62a3706559c.jpg", + "image_caption": [ + "(b) Edit Shape", + "(f) Edit Scale" + ], + "image_footnote": [], + "bbox": [ + 361, + 191, + 493, + 224 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/30abf75e354c3a950a29c3e762c8eecdb9c73444e124f66bd307e777d3260acc.jpg", + "image_caption": [ + "(c) Edit Appearance", + "(g) Edit Translation" + ], + "image_footnote": [], + "bbox": [ + 504, + 191, + 635, + 224 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/10537903b1e9d923080b4ea771ba87384e6ae7365ed3818ed3f2bb257ab97396.jpg", + "image_caption": [ + "(d) Edit Bg Shape", + "(h) Edit Rotation" + ], + "image_footnote": [], + "bbox": [ + 647, + 191, + 779, + 224 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/30e8fdff9c7e5516a6c5826bbb04fed4356d36de6a496ad5cb52a286212a90d7.jpg", + "image_caption": [ + "Fig. 8: Multi-object editing on $G$ -CompCars dataset.", + "(a) Input, Co-R, Pre-R", + "Fig. 9: Multi-object editing on Clevr dataset." 
+ ], + "image_footnote": [], + "bbox": [ + 217, + 277, + 349, + 313 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/5772291549f34b570d7b1c23b37d84d16e2f02bb13a33c5416c854eb724a3f42.jpg", + "image_caption": [ + "(b) Edit Appearance" + ], + "image_footnote": [], + "bbox": [ + 361, + 277, + 493, + 311 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/a2f421f252ce2c58fae785455f661cc17a7b338c61852deaf059ca50e22bc62f.jpg", + "image_caption": [ + "(c) Edit Translation" + ], + "image_footnote": [], + "bbox": [ + 504, + 277, + 637, + 311 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/8890e6aac8a10fca25e28769f159a166a9cbef09bea98748aa1ea62c4712be46.jpg", + "image_caption": [ + "(d) Add/Remove Objects" + ], + "image_footnote": [], + "bbox": [ + 647, + 277, + 781, + 313 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "TriplaneNet [5] that employ the 3D GAN EG3D [7] as the generator, on the generator of GIRAFFE. Additionally, we compared our encoder on StyleGAN2 with SOTA inversion methods HyperStyle [2] and HFGI [35] for StyleGAN2.", + "bbox": [ + 212, + 378, + 784, + 425 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Metrics. We use Mean Squared Error (MSE), perceptual similarity loss (LPIPS) [41], and identity similarity (ID) to measure the quality of image reconstruction.", + "bbox": [ + 217, + 426, + 782, + 455 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "6.1 3D GAN Omni-Inversion", + "text_level": 1, + "bbox": [ + 214, + 477, + 470, + 491 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Single-object Multifaceted Editing. In Figure 6 and Figure 7, (a) depict the original images, the coarsely reconstructed images produced by the Neural Inversion Encoder, and the precisely reconstructed images obtained via round-robin optimization. As Figure 7 shows, the simple scene structure of the Clevr dataset allows us to achieve remarkably accurate results using only the encoder (Co-Recon). However, for car images in Figure 6, predicting precise codes using the encoder only becomes challenging, necessitating the employment of the round-robin optimization algorithm to refine the code values for precise reconstruction (Pre-Recon). Figure 6 (b)-(h) and Figure 7 (b)-(d) show the editing results for different codes. As noted in Section 4.3, moving an object forward and increasing its scale yield similar results. Please refer to the Supplementary Material 3.1 for more results like camera pose and shape editing.", + "bbox": [ + 212, + 500, + 787, + 681 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Multi-object Multifaceted Editing. We notice that the prediction for some object parameters ( $O^{shape}$ , $O^{app}$ , $O^s$ , $O^t$ ) are quite accurate. However, the prediction for the background codes deviates significantly. We speculate this is due to the significant differences in segmentation image input to the background encoder between multi-object scenes and single-object scenes. Therefore, background reconstruction requires further optimization. Figure 8 and Figure 9 depict the multifaceted editing outcomes for two cars and multiple Clevr objects, respectively. 
The images show individual edits of two objects in the left and middle images and collective edits at the right images in Figure 8 (b-c) and (f-h).", + "bbox": [ + 212, + 703, + 787, + 840 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "H. Li et al.", + "bbox": [ + 271, + 114, + 346, + 126 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/504bf2bc8d429d65185a3c4f35d5eb0acfd376058e512e0f50c38b713f8d7545.jpg", + "image_caption": [ + "(a) Reconstruction results of different GAN inversion encoders using the generator of GI-RAFFE." + ], + "image_footnote": [], + "bbox": [ + 240, + 143, + 470, + 268 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/c70a34891e1e695367e4d50bc62492f070229c74e463a0cb8fc5b240263bee74.jpg", + "image_caption": [ + "(b) Reconstruction results of different GAN inversion encoders using the generator of StyleGAN2.", + "Fig. 10: Reconstruction quality of different GAN inversion encoders." + ], + "image_footnote": [], + "bbox": [ + 506, + 143, + 779, + 268 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "As shown in Figure 8, the predictive discrepancy between the car's background and rotation angle on the left is considerable, requiring adjustments through the round-robin optimization. As illustrated in Figure 1, 2D/3D GAN inversion methods can not inverse multi-object scenes. More images pertaining to multi-object editing can be found in Supplementary Material 3.2.", + "bbox": [ + 212, + 356, + 784, + 431 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "6.2 Comparison Experiment of Neural Inversion Encoder", + "text_level": 1, + "bbox": [ + 212, + 453, + 699, + 468 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "For fair comparison and to eliminate the impact of the generator on the quality of the inverted image generation, we trained the encoders from the baseline methods by connecting them to the GIRAFFE generator using our Neural Inversion Encoder training approach and compared them with our Neural Inversion Encoder. At the same time, we also connected our encoder to StyleGAN2 and compared it with inversion methods based on StyleGAN2, thereby demonstrating the efficiency of our encoder design. Table 1 and Figure 10 quantitatively and qualitatively displays the comparison results on both the GIRAFFE and StyleGAN2 generators respectively. The results show that our Neural Inversion Encoder consistently outperforms baseline methods.", + "bbox": [ + 212, + 477, + 784, + 627 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "6.3 Ablation Study", + "text_level": 1, + "bbox": [ + 214, + 650, + 387, + 664 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We conducted ablation experiments separately for the proposed Neural Inversion Encoder and the Round-robin Optimization algorithm. Table 2 displays the average ablation results of the Neural Inversion Encoder on various attribute codes, where NIB refers to Neural Inversion Block (the second part of the encoder) and MLP is the final part of the encoder. The results clearly show that our encoder structure is extremely effective and can predict code values more accurately. 
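For reference, the three reconstruction metrics used throughout Tables 1-3 can be computed roughly as follows. This is a sketch: the `lpips` call matches that package's public API, but `id_embedder` is only a placeholder for the pre-trained ArcFace (faces) or ResNet-50 (cars) feature extractor mentioned in Section 5, and the cosine-similarity form of the ID score is an assumption.

```python
import torch
import torch.nn.functional as F
import lpips  # pip install lpips

lpips_fn = lpips.LPIPS(net="alex")   # the paper uses a pre-trained AlexNet for LPIPS

def reconstruction_metrics(x, y, id_embedder):
    """x, y: image batches in [-1, 1] with shape (B, 3, H, W)."""
    mse = torch.mean((x - y) ** 2).item()
    lp = lpips_fn(x, y).mean().item()
    id_sim = F.cosine_similarity(id_embedder(x), id_embedder(y), dim=-1).mean().item()
    return {"MSE": mse, "LPIPS": lp, "ID": id_sim}
```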
Please find the complete results in the Supplementary Material.", + "bbox": [ + 212, + 672, + 784, + 777 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "For the Round-robin optimization algorithm, we compared it with three fixed optimization order algorithms on both single-object and multi-object scenarios. The three fixed sequences are as follows:", + "bbox": [ + 212, + 779, + 784, + 823 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\text {O r d e r 1}: B ^ {\\text {s h a p e}}, B ^ {\\text {a p p}}, \\left\\{O _ {i} ^ {r}, O _ {i} ^ {t}, O _ {i} ^ {s} \\right\\} _ {i = 1} ^ {N}, \\left\\{O _ {i} ^ {\\text {s h a p e}}, O _ {i} ^ {\\text {a p p}} \\right\\} _ {i = 1} ^ {N}, C\n$$\n", + "text_format": "latex", + "bbox": [ + 238, + 823, + 679, + 840 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "3D-GOI", + "bbox": [ + 674, + 114, + 730, + 126 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/5ef36d58de3181decad7c356f1b493d30a66f7157ab31ef1615b443dd0a0da8e.jpg", + "table_caption": [ + "Table 1: Reconstruction quality of different GAN inversion encoders using the generator of GIRAFFE and StyleGAN2. $\\downarrow$ indicates the lower the better and $\\uparrow$ indicates the higher the better." + ], + "table_footnote": [], + "table_body": "
Method GIRAFFE as GeneratorStyleGAN2 as Generator
MSE ↓LPIPS ↓ID ↑MSE ↓LPIPS ↓ID ↑
e4e [34]0.0310.3060.8670.0520.2000.502
pSp [31]0.0310.3010.8770.0340.1720.561
HyperStyle [2]---0.0190.0910.766
HFGI [35]---0.0230.1240.705
TriplaneNet [5]0.0290.2960.870---
E3DGE [21]0.0310.2990.881---
3D-GOI (Ours)0.0240.2620.8970.0170.0980.769
", + "bbox": [ + 225, + 196, + 774, + 327 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/02531c2937d8b2595e5729007540c1d8415c9eda1bf4426c7e17f7d287e9b1c0.jpg", + "table_caption": [ + "Table 2: Ablation Study of the Neural Inversion Encoder." + ], + "table_footnote": [], + "table_body": "
MethodMSE ↓LPIPS ↓ID ↑
w/o NIB0.0230.2880.856
w/o MLP0.0150.1830.878
3D-GOI0.0100.1410.906
", + "bbox": [ + 263, + 393, + 486, + 454 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/69f844ca47ea53092cb1e4c1240f2daae35dd82d06fa0e2cfbed2ecdc456ccda.jpg", + "table_caption": [ + "Table 3: The quantitative metrics of ablation study of the Round-robin Optimization algorithm." + ], + "table_footnote": [], + "table_body": "
MethodMSE ↓LPIPS ↓ID ↑
Order10.0160.1840.923
Order20.0190.2290.913
Order30.0190.2210.911
3D-GOI0.0080.1280.938
", + "bbox": [ + 519, + 393, + 736, + 468 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\nO r d e r 2: \\left\\{O _ {i} ^ {r}, O _ {i} ^ {t}, O _ {i} ^ {s} \\right\\} _ {i = 1} ^ {N}, \\left\\{O _ {i} ^ {\\text {s h a p e}}, O _ {i} ^ {\\text {a p p}} \\right\\} _ {i = 1} ^ {N}, B ^ {\\text {s h a p e}}, B ^ {\\text {a p p}}, C\n$$\n", + "text_format": "latex", + "bbox": [ + 240, + 497, + 681, + 515 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\nO r d e r 3: C, \\{O _ {i} ^ {s h a p e}, O _ {i} ^ {a p p} \\} _ {i = 1} ^ {N}, \\{O _ {i} ^ {r}, O _ {i} ^ {t}, O _ {i} ^ {s} \\} _ {i = 1} ^ {N}, B ^ {s h a p e}, B ^ {a p p}\n$$\n", + "text_format": "latex", + "bbox": [ + 240, + 515, + 679, + 532 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "$\\{\\}_{i=1}^{N}$ indicates that the elements inside $\\{\\}$ are arranged in sequence from 1 to N. There are many possible sequence combinations, and here we chose the three with the best results for demonstration. As Table 3 shows, our method achieves the best results on all metrics, demonstrating the effectiveness of our Round-robin optimization algorithm. As mentioned in Section 4.4, optimizing features like the background first can enhance the optimization. Hence, Order1 performs much better than Order2 and Order3. Please see the Supplementary Material 3.5 for qualitative comparisons of these four methods on images.", + "bbox": [ + 212, + 532, + 787, + 654 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "7 Conclusion", + "text_level": 1, + "bbox": [ + 215, + 681, + 359, + 698 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This paper introduces a 3D GAN inversion method, 3D-GOI, that enables multifaceted editing of scenes containing multiple objects. By using a segmentation approach to separate objects and background, then carrying out a coarse estimation followed by a precise optimization, 3D-GOI can accurately obtain the codes of the image. These codes are then used for multifaceted editing. To the best of our knowledge, 3D-GOI is the first method to attempt multi-object & multifaceted editing. We anticipate that 3D-GOI holds immense potential for future applications in fields such as VR/AR, and the Metaverse.", + "bbox": [ + 212, + 718, + 787, + 840 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "H. Li et al.", + "bbox": [ + 271, + 114, + 346, + 127 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 217, + 143, + 401, + 162 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This work was supported by the National Key Research and Development Program of China (2022YFB3105405, 2021YFC3300502).", + "bbox": [ + 215, + 180, + 784, + 210 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 217, + 239, + 321, + 253 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Abdal, R., Qin, Y., Wonka, P.: Image2stylegan: How to embed images into the stylegan latent space? In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4432-4441 (2019)", + "2. Alaluf, Y., Tov, O., Mokady, R., Gal, R., Bermano, A.: Hyperstyle: Stylegan inversion with hypernetworks for real image editing. In: Proceedings of the IEEE/CVF conference on computer Vision and pattern recognition. pp. 
18511-18521 (2022)", + "3. Arad Hudson, D., Zitnick, L.: Compositional transformers for scene generation. Advances in Neural Information Processing Systems 34, 9506-9520 (2021)", + "4. Bau, D., Zhu, J.Y., Wulff, J., Peebles, W., Strobelt, H., Zhou, B., Torralba, A.: Seeing what a gan cannot generate. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4502-4511 (2019)", + "5. Bhattacharai, A.R., Nießner, M., Sevastopolsky, A.: Triplanenet: An encoder for eg3d inversion. arXiv preprint arXiv:2303.13497 (2023)", + "6. Brock, A., Donahue, J., Simonyan, K.: Large scale gan training for high fidelity natural image synthesis. arXiv preprint arXiv:1809.11096 (2018)", + "7. Chan, E.R., Lin, C.Z., Chan, M.A., Nagano, K., Pan, B., De Mello, S., Gallo, O., Guibas, L.J., Tremblay, J., Khamis, S., et al.: Efficient geometry-aware 3d generative adversarial networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 16123-16133 (2022)", + "8. Deng, J., Guo, J., Xue, N., Zafeiriou, S.: Arcface: Additive angular margin loss for deep face recognition. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 4690-4699 (2019)", + "9. Deng, Y., Wang, B., Shum, H.Y.: Learning detailed radiance manifolds for high-fidelity and 3d-consistent portrait synthesis from monocular image. arXiv preprint arXiv:2211.13901 (2022)", + "10. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial networks. Communications of the ACM 63(11), 139–144 (2020)", + "1. He, K., Fan, H., Wu, Y., Xie, S., Girshick, R.: Momentum contrast for unsupervised visual representation learning. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 9729-9738 (2020)", + "2. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)", + "3. Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. Advances in neural information processing systems 33, 6840-6851 (2020)", + "4. Huh, M., Zhang, R., Zhu, J.Y., Paris, S., Hertzmann, A.: Transforming and projecting images into class-conditional generative networks. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part II 16. pp. 17–34. Springer (2020)" + ], + "bbox": [ + 225, + 273, + 785, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "3D-GOI", + "bbox": [ + 674, + 114, + 730, + 126 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "15. Johnson, J., Hariharan, B., Van Der Maaten, L., Fei-Fei, L., Lawrence Zitnick, C., Girshick, R.: Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2901-2910 (2017)", + "16. Karras, T., Aila, T., Laine, S., Lehtinen, J.: Progressive growing of gans for improved quality, stability, and variation. arXiv preprint arXiv:1710.10196 (2017)", + "17. Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 4401-4410 (2019)", + "18. 
Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., Aila, T.: Analyzing and improving the image quality of stylegan. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8110-8119 (2020)", + "19. Ko, J., Cho, K., Choi, D., Ryoo, K., Kim, S.: 3d gan inversion with pose optimization. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 2967-2976 (2023)", + "20. Krizhevsky, A., Sutskever, I., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. Communications of the ACM 60(6), 84-90 (2017)", + "21. Lan, Y., Meng, X., Yang, S., Loy, C.C., Dai, B.: Self-supervised geometry-aware encoder for style-based 3d gan inversion. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 20940-20949 (2023)", + "22. Li, H., Shi, H., Zhang, W., Wu, W., Liao, Y., Wang, L., Lee, L.h., Zhou, P.: Dreamscene: 3d gaussian-based text-to-3d scene generation via formation pattern sampling. arXiv preprint arXiv:2404.03575 (2024)", + "23. Lin, Y., Bai, H., Li, S., Lu, H., Lin, X., Xiong, H., Wang, L.: Componerf: Text-guided multi-object compositional nerf with editable 3d scene layout. arXiv preprint arXiv:2303.13843 (2023)", + "24. Metzer, G., Richardson, E., Patashnik, O., Giryes, R., Cohen-Or, D.: Latentnerf for shape-guided generation of 3d shapes and textures. arXiv preprint arXiv:2211.07600 (2022)", + "25. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM 65(1), 99-106 (2021)", + "26. Nguyen-Phuoc, T., Li, C., Theis, L., Richardt, C., Yang, Y.L.: Hologan: Unsupervised learning of 3d representations from natural images. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 7588-7597 (2019)", + "27. Nguyen-Phuoc, T.H., Richardt, C., Mai, L., Yang, Y., Mitra, N.: Blockgan: Learning 3d object-aware scene representations from unlabelled images. Advances in neural information processing systems 33, 6767–6778 (2020)", + "28. Niemeyer, M., Geiger, A.: Giraffe: Representing scenes as compositional generative neural feature fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11453-11464 (2021)", + "29. Perarnau, G., Van De Weijer, J., Raducanu, B., Álvarez, J.M.: Invertible conditional gans for image editing. arXiv preprint arXiv:1611.06355 (2016)", + "30. Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988 (2022)", + "31. Richardson, E., Alaluf, Y., Patashnik, O., Nitzan, Y., Azar, Y., Shapiro, S., Cohen-Or, D.: Encoding in style: a stylegan encoder for image-to-image translation. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 2287-2296 (2021)" + ], + "bbox": [ + 217, + 147, + 784, + 839 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "H. Li et al.", + "bbox": [ + 271, + 114, + 346, + 126 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "32. Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, M., et al.: Imagenet large scale visual recognition challenge. International journal of computer vision 115, 211-252 (2015)", + "33. 
Schwarz, K., Liao, Y., Niemeyer, M., Geiger, A.: Graf: Generative radiance fields for 3d-aware image synthesis. Advances in Neural Information Processing Systems 33, 20154-20166 (2020)", + "34. Tov, O., Alaluf, Y., Nitzan, Y., Patashnik, O., Cohen-Or, D.: Designing an encoder for stylegan image manipulation. ACM Transactions on Graphics (TOG) 40(4), 1-14 (2021)", + "35. Wang, T., Zhang, Y., Fan, Y., Wang, J., Chen, Q.: High-fidelity gan inversion for image attribute editing. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11379-11388 (2022)", + "36. Wei, T., Chen, D., Zhou, W., Liao, J., Zhang, W., Yuan, L., Hua, G., Yu, N.: E2style: Improve the efficiency and effectiveness of stylegan inversion. IEEE Transactions on Image Processing 31, 3267-3280 (2022)", + "37. Xie, J., Ouyang, H., Piao, J., Lei, C., Chen, Q.: High-fidelity 3d gan inversion by pseudo-multi-view optimization. arXiv preprint arXiv:2211.15662 (2022)", + "38. Yang, H., Zhang, Z., Yan, S., Huang, H., Ma, C., Zheng, Y., Bajaj, C., Huang, Q.: Scene synthesis via uncertainty-driven attribute synchronization. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5630-5640 (2021)", + "39. Yang, J., Li, H.: Dense, accurate optical flow estimation with piecewise parametric model. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1019-1027 (2015)", + "40. Yin, F., Zhang, Y., Wang, X., Wang, T., Li, X., Gong, Y., Fan, Y., Cun, X., Shan, Y., Oztireli, C., et al.: 3d gan inversion with facial symmetry prior. arXiv preprint arXiv:2211.16927 (2022)", + "41. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 586-595 (2018)", + "42. Zhu, J., Shen, Y., Zhao, D., Zhou, B.: In-domain gan inversion for real image editing. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XVII 16. pp. 592-608. Springer (2020)", + "43. Zhu, J.Y., Krahenbihl, P., Shechtman, E., Efros, A.A.: Generative visual manipulation on the natural image manifold. In: Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V 14. pp. 597-613. Springer (2016)", + "44. Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: Proceedings of the IEEE international conference on computer vision. pp. 
2223-2232 (2017)" + ], + "bbox": [ + 212, + 146, + 787, + 700 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "3D-GOI", + "bbox": [ + 674, + 114, + 730, + 126 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 16 + } +] \ No newline at end of file diff --git a/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/c22d5df4-9f40-4c3a-8e7a-40f3a1d6dbe5_model.json b/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/c22d5df4-9f40-4c3a-8e7a-40f3a1d6dbe5_model.json new file mode 100644 index 0000000000000000000000000000000000000000..2c775c07a3a2f55ed6c8db18e91b45e4a516f4e6 --- /dev/null +++ b/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/c22d5df4-9f40-4c3a-8e7a-40f3a1d6dbe5_model.json @@ -0,0 +1,2951 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.283, + 0.141, + 0.723, + 0.187 + ], + "angle": 0, + "content": "3D-GOI: 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing" + }, + { + "type": "text", + "bbox": [ + 0.22, + 0.212, + 0.783, + 0.245 + ], + "angle": 0, + "content": "Haoran Li\\(^{1,2}\\), Long Ma\\(^{1,2}\\), Haolin Shi\\(^{1,2}\\), Yanbin Hao\\(^{1,2}\\), Yong Liao\\(^{1,2*}\\), Lechao Cheng\\(^{3}\\), and Peng Yuan Zhou\\(^{4*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.338, + 0.255, + 0.666, + 0.27 + ], + "angle": 0, + "content": "1 University of Science and Technology of China" + }, + { + "type": "text", + "bbox": [ + 0.265, + 0.269, + 0.737, + 0.31 + ], + "angle": 0, + "content": "2 CCCD Key Lab of Ministry of Culture and Tourism {1hr123, longm, mar}@mail.ustc.edu.cn, haoyanbin@hotmail.com, yliao@ustc.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.392, + 0.31, + 0.612, + 0.326 + ], + "angle": 0, + "content": "3 Hefei University of Technology" + }, + { + "type": "text", + "bbox": [ + 0.428, + 0.327, + 0.576, + 0.339 + ], + "angle": 0, + "content": "chenglc@hfut.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.434, + 0.339, + 0.57, + 0.353 + ], + "angle": 0, + "content": "Aarhus University" + }, + { + "type": "text", + "bbox": [ + 0.412, + 0.354, + 0.593, + 0.367 + ], + "angle": 0, + "content": "pengyuan.zhou@ece.au.dk" + }, + { + "type": "text", + "bbox": [ + 0.261, + 0.404, + 0.744, + 0.669 + ], + "angle": 0, + "content": "Abstract. The current GAN inversion methods typically can only edit the appearance and shape of a single object and background while overlooking spatial information. In this work, we propose a 3D editing framework, 3D-GOI to enable multifaceted editing of affine information (scale, translation, and rotation) on multiple objects. 3D-GOI realizes the complex editing function by inverting the abundance of attribute codes (object shape/ appearance/ scale/ rotation/ translation, background shape/ appearance, and camera pose) controlled by GIRAFFE, a renowned 3D GAN. Accurately inverting all the codes is challenging, 3D-GOI solves this challenge following three main steps. First, we segment the objects and the background in a multi-object image. Second, we use a custom Neural Inversion Encoder to obtain coarse codes of each object. Finally, we use a round-robin optimization algorithm to get precise codes to reconstruct the image. To the best of our knowledge, 3D-GOI is the first framework to enable multifaceted editing on multiple objects. 
Both qualitative and quantitative experiments demonstrate that 3D-GOI holds immense potential for flexible, multifaceted editing in complex multi-object scenes. Our project and code are released at https://3d-goi.github.io." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.694, + 0.376, + 0.71 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.726, + 0.788, + 0.817 + ], + "angle": 0, + "content": "The development of generative 3D models has attracted increasing attention to automatic 3D objects and scene generation and edition. Most existing works are limited to a single object, such as 3D face generation [7] and synthesis of facial viewpoints [40]. There are few methods for generating multi-object 3D scenes while editing such scenes remains unexplored. In this paper, we propose 3D-GOI to edit images containing multiple objects with complex spatial geometric" + }, + { + "type": "page_footnote", + "bbox": [ + 0.232, + 0.826, + 0.402, + 0.841 + ], + "angle": 0, + "content": "* Corresponding authors" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.347, + 0.128 + ], + "angle": 0, + "content": "H. Li et al." + }, + { + "type": "image", + "bbox": [ + 0.22, + 0.148, + 0.782, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.276, + 0.788, + 0.346 + ], + "angle": 0, + "content": "Fig. 1: The first row shows the editing results of traditional 2D/3D GAN inversion methods on multi-object images. The second row showcases 3D-GOI, which can perform multifaceted editing on complex images with multiple objects. 'bg' stands for background. The red crosses in the upper right figures indicate features that cannot be edited with current 2D/3D GAN inversion methods." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.381, + 0.785, + 0.427 + ], + "angle": 0, + "content": "relationships. 3D-GOI not only can change the appearance and shape of each object and the background, but also can edit the spatial position of each object and the camera pose of the image as shown by Figure 1." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.43, + 0.788, + 0.761 + ], + "angle": 0, + "content": "Existing 3D multi-object scene generation methods can be mainly classified into two categories: those [28] based on Generative Adversarial Networks (GANs) [10] and those [22] based on Diffusion models [13], besides a few based on VAE or Transformer [3,38]. GAN-based methods, primarily represented by GIRAFFE [28] and its derivatives, depict complex scene images as results of multiple foreground objects, controlled by shape and appearance, subjected to affine transformations (scaling, translation, and rotation), and rendered together with a background, which is also controlled by shape and appearance, from a specific camera viewpoint. Diffusion-based methods [23] perceive scene images as results of multiple latent NeRF [24], which can be represented as 3D models, undergoing affine transformations, optimized with SDS [30], rendered from a specific camera viewpoint. Both categories represent scenes as combinations of multiple codes. To realize editing based on these generative methods, it's imperative to invert the complex multi-object scene images to retrieve their representative codes. After modifying these codes, regeneration can achieve diversified editing of complex images. 
Most inversion methods study the inversion of a single code based on its generation method. However, each multi-object image is the entangled result of multiple codes, thus inverting all codes from an image requires precise disentangling of the codes, which is extremely difficult and largely overlooked. Moreover, the prevailing inversion algorithms primarily employ optimization approaches. Attempting to optimize all codes simultaneously often leads to chaotic optimization directions and less accurate inversion outcomes." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Therefore, we propose 3D-GOI, a framework capable of inverting multiple codes to achieve a comprehensive inversion of multi-object images. Given current open-source 3D multi-object scene generation methods, we have chosen GI-RAFFE [28] as our generative model. In theory, our framework can be applied to other generative approaches as well. We address these challenges as follows." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.675, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3D-GOI" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.787, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.252 + ], + "angle": 0, + "content": "First, we categorize different codes based on object attributes, background attributes, and pose attributes. Through qualitative verification, we found that segmentation methods can roughly separate the codes pertaining to different objects. For example, the codes controlling an object's shape, appearance, scale, translation, and rotation predominantly relate to the object itself. So, during the inversion process, we only use the segmented image of this object to reduce the impact of the background and other objects on its codes." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.253, + 0.784, + 0.297 + ], + "angle": 0, + "content": "Second, we get the attributes' codes from the segmented image. Inspired by the Neural Rendering Block in GIRAFFE, we design a custom Neural Inversion Encoder network to coarsely disentangle and estimate the code values." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.298, + 0.784, + 0.388 + ], + "angle": 0, + "content": "Finally, we obtain precise values for each code through optimization. We observed that optimizing all codes simultaneously tends to get stuck in local minima. Therefore, we propose a round-robin optimization algorithm that employs a ranking function to determine the optimization order for different codes. The algorithm enables a stable and efficient optimization process for accurate image reconstruction. Our contributions can be summarized as follows." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.395, + 0.784, + 0.424 + ], + "angle": 0, + "content": "- To our best knowledge, 3D-GOI is the first multi-code inversion framework in generative models, achieving multifaceted editing of multi-object images." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.424, + 0.784, + 0.483 + ], + "angle": 0, + "content": "- We introduce a three-stage inversion process: 1) separate the attribute codes of different objects via segmentation; 2) obtain coarse codes using a custom Neural Inversion Encoder; 3) optimize the reconstruction using a round-robin optimization strategy." 
+ }, + { + "type": "text", + "bbox": [ + 0.226, + 0.483, + 0.741, + 0.497 + ], + "angle": 0, + "content": "- Our method outperforms existing methods on both 3D and 2D tasks." + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.395, + 0.784, + 0.497 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.517, + 0.387, + 0.532 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.553, + 0.785, + 0.84 + ], + "angle": 0, + "content": "2D/3D GANs. 2D GAN maps a distribution from the latent space to the image space using a generator and a discriminator and has been widely explored. For example, BigGAN [6] increases the batch size and uses a simple truncation trick to finely control the trade-off between sample fidelity and variety. CycleGAN [44] feeds an input image into the generator and loops the output back to the generator. It achieves style transfer by minimizing the consistency loss between the input and its result. StyleGAN [17] maps a latent code into multiple style codes, allowing for detailed style control of images. 3D GANs usually combine 2D GANs with some 3D representation, such as NeRF [25], and have demonstrated excellent abilities to generate complex scenes with multi-view consistency. Broadly, 3D GANs can be classified into explicit and implicit models. Explicit models like HoloGAN [26] enable explicit control over the object pose through rigid body transformations of the learned 3D features. BlockGAN [27] generates foreground and background 3D features separately, combining them into a complete 3D scene representation. On the other hand, implicit models generally perform better. Many of these models take inspiration from NeRF [25], representing images as neural radiance fields and using volume rendering to generate photorealistic images in a continuous view. EG3D [7] introduces an explicit-implicit hybrid network architecture that produces high-quality 3D geometries." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.347, + 0.127 + ], + "angle": 0, + "content": "H. Li et al." + }, + { + "type": "image", + "bbox": [ + 0.227, + 0.145, + 0.358, + 0.204 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.42, + 0.145, + 0.557, + 0.204 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.617, + 0.144, + 0.754, + 0.204 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.214, + 0.787, + 0.245 + ], + "angle": 0, + "content": "Fig. 2: Different GANs and GAN Inversion methods utilize codes differently. \\(\\omega\\) represents the latent code and \\(c\\) represents the camera pose." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.273, + 0.788, + 0.41 + ], + "angle": 0, + "content": "GRAF [33] integrates shape and appearance coding within the generation process, which facilitates independent manipulation of the shape and appearance of the generated vehicle and furniture images. Moreover, the presence of 3D information provides additional control over the camera pose, contributing to the flexibility of the generated outputs. GIRAFFE [28] extends GRAF to multi-object scenes by considering an image as the composition of multiple objects in the foreground through affine transformation and the background rendered at a specific camera viewpoint. 
In this work, we select GIRAFFE as the 3D GAN model to be inverted." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.419, + 0.804, + 0.585 + ], + "angle": 0, + "content": "2D/3D GAN Inversion. GAN inversion obtains the latent code of an input image under a certain generator and modifies the latent code to perform image editing operations. Current 2D GAN inversion methods can be divided into optimization-based, encoder-based, and hybrid methods. Optimization-based methods [1, 14, 43] directly optimize the initial code, requiring very accurate initial values. Encoder-based methods [29, 31, 36] can map images directly to latent code but generally cannot achieve full reconstruction. Hybrid-based methods [4, 42] combine these two approaches: first employ an encoder to map the image to a suitable latent code, then perform optimization. Currently, most 2D GANs only have one latent code to generate an image \\( ^5 \\). Therefore, the 2D GAN inversion task can be represented as:" + }, + { + "type": "equation", + "bbox": [ + 0.4, + 0.597, + 0.787, + 0.62 + ], + "angle": 0, + "content": "\\[\n\\omega^ {*} = \\arg \\min _ {\\omega} \\mathcal {L} (G (\\omega , \\theta), I), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.63, + 0.788, + 0.675 + ], + "angle": 0, + "content": "where \\(\\omega\\) is the latent component, \\(G\\) denotes the generator, \\(\\theta\\) denotes the parameters of the generator, \\(I\\) is the input image, and \\(\\mathcal{L}\\) is the loss function measuring the difference between the generated and input image." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.675, + 0.788, + 0.765 + ], + "angle": 0, + "content": "Typically, 3D GANs have an additional camera pose parameter compared to 2D GANs, making it more challenging to obtain latent codes during inversion. Current methods like SPI [40] use a symmetric prior for faces to generate images with different perspectives, while [19] employs a pre-trained estimator to achieve better initialization and utilizes pixel-level depth calculated from the NeRF parameters for improved image reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.766, + 0.787, + 0.797 + ], + "angle": 0, + "content": "Currently, there are only limited works on 3D GAN inversion [9,21,37] which primarily focus on creating novel perspectives of human faces using specialized" + }, + { + "type": "page_footnote", + "bbox": [ + 0.218, + 0.806, + 0.789, + 0.84 + ], + "angle": 0, + "content": "5 Although StyleGAN can be controlled by multiple style codes, these codes are all generated from a single initial latent code, indicating their interrelations. Hence only one encoder is needed to predict all the codes during inversion." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.675, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3D-GOI" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.178 + ], + "angle": 0, + "content": "face datasets considering generally only two codes: camera pose code \\( \\pmb{c} \\) and the latent code \\( \\pmb{\\omega} \\). 
Hence its inversion task can be represented as:" + }, + { + "type": "equation", + "bbox": [ + 0.383, + 0.186, + 0.786, + 0.207 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\omega} ^ {*}, \\boldsymbol {c} ^ {*} = \\arg \\min _ {\\boldsymbol {\\omega}, \\boldsymbol {c}} \\mathcal {L} (G (\\boldsymbol {\\omega}, \\boldsymbol {c}, \\theta), I). \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.213, + 0.784, + 0.259 + ], + "angle": 0, + "content": "A major advancement of 3D-GOI is the capability to invert more independent codes compared with other inversion methods, as Figure 2 shows, in order to perform multifaceted edits on multi-object images." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.278, + 0.369, + 0.297 + ], + "angle": 0, + "content": "3 Preliminary" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.306, + 0.784, + 0.366 + ], + "angle": 0, + "content": "GIRAFFE [28] represents individual objects as a combination of feature field and volume density. Through scene compositions, the feature fields of multiple objects and the background are combined. Finally, the combined feature field is rendered into an image using volume rendering and neural rendering." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.367, + 0.785, + 0.457 + ], + "angle": 0, + "content": "For a coordinate \\( \\mathbf{x} \\) and a viewing direction \\( \\mathbf{d} \\) in scene space, the affine transformation \\( T(s,t,r) \\) (scale, translation, rotation) is used to transform them back into the object space of each individual object. Following the implicit shape representations used in NeRF, a multi-layer perceptron (MLP) \\( h_{\\theta} \\) is used to map the transformed \\( \\mathbf{x} \\) and \\( \\mathbf{d} \\), along with the shape-controlling code \\( z_{s} \\) and appearance-controlling code \\( z_{a} \\), to the feature field \\( \\mathbf{f} \\) and volume density \\( \\sigma \\):" + }, + { + "type": "equation", + "bbox": [ + 0.345, + 0.466, + 0.786, + 0.485 + ], + "angle": 0, + "content": "\\[\n\\left(T(s, t, r; \\boldsymbol{x}), T(s, t, r; \\boldsymbol{d}), \\boldsymbol{z}_{\\boldsymbol{s}}, \\boldsymbol{z}_{\\boldsymbol{a}}\\right) \\xrightarrow{h_{\\theta}} (\\sigma, \\boldsymbol{f}). \\tag{3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.491, + 0.784, + 0.553 + ], + "angle": 0, + "content": "Then, GIRAFFE defines a Scene Composite Operator: at a given \\( \\pmb{x} \\) and \\( \\pmb{d} \\), the overall density is the sum of the individual densities (including the background). The overall feature field is represented as the density-weighted average of the feature field of each object:" + }, + { + "type": "equation", + "bbox": [ + 0.34, + 0.559, + 0.786, + 0.599 + ], + "angle": 0, + "content": "\\[\nC(\\boldsymbol{x}, \\boldsymbol{d}) = \\left(\\sigma, \\frac{1}{\\sigma} \\sum_{i = 1}^{N} \\sigma_{i} \\boldsymbol{f}_{\\boldsymbol{i}}\\right), \\quad \\text{where} \\quad \\sigma = \\sum_{i = 1}^{N} \\sigma_{i}, \\tag{4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.606, + 0.592, + 0.621 + ], + "angle": 0, + "content": "where \\( \\mathrm{N} \\) denotes the background plus (N-1) objects." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.621, + 0.785, + 0.726 + ], + "angle": 0, + "content": "The rendering phase is divided into two stages.
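To make the Scene Composite Operator concrete, the following is a minimal NumPy sketch in the spirit of Eqs. (3) and (4); the array shapes, the toy data, and the function name compose_scene are illustrative assumptions, not GIRAFFE's released implementation.

```python
import numpy as np

def compose_scene(sigmas, features, eps=1e-8):
    """Density-weighted composition as in Eq. (4).
    sigmas   -- list of N arrays (n_points,): per-entity volume densities
                (background plus N-1 objects)
    features -- list of N arrays (n_points, feat_dim): per-entity feature
                fields, i.e. the outputs of h_theta in Eq. (3)
    Returns the total density and the density-weighted average feature."""
    sigma = np.sum(np.stack(sigmas), axis=0)           # densities simply add up
    weighted = np.sum(
        np.stack([s[:, None] * f for s, f in zip(sigmas, features)]), axis=0
    )
    feat = weighted / np.maximum(sigma[:, None], eps)   # weight each entity by its density
    return sigma, feat

# toy usage: two objects plus a background evaluated at 1024 sample points
rng = np.random.default_rng(0)
sigmas = [rng.random(1024) for _ in range(3)]
features = [rng.random((1024, 128)) for _ in range(3)]
sigma, feat = compose_scene(sigmas, features)
```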
Similar to volume rendering in NeRF, given a pixel point, the rendering formula is used to calculate the feature field of this pixel point from the feature fields and the volume density of all sample points in a camera ray direction. After calculating all pixel points, a feature map is obtained. Neural rendering (Upsampling) is then applied to get the rendered image. Please refer to the Supplementary Material 1 for the detailed preliminary and formulas." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.746, + 0.332, + 0.761 + ], + "angle": 0, + "content": "4 3D-GOI" + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.774, + 0.421, + 0.788 + ], + "angle": 0, + "content": "4.1 Problem Definition" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.784, + 0.841 + ], + "angle": 0, + "content": "The problem we target is similar to the general definition of GAN inversion, with the difference being that we need to invert many more codes than existing methods (1 or 2) shown in Figure 2. The parameter \\( W \\) in GIRAFFE, which controls" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.347, + 0.127 + ], + "angle": 0, + "content": "H. Li et al." + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.143, + 0.761, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.312, + 0.788, + 0.41 + ], + "angle": 0, + "content": "Fig. 3: The overall framework of 3D-GOI. As shown in the upper half, the encoders are trained on single-object scenes, each time using \\( L_{enc} \\) to predict one \\( w, w \\in W \\), while other codes use real values. The lower half depicts the inversion process for the multi-object scene. We first decompose objects and background from the scene, then use the trained encoder to extract coarse codes, and finally use the round-robin optimization algorithm to obtain precise codes. The green blocks indicate required training and the yellow blocks indicate fixed parameters." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.44, + 0.785, + 0.471 + ], + "angle": 0, + "content": "the generation, can be divided into object attributes, background attributes, and pose attributes, denoted by \\( O \\), \\( B \\), and \\( C \\). Then, \\( W \\) can be expressed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.273, + 0.482, + 0.786, + 0.502 + ], + "angle": 0, + "content": "\\[\nW = \\{O _ {i} ^ {s h a p e}, O _ {i} ^ {a p p}, O _ {i} ^ {s}, O _ {i} ^ {t}, O _ {i} ^ {r}, B ^ {s h a p e}, B ^ {a p p}, C \\}, \\quad i = 1, \\dots , n, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.514, + 0.788, + 0.592 + ], + "angle": 0, + "content": "where \\( O_{i}^{shape} \\) is the object shape latent code, \\( O_{i}^{app} \\) is the object appearance latent code, \\( O_{i}^{s} \\) is the object scale code, \\( O_{i}^{t} \\) is the object translation code, \\( O_{i}^{r} \\) is the object rotation code, \\( B^{shape} \\) is the background shape latent code, \\( B^{app} \\) is the background appearance latent code and \\( C \\) is the camera pose matrix. \\( n \\) denotes the \\( n \\) objects. The reconstruction part can be expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.396, + 0.604, + 0.787, + 0.626 + ], + "angle": 0, + "content": "\\[\nW ^ {*} = \\arg \\min _ {W} \\mathcal {L} (G (W, \\theta), I). 
\\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.638, + 0.788, + 0.684 + ], + "angle": 0, + "content": "According to Equation 5, we need to invert a total of \\((5n + 3)\\) codes. Then, we are able to replace or interpolate any inverted code(s) to achieve multifaceted editing of multiple objects." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.708, + 0.438, + 0.723 + ], + "angle": 0, + "content": "4.2 Scene Decomposition" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.734, + 0.788, + 0.842 + ], + "angle": 0, + "content": "As mentioned, the GIRAFFE generator differs from typical GAN generators in that a large number of codes are involved and not a single code controls all the generated parts. Therefore, it is challenging to transform all codes using just one encoder or optimizer as in typical GAN Inversion methods. While a human can easily distinguish each object and some of its features (appearance, shape), a machine algorithm requires a large number of high-precision annotated samples to understand what code is expressed at what position in the image." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.675, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3D-GOI" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "image", + "bbox": [ + 0.247, + 0.145, + 0.33, + 0.211 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.26, + 0.212, + 0.316, + 0.223 + ], + "angle": 0, + "content": "(a) Input" + }, + { + "type": "image", + "bbox": [ + 0.388, + 0.144, + 0.474, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.401, + 0.212, + 0.461, + 0.223 + ], + "angle": 0, + "content": "(b) Car A" + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.144, + 0.615, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.545, + 0.212, + 0.604, + 0.223 + ], + "angle": 0, + "content": "(c) Car B" + }, + { + "type": "image", + "bbox": [ + 0.675, + 0.145, + 0.758, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.671, + 0.212, + 0.764, + 0.223 + ], + "angle": 0, + "content": "(d) Background" + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.234, + 0.787, + 0.318 + ], + "angle": 0, + "content": "Fig. 4: Scene decomposition. (a) The input image. (b) The feature weight map of car A, where the redder regions indicate a higher opacity and the bluer regions lower opacity. (c) The feature weight map of car B. (d) The feature weight map of the background. By integrating these maps, it becomes apparent that the region corresponding to car A predominantly consists of the feature representation of cars A and B. The background's visible area solely contains the background's feature representation." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.384, + 0.788, + 0.552 + ], + "angle": 0, + "content": "A straightforward idea is that the attribute codes of an object will map to the corresponding position of the object in the image. For example, translation \\((O^t)\\) and rotation \\((O^r)\\) codes control the relative position of an object in the scene, scaling \\((O^s)\\) and shape \\((O^{shape})\\) codes determine the contour and shape of the object, and appearance \\((O^{app})\\) codes control the appearance representation at the position of the object. 
The image obtained from segmentation precisely encompasses these three types of information, allowing us to invert it and obtain the five attribute codes for the corresponding object. Similarly, for codes \\((B^{shape}, B^{app})\\) that generate the background, we can invert them using the segmented image of the background. Note that obtaining camera pose code \\((C)\\) requires information from the entire rendered image." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.569, + 0.789, + 0.841 + ], + "angle": 0, + "content": "We can qualitatively validate this idea. In Equation 3, we can see that an object's five attribute codes are mapped to the object's feature field and volume density through \\( h_{\\theta} \\). As inferred from Equation 4, the scene's feature field is synthesized by weighting the feature fields of each object by density. Therefore, an object appears at its position because its feature field has a high-density weight at the corresponding location. Figure 4 displays the density of different objects at different positions during GIRAFFE's feature field composition process. The redder the higher the density, while the bluer the lower the density. As discussed, car A exhibits a high-density value within its area and near-zero density elsewhere - a similar pattern is seen with car B. The background, however, presents a non-uniform density distribution across the scene. We can consider that both car A and B and the background mainly manifest their feature fields within their visible areas. Hence, we apply a straightforward segmentation method to separate each object's feature field and get the codes. Segmenting each object also allows our encoder to pay more attention to each input object or background. As such, we can train the encoder on single-object scenes and then generalize it to multi-object scenes instead of directly training in multi-object scenes that involve more codes, to reduce computation cost." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.347, + 0.127 + ], + "angle": 0, + "content": "H. Li et al." + }, + { + "type": "image", + "bbox": [ + 0.242, + 0.178, + 0.452, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.266, + 0.273, + 0.429, + 0.284 + ], + "angle": 0, + "content": "(a) Neural Rendering Block" + }, + { + "type": "image", + "bbox": [ + 0.547, + 0.144, + 0.756, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.567, + 0.272, + 0.739, + 0.284 + ], + "angle": 0, + "content": "(b) Neural Inversion Encoder" + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.295, + 0.789, + 0.38 + ], + "angle": 0, + "content": "Fig. 5: Neural Inversion Encoder. (a) The Neural Rendering Block in GIRAFFE [28], an upsampling process to generate image \\(\\hat{I}\\). (b) The Neural Inversion Encoder opposes (a), which is a downsampling process. \\(I\\) is the input image, \\(H, W\\) are image height and width. \\(I_v\\) is the heatmap of the image, \\(H_v, W_v\\) and \\(M_f\\) are the dimensions of \\(I_v\\), \\(w\\) is the code to be predicted, and \\(w_f\\) is the dimension of \\(w\\). Up/Down means upsampling/downsampling." 
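As an illustration of Fig. 5(b), a minimal PyTorch sketch of one Neural Inversion Encoder head might look as follows; the backbone choice, layer widths, stride-2 downsampling, pooling, and activation settings are assumptions made for readability rather than the exact released architecture.

```python
import torch
import torch.nn as nn
from torchvision.models import resnet50

class NeuralInversionEncoder(nn.Module):
    """Sketch of Fig. 5(b): ResNet features -> layer-by-layer downsampling with
    skip connections -> MLP head that regresses a single attribute code w."""

    def __init__(self, code_dim=256, n_stages=4):
        super().__init__()
        backbone = resnet50(weights=None)
        self.features = nn.Sequential(*list(backbone.children())[:-2])  # (B, 2048, h, w)
        self.reduce = nn.Conv2d(2048, 256, kernel_size=1)
        self.down = nn.ModuleList([
            nn.Sequential(nn.Conv2d(256, 256, 3, stride=2, padding=1),
                          nn.LeakyReLU(0.2))
            for _ in range(n_stages)
        ])
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.mlp = nn.Sequential(nn.Linear(256, 256), nn.LeakyReLU(0.2),
                                 nn.Linear(256, code_dim))

    def forward(self, image):
        h = self.reduce(self.features(image))
        skip = self.pool(h)                  # coarsest summary of the features
        for block in self.down:
            h = block(h)
            skip = skip + self.pool(h)       # skip connections combine the levels
        return self.mlp(skip.flatten(1))     # predicted code w
```

In 3D-GOI one such encoder is assigned to each attribute code, so code_dim would be set to the dimensionality of the code that particular head predicts.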
+ }, + { + "type": "title", + "bbox": [ + 0.216, + 0.409, + 0.415, + 0.423 + ], + "angle": 0, + "content": "4.3 Coarse Estimation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.432, + 0.788, + 0.553 + ], + "angle": 0, + "content": "The previous segmentation step roughly disentangles the codes. Unlike typical encoder-based methods, it's difficult to predict all codes using just one encoder. Therefore, we assign an encoder to each code, allowing each encoder to focus solely on predicting one code. Hence, we need a total of eight encoders. As shown in Figure 3, we input the object segmentation for the object attribute codes \\((O^{shape}, O^{app}, O^s, O^t, O^r)\\), the background segmentation for the background attribute codes \\((B^{shape}, B^{app})\\), and the original image for pose attribute code \\((C)\\). Different objects share the same encoder for the same attribute code." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.554, + 0.788, + 0.689 + ], + "angle": 0, + "content": "We allocate an encoder called Neural Inversion Encoder with a similar structure to each code. Neural Inversion Encoder consists of three parts as Figure 5(b) shows. The first part employs a standard feature pyramid over a ResNet [12] backbone like in pSp [31] to extract the image features. The second part, in which we designed a structure opposite to GIRAFFE's Neural rendering Block based on its architecture as Figure 5(a) shows, downsamples the images layer by layer using a CNN and then uses skip connections [12] to combine the layers, yielding a one-dimensional feature. The third layer employs an MLP structure to acquire the corresponding dimension of different codes." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.689, + 0.788, + 0.75 + ], + "angle": 0, + "content": "Training multiple encoders simultaneously is difficult to converge due to the large number of parameters. Hence, we use the dataset generated by GIRAFFE to retain the true values of each code and train an encoder for one code at a time, to keep the other codes at their true values, greatly smoothing the training." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.788, + 0.841 + ], + "angle": 0, + "content": "During encoder training, we use the Mean Squared Error (MSE) loss, perceptual loss (LPIPS) [41], and identity loss (ID) [11] between the reconstructed image and the original image, to be consistent with most 2D and 3D GAN inversion training methodologies. When training the affine codes (scale \\(O^s\\), translation \\(O^t\\), rotation \\(O^r\\)), we find that different combinations of values produce very similar images, e.g., moving an object forward and increasing its scale yield" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.675, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3D-GOI" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "code_caption", + "bbox": [ + 0.228, + 0.15, + 0.533, + 0.165 + ], + "angle": 0, + "content": "Algorithm 1: Round-robin Optimization" + }, + { + "type": "algorithm", + "bbox": [ + 0.218, + 0.167, + 0.756, + 0.395 + ], + "angle": 0, + "content": "Data: all codes \\(w\\in W\\) predicted by encoders, fixed GIRAFFE generator \\(G\\) input image \\(I\\) 1 Initialize \\(lr\\_ w = 10^{-3},w\\in W\\) . \n2 while any \\(lr\\_ w > 10^{-5}\\) do \n3 foreach \\(w\\in W\\) do \n4 Sample \\(\\delta w\\) . 
\n5 Compute \(\delta \mathcal{L}(w)\) using Eq. 8; \n6 end \n7 Compute rank_list using Eq. 9; \n8 foreach \(w\in\) rank_list with \(lr\_w > 10^{-5}\) do \n9 Optimize \(w\) with \(\mathcal{L}_{opt}\) in Eq. 10 between \(I\) and \(G(W, \theta)\); \n10 if \(\mathcal{L}_{opt}\) ceases to decrease for five consecutive iterations then \n11 \(lr\_w = lr\_w / 2\); \n12 end \n13 end \n14 end" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.429, + 0.784, + 0.473 + ], + "angle": 0, + "content": "similar results. However, the encoder can only predict one value at a time, hence we add the MSE loss between the predicted \\( O^s \\), \\( O^t \\), \\( O^r \\) values and their true values, to compel the encoder to predict the true value." + }, + { + "type": "equation", + "bbox": [ + 0.383, + 0.489, + 0.785, + 0.506 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{enc} = \\lambda_{1} L_{2} + \\lambda_{2} L_{lpips} + \\lambda_{3} L_{id}, \\tag{7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.51, + 0.784, + 0.556 + ], + "angle": 0, + "content": "where \\(\\lambda_{i}, i = 1,2,3\\) are the weighting coefficients of the respective losses. When training the \\(O^s\\), \\(O^t\\), \\(O^r\\) codes, the \\(L_2\\) loss includes the MSE loss between the real values of \\(O^s\\), \\(O^t\\), \\(O^r\\) and their predicted values." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.576, + 0.436, + 0.592 + ], + "angle": 0, + "content": "4.4 Precise Optimization" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.599, + 0.787, + 0.81 + ], + "angle": 0, + "content": "Pre-trained segmentation models have some segmentation errors, and all encoder-based GAN inversion networks [31,34,35] usually cannot obtain codes accurately, necessitating refinement. Next, we optimize the coarse codes. Through experiments, we have found that using a single optimizer to optimize all latent codes tends to converge to local minima. Hence, we employ multiple optimizers, each handling a single code. The optimization order is crucial because the disparity between predicted and actual values varies across encoders, and different codes affect the image to different degrees, e.g., changes to the \\( B^{shape} \\) and \\( B^{app} \\) codes that control background generation usually have a larger impact on overall pixel values. Prioritizing the optimization of codes with significant disparity and a high potential for changing pixel values tends to yield superior results in our experiments. Hence, we propose an automated round-robin optimization algorithm (Algorithm 1) to sequentially optimize each code based on the image reconstructed in each round." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.786, + 0.84 + ], + "angle": 0, + "content": "Algorithm 1 aims to add multiple minor disturbances to each code, and calculate the loss between the images reconstructed before and after the disturbance" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.348, + 0.128 + ], + "angle": 0, + "content": "H. Li et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.239 + ], + "angle": 0, + "content": "and the original image. A loss increase indicates that the current code value is relatively accurate, hence its optimization order can be postponed, and vice versa.
For multiple codes that demand prioritized optimization, we compute their priorities using the partial derivatives of the loss variation and perturbation. We do not use backpropagation automatic differentiation here to ensure the current code value remains unchanged." + }, + { + "type": "equation", + "bbox": [ + 0.306, + 0.251, + 0.786, + 0.269 + ], + "angle": 0, + "content": "\\[\n\\delta \\mathcal {L} (w) = \\mathcal {L} (G (W - \\{w \\}, w + \\delta w, \\theta), I) - \\mathcal {L} (G (W, \\theta), I), \\tag {8}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.377, + 0.291, + 0.786, + 0.322 + ], + "angle": 0, + "content": "\\[\n\\operatorname {r a n k} _ {-} \\operatorname {l i s t} = F _ {\\operatorname {r a n k}} (\\delta \\mathcal {L} (w), \\frac {\\delta \\mathcal {L} (w)}{\\delta w}), \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.325, + 0.785, + 0.372 + ], + "angle": 0, + "content": "where \\( w \\in W \\) is one of the codes and \\( \\delta w \\) represents the minor disturbance of \\( w \\). For the rotation angle \\( r \\), we have found that adding a depth loss can accelerate its optimization. Thus, the loss \\( \\mathcal{L} \\) during optimization can be expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.351, + 0.385, + 0.786, + 0.401 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {o p t}} = \\lambda_ {1} L _ {2} + \\lambda_ {2} L _ {\\text {l p i p s}} + \\lambda_ {3} L _ {\\text {i d}} + \\lambda_ {4} L _ {\\text {d e e p}}. \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.412, + 0.785, + 0.443 + ], + "angle": 0, + "content": "This optimization method allows for more precise tuning of the codes for more accurate reconstruction and editing of the images." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.467, + 0.406, + 0.484 + ], + "angle": 0, + "content": "5 Implementation" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.5, + 0.788, + 0.713 + ], + "angle": 0, + "content": "Neural Inversion Encoder. The first part of our encoder uses ResNet50 to extract features. In the second part, we downsample the extracted features (512-dimensional) and the input RGB image (3-dimensional) together. The two features are added together through skip connections, as shown in Figure 5. In the downsampling module, we use a 2D convolution with a kernel of 3 and a stride of 1, and the LeakyReLU activation function, to obtain a 256-dimensional intermediate feature. For object shape/appearance attributes, the output dimension is 256, and we use four Fully Connected Layers \\(\\{4\\times FCL(256,256)\\}\\) to get the codes. For background shape/appearance attributes, the output dimension is 128, we use \\(\\{FCL(256,128) + 3\\times FCL(128,128)\\}\\) to get the codes. For object scale/translation attributes, the output dimension is 3, and we use the network \\(\\{FCL(2^i,2^{i - 1}) + FCL(8,3),i = 8,\\dots ,4\\}\\) to get the codes. For camera pose and rotation attributes, the output dimension is 1, and we use a similar network \\(\\{FCL(2^i,2^{i - 1}) + FCL(8,1),i = 8,\\dots ,4\\}\\) to get the codes." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.734, + 0.788, + 0.843 + ], + "angle": 0, + "content": "Training and Optimization are carried out on a single NVIDIA A100 SXM GPU with 40GB of memory, using the Adam optimizer. The initial learning rate is set to \\(10^{-4}\\) and \\(10^{-3}\\), respectively. Encoder training employs a batch size of 50. 
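Putting Algorithm 1 together with Eqs. (8)-(10), a simplified PyTorch-flavoured sketch of the round-robin loop could read as follows; the generator call G(codes), the loss_fn helper (standing in for the weighted L2 + LPIPS + ID (+ depth) loss), the perturbation size, and the step counts are illustrative assumptions, not the authors' exact implementation.

```python
import torch

def round_robin_optimize(codes, G, I, loss_fn, lr_init=1e-3, lr_min=1e-5,
                         eps=1e-2, steps_per_code=20, max_rounds=50):
    """Sketch of Algorithm 1. `codes` maps a name to a leaf tensor with
    requires_grad=True, `G` renders an image from the full code dict, and
    `loss_fn` plays the role of L_opt in Eq. (10)."""
    lrs = {name: lr_init for name in codes}
    opts = {name: torch.optim.Adam([codes[name]], lr=lr_init) for name in codes}

    for _ in range(max_rounds):
        if all(lr <= lr_min for lr in lrs.values()):
            break
        # Eq. (8): perturb each code and measure how much the loss changes
        with torch.no_grad():
            base = loss_fn(G(codes), I).item()
            scores = {}
            for name, w in codes.items():
                delta = eps * torch.randn_like(w)
                w.add_(delta)
                scores[name] = (loss_fn(G(codes), I).item() - base) / eps
                w.sub_(delta)                      # restore the current value
        # Eq. (9), simplified: codes whose perturbation lowers the loss most are
        # treated as least accurate and optimized first
        for name in sorted(codes, key=lambda n: scores[n]):
            if lrs[name] <= lr_min:
                continue
            best, stall = float("inf"), 0
            for _ in range(steps_per_code):
                opts[name].zero_grad()
                loss = loss_fn(G(codes), I)        # Eq. (10)
                loss.backward()
                opts[name].step()
                stall = 0 if loss.item() < best else stall + 1
                best = min(best, loss.item())
                if stall >= 5:                     # halve the lr when progress stalls
                    lrs[name] /= 2
                    for group in opts[name].param_groups:
                        group["lr"] = lrs[name]
                    break
    return codes
```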
Each encoder took about 12 hours to train, and optimizing a single image of a complex multi-object scene took about 1 minute. For rotation features, it is difficult for the encoder to make accurate predictions for some images. Therefore, we uniformly sampled 20 values in the range of \\([0, 360^{\\circ}]\\) for the" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.675, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3D-GOI" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.177 + ], + "angle": 0, + "content": "rotation parameters with large deviations. We selected the value that minimizes the loss in Equation 7 as the initial value for the optimization stage." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.178, + 0.785, + 0.253 + ], + "angle": 0, + "content": "For LPIPS loss, we employ a pre-trained AlexNet [20]. For ID calculation, we employ a pre-trained Arcface [8] model in human face datasets and a pre-trained ResNet-50 [32] model in the car dataset. For depth loss, we use the pre-trained Dense Prediction Transformer model. We set \\(\\lambda_1 = 1\\), \\(\\lambda_2 = 0.8\\), and \\(\\lambda_3 = 0.2\\) in Equation 7, as well as in Equation 10, in which \\(\\lambda_4 = 1\\)." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.276, + 0.367, + 0.294 + ], + "angle": 0, + "content": "6 Experiment" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.308, + 0.785, + 0.474 + ], + "angle": 0, + "content": "Datasets. To obtain the true values of the 3D information in GIRAFFE for stable training performance, we use the pre-trained model of GIRAFFE on CompCars [39] and Clevr [15] dataset to generate training datasets. For testing datasets, we also use GIRAFFE to generate images for multi-car datasets denoted as \\( G \\)-CompCars (CompCars is a single car image dataset) and use the original Clevr dataset for multi-geometry dataset (Clevr is a dataset that can be simulated to generate images of multiple geometries). We follow the codes setup in GIRAFFE. For CompCars, we use all the codes from Equation 5. For Clevr, we fixed the rotation, scale, and camera pose codes of the objects. For experiments on facial data, we utilized the FFHQ [17] dataset for training and the CelebA-HQ [16] dataset for testing." 
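The rotation initialisation described above can be sketched as a simple grid search; the 'rot' dictionary entry, the generator call G, and loss_fn are hypothetical stand-ins under the same assumptions as the previous sketch.

```python
import numpy as np

def init_rotation(codes, G, I, loss_fn, n_samples=20):
    """Sketch: try n_samples evenly spaced angles in [0, 360) degrees and keep
    the one with the smallest reconstruction loss (Eq. 7) as the optimization
    starting point for the rotation code."""
    best_angle, best_loss = None, float("inf")
    for angle in np.linspace(0.0, 360.0, n_samples, endpoint=False):
        candidate = dict(codes, rot=angle)      # hypothetical 'rot' entry
        loss = float(loss_fn(G(candidate), I))
        if loss < best_loss:
            best_angle, best_loss = angle, loss
    codes["rot"] = best_angle
    return codes
```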
+ }, + { + "type": "image", + "bbox": [ + 0.218, + 0.501, + 0.353, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.226, + 0.538, + 0.345, + 0.548 + ], + "angle": 0, + "content": "(a) Input, Co-R, Pre-R" + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.501, + 0.496, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.385, + 0.537, + 0.475, + 0.548 + ], + "angle": 0, + "content": "(b) Edit Shape" + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.501, + 0.64, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.515, + 0.537, + 0.635, + 0.548 + ], + "angle": 0, + "content": "(c) Edit Appearance" + }, + { + "type": "image", + "bbox": [ + 0.653, + 0.501, + 0.785, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.665, + 0.537, + 0.774, + 0.548 + ], + "angle": 0, + "content": "(d) Edit Bg Shape" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.55, + 0.351, + 0.584 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.223, + 0.586, + 0.347, + 0.597 + ], + "angle": 0, + "content": "(e) Edit Bg Appearance" + }, + { + "type": "image", + "bbox": [ + 0.361, + 0.55, + 0.495, + 0.583 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.387, + 0.585, + 0.469, + 0.596 + ], + "angle": 0, + "content": "(f) Edit Scale" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.55, + 0.636, + 0.583 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.585, + 0.631, + 0.596 + ], + "angle": 0, + "content": "(g) Edit Translation" + }, + { + "type": "image", + "bbox": [ + 0.65, + 0.55, + 0.781, + 0.583 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.663, + 0.585, + 0.767, + 0.596 + ], + "angle": 0, + "content": "(h) Edit Rotation" + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.608, + 0.784, + 0.636 + ], + "angle": 0, + "content": "Fig. 6: Single-object editing on G-CompCars dataset. Co-R: coarse reconstruction. Pre-R: precise reconstruction." + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.692, + 0.351, + 0.727 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.225, + 0.729, + 0.345, + 0.74 + ], + "angle": 0, + "content": "(a) Input, Co-R, Pre-R" + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.692, + 0.495, + 0.727 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.367, + 0.729, + 0.49, + 0.74 + ], + "angle": 0, + "content": "(b) Edit Appearance" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.692, + 0.637, + 0.727 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.729, + 0.631, + 0.739 + ], + "angle": 0, + "content": "(c) Edit Translation" + }, + { + "type": "image", + "bbox": [ + 0.649, + 0.692, + 0.78, + 0.727 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.669, + 0.728, + 0.761, + 0.74 + ], + "angle": 0, + "content": "(d) Add Object" + }, + { + "type": "image_caption", + "bbox": [ + 0.345, + 0.751, + 0.655, + 0.766 + ], + "angle": 0, + "content": "Fig. 7: Single-object editing on Clevr dataset." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.795, + 0.784, + 0.841 + ], + "angle": 0, + "content": "Baselines. 
In the comparative experiments for our Neural Inversion Encoder, we benchmarked encoder-based inversion methods such as e4e [34] and pSp [31], which use the 2D GAN StyleGAN2 [18] as the generator, and E3DGE [21] and" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.347, + 0.127 + ], + "angle": 0, + "content": "H. Li et al." + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.145, + 0.351, + 0.18 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.225, + 0.181, + 0.345, + 0.192 + ], + "angle": 0, + "content": "(a) Input, Co-R, Pre-R" + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.144, + 0.496, + 0.178 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.385, + 0.18, + 0.474, + 0.191 + ], + "angle": 0, + "content": "(b) Edit Shape" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.144, + 0.641, + 0.178 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.515, + 0.18, + 0.636, + 0.191 + ], + "angle": 0, + "content": "(c) Edit Appearance" + }, + { + "type": "image", + "bbox": [ + 0.653, + 0.144, + 0.785, + 0.178 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.665, + 0.18, + 0.774, + 0.191 + ], + "angle": 0, + "content": "(d) Edit Bg Shape" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.192, + 0.351, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.223, + 0.228, + 0.347, + 0.239 + ], + "angle": 0, + "content": "(e) Edit Bg Appearance" + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.192, + 0.495, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.387, + 0.227, + 0.469, + 0.238 + ], + "angle": 0, + "content": "(f) Edit Scale" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.192, + 0.637, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.228, + 0.631, + 0.238 + ], + "angle": 0, + "content": "(g) Edit Translation" + }, + { + "type": "image", + "bbox": [ + 0.648, + 0.192, + 0.781, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.663, + 0.228, + 0.767, + 0.238 + ], + "angle": 0, + "content": "(h) Edit Rotation" + }, + { + "type": "image_caption", + "bbox": [ + 0.321, + 0.25, + 0.679, + 0.264 + ], + "angle": 0, + "content": "Fig. 8: Multi-object editing on \\( G \\)-CompCars dataset." 
+ }, + { + "type": "image", + "bbox": [ + 0.218, + 0.278, + 0.351, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.225, + 0.316, + 0.345, + 0.326 + ], + "angle": 0, + "content": "(a) Input, Co-R, Pre-R" + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.278, + 0.495, + 0.313 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.367, + 0.314, + 0.489, + 0.325 + ], + "angle": 0, + "content": "(b) Edit Appearance" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.278, + 0.638, + 0.313 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.314, + 0.631, + 0.325 + ], + "angle": 0, + "content": "(c) Edit Translation" + }, + { + "type": "image", + "bbox": [ + 0.648, + 0.279, + 0.782, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.65, + 0.315, + 0.78, + 0.326 + ], + "angle": 0, + "content": "(d) Add/Remove Objects" + }, + { + "type": "image_caption", + "bbox": [ + 0.347, + 0.337, + 0.654, + 0.351 + ], + "angle": 0, + "content": "Fig. 9: Multi-object editing on Clevr dataset." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.38, + 0.785, + 0.426 + ], + "angle": 0, + "content": "TriplaneNet [5] that employ the 3D GAN EG3D [7] as the generator, on the generator of GIRAFFE. Additionally, we compared our encoder on StyleGAN2 with SOTA inversion methods HyperStyle [2] and HFGI [35] for StyleGAN2." + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.427, + 0.784, + 0.456 + ], + "angle": 0, + "content": "Metrics. We use Mean Squared Error (MSE), perceptual similarity loss (LPIPS) [41], and identity similarity (ID) to measure the quality of image reconstruction." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.478, + 0.471, + 0.492 + ], + "angle": 0, + "content": "6.1 3D GAN Omni-Inversion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.501, + 0.788, + 0.683 + ], + "angle": 0, + "content": "Single-object Multifaceted Editing. In Figure 6 and Figure 7, (a) depict the original images, the coarsely reconstructed images produced by the Neural Inversion Encoder, and the precisely reconstructed images obtained via round-robin optimization. As Figure 7 shows, the simple scene structure of the Clevr dataset allows us to achieve remarkably accurate results using only the encoder (Co-Recon). However, for car images in Figure 6, predicting precise codes using the encoder only becomes challenging, necessitating the employment of the round-robin optimization algorithm to refine the code values for precise reconstruction (Pre-Recon). Figure 6 (b)-(h) and Figure 7 (b)-(d) show the editing results for different codes. As noted in Section 4.3, moving an object forward and increasing its scale yield similar results. Please refer to the Supplementary Material 3.1 for more results like camera pose and shape editing." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.704, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Multi-object Multifaceted Editing. We notice that the prediction for some object parameters (\\(O^{shape}\\), \\(O^{app}\\), \\(O^s\\), \\(O^t\\)) are quite accurate. However, the prediction for the background codes deviates significantly. We speculate this is due to the significant differences in segmentation image input to the background encoder between multi-object scenes and single-object scenes. Therefore, background reconstruction requires further optimization. 
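For reference, the three reconstruction metrics listed above can be computed with a short helper along these lines; the call into the lpips package follows its public API, whereas embed is a hypothetical stand-in for the pretrained identity network (ArcFace for faces, ResNet-50 for cars) and the ranges assume images normalised to [-1, 1].

```python
import torch
import torch.nn.functional as F
import lpips

lpips_fn = lpips.LPIPS(net="alex")   # perceptual distance, as in [41]

def reconstruction_metrics(pred, target, embed):
    """pred/target: image tensors of shape (B, 3, H, W) in [-1, 1].
    `embed` returns one identity embedding per image."""
    mse = F.mse_loss(pred, target).item()
    lpips_val = lpips_fn(pred, target).mean().item()
    ident = F.cosine_similarity(embed(pred), embed(target)).mean().item()
    return {"MSE": mse, "LPIPS": lpips_val, "ID": ident}
```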
Figure 8 and Figure 9 depict the multifaceted editing outcomes for two cars and multiple Clevr objects, respectively. The images show individual edits of two objects in the left and middle images and collective edits at the right images in Figure 8 (b-c) and (f-h)." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.675, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3D-GOI" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "image", + "bbox": [ + 0.241, + 0.144, + 0.472, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.218, + 0.271, + 0.492, + 0.301 + ], + "angle": 0, + "content": "(a) Reconstruction results of different GAN inversion encoders using the generator of GI-RAFFE." + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.145, + 0.781, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.507, + 0.271, + 0.781, + 0.301 + ], + "angle": 0, + "content": "(b) Reconstruction results of different GAN inversion encoders using the generator of StyleGAN2." + }, + { + "type": "image_caption", + "bbox": [ + 0.27, + 0.314, + 0.729, + 0.328 + ], + "angle": 0, + "content": "Fig. 10: Reconstruction quality of different GAN inversion encoders." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.357, + 0.785, + 0.432 + ], + "angle": 0, + "content": "As shown in Figure 8, the predictive discrepancy between the car's background and rotation angle on the left is considerable, requiring adjustments through the round-robin optimization. As illustrated in Figure 1, 2D/3D GAN inversion methods can not inverse multi-object scenes. More images pertaining to multi-object editing can be found in Supplementary Material 3.2." + }, + { + "type": "title", + "bbox": [ + 0.214, + 0.454, + 0.7, + 0.469 + ], + "angle": 0, + "content": "6.2 Comparison Experiment of Neural Inversion Encoder" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.478, + 0.785, + 0.628 + ], + "angle": 0, + "content": "For fair comparison and to eliminate the impact of the generator on the quality of the inverted image generation, we trained the encoders from the baseline methods by connecting them to the GIRAFFE generator using our Neural Inversion Encoder training approach and compared them with our Neural Inversion Encoder. At the same time, we also connected our encoder to StyleGAN2 and compared it with inversion methods based on StyleGAN2, thereby demonstrating the efficiency of our encoder design. Table 1 and Figure 10 quantitatively and qualitatively displays the comparison results on both the GIRAFFE and StyleGAN2 generators respectively. The results show that our Neural Inversion Encoder consistently outperforms baseline methods." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.651, + 0.388, + 0.665 + ], + "angle": 0, + "content": "6.3 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.674, + 0.785, + 0.779 + ], + "angle": 0, + "content": "We conducted ablation experiments separately for the proposed Neural Inversion Encoder and the Round-robin Optimization algorithm. Table 2 displays the average ablation results of the Neural Inversion Encoder on various attribute codes, where NIB refers to Neural Inversion Block (the second part of the encoder) and MLP is the final part of the encoder. The results clearly show that our encoder structure is extremely effective and can predict code values more accurately. 
Please find the complete results in the Supplementary Material." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.78, + 0.785, + 0.824 + ], + "angle": 0, + "content": "For the Round-robin optimization algorithm, we compared it with three fixed optimization order algorithms on both single-object and multi-object scenarios. The three fixed sequences are as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.24, + 0.824, + 0.68, + 0.842 + ], + "angle": 0, + "content": "\\[\n\\text {O r d e r 1}: B ^ {\\text {s h a p e}}, B ^ {\\text {a p p}}, \\left\\{O _ {i} ^ {r}, O _ {i} ^ {t}, O _ {i} ^ {s} \\right\\} _ {i = 1} ^ {N}, \\left\\{O _ {i} ^ {\\text {s h a p e}}, O _ {i} ^ {\\text {a p p}} \\right\\} _ {i = 1} ^ {N}, C\n\\]" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.347, + 0.128 + ], + "angle": 0, + "content": "H. Li et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.788, + 0.187 + ], + "angle": 0, + "content": "Table 1: Reconstruction quality of different GAN inversion encoders using the generator of GIRAFFE and StyleGAN2. \\(\\downarrow\\) indicates the lower the better and \\(\\uparrow\\) indicates the higher the better." + }, + { + "type": "table", + "bbox": [ + 0.226, + 0.198, + 0.775, + 0.328 + ], + "angle": 0, + "content": "
MethodGIRAFFE for GeneratorStyleGAN2 for Generator
MSE ↓LPIPS ↓ID↑MSE ↓LPIPS ↓ID↑
e4e [34]0.0310.3060.8670.0520.2000.502
pSp [31]0.0310.3010.8770.0340.1720.561
HyperStyle [2]---0.0190.0910.766
HFGI [35]---0.0230.1240.705
TriplaneNet [5]0.0290.2960.870---
E3DGE [21]0.0310.2990.881---
3D-GOI(Ours)0.0240.2620.8970.0170.0980.769
" + }, + { + "type": "table_caption", + "bbox": [ + 0.25, + 0.355, + 0.499, + 0.383 + ], + "angle": 0, + "content": "Table 2: Ablation Study of the Neural Inversion Encoder." + }, + { + "type": "table", + "bbox": [ + 0.264, + 0.395, + 0.487, + 0.455 + ], + "angle": 0, + "content": "
MethodMSE ↓LPIPS↓ID ↑
w/o NIB0.0230.2880.856
w/o MLP0.0150.1830.878
3D-GOI0.0100.1410.906
" + }, + { + "type": "table_caption", + "bbox": [ + 0.51, + 0.342, + 0.754, + 0.385 + ], + "angle": 0, + "content": "Table 3: The quantitative metrics of ablation study of the Round-robin Optimization algorithm." + }, + { + "type": "table", + "bbox": [ + 0.521, + 0.395, + 0.738, + 0.469 + ], + "angle": 0, + "content": "
MethodMSE ↓LPIPS ↓ID↑
Order10.0160.1840.923
Order20.0190.2290.913
Order30.0190.2210.911
3D-GOI0.0080.1280.938
" + }, + { + "type": "equation", + "bbox": [ + 0.241, + 0.498, + 0.682, + 0.516 + ], + "angle": 0, + "content": "\\[\nO r d e r 2: \\left\\{O _ {i} ^ {r}, O _ {i} ^ {t}, O _ {i} ^ {s} \\right\\} _ {i = 1} ^ {N}, \\left\\{O _ {i} ^ {\\text {s h a p e}}, O _ {i} ^ {\\text {a p p}} \\right\\} _ {i = 1} ^ {N}, B ^ {\\text {s h a p e}}, B ^ {\\text {a p p}}, C\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.241, + 0.516, + 0.681, + 0.534 + ], + "angle": 0, + "content": "\\[\nO r d e r 3: C, \\{O _ {i} ^ {s h a p e}, O _ {i} ^ {a p p} \\} _ {i = 1} ^ {N}, \\{O _ {i} ^ {r}, O _ {i} ^ {t}, O _ {i} ^ {s} \\} _ {i = 1} ^ {N}, B ^ {s h a p e}, B ^ {a p p}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.534, + 0.788, + 0.655 + ], + "angle": 0, + "content": "\\(\\{\\}_{i=1}^{N}\\) indicates that the elements inside \\(\\{\\}\\) are arranged in sequence from 1 to N. There are many possible sequence combinations, and here we chose the three with the best results for demonstration. As Table 3 shows, our method achieves the best results on all metrics, demonstrating the effectiveness of our Round-robin optimization algorithm. As mentioned in Section 4.4, optimizing features like the background first can enhance the optimization. Hence, Order1 performs much better than Order2 and Order3. Please see the Supplementary Material 3.5 for qualitative comparisons of these four methods on images." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.683, + 0.36, + 0.699 + ], + "angle": 0, + "content": "7 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.719, + 0.788, + 0.841 + ], + "angle": 0, + "content": "This paper introduces a 3D GAN inversion method, 3D-GOI, that enables multifaceted editing of scenes containing multiple objects. By using a segmentation approach to separate objects and background, then carrying out a coarse estimation followed by a precise optimization, 3D-GOI can accurately obtain the codes of the image. These codes are then used for multifaceted editing. To the best of our knowledge, 3D-GOI is the first method to attempt multi-object & multifaceted editing. We anticipate that 3D-GOI holds immense potential for future applications in fields such as VR/AR, and the Metaverse." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.675, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3D-GOI" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.145, + 0.403, + 0.163 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.181, + 0.785, + 0.212 + ], + "angle": 0, + "content": "This work was supported by the National Key Research and Development Program of China (2022YFB3105405, 2021YFC3300502)." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.24, + 0.323, + 0.255 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.274, + 0.787, + 0.317 + ], + "angle": 0, + "content": "1. Abdal, R., Qin, Y., Wonka, P.: Image2stylegan: How to embed images into the stylegan latent space? In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4432-4441 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.318, + 0.787, + 0.36 + ], + "angle": 0, + "content": "2. Alaluf, Y., Tov, O., Mokady, R., Gal, R., Bermano, A.: Hyperstyle: Stylegan inversion with hypernetworks for real image editing. 
In: Proceedings of the IEEE/CVF conference on computer Vision and pattern recognition. pp. 18511-18521 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.361, + 0.785, + 0.388 + ], + "angle": 0, + "content": "3. Arad Hudson, D., Zitnick, L.: Compositional transformers for scene generation. Advances in Neural Information Processing Systems 34, 9506-9520 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.389, + 0.787, + 0.431 + ], + "angle": 0, + "content": "4. Bau, D., Zhu, J.Y., Wulff, J., Peebles, W., Strobelt, H., Zhou, B., Torralba, A.: Seeing what a gan cannot generate. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4502-4511 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.432, + 0.787, + 0.459 + ], + "angle": 0, + "content": "5. Bhattacharai, A.R., Nießner, M., Sevastopolsky, A.: Triplanenet: An encoder for eg3d inversion. arXiv preprint arXiv:2303.13497 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.46, + 0.787, + 0.487 + ], + "angle": 0, + "content": "6. Brock, A., Donahue, J., Simonyan, K.: Large scale gan training for high fidelity natural image synthesis. arXiv preprint arXiv:1809.11096 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.488, + 0.787, + 0.543 + ], + "angle": 0, + "content": "7. Chan, E.R., Lin, C.Z., Chan, M.A., Nagano, K., Pan, B., De Mello, S., Gallo, O., Guibas, L.J., Tremblay, J., Khamis, S., et al.: Efficient geometry-aware 3d generative adversarial networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 16123-16133 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.545, + 0.787, + 0.586 + ], + "angle": 0, + "content": "8. Deng, J., Guo, J., Xue, N., Zafeiriou, S.: Arcface: Additive angular margin loss for deep face recognition. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 4690-4699 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.587, + 0.787, + 0.628 + ], + "angle": 0, + "content": "9. Deng, Y., Wang, B., Shum, H.Y.: Learning detailed radiance manifolds for high-fidelity and 3d-consistent portrait synthesis from monocular image. arXiv preprint arXiv:2211.13901 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.63, + 0.787, + 0.671 + ], + "angle": 0, + "content": "10. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial networks. Communications of the ACM 63(11), 139–144 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.672, + 0.787, + 0.713 + ], + "angle": 0, + "content": "1. He, K., Fan, H., Wu, Y., Xie, S., Girshick, R.: Momentum contrast for unsupervised visual representation learning. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 9729-9738 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.714, + 0.787, + 0.755 + ], + "angle": 0, + "content": "2. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.757, + 0.787, + 0.784 + ], + "angle": 0, + "content": "3. Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. 
Advances in neural information processing systems 33, 6840-6851 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.785, + 0.787, + 0.84 + ], + "angle": 0, + "content": "4. Huh, M., Zhang, R., Zhu, J.Y., Paris, S., Hertzmann, A.: Transforming and projecting images into class-conditional generative networks. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part II 16. pp. 17–34. Springer (2020)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.274, + 0.787, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.347, + 0.127 + ], + "angle": 0, + "content": "H. Li et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.148, + 0.785, + 0.204 + ], + "angle": 0, + "content": "15. Johnson, J., Hariharan, B., Van Der Maaten, L., Fei-Fei, L., Lawrence Zitnick, C., Girshick, R.: Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2901-2910 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.205, + 0.785, + 0.233 + ], + "angle": 0, + "content": "16. Karras, T., Aila, T., Laine, S., Lehtinen, J.: Progressive growing of gans for improved quality, stability, and variation. arXiv preprint arXiv:1710.10196 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.234, + 0.785, + 0.275 + ], + "angle": 0, + "content": "17. Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 4401-4410 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.276, + 0.785, + 0.317 + ], + "angle": 0, + "content": "18. Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., Aila, T.: Analyzing and improving the image quality of stylegan. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8110-8119 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.318, + 0.785, + 0.359 + ], + "angle": 0, + "content": "19. Ko, J., Cho, K., Choi, D., Ryoo, K., Kim, S.: 3d gan inversion with pose optimization. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 2967-2976 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.36, + 0.785, + 0.388 + ], + "angle": 0, + "content": "20. Krizhevsky, A., Sutskever, I., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. Communications of the ACM 60(6), 84-90 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.39, + 0.785, + 0.431 + ], + "angle": 0, + "content": "21. Lan, Y., Meng, X., Yang, S., Loy, C.C., Dai, B.: Self-supervised geometry-aware encoder for style-based 3d gan inversion. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 20940-20949 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.432, + 0.785, + 0.472 + ], + "angle": 0, + "content": "22. Li, H., Shi, H., Zhang, W., Wu, W., Liao, Y., Wang, L., Lee, L.h., Zhou, P.: Dreamscene: 3d gaussian-based text-to-3d scene generation via formation pattern sampling. arXiv preprint arXiv:2404.03575 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.474, + 0.785, + 0.515 + ], + "angle": 0, + "content": "23. 
Lin, Y., Bai, H., Li, S., Lu, H., Lin, X., Xiong, H., Wang, L.: Componerf: Text-guided multi-object compositional nerf with editable 3d scene layout. arXiv preprint arXiv:2303.13843 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.517, + 0.785, + 0.557 + ], + "angle": 0, + "content": "24. Metzer, G., Richardson, E., Patashnik, O., Giryes, R., Cohen-Or, D.: Latentnerf for shape-guided generation of 3d shapes and textures. arXiv preprint arXiv:2211.07600 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.559, + 0.785, + 0.6 + ], + "angle": 0, + "content": "25. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM 65(1), 99-106 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.601, + 0.785, + 0.642 + ], + "angle": 0, + "content": "26. Nguyen-Phuoc, T., Li, C., Theis, L., Richardt, C., Yang, Y.L.: Hologan: Unsupervised learning of 3d representations from natural images. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 7588-7597 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.643, + 0.785, + 0.685 + ], + "angle": 0, + "content": "27. Nguyen-Phuoc, T.H., Richardt, C., Mai, L., Yang, Y., Mitra, N.: Blockgan: Learning 3d object-aware scene representations from unlabelled images. Advances in neural information processing systems 33, 6767–6778 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.686, + 0.785, + 0.727 + ], + "angle": 0, + "content": "28. Niemeyer, M., Geiger, A.: Giraffe: Representing scenes as compositional generative neural feature fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11453-11464 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.728, + 0.785, + 0.755 + ], + "angle": 0, + "content": "29. Perarnau, G., Van De Weijer, J., Raducanu, B., Álvarez, J.M.: Invertible conditional gans for image editing. arXiv preprint arXiv:1611.06355 (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.757, + 0.785, + 0.784 + ], + "angle": 0, + "content": "30. Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.785, + 0.785, + 0.84 + ], + "angle": 0, + "content": "31. Richardson, E., Alaluf, Y., Patashnik, O., Nitzan, Y., Azar, Y., Shapiro, S., Cohen-Or, D.: Encoding in style: a stylegan encoder for image-to-image translation. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 2287-2296 (2021)" + }, + { + "type": "list", + "bbox": [ + 0.218, + 0.148, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.675, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3D-GOI" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.189 + ], + "angle": 0, + "content": "32. Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, M., et al.: Imagenet large scale visual recognition challenge. International journal of computer vision 115, 211-252 (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.19, + 0.788, + 0.231 + ], + "angle": 0, + "content": "33. 
Schwarz, K., Liao, Y., Niemeyer, M., Geiger, A.: Graf: Generative radiance fields for 3d-aware image synthesis. Advances in Neural Information Processing Systems 33, 20154-20166 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.231, + 0.788, + 0.272 + ], + "angle": 0, + "content": "34. Tov, O., Alaluf, Y., Nitzan, Y., Patashnik, O., Cohen-Or, D.: Designing an encoder for stylegan image manipulation. ACM Transactions on Graphics (TOG) 40(4), 1-14 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.272, + 0.788, + 0.314 + ], + "angle": 0, + "content": "35. Wang, T., Zhang, Y., Fan, Y., Wang, J., Chen, Q.: High-fidelity gan inversion for image attribute editing. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11379-11388 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.314, + 0.788, + 0.355 + ], + "angle": 0, + "content": "36. Wei, T., Chen, D., Zhou, W., Liao, J., Zhang, W., Yuan, L., Hua, G., Yu, N.: E2style: Improve the efficiency and effectiveness of stylegan inversion. IEEE Transactions on Image Processing 31, 3267-3280 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.356, + 0.788, + 0.384 + ], + "angle": 0, + "content": "37. Xie, J., Ouyang, H., Piao, J., Lei, C., Chen, Q.: High-fidelity 3d gan inversion by pseudo-multi-view optimization. arXiv preprint arXiv:2211.15662 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.384, + 0.788, + 0.438 + ], + "angle": 0, + "content": "38. Yang, H., Zhang, Z., Yan, S., Huang, H., Ma, C., Zheng, Y., Bajaj, C., Huang, Q.: Scene synthesis via uncertainty-driven attribute synchronization. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5630-5640 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.439, + 0.788, + 0.481 + ], + "angle": 0, + "content": "39. Yang, J., Li, H.: Dense, accurate optical flow estimation with piecewise parametric model. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1019-1027 (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.481, + 0.788, + 0.522 + ], + "angle": 0, + "content": "40. Yin, F., Zhang, Y., Wang, X., Wang, T., Li, X., Gong, Y., Fan, Y., Cun, X., Shan, Y., Oztireli, C., et al.: 3d gan inversion with facial symmetry prior. arXiv preprint arXiv:2211.16927 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.522, + 0.788, + 0.563 + ], + "angle": 0, + "content": "41. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 586-595 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.564, + 0.788, + 0.605 + ], + "angle": 0, + "content": "42. Zhu, J., Shen, Y., Zhao, D., Zhou, B.: In-domain gan inversion for real image editing. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XVII 16. pp. 592-608. Springer (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.605, + 0.788, + 0.66 + ], + "angle": 0, + "content": "43. Zhu, J.Y., Krahenbihl, P., Shechtman, E., Efros, A.A.: Generative visual manipulation on the natural image manifold. In: Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V 14. pp. 597-613. 
Springer (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.66, + 0.788, + 0.702 + ], + "angle": 0, + "content": "44. Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: Proceedings of the IEEE international conference on computer vision. pp. 2223-2232 (2017)" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.702 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/c22d5df4-9f40-4c3a-8e7a-40f3a1d6dbe5_origin.pdf b/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/c22d5df4-9f40-4c3a-8e7a-40f3a1d6dbe5_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b6fe21fbcc70c0b02a0b7a1c09539220b0324797 --- /dev/null +++ b/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/c22d5df4-9f40-4c3a-8e7a-40f3a1d6dbe5_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8285186604c68d26b0ddbc43c7dad6d140e4d596fd72fc8098cc98297ddf6ee1 +size 4107784 diff --git a/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/full.md b/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/full.md new file mode 100644 index 0000000000000000000000000000000000000000..bbe84892f29a8f63a18cee0ab141a15c9fa4962c --- /dev/null +++ b/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/full.md @@ -0,0 +1,406 @@ +# 3D-GOI: 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing + +Haoran Li $^{1,2}$ , Long Ma $^{1,2}$ , Haolin Shi $^{1,2}$ , Yanbin Hao $^{1,2}$ , Yong Liao $^{1,2*}$ , Lechao Cheng $^{3}$ , and Peng Yuan Zhou $^{4*}$ + +1 University of Science and Technology of China + +2 CCCD Key Lab of Ministry of Culture and Tourism {1hr123, longm, mar}@mail.ustc.edu.cn, haoyanbin@hotmail.com, yliao@ustc.edu.cn + +3 Hefei University of Technology + +chenglc@hfut.edu.cn + +Aarhus University + +pengyuan.zhou@ece.au.dk + +Abstract. The current GAN inversion methods typically can only edit the appearance and shape of a single object and background while overlooking spatial information. In this work, we propose a 3D editing framework, 3D-GOI to enable multifaceted editing of affine information (scale, translation, and rotation) on multiple objects. 3D-GOI realizes the complex editing function by inverting the abundance of attribute codes (object shape/ appearance/ scale/ rotation/ translation, background shape/ appearance, and camera pose) controlled by GIRAFFE, a renowned 3D GAN. Accurately inverting all the codes is challenging, 3D-GOI solves this challenge following three main steps. First, we segment the objects and the background in a multi-object image. Second, we use a custom Neural Inversion Encoder to obtain coarse codes of each object. Finally, we use a round-robin optimization algorithm to get precise codes to reconstruct the image. To the best of our knowledge, 3D-GOI is the first framework to enable multifaceted editing on multiple objects. Both qualitative and quantitative experiments demonstrate that 3D-GOI holds immense potential for flexible, multifaceted editing in complex multi-object scenes. Our project and code are released at https://3d-goi.github.io. + +# 1 Introduction + +The development of generative 3D models has attracted increasing attention to automatic 3D objects and scene generation and edition. 
Most existing works are limited to a single object, such as 3D face generation [7] and synthesis of facial viewpoints [40]. There are few methods for generating multi-object 3D scenes while editing such scenes remains unexplored. In this paper, we propose 3D-GOI to edit images containing multiple objects with complex spatial geometric + +![](images/d9ae7d3da607da8e33995c2a1218ca96f1aa5b7eb57b8e91e023402c9b26b44e.jpg) +Fig. 1: The first row shows the editing results of traditional 2D/3D GAN inversion methods on multi-object images. The second row showcases 3D-GOI, which can perform multifaceted editing on complex images with multiple objects. 'bg' stands for background. The red crosses in the upper right figures indicate features that cannot be edited with current 2D/3D GAN inversion methods. + +relationships. 3D-GOI not only can change the appearance and shape of each object and the background, but also can edit the spatial position of each object and the camera pose of the image as shown by Figure 1. + +Existing 3D multi-object scene generation methods can be mainly classified into two categories: those [28] based on Generative Adversarial Networks (GANs) [10] and those [22] based on Diffusion models [13], besides a few based on VAE or Transformer [3,38]. GAN-based methods, primarily represented by GIRAFFE [28] and its derivatives, depict complex scene images as results of multiple foreground objects, controlled by shape and appearance, subjected to affine transformations (scaling, translation, and rotation), and rendered together with a background, which is also controlled by shape and appearance, from a specific camera viewpoint. Diffusion-based methods [23] perceive scene images as results of multiple latent NeRF [24], which can be represented as 3D models, undergoing affine transformations, optimized with SDS [30], rendered from a specific camera viewpoint. Both categories represent scenes as combinations of multiple codes. To realize editing based on these generative methods, it's imperative to invert the complex multi-object scene images to retrieve their representative codes. After modifying these codes, regeneration can achieve diversified editing of complex images. Most inversion methods study the inversion of a single code based on its generation method. However, each multi-object image is the entangled result of multiple codes, thus inverting all codes from an image requires precise disentangling of the codes, which is extremely difficult and largely overlooked. Moreover, the prevailing inversion algorithms primarily employ optimization approaches. Attempting to optimize all codes simultaneously often leads to chaotic optimization directions and less accurate inversion outcomes. + +Therefore, we propose 3D-GOI, a framework capable of inverting multiple codes to achieve a comprehensive inversion of multi-object images. Given current open-source 3D multi-object scene generation methods, we have chosen GI-RAFFE [28] as our generative model. In theory, our framework can be applied to other generative approaches as well. We address these challenges as follows. + +First, we categorize different codes based on object attributes, background attributes, and pose attributes. Through qualitative verification, we found that segmentation methods can roughly separate the codes pertaining to different objects. For example, the codes controlling an object's shape, appearance, scale, translation, and rotation predominantly relate to the object itself. 
So, during the inversion process, we only use the segmented image of this object to reduce the impact of the background and other objects on its codes. + +Second, we get the attributes' codes from the segmented image. Inspired by the Neural Rendering Block in GIRAFFE, we design a custom Neural Inversion Encoder network to coarsely disentangle and estimate the code values. + +Finally, we obtain precise values for each code through optimization. We observed that optimizing all codes simultaneously tends to get stuck in local minima. Therefore, we propose a round-robin optimization algorithm that employs a ranking function to determine the optimization order for different codes. The algorithm enables a stable and efficient optimization process for accurate image reconstruction. Our contributions can be summarized as follows. + +- To our best knowledge, 3D-GOI is the first multi-code inversion framework in generative models, achieving multifaceted editing of multi-object images. +- We introduce a three-stage inversion process: 1) separate the attribute codes of different objects via segmentation; 2) obtain coarse codes using a custom Neural Inversion Encoder; 3) optimize the reconstruction using a round-robin optimization strategy. +- Our method outperforms existing methods on both 3D and 2D tasks. + +# 2 Related Work + +2D/3D GANs. 2D GAN maps a distribution from the latent space to the image space using a generator and a discriminator and has been widely explored. For example, BigGAN [6] increases the batch size and uses a simple truncation trick to finely control the trade-off between sample fidelity and variety. CycleGAN [44] feeds an input image into the generator and loops the output back to the generator. It achieves style transfer by minimizing the consistency loss between the input and its result. StyleGAN [17] maps a latent code into multiple style codes, allowing for detailed style control of images. 3D GANs usually combine 2D GANs with some 3D representation, such as NeRF [25], and have demonstrated excellent abilities to generate complex scenes with multi-view consistency. Broadly, 3D GANs can be classified into explicit and implicit models. Explicit models like HoloGAN [26] enable explicit control over the object pose through rigid body transformations of the learned 3D features. BlockGAN [27] generates foreground and background 3D features separately, combining them into a complete 3D scene representation. On the other hand, implicit models generally perform better. Many of these models take inspiration from NeRF [25], representing images as neural radiance fields and using volume rendering to generate photorealistic images in a continuous view. EG3D [7] introduces an explicit-implicit hybrid network architecture that produces high-quality 3D geometries. + +![](images/95b140fad3eba4fd0c9ef37e1d7254c76e91c7db998f5dfcb3b1d382d6dc3489.jpg) +Fig. 2: Different GANs and GAN Inversion methods utilize codes differently. $\omega$ represents the latent code and $c$ represents the camera pose. + +![](images/74a2f6e85bbd65c440ad6f8c69a023b312a35cc1d757f096c45926c731e0d946.jpg) + +![](images/b736a5a1d20e8d30b35cde4467de5078a10d8de84d305eeaa9fff26dc883425a.jpg) + +GRAF [33] integrates shape and appearance coding within the generation process, which facilitates independent manipulation of the shape and appearance of the generated vehicle and furniture images. 
Moreover, the presence of 3D information provides additional control over the camera pose, contributing to the flexibility of the generated outputs. GIRAFFE [28] extends GRAF to multi-object scenes by considering an image as the composition of multiple objects in the foreground through affine transformation and the background rendered at a specific camera viewpoint. In this work, we select GIRAFFE as the 3D GAN model to be inverted. + +2D/3D GAN Inversion. GAN inversion obtains the latent code of an input image under a certain generator and modifies the latent code to perform image editing operations. Current 2D GAN inversion methods can be divided into optimization-based, encoder-based, and hybrid methods. Optimization-based methods [1, 14, 43] directly optimize the initial code, requiring very accurate initial values. Encoder-based methods [29, 31, 36] can map images directly to latent code but generally cannot achieve full reconstruction. Hybrid-based methods [4, 42] combine these two approaches: first employ an encoder to map the image to a suitable latent code, then perform optimization. Currently, most 2D GANs only have one latent code to generate an image $^5$ . Therefore, the 2D GAN inversion task can be represented as: + +$$ +\omega^ {*} = \arg \min _ {\omega} \mathcal {L} (G (\omega , \theta), I), \tag {1} +$$ + +where $\omega$ is the latent component, $G$ denotes the generator, $\theta$ denotes the parameters of the generator, $I$ is the input image, and $\mathcal{L}$ is the loss function measuring the difference between the generated and input image. + +Typically, 3D GANs have an additional camera pose parameter compared to 2D GANs, making it more challenging to obtain latent codes during inversion. Current methods like SPI [40] use a symmetric prior for faces to generate images with different perspectives, while [19] employs a pre-trained estimator to achieve better initialization and utilizes pixel-level depth calculated from the NeRF parameters for improved image reconstruction. + +Currently, there are only limited works on 3D GAN inversion [9,21,37] which primarily focus on creating novel perspectives of human faces using specialized + +face datasets considering generally only two codes: camera pose code $\pmb{c}$ and the latent code $\pmb{\omega}$ . Hence its inversion task can be represented as: + +$$ +\boldsymbol {\omega} ^ {*}, \boldsymbol {c} ^ {*} = \arg \min _ {\boldsymbol {\omega}, \boldsymbol {c}} \mathcal {L} (G (\boldsymbol {\omega}, \boldsymbol {c}, \theta), I). \tag {2} +$$ + +A major advancement of 3D-GOI is the capability to invert more independent codes compared with other inversion methods, as Figure 2 shows, in order to perform multifaceted edits on multi-object images. + +# 3 Preliminary + +GIRAFFE [28] represents individual objects as a combination of feature field and volume density. Through scene compositions, the feature fields of multiple objects and the background are combined. Finally, the combined feature field is rendered into an image using volume rendering and neural rendering. + +For a coordinate $\mathbf{x}$ and a viewing direction $\mathbf{d}$ in scene space, the affine transformation $T(s,t,r)$ (scale, translation, rotation) is used to transform them back into the object space of each individual object. 
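
To make this mapping concrete, it can be sketched in a few lines of NumPy. The sketch assumes the forward transform composes scale, rotation, and translation as $k(\boldsymbol{x}) = \boldsymbol{R}\,\mathrm{diag}(\boldsymbol{s})\,\boldsymbol{x} + \boldsymbol{t}$; the function names are ours, not part of GIRAFFE's code:

```python
import numpy as np

def scene_to_object(x, s, t, R):
    """Map scene-space points x back into an object's canonical space.

    Inverse of k(x) = R @ diag(s) @ x + t, i.e. diag(1/s) @ R.T @ (x - t).
    x: (N, 3) points; s, t: (3,); R: (3, 3) orthonormal, so R^-1 = R.T.
    """
    return ((x - t) @ R) / s  # row-vector form of diag(1/s) @ R.T @ (x - t)

def scene_dir_to_object(d, s, R):
    """Viewing directions transform like vectors: no translation term."""
    return (d @ R) / s
```

Because $\boldsymbol{R}$ is orthonormal its inverse is just the transpose, and the per-axis division by $\boldsymbol{s}$ plays the role of $S^{-1}$.
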
Following the implicit shape representations used in NeRF, a multi-layer perceptron (MLP) $h_{\theta}$ is used to map the transformed $\mathbf{x}$ and $\mathbf{d}$ , along with the shape-controlling code $z_{s}$ and appearance-controlling code $z_{a}$ , to the feature field $\mathbf{f}$ and volume density $\sigma$ : + +$$ +\left. \left(T (s, t, r; \boldsymbol {x})\right), T (s, t, r; \boldsymbol {d})\right), \left. \boldsymbol {z} _ {\boldsymbol {s}}, \boldsymbol {z} _ {\boldsymbol {a}}\right) \xrightarrow {h _ {\theta}} (\sigma , \boldsymbol {f}). \tag {3} +$$ + +Then, GIRAFFE defines a Scene Composite Operator: at a given $\pmb{x}$ and $\pmb{d}$ , the overall density is the sum of the individual densities (including the background). The overall feature field is represented as the density-weighted average of the feature field of each object: + +$$ +C (\boldsymbol {x}, \boldsymbol {d}) = \left(\sigma , \frac {1}{\sigma} \sum_ {i = 1} ^ {N} \sigma_ {i} \boldsymbol {f} _ {\boldsymbol {i}}\right), w h e r e \quad \sigma = \sum_ {i = 1} ^ {N} \sigma_ {i}, \tag {4} +$$ + +where $\mathrm{N}$ denotes the background plus (N-1) objects. + +The rendering phase is divided into two stages. Similar to volume rendering in NeRF, given a pixel point, the rendering formula is used to calculate the feature field of this pixel point from the feature fields and the volume density of all sample points in a camera ray direction. After calculating all pixel points, a feature map is obtained. Neural rendering (Upsampling) is then applied to get the rendered image. Please refer to the Supplementary Material 1 for the detailed preliminary and formulas. + +# 4 3D-GOI + +# 4.1 Problem Definition + +The problem we target is similar to the general definition of GAN inversion, with the difference being that we need to invert many more codes than existing methods (1 or 2) shown in Figure 2. The parameter $W$ in GIRAFFE, which controls + +![](images/6b599194f2d727cfbe2094a6fe49f6c4ac4fba1ba15e0f2c7c3c4fd36a44ee08.jpg) +Fig. 3: The overall framework of 3D-GOI. As shown in the upper half, the encoders are trained on single-object scenes, each time using $L_{enc}$ to predict one $w, w \in W$ , while other codes use real values. The lower half depicts the inversion process for the multi-object scene. We first decompose objects and background from the scene, then use the trained encoder to extract coarse codes, and finally use the round-robin optimization algorithm to obtain precise codes. The green blocks indicate required training and the yellow blocks indicate fixed parameters. + +the generation, can be divided into object attributes, background attributes, and pose attributes, denoted by $O$ , $B$ , and $C$ . Then, $W$ can be expressed as follows: + +$$ +W = \{O _ {i} ^ {s h a p e}, O _ {i} ^ {a p p}, O _ {i} ^ {s}, O _ {i} ^ {t}, O _ {i} ^ {r}, B ^ {s h a p e}, B ^ {a p p}, C \}, \quad i = 1, \dots , n, \tag {5} +$$ + +where $O_{i}^{shape}$ is the object shape latent code, $O_{i}^{app}$ is the object appearance latent code, $O_{i}^{s}$ is the object scale code, $O_{i}^{t}$ is the object translation code, $O_{i}^{r}$ is the object rotation code, $B^{shape}$ is the background shape latent code, $B^{app}$ is the background appearance latent code and $C$ is the camera pose matrix. $n$ denotes the $n$ objects. The reconstruction part can be expressed as: + +$$ +W ^ {*} = \arg \min _ {W} \mathcal {L} (G (W, \theta), I). \tag {6} +$$ + +According to Equation 5, we need to invert a total of $(5n + 3)$ codes. 
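
For bookkeeping, the full code set $W$ can be pictured as a small container. The dimensionalities below follow the implementation details in Section 5; the container itself is only an illustration:

```python
from dataclasses import dataclass
import numpy as np

@dataclass
class ObjectCodes:
    shape: np.ndarray  # O^shape, 256-d latent
    app: np.ndarray    # O^app, 256-d latent
    s: np.ndarray      # O^s, 3-d scale
    t: np.ndarray      # O^t, 3-d translation
    r: float           # O^r, rotation angle

@dataclass
class SceneCodes:
    objects: list         # n ObjectCodes entries -> 5n codes
    bg_shape: np.ndarray  # B^shape, 128-d latent
    bg_app: np.ndarray    # B^app, 128-d latent
    camera: float         # C, camera pose

    @property
    def num_codes(self) -> int:
        return 5 * len(self.objects) + 3  # the (5n + 3) count above
```
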
Then, we are able to replace or interpolate any inverted code(s) to achieve multifaceted editing of multiple objects. + +# 4.2 Scene Decomposition + +As mentioned, the GIRAFFE generator differs from typical GAN generators in that a large number of codes are involved and not a single code controls all the generated parts. Therefore, it is challenging to transform all codes using just one encoder or optimizer as in typical GAN Inversion methods. While a human can easily distinguish each object and some of its features (appearance, shape), a machine algorithm requires a large number of high-precision annotated samples to understand what code is expressed at what position in the image. + +![](images/32f752b839e368774c8e064f48d9a7f86df4150af26172f6875e4ba55247d341.jpg) +(a) Input +Fig. 4: Scene decomposition. (a) The input image. (b) The feature weight map of car A, where the redder regions indicate a higher opacity and the bluer regions lower opacity. (c) The feature weight map of car B. (d) The feature weight map of the background. By integrating these maps, it becomes apparent that the region corresponding to car A predominantly consists of the feature representation of cars A and B. The background's visible area solely contains the background's feature representation. + +![](images/73ecd522f757a980eb426abbc10f4f10e12b01aefe644b592aea5d6dcf959342.jpg) +(b) Car A + +![](images/de237f2656a16860d64607a8884433dbfa714d679556bec6eb876f4a75b524ec.jpg) +(c) Car B + +![](images/34a97b6ab961fa6c051b449acca725226c85ad0215374b22dba85d90a3230fcd.jpg) +(d) Background + +A straightforward idea is that the attribute codes of an object will map to the corresponding position of the object in the image. For example, translation $(O^t)$ and rotation $(O^r)$ codes control the relative position of an object in the scene, scaling $(O^s)$ and shape $(O^{shape})$ codes determine the contour and shape of the object, and appearance $(O^{app})$ codes control the appearance representation at the position of the object. The image obtained from segmentation precisely encompasses these three types of information, allowing us to invert it and obtain the five attribute codes for the corresponding object. Similarly, for codes $(B^{shape}, B^{app})$ that generate the background, we can invert them using the segmented image of the background. Note that obtaining camera pose code $(C)$ requires information from the entire rendered image. + +We can qualitatively validate this idea. In Equation 3, we can see that an object's five attribute codes are mapped to the object's feature field and volume density through $h_{\theta}$ . As inferred from Equation 4, the scene's feature field is synthesized by weighting the feature fields of each object by density. Therefore, an object appears at its position because its feature field has a high-density weight at the corresponding location. Figure 4 displays the density of different objects at different positions during GIRAFFE's feature field composition process. The redder the higher the density, while the bluer the lower the density. As discussed, car A exhibits a high-density value within its area and near-zero density elsewhere - a similar pattern is seen with car B. The background, however, presents a non-uniform density distribution across the scene. We can consider that both car A and B and the background mainly manifest their feature fields within their visible areas. Hence, we apply a straightforward segmentation method to separate each object's feature field and get the codes. 
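
In code, this decomposition reduces to masking. The paper does not prescribe a particular segmentation network, so `segmenter` in the sketch below stands in for any pretrained instance-segmentation model:

```python
import numpy as np

def decompose_scene(image, segmenter):
    """Split an (H, W, 3) image into per-object inputs, a background input,
    and the full image (the latter is used only for the camera pose code)."""
    masks = segmenter(image)  # list of (H, W) boolean masks, one per object
    object_inputs = [image * m[..., None] for m in masks]
    bg_mask = ~np.any(np.stack(masks, axis=0), axis=0)  # pixels no object covers
    background_input = image * bg_mask[..., None]
    return object_inputs, background_input, image
```
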
Segmenting each object also allows our encoder to pay more attention to each input object or background. As such, we can train the encoder on single-object scenes and then generalize it to multi-object scenes instead of directly training in multi-object scenes that involve more codes, to reduce computation cost. + +![](images/6a7668ad2dba3d3163f106f104fd8323f34ebb1a6ef168cfea5376fcefec75ab.jpg) +(a) Neural Rendering Block + +![](images/d5bdb0e0efa9b7789efd91c1aec0a513a81f832a4806004bc0e37116528e341b.jpg) +(b) Neural Inversion Encoder +Fig. 5: Neural Inversion Encoder. (a) The Neural Rendering Block in GIRAFFE [28], an upsampling process to generate image $\hat{I}$ . (b) The Neural Inversion Encoder opposes (a), which is a downsampling process. $I$ is the input image, $H, W$ are image height and width. $I_v$ is the heatmap of the image, $H_v, W_v$ and $M_f$ are the dimensions of $I_v$ , $w$ is the code to be predicted, and $w_f$ is the dimension of $w$ . Up/Down means upsampling/downsampling. + +# 4.3 Coarse Estimation + +The previous segmentation step roughly disentangles the codes. Unlike typical encoder-based methods, it's difficult to predict all codes using just one encoder. Therefore, we assign an encoder to each code, allowing each encoder to focus solely on predicting one code. Hence, we need a total of eight encoders. As shown in Figure 3, we input the object segmentation for the object attribute codes $(O^{shape}, O^{app}, O^s, O^t, O^r)$ , the background segmentation for the background attribute codes $(B^{shape}, B^{app})$ , and the original image for pose attribute code $(C)$ . Different objects share the same encoder for the same attribute code. + +We allocate an encoder called Neural Inversion Encoder with a similar structure to each code. Neural Inversion Encoder consists of three parts as Figure 5(b) shows. The first part employs a standard feature pyramid over a ResNet [12] backbone like in pSp [31] to extract the image features. The second part, in which we designed a structure opposite to GIRAFFE's Neural rendering Block based on its architecture as Figure 5(a) shows, downsamples the images layer by layer using a CNN and then uses skip connections [12] to combine the layers, yielding a one-dimensional feature. The third layer employs an MLP structure to acquire the corresponding dimension of different codes. + +Training multiple encoders simultaneously is difficult to converge due to the large number of parameters. Hence, we use the dataset generated by GIRAFFE to retain the true values of each code and train an encoder for one code at a time, to keep the other codes at their true values, greatly smoothing the training. + +During encoder training, we use the Mean Squared Error (MSE) loss, perceptual loss (LPIPS) [41], and identity loss (ID) [11] between the reconstructed image and the original image, to be consistent with most 2D and 3D GAN inversion training methodologies. When training the affine codes (scale $O^s$ , translation $O^t$ , rotation $O^r$ ), we find that different combinations of values produce very similar images, e.g., moving an object forward and increasing its scale yield + +Algorithm 1: Round-robin Optimization +Data: all codes $w\in W$ predicted by encoders, fixed GIRAFFE generator $G$ input image $I$ 1 Initialize $lr\_ w = 10^{-3},w\in W$ . +2 while any $lr\_ w > 10^{-5}$ do +3 foreach $w\in W$ do +4 Sample $\delta w$ . 
+5 Compute $\delta \mathcal{L}(w)$ using Eq.8; +6 end +7 Compute rank_list using Eq.9; +8 foreach $w\in$ rank_list and lr_w>10-5 do +9 Optimization w with $\mathcal{L}_{opt}$ in Eq. 10 of I and G(W;0); +10 if the $\mathcal{L}_{opt}$ ceases to decrease for five consecutive iterations then +11 | lr_w=lr_w/2; +12 end +13 end +14 end + +similar results. However, the encoder can only predict one value at a time, hence we add the MSE loss of the predicted $O^s$ , $O^t$ , $O^r$ values, and their true values, to compel the encoder to predict the true value. + +$$ +\mathcal {L} _ {e n c} = \lambda_ {1} L _ {2} + \lambda_ {2} L _ {l p i p s} + \lambda_ {3} L _ {i d}, \tag {7} +$$ + +where $\lambda_{i}, i = 1,2,3$ represent the ratio coefficient between various losses. When training $O^s$ , $O^t$ , $O^r$ code, the $L_2$ loss includes the MSE loss between the real values of $O^s$ , $O^t$ , $O^r$ and their predicted values. + +# 4.4 Precise Optimization + +Pre-trained segmentation models have some segmentation errors and all encoder-based GAN inversion networks [31,34,35] usually cannot accurately obtain codes, necessitating refinements. Next, we optimize the coarse codes. Through experiments, we have found that using a single optimizer to optimize all latent codes tends to converge to local minima. Hence, we employ multiple optimizers, each handling a single code. The optimization order is crucial due to the variance of the disparity between the predicted and actual values across different encoders, and the different impact of code changes on the image, e.g., changes to $B^{shape}$ and $B^{app}$ codes controlling background generation mostly would have a larger impact on overall pixel values. Prioritizing the optimization of codes with significant disparity and a high potential for changing pixel values tends to yield superior results in our experiments. Hence, we propose an automated round-robin optimization algorithm (Algorithm 1) to sequentially optimize each code based on the image reconstructed in each round. + +Algorithm 1 aims to add multiple minor disturbances to each code, and calculate the loss between the images reconstructed before and after the disturbance + +and the original image. A loss increase indicates that the current code value is relatively accurate, hence its optimization order can be postponed, and vice versa. For multiple codes that demand prioritized optimization, we compute their priorities using the partial derivatives of the loss variation and perturbation. We do not use backpropagation automatic differentiation here to ensure the current code value remains unchanged. + +$$ +\delta \mathcal {L} (w) = \mathcal {L} (G (W - \{w \}, w + \delta w, \theta), I) - \mathcal {L} (G (W, \theta), I), \tag {8} +$$ + +$$ +\operatorname {r a n k} _ {-} \operatorname {l i s t} = F _ {\operatorname {r a n k}} (\delta \mathcal {L} (w), \frac {\delta \mathcal {L} (w)}{\delta w}), \tag {9} +$$ + +where $w \in W$ is one of the codes and $\delta w$ represents the minor disturbance of $w$ . For the rotation angle $r$ , we have found that adding a depth loss can accelerate its optimization. Thus, the loss $\mathcal{L}$ during optimization can be expressed as: + +$$ +\mathcal {L} _ {\text {o p t}} = \lambda_ {1} L _ {2} + \lambda_ {2} L _ {\text {l p i p s}} + \lambda_ {3} L _ {\text {i d}} + \lambda_ {4} L _ {\text {d e e p}}. \tag {10} +$$ + +This optimization method allows for more precise tuning of the codes for more accurate reconstruction and editing of the images. 
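
The loop below condenses Algorithm 1 into a PyTorch-style sketch. The disturbance probe (Eq. 8), the ranking (Eq. 9, simplified here to a sort), the five-step plateau rule, and the learning-rate bounds follow the pseudocode; the dictionary-based generator interface `G(codes)` and the per-code step budget are our illustrative assumptions:

```python
import torch

def round_robin_optimize(codes, G, I, loss_fn,
                         delta=1e-2, lr_init=1e-3, lr_min=1e-5, steps=50):
    """codes: dict name -> tensor (coarse encoder estimate, requires_grad=True)."""
    lr = {n: lr_init for n in codes}
    opt = {n: torch.optim.Adam([w], lr=lr_init) for n, w in codes.items()}
    while any(v > lr_min for v in lr.values()):
        # Probe each code with a small disturbance (Eq. 8); no autograd is
        # used here, so the current code values stay unchanged.
        with torch.no_grad():
            base = loss_fn(G(codes), I).item()
            dL = {n: loss_fn(G({**codes, n: w + delta}), I).item() - base
                  for n, w in codes.items()}
        # Codes whose disturbance lowered the loss are optimized first (Eq. 9).
        for n in sorted(codes, key=lambda k: (dL[k], dL[k] / delta)):
            if lr[n] <= lr_min:
                continue
            best, stall = float("inf"), 0
            for _ in range(steps):
                opt[n].zero_grad()
                L = loss_fn(G(codes), I)  # L_opt of Eq. 10
                L.backward()
                opt[n].step()
                stall = 0 if L.item() < best else stall + 1
                best = min(best, L.item())
                if stall >= 5:  # loss stopped decreasing: halve this code's lr
                    lr[n] /= 2
                    for g in opt[n].param_groups:
                        g["lr"] = lr[n]
                    break
    return codes
```

The rounds repeat, re-ranking after every pass, until every code's learning rate has decayed below the threshold.
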
+ +# 5 Implementation + +Neural Inversion Encoder. The first part of our encoder uses ResNet50 to extract features. In the second part, we downsample the extracted features (512-dimensional) and the input RGB image (3-dimensional) together. The two features are added together through skip connections, as shown in Figure 5. In the downsampling module, we use a 2D convolution with a kernel of 3 and a stride of 1, and the LeakyReLU activation function, to obtain a 256-dimensional intermediate feature. For object shape/appearance attributes, the output dimension is 256, and we use four Fully Connected Layers $\{4\times FCL(256,256)\}$ to get the codes. For background shape/appearance attributes, the output dimension is 128, we use $\{FCL(256,128) + 3\times FCL(128,128)\}$ to get the codes. For object scale/translation attributes, the output dimension is 3, and we use the network $\{FCL(2^i,2^{i - 1}) + FCL(8,3),i = 8,\dots ,4\}$ to get the codes. For camera pose and rotation attributes, the output dimension is 1, and we use a similar network $\{FCL(2^i,2^{i - 1}) + FCL(8,1),i = 8,\dots ,4\}$ to get the codes. + +Training and Optimization are carried out on a single NVIDIA A100 SXM GPU with 40GB of memory, using the Adam optimizer. The initial learning rate is set to $10^{-4}$ and $10^{-3}$ , respectively. Encoder training employs a batch size of 50. Each encoder took about 12 hours to train, and optimizing a single image of a complex multi-object scene took about 1 minute. For rotation features, it is difficult for the encoder to make accurate predictions for some images. Therefore, we uniformly sampled 20 values in the range of $[0, 360^{\circ}]$ for the + +rotation parameters with large deviations. We selected the value that minimizes the loss in Equation 7 as the initial value for the optimization stage. + +For LPIPS loss, we employ a pre-trained AlexNet [20]. For ID calculation, we employ a pre-trained Arcface [8] model in human face datasets and a pre-trained ResNet-50 [32] model in the car dataset. For depth loss, we use the pre-trained Dense Prediction Transformer model. We set $\lambda_1 = 1$ , $\lambda_2 = 0.8$ , and $\lambda_3 = 0.2$ in Equation 7, as well as in Equation 10, in which $\lambda_4 = 1$ . + +# 6 Experiment + +Datasets. To obtain the true values of the 3D information in GIRAFFE for stable training performance, we use the pre-trained model of GIRAFFE on CompCars [39] and Clevr [15] dataset to generate training datasets. For testing datasets, we also use GIRAFFE to generate images for multi-car datasets denoted as $G$ -CompCars (CompCars is a single car image dataset) and use the original Clevr dataset for multi-geometry dataset (Clevr is a dataset that can be simulated to generate images of multiple geometries). We follow the codes setup in GIRAFFE. For CompCars, we use all the codes from Equation 5. For Clevr, we fixed the rotation, scale, and camera pose codes of the objects. For experiments on facial data, we utilized the FFHQ [17] dataset for training and the CelebA-HQ [16] dataset for testing. 
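
To make the Section 5 recipe concrete before turning to the results, the composite objective and the rotation initialization can be sketched as follows. Here `lpips_fn`, `id_sim`, and `depth_fn` stand for the pre-trained LPIPS, identity, and depth networks mentioned above; implementing the identity term as one minus the identity similarity is our assumption, as is the dictionary-based generator interface:

```python
import math
import torch

def inversion_loss(x, y, lpips_fn, id_sim, depth_fn=None):
    """Eq. 7 / Eq. 10 with lambda1=1 (MSE), lambda2=0.8 (LPIPS),
    lambda3=0.2 (ID), lambda4=1 (depth, used when optimizing rotation)."""
    loss = ((x - y) ** 2).mean() + 0.8 * lpips_fn(x, y) + 0.2 * (1 - id_sim(x, y))
    if depth_fn is not None:
        loss = loss + ((depth_fn(x) - depth_fn(y)) ** 2).mean()
    return loss

def init_rotation(codes, G, I, loss_fn):
    """Try 20 angles uniformly covering [0, 360) and keep the best one,
    used when the encoder's rotation estimate deviates too much."""
    angles = torch.arange(20) * (2 * math.pi / 20)
    with torch.no_grad():
        losses = torch.stack([loss_fn(G({**codes, "r": a}), I) for a in angles])
    return angles[losses.argmin()]
```
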
+ +![](images/6f4059d17e2a50ecd260ff620931b98b5225119e028eda9ef6e7d56ee1724817.jpg) +(a) Input, Co-R, Pre-R + +![](images/99cedf4108539a0b8425f9c827c841ba85f659e75a0135cdbebade4b06e78bcf.jpg) +(b) Edit Shape + +![](images/2275654dea41191fe4c6f5b09c9caf68899b269a5ad0f36e65b38deaf7be23c9.jpg) + +![](images/991a6fe46aa4683eb0fc5677e2cb36c4256517c60149ed10c0964379cb3e60d4.jpg) + +![](images/1520efaca4395ba6c7de916da78ec3f9597ecc58c0f54a579f92b0e2fe7c4ea2.jpg) +(e) Edit Bg Appearance + +![](images/854b3f8d8dc6fb483a3dc03416093069dad5fba978b55fdfe9443411d6019faa.jpg) +(f) Edit Scale +Fig. 6: Single-object editing on G-CompCars dataset. Co-R: coarse reconstruction. Pre-R: precise reconstruction. + +![](images/51d3b0a65b9e7a72938464344889d7e345b16f5ad7796e65afef93d5677635ff.jpg) +(c) Edit Appearance +(g) Edit Translation + +![](images/205f9aba677d97b4083e3d4808322dcf56d9c73bef73c32dd668c810580242f3.jpg) +(d) Edit Bg Shape +(h) Edit Rotation + +![](images/3cc160d66d58ca39b5f80d2bfb80544b37ef9d4662e2bb7e6a8edf439e79d762.jpg) +(a) Input, Co-R, Pre-R +Fig. 7: Single-object editing on Clevr dataset. + +![](images/68531de72840eeea6c399798738f89dc129896dba3dad36f2ae4d8f928278637.jpg) +(b) Edit Appearance + +![](images/eabe5ff5c158e1947d8b1c338ba50b7a1cb724d2803459bd876c224c7fde9cbe.jpg) +(c) Edit Translation + +![](images/b996cbc5a8c079f6534fb10408582ad709d37d8611b2d01b7fd2aba80f8bdb99.jpg) +(d) Add Object + +Baselines. In the comparative experiments for our Neural Inversion Encoder, we benchmarked encoder-based inversion methods such as e4e [34] and pSp [31], which use the 2D GAN StyleGAN2 [18] as the generator, and E3DGE [21] and + +![](images/60418f3e4c648b49e1839e33d12ec2215c8f6326c07842f1b820aa250aeeb2dc.jpg) + +![](images/8f49759684059e939ed6d6e095d79b4d02d30b2e94163562e85e1596b75ffa22.jpg) + +![](images/7c83539bfa72b0b4fdb6826c942e43d6fe0154ee19229562f323f0583a8cf073.jpg) + +![](images/fd6d779db6c870b4d676f86dfc5e4ead06a0388acf5301039204102ff65f96bf.jpg) + +![](images/dbf24119eba1f34bb7ef704e4d4da88ca7727f295b605a11be74ed20ec2084e5.jpg) +(a) Input, Co-R, Pre-R +(e) Edit Bg Appearance + +![](images/6645c84379197c8d8e3416110783a0b32f58bf6519325d6af307a62a3706559c.jpg) +(b) Edit Shape +(f) Edit Scale + +![](images/30abf75e354c3a950a29c3e762c8eecdb9c73444e124f66bd307e777d3260acc.jpg) +(c) Edit Appearance +(g) Edit Translation + +![](images/10537903b1e9d923080b4ea771ba87384e6ae7365ed3818ed3f2bb257ab97396.jpg) +(d) Edit Bg Shape +(h) Edit Rotation + +![](images/30e8fdff9c7e5516a6c5826bbb04fed4356d36de6a496ad5cb52a286212a90d7.jpg) +Fig. 8: Multi-object editing on $G$ -CompCars dataset. +(a) Input, Co-R, Pre-R +Fig. 9: Multi-object editing on Clevr dataset. + +![](images/5772291549f34b570d7b1c23b37d84d16e2f02bb13a33c5416c854eb724a3f42.jpg) +(b) Edit Appearance + +![](images/a2f421f252ce2c58fae785455f661cc17a7b338c61852deaf059ca50e22bc62f.jpg) +(c) Edit Translation + +![](images/8890e6aac8a10fca25e28769f159a166a9cbef09bea98748aa1ea62c4712be46.jpg) +(d) Add/Remove Objects + +TriplaneNet [5] that employ the 3D GAN EG3D [7] as the generator, on the generator of GIRAFFE. Additionally, we compared our encoder on StyleGAN2 with SOTA inversion methods HyperStyle [2] and HFGI [35] for StyleGAN2. + +Metrics. We use Mean Squared Error (MSE), perceptual similarity loss (LPIPS) [41], and identity similarity (ID) to measure the quality of image reconstruction. + +# 6.1 3D GAN Omni-Inversion + +Single-object Multifaceted Editing. 
In Figure 6 and Figure 7, (a) depict the original images, the coarsely reconstructed images produced by the Neural Inversion Encoder, and the precisely reconstructed images obtained via round-robin optimization. As Figure 7 shows, the simple scene structure of the Clevr dataset allows us to achieve remarkably accurate results using only the encoder (Co-Recon). However, for car images in Figure 6, predicting precise codes using the encoder only becomes challenging, necessitating the employment of the round-robin optimization algorithm to refine the code values for precise reconstruction (Pre-Recon). Figure 6 (b)-(h) and Figure 7 (b)-(d) show the editing results for different codes. As noted in Section 4.3, moving an object forward and increasing its scale yield similar results. Please refer to the Supplementary Material 3.1 for more results like camera pose and shape editing. + +Multi-object Multifaceted Editing. We notice that the prediction for some object parameters ( $O^{shape}$ , $O^{app}$ , $O^s$ , $O^t$ ) are quite accurate. However, the prediction for the background codes deviates significantly. We speculate this is due to the significant differences in segmentation image input to the background encoder between multi-object scenes and single-object scenes. Therefore, background reconstruction requires further optimization. Figure 8 and Figure 9 depict the multifaceted editing outcomes for two cars and multiple Clevr objects, respectively. The images show individual edits of two objects in the left and middle images and collective edits at the right images in Figure 8 (b-c) and (f-h). + +![](images/504bf2bc8d429d65185a3c4f35d5eb0acfd376058e512e0f50c38b713f8d7545.jpg) +(a) Reconstruction results of different GAN inversion encoders using the generator of GI-RAFFE. + +![](images/c70a34891e1e695367e4d50bc62492f070229c74e463a0cb8fc5b240263bee74.jpg) +(b) Reconstruction results of different GAN inversion encoders using the generator of StyleGAN2. +Fig. 10: Reconstruction quality of different GAN inversion encoders. + +As shown in Figure 8, the predictive discrepancy between the car's background and rotation angle on the left is considerable, requiring adjustments through the round-robin optimization. As illustrated in Figure 1, 2D/3D GAN inversion methods can not inverse multi-object scenes. More images pertaining to multi-object editing can be found in Supplementary Material 3.2. + +# 6.2 Comparison Experiment of Neural Inversion Encoder + +For fair comparison and to eliminate the impact of the generator on the quality of the inverted image generation, we trained the encoders from the baseline methods by connecting them to the GIRAFFE generator using our Neural Inversion Encoder training approach and compared them with our Neural Inversion Encoder. At the same time, we also connected our encoder to StyleGAN2 and compared it with inversion methods based on StyleGAN2, thereby demonstrating the efficiency of our encoder design. Table 1 and Figure 10 quantitatively and qualitatively displays the comparison results on both the GIRAFFE and StyleGAN2 generators respectively. The results show that our Neural Inversion Encoder consistently outperforms baseline methods. + +# 6.3 Ablation Study + +We conducted ablation experiments separately for the proposed Neural Inversion Encoder and the Round-robin Optimization algorithm. 
Table 2 displays the average ablation results of the Neural Inversion Encoder on various attribute codes, where NIB refers to Neural Inversion Block (the second part of the encoder) and MLP is the final part of the encoder. The results clearly show that our encoder structure is extremely effective and can predict code values more accurately. Please find the complete results in the Supplementary Material. + +For the Round-robin optimization algorithm, we compared it with three fixed optimization order algorithms on both single-object and multi-object scenarios. The three fixed sequences are as follows: + +$$ +\text {O r d e r 1}: B ^ {\text {s h a p e}}, B ^ {\text {a p p}}, \left\{O _ {i} ^ {r}, O _ {i} ^ {t}, O _ {i} ^ {s} \right\} _ {i = 1} ^ {N}, \left\{O _ {i} ^ {\text {s h a p e}}, O _ {i} ^ {\text {a p p}} \right\} _ {i = 1} ^ {N}, C +$$ + +Table 1: Reconstruction quality of different GAN inversion encoders using the generator of GIRAFFE and StyleGAN2. $\downarrow$ indicates the lower the better and $\uparrow$ indicates the higher the better. + +
| Method | MSE ↓ (GIRAFFE) | LPIPS ↓ (GIRAFFE) | ID ↑ (GIRAFFE) | MSE ↓ (StyleGAN2) | LPIPS ↓ (StyleGAN2) | ID ↑ (StyleGAN2) |
|---|---|---|---|---|---|---|
| e4e [34] | 0.031 | 0.306 | 0.867 | 0.052 | 0.200 | 0.502 |
| pSp [31] | 0.031 | 0.301 | 0.877 | 0.034 | 0.172 | 0.561 |
| HyperStyle [2] | - | - | - | 0.019 | 0.091 | 0.766 |
| HFGI [35] | - | - | - | 0.023 | 0.124 | 0.705 |
| TriplaneNet [5] | 0.029 | 0.296 | 0.870 | - | - | - |
| E3DGE [21] | 0.031 | 0.299 | 0.881 | - | - | - |
| 3D-GOI (Ours) | 0.024 | 0.262 | 0.897 | 0.017 | 0.098 | 0.769 |
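Table 1 reports MSE, LPIPS [41], and ID similarity between the input and the reconstructed image. The snippet below is a minimal, hypothetical sketch of how such metrics are commonly computed; the `lpips` package, the backbone choice, and the identity network are our assumptions, not details specified by the paper.

```python
# Hypothetical sketch of the Table 1 metrics (MSE, LPIPS, ID); the exact
# evaluation protocol (resolution, identity network, data range) is an assumption.
import torch
import torch.nn.functional as F
import lpips  # perceptual metric of Zhang et al. [41], `pip install lpips`

lpips_fn = lpips.LPIPS(net='alex')

def mse(real: torch.Tensor, fake: torch.Tensor) -> float:
    # real, fake: (B, 3, H, W) images scaled to [-1, 1]; lower is better.
    return F.mse_loss(fake, real).item()

def lpips_dist(real: torch.Tensor, fake: torch.Tensor) -> float:
    # Perceptual distance between reconstruction and input; lower is better.
    return lpips_fn(fake, real).mean().item()

def id_similarity(real: torch.Tensor, fake: torch.Tensor, id_net) -> float:
    # Cosine similarity of identity embeddings (e.g. an ArcFace-style network [8]);
    # higher is better. `id_net` is a placeholder for any pretrained embedder.
    e_r = F.normalize(id_net(real), dim=-1)
    e_f = F.normalize(id_net(fake), dim=-1)
    return (e_r * e_f).sum(dim=-1).mean().item()
```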
+ +Table 2: Ablation Study of the Neural Inversion Encoder. + +
| Method | MSE ↓ | LPIPS ↓ | ID ↑ |
|---|---|---|---|
| w/o NIB | 0.023 | 0.288 | 0.856 |
| w/o MLP | 0.015 | 0.183 | 0.878 |
| 3D-GOI | 0.010 | 0.141 | 0.906 |
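The "w/o NIB" and "w/o MLP" rows drop the second and final parts of the encoder, respectively. The skeleton below only illustrates how such ablation switches could be wired; the layer choices are hypothetical and do not reproduce the actual Neural Inversion Encoder architecture.

```python
# Purely illustrative skeleton of a "backbone -> NIB -> MLP" encoder with switches
# for the two ablated parts; all layer definitions here are our own assumptions.
import torch.nn as nn

class EncoderSketch(nn.Module):
    def __init__(self, code_dim=256, feat_dim=128, use_nib=True, use_mlp=True):
        super().__init__()
        self.backbone = nn.Sequential(                       # first part
            nn.Conv2d(3, 64, 3, 2, 1), nn.ReLU(),
            nn.Conv2d(64, feat_dim, 3, 2, 1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
        )
        # second part: stand-in for the Neural Inversion Block ("w/o NIB" drops it)
        self.nib = (nn.Sequential(nn.Linear(feat_dim, feat_dim), nn.ReLU())
                    if use_nib else nn.Identity())
        # final part: MLP head regressing the attribute code
        # ("w/o MLP" keeps only a single linear projection)
        self.mlp = (nn.Sequential(nn.Linear(feat_dim, feat_dim), nn.ReLU(),
                                  nn.Linear(feat_dim, code_dim))
                    if use_mlp else nn.Linear(feat_dim, code_dim))

    def forward(self, x):                                    # x: (B, 3, H, W)
        return self.mlp(self.nib(self.backbone(x)))
```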
+ +Table 3: Quantitative results of the ablation study of the Round-robin Optimization algorithm. + +
| Method | MSE ↓ | LPIPS ↓ | ID ↑ |
|---|---|---|---|
| Order1 | 0.016 | 0.184 | 0.923 |
| Order2 | 0.019 | 0.229 | 0.913 |
| Order3 | 0.019 | 0.221 | 0.911 |
| 3D-GOI | 0.008 | 0.128 | 0.938 |
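To make this comparison concrete, the following is a minimal sketch, under our own assumptions, of a fixed-order baseline versus a round-robin schedule over the code groups. `G` (a frozen GIRAFFE-style generator taking the code dictionary), `recon_loss`, and the `probe_gain` ranking heuristic are placeholders standing in for 3D-GOI's actual components; the remaining fixed orders (Order2, Order3) are listed right after this sketch.

```python
# Hypothetical sketch contrasting fixed-order optimization with a round-robin
# schedule over code groups. `codes` is a dict of leaf tensors with
# requires_grad=True (the coarse encoder predictions); G and recon_loss are
# placeholders, and the ranking rule below is NOT the paper's actual function.
import torch

def optimize_group(codes, name, target, G, recon_loss, steps=50, lr=0.01):
    # Optimize a single code group while all other codes stay fixed.
    opt = torch.optim.Adam([codes[name]], lr=lr)
    last = None
    for _ in range(steps):
        opt.zero_grad()
        loss = recon_loss(G(codes), target)
        loss.backward()
        opt.step()
        last = loss.item()
    return last

def fixed_order(codes, target, G, recon_loss, order):
    # Baselines Order1/2/3: every code group is optimized exactly once, in sequence.
    for name in order:
        optimize_group(codes, name, target, G, recon_loss)
    return codes

def probe_gain(codes, name, target, G, recon_loss, probe_steps=5):
    # Placeholder ranking signal: loss reduction after a few probe steps on `name`.
    before = recon_loss(G(codes), target).item()
    snapshot = codes[name].detach().clone()
    after = optimize_group(codes, name, target, G, recon_loss, steps=probe_steps)
    codes[name].data.copy_(snapshot)  # undo the probe update
    return before - after

def round_robin(codes, target, G, recon_loss, rounds=5):
    # Revisit all code groups in every round; the within-round order is chosen by
    # the placeholder ranking signal (standing in for 3D-GOI's ranking function).
    for _ in range(rounds):
        order = sorted(codes, key=lambda n: probe_gain(codes, n, target, G, recon_loss),
                       reverse=True)
        for name in order:
            optimize_group(codes, name, target, G, recon_loss)
    return codes
```

In this sketch the round-robin loop simply revisits every code group several times with a data-driven order, which is the property the single-pass fixed-order baselines lack.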
+ +$$ +\text{Order2}: \left\{O_{i}^{r}, O_{i}^{t}, O_{i}^{s}\right\}_{i=1}^{N}, \left\{O_{i}^{\text{shape}}, O_{i}^{\text{app}}\right\}_{i=1}^{N}, B^{\text{shape}}, B^{\text{app}}, C +$$ + +$$ +\text{Order3}: C, \left\{O_{i}^{\text{shape}}, O_{i}^{\text{app}}\right\}_{i=1}^{N}, \left\{O_{i}^{r}, O_{i}^{t}, O_{i}^{s}\right\}_{i=1}^{N}, B^{\text{shape}}, B^{\text{app}} +$$ + +$\{\}_{i=1}^{N}$ indicates that the elements inside $\{\}$ are arranged in sequence from 1 to N. There are many possible sequence combinations, and here we chose the three with the best results for demonstration. As Table 3 shows, our method achieves the best results on all metrics, demonstrating the effectiveness of our Round-robin optimization algorithm. As mentioned in Section 4.4, optimizing features like the background first can enhance the optimization. Hence, Order1 performs much better than Order2 and Order3. Please see Supplementary Material 3.5 for qualitative comparisons of these four methods on images. + +# 7 Conclusion + +This paper introduces a 3D GAN inversion method, 3D-GOI, that enables multifaceted editing of scenes containing multiple objects. By using a segmentation approach to separate objects and background, then carrying out a coarse estimation followed by a precise optimization, 3D-GOI can accurately obtain the codes of the image. These codes are then used for multifaceted editing. To the best of our knowledge, 3D-GOI is the first method to attempt multi-object & multifaceted editing. We anticipate that 3D-GOI holds immense potential for future applications in fields such as VR/AR and the Metaverse. + +# Acknowledgements + +This work was supported by the National Key Research and Development Program of China (2022YFB3105405, 2021YFC3300502). + +# References + +1. Abdal, R., Qin, Y., Wonka, P.: Image2stylegan: How to embed images into the stylegan latent space? In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4432-4441 (2019) +2. Alaluf, Y., Tov, O., Mokady, R., Gal, R., Bermano, A.: Hyperstyle: Stylegan inversion with hypernetworks for real image editing. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 18511-18521 (2022) +3. Arad Hudson, D., Zitnick, L.: Compositional transformers for scene generation. Advances in Neural Information Processing Systems 34, 9506-9520 (2021) +4. Bau, D., Zhu, J.Y., Wulff, J., Peebles, W., Strobelt, H., Zhou, B., Torralba, A.: Seeing what a gan cannot generate. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4502-4511 (2019) +5. Bhattarai, A.R., Nießner, M., Sevastopolsky, A.: Triplanenet: An encoder for eg3d inversion. arXiv preprint arXiv:2303.13497 (2023) +6. Brock, A., Donahue, J., Simonyan, K.: Large scale gan training for high fidelity natural image synthesis. arXiv preprint arXiv:1809.11096 (2018) +7. Chan, E.R., Lin, C.Z., Chan, M.A., Nagano, K., Pan, B., De Mello, S., Gallo, O., Guibas, L.J., Tremblay, J., Khamis, S., et al.: Efficient geometry-aware 3d generative adversarial networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 16123-16133 (2022) +8. Deng, J., Guo, J., Xue, N., Zafeiriou, S.: Arcface: Additive angular margin loss for deep face recognition. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 4690-4699 (2019) +9. 
Deng, Y., Wang, B., Shum, H.Y.: Learning detailed radiance manifolds for high-fidelity and 3d-consistent portrait synthesis from monocular image. arXiv preprint arXiv:2211.13901 (2022) +10. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial networks. Communications of the ACM 63(11), 139–144 (2020) +11. He, K., Fan, H., Wu, Y., Xie, S., Girshick, R.: Momentum contrast for unsupervised visual representation learning. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 9729-9738 (2020) +12. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016) +13. Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. Advances in neural information processing systems 33, 6840-6851 (2020) +14. Huh, M., Zhang, R., Zhu, J.Y., Paris, S., Hertzmann, A.: Transforming and projecting images into class-conditional generative networks. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part II 16. pp. 17–34. Springer (2020) + +15. Johnson, J., Hariharan, B., Van Der Maaten, L., Fei-Fei, L., Lawrence Zitnick, C., Girshick, R.: Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2901-2910 (2017) +16. Karras, T., Aila, T., Laine, S., Lehtinen, J.: Progressive growing of gans for improved quality, stability, and variation. arXiv preprint arXiv:1710.10196 (2017) +17. Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 4401-4410 (2019) +18. Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., Aila, T.: Analyzing and improving the image quality of stylegan. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8110-8119 (2020) +19. Ko, J., Cho, K., Choi, D., Ryoo, K., Kim, S.: 3d gan inversion with pose optimization. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 2967-2976 (2023) +20. Krizhevsky, A., Sutskever, I., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. Communications of the ACM 60(6), 84-90 (2017) +21. Lan, Y., Meng, X., Yang, S., Loy, C.C., Dai, B.: Self-supervised geometry-aware encoder for style-based 3d gan inversion. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 20940-20949 (2023) +22. Li, H., Shi, H., Zhang, W., Wu, W., Liao, Y., Wang, L., Lee, L.h., Zhou, P.: Dreamscene: 3d gaussian-based text-to-3d scene generation via formation pattern sampling. arXiv preprint arXiv:2404.03575 (2024) +23. Lin, Y., Bai, H., Li, S., Lu, H., Lin, X., Xiong, H., Wang, L.: Componerf: Text-guided multi-object compositional nerf with editable 3d scene layout. arXiv preprint arXiv:2303.13843 (2023) +24. Metzer, G., Richardson, E., Patashnik, O., Giryes, R., Cohen-Or, D.: Latentnerf for shape-guided generation of 3d shapes and textures. arXiv preprint arXiv:2211.07600 (2022) +25. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM 65(1), 99-106 (2021) +26. 
Nguyen-Phuoc, T., Li, C., Theis, L., Richardt, C., Yang, Y.L.: Hologan: Unsupervised learning of 3d representations from natural images. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 7588-7597 (2019) +27. Nguyen-Phuoc, T.H., Richardt, C., Mai, L., Yang, Y., Mitra, N.: Blockgan: Learning 3d object-aware scene representations from unlabelled images. Advances in neural information processing systems 33, 6767–6778 (2020) +28. Niemeyer, M., Geiger, A.: Giraffe: Representing scenes as compositional generative neural feature fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11453-11464 (2021) +29. Perarnau, G., Van De Weijer, J., Raducanu, B., Álvarez, J.M.: Invertible conditional gans for image editing. arXiv preprint arXiv:1611.06355 (2016) +30. Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988 (2022) +31. Richardson, E., Alaluf, Y., Patashnik, O., Nitzan, Y., Azar, Y., Shapiro, S., Cohen-Or, D.: Encoding in style: a stylegan encoder for image-to-image translation. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 2287-2296 (2021) + +32. Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, M., et al.: Imagenet large scale visual recognition challenge. International journal of computer vision 115, 211-252 (2015) +33. Schwarz, K., Liao, Y., Niemeyer, M., Geiger, A.: Graf: Generative radiance fields for 3d-aware image synthesis. Advances in Neural Information Processing Systems 33, 20154-20166 (2020) +34. Tov, O., Alaluf, Y., Nitzan, Y., Patashnik, O., Cohen-Or, D.: Designing an encoder for stylegan image manipulation. ACM Transactions on Graphics (TOG) 40(4), 1-14 (2021) +35. Wang, T., Zhang, Y., Fan, Y., Wang, J., Chen, Q.: High-fidelity gan inversion for image attribute editing. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11379-11388 (2022) +36. Wei, T., Chen, D., Zhou, W., Liao, J., Zhang, W., Yuan, L., Hua, G., Yu, N.: E2style: Improve the efficiency and effectiveness of stylegan inversion. IEEE Transactions on Image Processing 31, 3267-3280 (2022) +37. Xie, J., Ouyang, H., Piao, J., Lei, C., Chen, Q.: High-fidelity 3d gan inversion by pseudo-multi-view optimization. arXiv preprint arXiv:2211.15662 (2022) +38. Yang, H., Zhang, Z., Yan, S., Huang, H., Ma, C., Zheng, Y., Bajaj, C., Huang, Q.: Scene synthesis via uncertainty-driven attribute synchronization. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5630-5640 (2021) +39. Yang, J., Li, H.: Dense, accurate optical flow estimation with piecewise parametric model. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1019-1027 (2015) +40. Yin, F., Zhang, Y., Wang, X., Wang, T., Li, X., Gong, Y., Fan, Y., Cun, X., Shan, Y., Oztireli, C., et al.: 3d gan inversion with facial symmetry prior. arXiv preprint arXiv:2211.16927 (2022) +41. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 586-595 (2018) +42. Zhu, J., Shen, Y., Zhao, D., Zhou, B.: In-domain gan inversion for real image editing. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XVII 16. pp. 
592-608. Springer (2020) +43. Zhu, J.Y., Krahenbihl, P., Shechtman, E., Efros, A.A.: Generative visual manipulation on the natural image manifold. In: Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V 14. pp. 597-613. Springer (2016) +44. Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: Proceedings of the IEEE international conference on computer vision. pp. 2223-2232 (2017) \ No newline at end of file diff --git a/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/images.zip b/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..eae6773704dad63fc91cfc58b43fcf3e94999078 --- /dev/null +++ b/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb67a223e0814c111a516760a17b7872c895dbd164886dbef694ca21d476e7f1 +size 476775 diff --git a/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/layout.json b/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e82932caa79675026e2e6b7ae033ef178bdc7f42 --- /dev/null +++ b/2024/3D-GOI_ 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing/layout.json @@ -0,0 +1,11791 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 173, + 111, + 442, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 111, + 442, + 148 + ], + "spans": [ + { + "bbox": [ + 173, + 111, + 442, + 148 + ], + "type": "text", + "content": "3D-GOI: 3D GAN Omni-Inversion for Multifaceted and Multi-object Editing" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 134, + 167, + 479, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 167, + 479, + 194 + ], + "spans": [ + { + "bbox": [ + 134, + 167, + 479, + 194 + ], + "type": "text", + "content": "Haoran Li" + }, + { + "bbox": [ + 134, + 167, + 479, + 194 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 134, + 167, + 479, + 194 + ], + "type": "text", + "content": ", Long Ma" + }, + { + "bbox": [ + 134, + 167, + 479, + 194 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 134, + 167, + 479, + 194 + ], + "type": "text", + "content": ", Haolin Shi" + }, + { + "bbox": [ + 134, + 167, + 479, + 194 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 134, + 167, + 479, + 194 + ], + "type": "text", + "content": ", Yanbin Hao" + }, + { + "bbox": [ + 134, + 167, + 479, + 194 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 134, + 167, + 479, + 194 + ], + "type": "text", + "content": ", Yong Liao" + }, + { + "bbox": [ + 134, + 167, + 479, + 194 + ], + "type": "inline_equation", + "content": "^{1,2*}" + }, + { + "bbox": [ + 134, + 167, + 479, + 194 + ], + "type": "text", + "content": ", Lechao Cheng" + }, + { + "bbox": [ + 134, + 167, + 479, + 194 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 134, + 167, + 479, + 194 + ], + "type": "text", + "content": ", and Peng Yuan Zhou" + }, + { + "bbox": [ + 134, + 167, + 479, + 194 + ], + "type": "inline_equation", + "content": "^{4*}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": 
[ + 206, + 201, + 407, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 201, + 407, + 213 + ], + "spans": [ + { + "bbox": [ + 206, + 201, + 407, + 213 + ], + "type": "text", + "content": "1 University of Science and Technology of China" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 162, + 213, + 451, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 213, + 451, + 245 + ], + "spans": [ + { + "bbox": [ + 162, + 213, + 451, + 245 + ], + "type": "text", + "content": "2 CCCD Key Lab of Ministry of Culture and Tourism {1hr123, longm, mar}@mail.ustc.edu.cn, haoyanbin@hotmail.com, yliao@ustc.edu.cn" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 239, + 245, + 374, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 245, + 374, + 258 + ], + "spans": [ + { + "bbox": [ + 239, + 245, + 374, + 258 + ], + "type": "text", + "content": "3 Hefei University of Technology" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 261, + 258, + 352, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 258, + 352, + 268 + ], + "spans": [ + { + "bbox": [ + 261, + 258, + 352, + 268 + ], + "type": "text", + "content": "chenglc@hfut.edu.cn" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 265, + 268, + 348, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 265, + 268, + 348, + 279 + ], + "spans": [ + { + "bbox": [ + 265, + 268, + 348, + 279 + ], + "type": "text", + "content": "Aarhus University" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 252, + 280, + 362, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 280, + 362, + 290 + ], + "spans": [ + { + "bbox": [ + 252, + 280, + 362, + 290 + ], + "type": "text", + "content": "pengyuan.zhou@ece.au.dk" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 159, + 319, + 455, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 319, + 455, + 529 + ], + "spans": [ + { + "bbox": [ + 159, + 319, + 455, + 529 + ], + "type": "text", + "content": "Abstract. The current GAN inversion methods typically can only edit the appearance and shape of a single object and background while overlooking spatial information. In this work, we propose a 3D editing framework, 3D-GOI to enable multifaceted editing of affine information (scale, translation, and rotation) on multiple objects. 3D-GOI realizes the complex editing function by inverting the abundance of attribute codes (object shape/ appearance/ scale/ rotation/ translation, background shape/ appearance, and camera pose) controlled by GIRAFFE, a renowned 3D GAN. Accurately inverting all the codes is challenging, 3D-GOI solves this challenge following three main steps. First, we segment the objects and the background in a multi-object image. Second, we use a custom Neural Inversion Encoder to obtain coarse codes of each object. Finally, we use a round-robin optimization algorithm to get precise codes to reconstruct the image. To the best of our knowledge, 3D-GOI is the first framework to enable multifaceted editing on multiple objects. Both qualitative and quantitative experiments demonstrate that 3D-GOI holds immense potential for flexible, multifaceted editing in complex multi-object scenes. Our project and code are released at https://3d-goi.github.io." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 549, + 230, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 549, + 230, + 562 + ], + "spans": [ + { + "bbox": [ + 132, + 549, + 230, + 562 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 574, + 482, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 574, + 482, + 647 + ], + "spans": [ + { + "bbox": [ + 130, + 574, + 482, + 647 + ], + "type": "text", + "content": "The development of generative 3D models has attracted increasing attention to automatic 3D objects and scene generation and edition. Most existing works are limited to a single object, such as 3D face generation [7] and synthesis of facial viewpoints [40]. There are few methods for generating multi-object 3D scenes while editing such scenes remains unexplored. In this paper, we propose 3D-GOI to edit images containing multiple objects with complex spatial geometric" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 141, + 654, + 246, + 666 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 654, + 246, + 666 + ], + "spans": [ + { + "bbox": [ + 141, + 654, + 246, + 666 + ], + "type": "text", + "content": "* Corresponding authors" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 134, + 117, + 478, + 208 + ], + "blocks": [ + { + "bbox": [ + 134, + 117, + 478, + 208 + ], + "lines": [ + { + "bbox": [ + 134, + 117, + 478, + 208 + ], + "spans": [ + { + "bbox": [ + 134, + 117, + 478, + 208 + ], + "type": "image", + "image_path": "d9ae7d3da607da8e33995c2a1218ca96f1aa5b7eb57b8e91e023402c9b26b44e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 218, + 482, + 274 + ], + "lines": [ + { + "bbox": [ + 130, + 218, + 482, + 274 + ], + "spans": [ + { + "bbox": [ + 130, + 218, + 482, + 274 + ], + "type": "text", + "content": "Fig. 1: The first row shows the editing results of traditional 2D/3D GAN inversion methods on multi-object images. The second row showcases 3D-GOI, which can perform multifaceted editing on complex images with multiple objects. 'bg' stands for background. The red crosses in the upper right figures indicate features that cannot be edited with current 2D/3D GAN inversion methods." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 301, + 480, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 301, + 480, + 338 + ], + "spans": [ + { + "bbox": [ + 130, + 301, + 480, + 338 + ], + "type": "text", + "content": "relationships. 3D-GOI not only can change the appearance and shape of each object and the background, but also can edit the spatial position of each object and the camera pose of the image as shown by Figure 1." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 340, + 482, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 340, + 482, + 602 + ], + "spans": [ + { + "bbox": [ + 130, + 340, + 482, + 602 + ], + "type": "text", + "content": "Existing 3D multi-object scene generation methods can be mainly classified into two categories: those [28] based on Generative Adversarial Networks (GANs) [10] and those [22] based on Diffusion models [13], besides a few based on VAE or Transformer [3,38]. GAN-based methods, primarily represented by GIRAFFE [28] and its derivatives, depict complex scene images as results of multiple foreground objects, controlled by shape and appearance, subjected to affine transformations (scaling, translation, and rotation), and rendered together with a background, which is also controlled by shape and appearance, from a specific camera viewpoint. Diffusion-based methods [23] perceive scene images as results of multiple latent NeRF [24], which can be represented as 3D models, undergoing affine transformations, optimized with SDS [30], rendered from a specific camera viewpoint. Both categories represent scenes as combinations of multiple codes. To realize editing based on these generative methods, it's imperative to invert the complex multi-object scene images to retrieve their representative codes. After modifying these codes, regeneration can achieve diversified editing of complex images. Most inversion methods study the inversion of a single code based on its generation method. However, each multi-object image is the entangled result of multiple codes, thus inverting all codes from an image requires precise disentangling of the codes, which is extremely difficult and largely overlooked. Moreover, the prevailing inversion algorithms primarily employ optimization approaches. Attempting to optimize all codes simultaneously often leads to chaotic optimization directions and less accurate inversion outcomes." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "content": "Therefore, we propose 3D-GOI, a framework capable of inverting multiple codes to achieve a comprehensive inversion of multi-object images. Given current open-source 3D multi-object scene generation methods, we have chosen GI-RAFFE [28] as our generative model. In theory, our framework can be applied to other generative approaches as well. We address these challenges as follows." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "type": "text", + "content": "H. Li et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 199 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 199 + ], + "type": "text", + "content": "First, we categorize different codes based on object attributes, background attributes, and pose attributes. Through qualitative verification, we found that segmentation methods can roughly separate the codes pertaining to different objects. For example, the codes controlling an object's shape, appearance, scale, translation, and rotation predominantly relate to the object itself. So, during the inversion process, we only use the segmented image of this object to reduce the impact of the background and other objects on its codes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 200, + 479, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 200, + 479, + 235 + ], + "spans": [ + { + "bbox": [ + 130, + 200, + 479, + 235 + ], + "type": "text", + "content": "Second, we get the attributes' codes from the segmented image. Inspired by the Neural Rendering Block in GIRAFFE, we design a custom Neural Inversion Encoder network to coarsely disentangle and estimate the code values." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 236, + 479, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 236, + 479, + 307 + ], + "spans": [ + { + "bbox": [ + 130, + 236, + 479, + 307 + ], + "type": "text", + "content": "Finally, we obtain precise values for each code through optimization. We observed that optimizing all codes simultaneously tends to get stuck in local minima. Therefore, we propose a round-robin optimization algorithm that employs a ranking function to determine the optimization order for different codes. The algorithm enables a stable and efficient optimization process for accurate image reconstruction. Our contributions can be summarized as follows." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 312, + 479, + 393 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 138, + 312, + 479, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 312, + 479, + 335 + ], + "spans": [ + { + "bbox": [ + 138, + 312, + 479, + 335 + ], + "type": "text", + "content": "- To our best knowledge, 3D-GOI is the first multi-code inversion framework in generative models, achieving multifaceted editing of multi-object images." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 335, + 479, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 335, + 479, + 382 + ], + "spans": [ + { + "bbox": [ + 138, + 335, + 479, + 382 + ], + "type": "text", + "content": "- We introduce a three-stage inversion process: 1) separate the attribute codes of different objects via segmentation; 2) obtain coarse codes using a custom Neural Inversion Encoder; 3) optimize the reconstruction using a round-robin optimization strategy." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 382, + 453, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 382, + 453, + 393 + ], + "spans": [ + { + "bbox": [ + 138, + 382, + 453, + 393 + ], + "type": "text", + "content": "- Our method outperforms existing methods on both 3D and 2D tasks." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 409, + 236, + 421 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 409, + 236, + 421 + ], + "spans": [ + { + "bbox": [ + 132, + 409, + 236, + 421 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 437, + 480, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 437, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 437, + 480, + 665 + ], + "type": "text", + "content": "2D/3D GANs. 2D GAN maps a distribution from the latent space to the image space using a generator and a discriminator and has been widely explored. For example, BigGAN [6] increases the batch size and uses a simple truncation trick to finely control the trade-off between sample fidelity and variety. CycleGAN [44] feeds an input image into the generator and loops the output back to the generator. It achieves style transfer by minimizing the consistency loss between the input and its result. StyleGAN [17] maps a latent code into multiple style codes, allowing for detailed style control of images. 3D GANs usually combine 2D GANs with some 3D representation, such as NeRF [25], and have demonstrated excellent abilities to generate complex scenes with multi-view consistency. Broadly, 3D GANs can be classified into explicit and implicit models. Explicit models like HoloGAN [26] enable explicit control over the object pose through rigid body transformations of the learned 3D features. BlockGAN [27] generates foreground and background 3D features separately, combining them into a complete 3D scene representation. On the other hand, implicit models generally perform better. Many of these models take inspiration from NeRF [25], representing images as neural radiance fields and using volume rendering to generate photorealistic images in a continuous view. EG3D [7] introduces an explicit-implicit hybrid network architecture that produces high-quality 3D geometries." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "type": "text", + "content": "3D-GOI" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 138, + 114, + 219, + 161 + ], + "blocks": [ + { + "bbox": [ + 138, + 114, + 219, + 161 + ], + "lines": [ + { + "bbox": [ + 138, + 114, + 219, + 161 + ], + "spans": [ + { + "bbox": [ + 138, + 114, + 219, + 161 + ], + "type": "image", + "image_path": "95b140fad3eba4fd0c9ef37e1d7254c76e91c7db998f5dfcb3b1d382d6dc3489.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 169, + 481, + 194 + ], + "lines": [ + { + "bbox": [ + 131, + 169, + 481, + 194 + ], + "spans": [ + { + "bbox": [ + 131, + 169, + 481, + 194 + ], + "type": "text", + "content": "Fig. 2: Different GANs and GAN Inversion methods utilize codes differently. 
" + }, + { + "bbox": [ + 131, + 169, + 481, + 194 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 131, + 169, + 481, + 194 + ], + "type": "text", + "content": " represents the latent code and " + }, + { + "bbox": [ + 131, + 169, + 481, + 194 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 131, + 169, + 481, + 194 + ], + "type": "text", + "content": " represents the camera pose." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 257, + 114, + 340, + 161 + ], + "blocks": [ + { + "bbox": [ + 257, + 114, + 340, + 161 + ], + "lines": [ + { + "bbox": [ + 257, + 114, + 340, + 161 + ], + "spans": [ + { + "bbox": [ + 257, + 114, + 340, + 161 + ], + "type": "image", + "image_path": "74a2f6e85bbd65c440ad6f8c69a023b312a35cc1d757f096c45926c731e0d946.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 377, + 114, + 461, + 161 + ], + "blocks": [ + { + "bbox": [ + 377, + 114, + 461, + 161 + ], + "lines": [ + { + "bbox": [ + 377, + 114, + 461, + 161 + ], + "spans": [ + { + "bbox": [ + 377, + 114, + 461, + 161 + ], + "type": "image", + "image_path": "b736a5a1d20e8d30b35cde4467de5078a10d8de84d305eeaa9fff26dc883425a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 216, + 482, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 216, + 482, + 324 + ], + "spans": [ + { + "bbox": [ + 130, + 216, + 482, + 324 + ], + "type": "text", + "content": "GRAF [33] integrates shape and appearance coding within the generation process, which facilitates independent manipulation of the shape and appearance of the generated vehicle and furniture images. Moreover, the presence of 3D information provides additional control over the camera pose, contributing to the flexibility of the generated outputs. GIRAFFE [28] extends GRAF to multi-object scenes by considering an image as the composition of multiple objects in the foreground through affine transformation and the background rendered at a specific camera viewpoint. In this work, we select GIRAFFE as the 3D GAN model to be inverted." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 331, + 492, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 331, + 492, + 463 + ], + "spans": [ + { + "bbox": [ + 130, + 331, + 492, + 463 + ], + "type": "text", + "content": "2D/3D GAN Inversion. GAN inversion obtains the latent code of an input image under a certain generator and modifies the latent code to perform image editing operations. Current 2D GAN inversion methods can be divided into optimization-based, encoder-based, and hybrid methods. Optimization-based methods [1, 14, 43] directly optimize the initial code, requiring very accurate initial values. Encoder-based methods [29, 31, 36] can map images directly to latent code but generally cannot achieve full reconstruction. Hybrid-based methods [4, 42] combine these two approaches: first employ an encoder to map the image to a suitable latent code, then perform optimization. Currently, most 2D GANs only have one latent code to generate an image " + }, + { + "bbox": [ + 130, + 331, + 492, + 463 + ], + "type": "inline_equation", + "content": "^5" + }, + { + "bbox": [ + 130, + 331, + 492, + 463 + ], + "type": "text", + "content": ". 
Therefore, the 2D GAN inversion task can be represented as:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 244, + 472, + 481, + 491 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 472, + 481, + 491 + ], + "spans": [ + { + "bbox": [ + 244, + 472, + 481, + 491 + ], + "type": "interline_equation", + "content": "\\omega^ {*} = \\arg \\min _ {\\omega} \\mathcal {L} (G (\\omega , \\theta), I), \\tag {1}", + "image_path": "f37550421a68eb088662dc93b2e9e156c7ace18cacc3b565fedd0cc676ea188b.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 498, + 482, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 498, + 482, + 534 + ], + "spans": [ + { + "bbox": [ + 130, + 498, + 482, + 534 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 498, + 482, + 534 + ], + "type": "inline_equation", + "content": "\\omega" + }, + { + "bbox": [ + 130, + 498, + 482, + 534 + ], + "type": "text", + "content": " is the latent component, " + }, + { + "bbox": [ + 130, + 498, + 482, + 534 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 130, + 498, + 482, + 534 + ], + "type": "text", + "content": " denotes the generator, " + }, + { + "bbox": [ + 130, + 498, + 482, + 534 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 130, + 498, + 482, + 534 + ], + "type": "text", + "content": " denotes the parameters of the generator, " + }, + { + "bbox": [ + 130, + 498, + 482, + 534 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 130, + 498, + 482, + 534 + ], + "type": "text", + "content": " is the input image, and " + }, + { + "bbox": [ + 130, + 498, + 482, + 534 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 130, + 498, + 482, + 534 + ], + "type": "text", + "content": " is the loss function measuring the difference between the generated and input image." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 534, + 482, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 534, + 482, + 605 + ], + "spans": [ + { + "bbox": [ + 130, + 534, + 482, + 605 + ], + "type": "text", + "content": "Typically, 3D GANs have an additional camera pose parameter compared to 2D GANs, making it more challenging to obtain latent codes during inversion. Current methods like SPI [40] use a symmetric prior for faces to generate images with different perspectives, while [19] employs a pre-trained estimator to achieve better initialization and utilizes pixel-level depth calculated from the NeRF parameters for improved image reconstruction." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 606, + 481, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 606, + 481, + 631 + ], + "spans": [ + { + "bbox": [ + 130, + 606, + 481, + 631 + ], + "type": "text", + "content": "Currently, there are only limited works on 3D GAN inversion [9,21,37] which primarily focus on creating novel perspectives of human faces using specialized" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "type": "text", + "content": "H. Li et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 133, + 638, + 482, + 665 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 638, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 133, + 638, + 482, + 665 + ], + "type": "text", + "content": "5 Although StyleGAN can be controlled by multiple style codes, these codes are all generated from a single initial latent code, indicating their interrelations. Hence only one encoder is needed to predict all the codes during inversion." + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": "face datasets considering generally only two codes: camera pose code " + }, + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "inline_equation", + "content": "\\pmb{c}" + }, + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": " and the latent code " + }, + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "inline_equation", + "content": "\\pmb{\\omega}" + }, + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": ". Hence its inversion task can be represented as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 234, + 147, + 481, + 163 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 234, + 147, + 481, + 163 + ], + "spans": [ + { + "bbox": [ + 234, + 147, + 481, + 163 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\omega} ^ {*}, \\boldsymbol {c} ^ {*} = \\arg \\min _ {\\boldsymbol {\\omega}, \\boldsymbol {c}} \\mathcal {L} (G (\\boldsymbol {\\omega}, \\boldsymbol {c}, \\theta), I). \\tag {2}", + "image_path": "eef6536839182422c0d44306434317ed7ce51337971de27af83366d144e3a072.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 168, + 479, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 168, + 479, + 205 + ], + "spans": [ + { + "bbox": [ + 130, + 168, + 479, + 205 + ], + "type": "text", + "content": "A major advancement of 3D-GOI is the capability to invert more independent codes compared with other inversion methods, as Figure 2 shows, in order to perform multifaceted edits on multi-object images." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 220, + 225, + 235 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 220, + 225, + 235 + ], + "spans": [ + { + "bbox": [ + 131, + 220, + 225, + 235 + ], + "type": "text", + "content": "3 Preliminary" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 242, + 479, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 242, + 479, + 289 + ], + "spans": [ + { + "bbox": [ + 130, + 242, + 479, + 289 + ], + "type": "text", + "content": "GIRAFFE [28] represents individual objects as a combination of feature field and volume density. Through scene compositions, the feature fields of multiple objects and the background are combined. Finally, the combined feature field is rendered into an image using volume rendering and neural rendering." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "spans": [ + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "text", + "content": "For a coordinate " + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "text", + "content": " and a viewing direction " + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "inline_equation", + "content": "\\mathbf{d}" + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "text", + "content": " in scene space, the affine transformation " + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "inline_equation", + "content": "T(s,t,r)" + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "text", + "content": " (scale, translation, rotation) is used to transform them back into the object space of each individual object. 
Following the implicit shape representations used in NeRF, a multi-layer perceptron (MLP) " + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "inline_equation", + "content": "h_{\\theta}" + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "text", + "content": " is used to map the transformed " + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "inline_equation", + "content": "\\mathbf{d}" + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "text", + "content": ", along with the shape-controlling code " + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "inline_equation", + "content": "z_{s}" + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "text", + "content": " and appearance-controlling code " + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "inline_equation", + "content": "z_{a}" + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "text", + "content": ", to the feature field " + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "inline_equation", + "content": "\\mathbf{f}" + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "text", + "content": " and volume density " + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 130, + 290, + 480, + 361 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 211, + 369, + 481, + 384 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 369, + 481, + 384 + ], + "spans": [ + { + "bbox": [ + 211, + 369, + 481, + 384 + ], + "type": "interline_equation", + "content": "\\left. \\left(T (s, t, r; \\boldsymbol {x})\\right), T (s, t, r; \\boldsymbol {d})\\right), \\left. \\boldsymbol {z} _ {\\boldsymbol {s}}, \\boldsymbol {z} _ {\\boldsymbol {a}}\\right) \\xrightarrow {h _ {\\theta}} (\\sigma , \\boldsymbol {f}). \\tag {3}", + "image_path": "41826f404441adb1283da996a0745b56d6bd4248a1745ecbf1f25ad0f20263b9.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 388, + 479, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 388, + 479, + 437 + ], + "spans": [ + { + "bbox": [ + 130, + 388, + 479, + 437 + ], + "type": "text", + "content": "Then, GIRAFFE defines a Scene Composite Operator: at a given " + }, + { + "bbox": [ + 130, + 388, + 479, + 437 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 130, + 388, + 479, + 437 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 388, + 479, + 437 + ], + "type": "inline_equation", + "content": "\\pmb{d}" + }, + { + "bbox": [ + 130, + 388, + 479, + 437 + ], + "type": "text", + "content": ", the overall density is the sum of the individual densities (including the background). 
The overall feature field is represented as the density-weighted average of the feature field of each object:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 208, + 442, + 481, + 474 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 208, + 442, + 481, + 474 + ], + "spans": [ + { + "bbox": [ + 208, + 442, + 481, + 474 + ], + "type": "interline_equation", + "content": "C (\\boldsymbol {x}, \\boldsymbol {d}) = \\left(\\sigma , \\frac {1}{\\sigma} \\sum_ {i = 1} ^ {N} \\sigma_ {i} \\boldsymbol {f} _ {\\boldsymbol {i}}\\right), w h e r e \\quad \\sigma = \\sum_ {i = 1} ^ {N} \\sigma_ {i}, \\tag {4}", + "image_path": "b5d50ee18acccc5e97b8acea9a3e376fe3f2cd2e77f9739dec0ffe8e5cfa1484.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 479, + 362, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 479, + 362, + 491 + ], + "spans": [ + { + "bbox": [ + 130, + 479, + 362, + 491 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 479, + 362, + 491 + ], + "type": "inline_equation", + "content": "\\mathrm{N}" + }, + { + "bbox": [ + 130, + 479, + 362, + 491 + ], + "type": "text", + "content": " denotes the background plus (N-1) objects." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 491, + 480, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 491, + 480, + 574 + ], + "spans": [ + { + "bbox": [ + 130, + 491, + 480, + 574 + ], + "type": "text", + "content": "The rendering phase is divided into two stages. Similar to volume rendering in NeRF, given a pixel point, the rendering formula is used to calculate the feature field of this pixel point from the feature fields and the volume density of all sample points in a camera ray direction. After calculating all pixel points, a feature map is obtained. Neural rendering (Upsampling) is then applied to get the rendered image. Please refer to the Supplementary Material 1 for the detailed preliminary and formulas." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 131, + 590, + 203, + 602 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 590, + 203, + 602 + ], + "spans": [ + { + "bbox": [ + 131, + 590, + 203, + 602 + ], + "type": "text", + "content": "4 3D-GOI" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 131, + 613, + 257, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 613, + 257, + 624 + ], + "spans": [ + { + "bbox": [ + 131, + 613, + 257, + 624 + ], + "type": "text", + "content": "4.1 Problem Definition" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 629, + 479, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 479, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 479, + 666 + ], + "type": "text", + "content": "The problem we target is similar to the general definition of GAN inversion, with the difference being that we need to invert many more codes than existing methods (1 or 2) shown in Figure 2. 
The parameter " + }, + { + "bbox": [ + 130, + 629, + 479, + 666 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 130, + 629, + 479, + 666 + ], + "type": "text", + "content": " in GIRAFFE, which controls" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "type": "text", + "content": "3D-GOI" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 168, + 113, + 465, + 236 + ], + "blocks": [ + { + "bbox": [ + 168, + 113, + 465, + 236 + ], + "lines": [ + { + "bbox": [ + 168, + 113, + 465, + 236 + ], + "spans": [ + { + "bbox": [ + 168, + 113, + 465, + 236 + ], + "type": "image", + "image_path": "6b599194f2d727cfbe2094a6fe49f6c4ac4fba1ba15e0f2c7c3c4fd36a44ee08.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 247, + 482, + 324 + ], + "lines": [ + { + "bbox": [ + 130, + 247, + 482, + 324 + ], + "spans": [ + { + "bbox": [ + 130, + 247, + 482, + 324 + ], + "type": "text", + "content": "Fig. 3: The overall framework of 3D-GOI. As shown in the upper half, the encoders are trained on single-object scenes, each time using " + }, + { + "bbox": [ + 130, + 247, + 482, + 324 + ], + "type": "inline_equation", + "content": "L_{enc}" + }, + { + "bbox": [ + 130, + 247, + 482, + 324 + ], + "type": "text", + "content": " to predict one " + }, + { + "bbox": [ + 130, + 247, + 482, + 324 + ], + "type": "inline_equation", + "content": "w, w \\in W" + }, + { + "bbox": [ + 130, + 247, + 482, + 324 + ], + "type": "text", + "content": ", while other codes use real values. The lower half depicts the inversion process for the multi-object scene. We first decompose objects and background from the scene, then use the trained encoder to extract coarse codes, and finally use the round-robin optimization algorithm to obtain precise codes. The green blocks indicate required training and the yellow blocks indicate fixed parameters." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 348, + 480, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 348, + 480, + 373 + ], + "spans": [ + { + "bbox": [ + 130, + 348, + 480, + 373 + ], + "type": "text", + "content": "the generation, can be divided into object attributes, background attributes, and pose attributes, denoted by " + }, + { + "bbox": [ + 130, + 348, + 480, + 373 + ], + "type": "inline_equation", + "content": "O" + }, + { + "bbox": [ + 130, + 348, + 480, + 373 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 348, + 480, + 373 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 130, + 348, + 480, + 373 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 348, + 480, + 373 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 130, + 348, + 480, + 373 + ], + "type": "text", + "content": ". 
Then, " + }, + { + "bbox": [ + 130, + 348, + 480, + 373 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 130, + 348, + 480, + 373 + ], + "type": "text", + "content": " can be expressed as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 167, + 381, + 481, + 397 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 381, + 481, + 397 + ], + "spans": [ + { + "bbox": [ + 167, + 381, + 481, + 397 + ], + "type": "interline_equation", + "content": "W = \\{O _ {i} ^ {s h a p e}, O _ {i} ^ {a p p}, O _ {i} ^ {s}, O _ {i} ^ {t}, O _ {i} ^ {r}, B ^ {s h a p e}, B ^ {a p p}, C \\}, \\quad i = 1, \\dots , n, \\tag {5}", + "image_path": "33b89167da1369d444728f9a6b90d7306437f0ec9b1b78bce47b6d7600a30848.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "spans": [ + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "inline_equation", + "content": "O_{i}^{shape}" + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "text", + "content": " is the object shape latent code, " + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "inline_equation", + "content": "O_{i}^{app}" + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "text", + "content": " is the object appearance latent code, " + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "inline_equation", + "content": "O_{i}^{s}" + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "text", + "content": " is the object scale code, " + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "inline_equation", + "content": "O_{i}^{t}" + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "text", + "content": " is the object translation code, " + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "inline_equation", + "content": "O_{i}^{r}" + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "text", + "content": " is the object rotation code, " + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "inline_equation", + "content": "B^{shape}" + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "text", + "content": " is the background shape latent code, " + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "inline_equation", + "content": "B^{app}" + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "text", + "content": " is the background appearance latent code and " + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "text", + "content": " is the camera pose matrix. " + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 130, + 407, + 482, + 468 + ], + "type": "text", + "content": " objects. 
The reconstruction part can be expressed as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 242, + 478, + 481, + 495 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 478, + 481, + 495 + ], + "spans": [ + { + "bbox": [ + 242, + 478, + 481, + 495 + ], + "type": "interline_equation", + "content": "W ^ {*} = \\arg \\min _ {W} \\mathcal {L} (G (W, \\theta), I). \\tag {6}", + "image_path": "c3f2d7f113a76412d38b28d0abe9490039c7f6d46c34a5eca03122d13f4d2890.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 505, + 482, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 505, + 482, + 541 + ], + "spans": [ + { + "bbox": [ + 130, + 505, + 482, + 541 + ], + "type": "text", + "content": "According to Equation 5, we need to invert a total of " + }, + { + "bbox": [ + 130, + 505, + 482, + 541 + ], + "type": "inline_equation", + "content": "(5n + 3)" + }, + { + "bbox": [ + 130, + 505, + 482, + 541 + ], + "type": "text", + "content": " codes. Then, we are able to replace or interpolate any inverted code(s) to achieve multifaceted editing of multiple objects." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 131, + 560, + 268, + 572 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 560, + 268, + 572 + ], + "spans": [ + { + "bbox": [ + 131, + 560, + 268, + 572 + ], + "type": "text", + "content": "4.2 Scene Decomposition" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 581, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 581, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 581, + 482, + 666 + ], + "type": "text", + "content": "As mentioned, the GIRAFFE generator differs from typical GAN generators in that a large number of codes are involved and not a single code controls all the generated parts. Therefore, it is challenging to transform all codes using just one encoder or optimizer as in typical GAN Inversion methods. While a human can easily distinguish each object and some of its features (appearance, shape), a machine algorithm requires a large number of high-precision annotated samples to understand what code is expressed at what position in the image." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "type": "text", + "content": "H. Li et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 151, + 114, + 201, + 167 + ], + "blocks": [ + { + "bbox": [ + 151, + 114, + 201, + 167 + ], + "lines": [ + { + "bbox": [ + 151, + 114, + 201, + 167 + ], + "spans": [ + { + "bbox": [ + 151, + 114, + 201, + 167 + ], + "type": "image", + "image_path": "32f752b839e368774c8e064f48d9a7f86df4150af26172f6875e4ba55247d341.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 159, + 167, + 193, + 176 + ], + "lines": [ + { + "bbox": [ + 159, + 167, + 193, + 176 + ], + "spans": [ + { + "bbox": [ + 159, + 167, + 193, + 176 + ], + "type": "text", + "content": "(a) Input" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 131, + 185, + 481, + 251 + ], + "lines": [ + { + "bbox": [ + 131, + 185, + 481, + 251 + ], + "spans": [ + { + "bbox": [ + 131, + 185, + 481, + 251 + ], + "type": "text", + "content": "Fig. 4: Scene decomposition. (a) The input image. (b) The feature weight map of car A, where the redder regions indicate a higher opacity and the bluer regions lower opacity. (c) The feature weight map of car B. (d) The feature weight map of the background. By integrating these maps, it becomes apparent that the region corresponding to car A predominantly consists of the feature representation of cars A and B. The background's visible area solely contains the background's feature representation." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 237, + 114, + 290, + 166 + ], + "blocks": [ + { + "bbox": [ + 237, + 114, + 290, + 166 + ], + "lines": [ + { + "bbox": [ + 237, + 114, + 290, + 166 + ], + "spans": [ + { + "bbox": [ + 237, + 114, + 290, + 166 + ], + "type": "image", + "image_path": "73ecd522f757a980eb426abbc10f4f10e12b01aefe644b592aea5d6dcf959342.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 245, + 167, + 282, + 176 + ], + "lines": [ + { + "bbox": [ + 245, + 167, + 282, + 176 + ], + "spans": [ + { + "bbox": [ + 245, + 167, + 282, + 176 + ], + "type": "text", + "content": "(b) Car A" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 325, + 114, + 376, + 166 + ], + "blocks": [ + { + "bbox": [ + 325, + 114, + 376, + 166 + ], + "lines": [ + { + "bbox": [ + 325, + 114, + 376, + 166 + ], + "spans": [ + { + "bbox": [ + 325, + 114, + 376, + 166 + ], + "type": "image", + "image_path": "de237f2656a16860d64607a8884433dbfa714d679556bec6eb876f4a75b524ec.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 333, + 167, + 369, + 176 + ], + "lines": [ + { + "bbox": [ + 333, + 167, + 369, + 176 + ], + "spans": [ + { + "bbox": [ + 333, + 167, + 369, + 176 + ], + "type": "text", + "content": "(c) Car B" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 413, + 114, + 463, + 166 + ], + "blocks": [ + { + "bbox": [ + 413, + 114, + 463, + 166 + ], + "lines": [ + { + "bbox": [ + 413, + 114, + 463, + 166 + ], + "spans": [ + { + "bbox": [ + 413, + 114, + 463, + 166 + ], + "type": "image", + "image_path": "34a97b6ab961fa6c051b449acca725226c85ad0215374b22dba85d90a3230fcd.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + 
{ + "bbox": [ + 410, + 167, + 467, + 176 + ], + "lines": [ + { + "bbox": [ + 410, + 167, + 467, + 176 + ], + "spans": [ + { + "bbox": [ + 410, + 167, + 467, + 176 + ], + "type": "text", + "content": "(d) Background" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 304, + 482, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 304, + 482, + 437 + ], + "spans": [ + { + "bbox": [ + 130, + 304, + 482, + 437 + ], + "type": "text", + "content": "A straightforward idea is that the attribute codes of an object will map to the corresponding position of the object in the image. For example, translation " + }, + { + "bbox": [ + 130, + 304, + 482, + 437 + ], + "type": "inline_equation", + "content": "(O^t)" + }, + { + "bbox": [ + 130, + 304, + 482, + 437 + ], + "type": "text", + "content": " and rotation " + }, + { + "bbox": [ + 130, + 304, + 482, + 437 + ], + "type": "inline_equation", + "content": "(O^r)" + }, + { + "bbox": [ + 130, + 304, + 482, + 437 + ], + "type": "text", + "content": " codes control the relative position of an object in the scene, scaling " + }, + { + "bbox": [ + 130, + 304, + 482, + 437 + ], + "type": "inline_equation", + "content": "(O^s)" + }, + { + "bbox": [ + 130, + 304, + 482, + 437 + ], + "type": "text", + "content": " and shape " + }, + { + "bbox": [ + 130, + 304, + 482, + 437 + ], + "type": "inline_equation", + "content": "(O^{shape})" + }, + { + "bbox": [ + 130, + 304, + 482, + 437 + ], + "type": "text", + "content": " codes determine the contour and shape of the object, and appearance " + }, + { + "bbox": [ + 130, + 304, + 482, + 437 + ], + "type": "inline_equation", + "content": "(O^{app})" + }, + { + "bbox": [ + 130, + 304, + 482, + 437 + ], + "type": "text", + "content": " codes control the appearance representation at the position of the object. The image obtained from segmentation precisely encompasses these three types of information, allowing us to invert it and obtain the five attribute codes for the corresponding object. Similarly, for codes " + }, + { + "bbox": [ + 130, + 304, + 482, + 437 + ], + "type": "inline_equation", + "content": "(B^{shape}, B^{app})" + }, + { + "bbox": [ + 130, + 304, + 482, + 437 + ], + "type": "text", + "content": " that generate the background, we can invert them using the segmented image of the background. Note that obtaining camera pose code " + }, + { + "bbox": [ + 130, + 304, + 482, + 437 + ], + "type": "inline_equation", + "content": "(C)" + }, + { + "bbox": [ + 130, + 304, + 482, + 437 + ], + "type": "text", + "content": " requires information from the entire rendered image." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 450, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 450, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 450, + 482, + 666 + ], + "type": "text", + "content": "We can qualitatively validate this idea. In Equation 3, we can see that an object's five attribute codes are mapped to the object's feature field and volume density through " + }, + { + "bbox": [ + 130, + 450, + 482, + 666 + ], + "type": "inline_equation", + "content": "h_{\\theta}" + }, + { + "bbox": [ + 130, + 450, + 482, + 666 + ], + "type": "text", + "content": ". As inferred from Equation 4, the scene's feature field is synthesized by weighting the feature fields of each object by density. 
Therefore, an object appears at its position because its feature field has a high-density weight at the corresponding location. Figure 4 displays the density of different objects at different positions during GIRAFFE's feature field composition process. Redder regions indicate higher density, and bluer regions indicate lower density. As discussed, car A exhibits a high-density value within its area and near-zero density elsewhere; a similar pattern is seen with car B. The background, however, presents a non-uniform density distribution across the scene. We can consider that cars A and B and the background mainly manifest their feature fields within their visible areas. Hence, we apply a straightforward segmentation method to separate each object's feature field and get the codes. Segmenting each object also allows our encoder to pay more attention to each input object or to the background. As such, to reduce computation cost, we can train the encoder on single-object scenes and then generalize it to multi-object scenes, instead of training directly on multi-object scenes that involve more codes." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "type": "text", + "content": "3D-GOI" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 148, + 140, + 276, + 215 + ], + "blocks": [ + { + "bbox": [ + 148, + 140, + 276, + 215 + ], + "lines": [ + { + "bbox": [ + 148, + 140, + 276, + 215 + ], + "spans": [ + { + "bbox": [ + 148, + 140, + 276, + 215 + ], + "type": "image", + "image_path": "6a7668ad2dba3d3163f106f104fd8323f34ebb1a6ef168cfea5376fcefec75ab.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 162, + 216, + 262, + 224 + ], + "lines": [ + { + "bbox": [ + 162, + 216, + 262, + 224 + ], + "spans": [ + { + "bbox": [ + 162, + 216, + 262, + 224 + ], + "type": "text", + "content": "(a) Neural Rendering Block" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 334, + 114, + 462, + 213 + ], + "blocks": [ + { + "bbox": [ + 334, + 114, + 462, + 213 + ], + "lines": [ + { + "bbox": [ + 334, + 114, + 462, + 213 + ], + "spans": [ + { + "bbox": [ + 334, + 114, + 462, + 213 + ], + "type": "image", + "image_path": "d5bdb0e0efa9b7789efd91c1aec0a513a81f832a4806004bc0e37116528e341b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 347, + 215, + 452, + 224 + ], + "lines": [ + { + "bbox": [ + 347, + 215, + 452, + 224 + ], + "spans": [ + { + "bbox": [ + 347, + 215, + 452, + 224 + ], + "type": "text", + "content": "(b) Neural Inversion Encoder" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "lines": [ + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "spans": [ + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "text", + "content": "Fig. 5: Neural Inversion Encoder. 
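The density-weighted composition that the preceding paragraph relies on can be sketched as below; this is an illustrative reading of GIRAFFE's composition step, not its actual code, with tensor shapes and names assumed. The per-entity weight maps shown in Figure 4 correspond to each entity's share sigma_i / sigma of the total density.

```python
# Sketch of the density-weighted composition described above: each entity's
# feature field contributes in proportion to its volume density, so its
# features dominate exactly where its density is high. Shapes are illustrative.
import torch

def compose_fields(sigmas, feats, eps=1e-8):
    # sigmas: list of (N,) densities; feats: list of (N, M_f) features,
    # one pair per object/background, sampled at the same N points.
    sigma = torch.stack(sigmas).sum(dim=0)                      # total density
    f = torch.stack([s.unsqueeze(-1) * f_
                     for s, f_ in zip(sigmas, feats)]).sum(dim=0)
    return sigma, f / (sigma.unsqueeze(-1) + eps)

# Visualizing sigma_i / sigma per entity yields weight maps like Figure 4.
```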
(a) The Neural Rendering Block in GIRAFFE [28], an upsampling process to generate image " + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "inline_equation", + "content": "\hat{I}" + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "text", + "content": ". (b) The Neural Inversion Encoder is the inverse of (a): a downsampling process. " + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "text", + "content": " is the input image, " + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "inline_equation", + "content": "H, W" + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "text", + "content": " are image height and width. " + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "inline_equation", + "content": "I_v" + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "text", + "content": " is the heatmap of the image, " + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "inline_equation", + "content": "H_v, W_v" + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "inline_equation", + "content": "M_f" + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "text", + "content": " are the dimensions of " + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "inline_equation", + "content": "I_v" + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "text", + "content": " is the code to be predicted, and " + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "inline_equation", + "content": "w_f" + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "text", + "content": " is the dimension of " + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 131, + 233, + 482, + 300 + ], + "type": "text", + "content": ". Up/Down means upsampling/downsampling." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 323, + 253, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 323, + 253, + 335 + ], + "spans": [ + { + "bbox": [ + 132, + 323, + 253, + 335 + ], + "type": "text", + "content": "4.3 Coarse Estimation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 342, + 482, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 342, + 482, + 437 + ], + "spans": [ + { + "bbox": [ + 130, + 342, + 482, + 437 + ], + "type": "text", + "content": "The previous segmentation step roughly disentangles the codes. Unlike in typical encoder-based methods, it is difficult to predict all the codes using just one encoder. Therefore, we assign an encoder to each code, allowing each encoder to focus solely on predicting one code. Hence, we need a total of eight encoders. 
As shown in Figure 3, we input the object segmentation for the object attribute codes " + }, + { + "bbox": [ + 130, + 342, + 482, + 437 + ], + "type": "inline_equation", + "content": "(O^{shape}, O^{app}, O^s, O^t, O^r)" + }, + { + "bbox": [ + 130, + 342, + 482, + 437 + ], + "type": "text", + "content": ", the background segmentation for the background attribute codes " + }, + { + "bbox": [ + 130, + 342, + 482, + 437 + ], + "type": "inline_equation", + "content": "(B^{shape}, B^{app})" + }, + { + "bbox": [ + 130, + 342, + 482, + 437 + ], + "type": "text", + "content": ", and the original image for the pose attribute code " + }, + { + "bbox": [ + 130, + 342, + 482, + 437 + ], + "type": "inline_equation", + "content": "(C)" + }, + { + "bbox": [ + 130, + 342, + 482, + 437 + ], + "type": "text", + "content": ". Different objects share the same encoder for the same attribute code." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 438, + 482, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 438, + 482, + 545 + ], + "spans": [ + { + "bbox": [ + 130, + 438, + 482, + 545 + ], + "type": "text", + "content": "We allocate an encoder with a similar structure, called the Neural Inversion Encoder, to each code. The Neural Inversion Encoder consists of three parts, as Figure 5(b) shows. The first part employs a standard feature pyramid over a ResNet [12] backbone, as in pSp [31], to extract the image features. The second part, for which we designed a structure opposite to GIRAFFE's Neural Rendering Block based on its architecture as Figure 5(a) shows, downsamples the images layer by layer using a CNN and then uses skip connections [12] to combine the layers, yielding a one-dimensional feature. The third part employs an MLP structure to produce a code of the corresponding dimension." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 545, + 482, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 545, + 482, + 594 + ], + "spans": [ + { + "bbox": [ + 130, + 545, + 482, + 594 + ], + "type": "text", + "content": "Training multiple encoders simultaneously converges poorly due to the large number of parameters. Hence, we use a dataset generated by GIRAFFE, which retains the true value of each code, and train an encoder for one code at a time while keeping the other codes at their true values, which greatly smooths the training." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": "During encoder training, we use the Mean Squared Error (MSE) loss, perceptual loss (LPIPS) [41], and identity loss (ID) [11] between the reconstructed image and the original image, to be consistent with most 2D and 3D GAN inversion training methodologies. 
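A hypothetical sketch of how the eight per-code encoders could be wired: five object-attribute encoders fed object segmentations (shared across objects), two background encoders fed the background segmentation, and a camera encoder fed the full image. The Encoder stub below merely stands in for the Neural Inversion Encoder of Figure 5(b); only the output dimensions (from Section 5) are taken from the paper.

```python
# Hypothetical routing of the eight per-code encoders; Encoder is a stub.
import torch.nn as nn

class Encoder(nn.Module):
    def __init__(self, out_dim):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(3, 32, 3, stride=2, padding=1), nn.LeakyReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(32, out_dim))
    def forward(self, x):
        return self.net(x)

OUT_DIM = {"shape": 256, "app": 256, "s": 3, "t": 3, "r": 1,
           "bg_shape": 128, "bg_app": 128, "cam": 1}
encoders = nn.ModuleDict({k: Encoder(d) for k, d in OUT_DIM.items()})  # 8 total

def coarse_estimate(obj_segs, bg_seg, image):
    codes = {"cam": encoders["cam"](image)}          # pose needs the full view
    for k in ("bg_shape", "bg_app"):
        codes[k] = encoders[k](bg_seg)
    for i, seg in enumerate(obj_segs):               # objects share encoders
        for k in ("shape", "app", "s", "t", "r"):
            codes[f"obj{i}_{k}"] = encoders[k](seg)
    return codes
```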
When training the affine codes (scale " + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "inline_equation", + "content": "O^s" + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": ", translation " + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "inline_equation", + "content": "O^t" + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": ", rotation " + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "inline_equation", + "content": "O^r" + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": "), we find that different combinations of values produce very similar images, e.g., moving an object forward and increasing its scale yield" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "type": "text", + "content": "H. Li et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 133, + 132, + 462, + 312 + ], + "blocks": [ + { + "bbox": [ + 139, + 118, + 326, + 130 + ], + "lines": [ + { + "bbox": [ + 139, + 118, + 326, + 130 + ], + "spans": [ + { + "bbox": [ + 139, + 118, + 326, + 130 + ], + "type": "text", + "content": "Algorithm 1: Round-robin Optimization" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "lines": [ + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "spans": [ + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "text", + "content": "Data: all codes " + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "inline_equation", + "content": "w\\in W" + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "text", + "content": " predicted by encoders, fixed GIRAFFE generator " + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "text", + "content": " input image " + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "text", + "content": " 1 Initialize " + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "inline_equation", + "content": "lr\\_ w = 10^{-3},w\\in W" + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "text", + "content": " . 
\n2 while any " + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "inline_equation", + "content": "lr\\_ w > 10^{-5}" + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "text", + "content": " do \n3 foreach " + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "inline_equation", + "content": "w\\in W" + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "text", + "content": " do \n4 Sample " + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "inline_equation", + "content": "\\delta w" + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "text", + "content": " . \n5 Compute " + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "inline_equation", + "content": "\\delta \\mathcal{L}(w)" + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "text", + "content": " using Eq.8; \n6 end \n7 Compute rank_list using Eq.9; \n8 foreach " + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "inline_equation", + "content": "w\\in" + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "text", + "content": " rank_list and lr_w>10-5 do \n9 Optimization w with " + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{opt}" + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "text", + "content": " in Eq. 10 of I and G(W;0); \n10 if the " + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{opt}" + }, + { + "bbox": [ + 133, + 132, + 462, + 312 + ], + "type": "text", + "content": " ceases to decrease for five consecutive iterations then \n11 | lr_w=lr_w/2; \n12 end \n13 end \n14 end" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "algorithm" + }, + { + "bbox": [ + 130, + 339, + 479, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 339, + 479, + 374 + ], + "spans": [ + { + "bbox": [ + 130, + 339, + 479, + 374 + ], + "type": "text", + "content": "similar results. However, the encoder can only predict one value at a time, hence we add the MSE loss of the predicted " + }, + { + "bbox": [ + 130, + 339, + 479, + 374 + ], + "type": "inline_equation", + "content": "O^s" + }, + { + "bbox": [ + 130, + 339, + 479, + 374 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 339, + 479, + 374 + ], + "type": "inline_equation", + "content": "O^t" + }, + { + "bbox": [ + 130, + 339, + 479, + 374 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 339, + 479, + 374 + ], + "type": "inline_equation", + "content": "O^r" + }, + { + "bbox": [ + 130, + 339, + 479, + 374 + ], + "type": "text", + "content": " values, and their true values, to compel the encoder to predict the true value." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 234, + 387, + 480, + 400 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 234, + 387, + 480, + 400 + ], + "spans": [ + { + "bbox": [ + 234, + 387, + 480, + 400 + ], + "type": "interline_equation", + "content": "\mathcal{L}_{enc} = \lambda_{1} L_{2} + \lambda_{2} L_{lpips} + \lambda_{3} L_{id}, \tag{7}", + "image_path": "278c88930633f7beb1f486d11fb123a68910e9f2ed8986e7c0adcd9e9ce8f9c4.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "spans": [ + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "inline_equation", + "content": "\lambda_{i}, i = 1,2,3" + }, + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "text", + "content": " denote the weighting coefficients of the respective losses. When training the " + }, + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "inline_equation", + "content": "O^s" + }, + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "inline_equation", + "content": "O^t" + }, + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "inline_equation", + "content": "O^r" + }, + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "text", + "content": " codes, the " + }, + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "inline_equation", + "content": "L_2" + }, + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "text", + "content": " loss includes the MSE loss between the real values of " + }, + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "inline_equation", + "content": "O^s" + }, + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "inline_equation", + "content": "O^t" + }, + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "inline_equation", + "content": "O^r" + }, + { + "bbox": [ + 130, + 403, + 479, + 440 + ], + "type": "text", + "content": " and their predicted values." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 456, + 266, + 468 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 456, + 266, + 468 + ], + "spans": [ + { + "bbox": [ + 132, + 456, + 266, + 468 + ], + "type": "text", + "content": "4.4 Precise Optimization" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 474, + 481, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 474, + 481, + 641 + ], + "spans": [ + { + "bbox": [ + 130, + 474, + 481, + 641 + ], + "type": "text", + "content": "Pre-trained segmentation models introduce some segmentation errors, and encoder-based GAN inversion networks [31,34,35] usually cannot obtain codes accurately, necessitating refinement. Next, we optimize the coarse codes. Through experiments, we have found that using a single optimizer to optimize all latent codes tends to converge to local minima. Hence, we employ multiple optimizers, each handling a single code. 
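Equation 7 with the affine-code supervision described above can be sketched as follows; lpips_fn and id_sim are stand-ins for the pre-trained LPIPS and identity networks (id_sim assumed to return a similarity score), and the default lambda values are those reported in Section 5.

```python
# Hedged sketch of the Eq. 7 encoder training loss. For the affine codes
# (O^s, O^t, O^r), the L2 term additionally supervises the predicted code
# value with its ground truth from the GIRAFFE-generated dataset.
import torch.nn.functional as F

def encoder_loss(I_rec, I, lpips_fn, id_sim,
                 pred_code=None, true_code=None, l1=1.0, l2=0.8, l3=0.2):
    loss = l1 * F.mse_loss(I_rec, I) \
           + l2 * lpips_fn(I_rec, I) \
           + l3 * (1 - id_sim(I_rec, I))   # identity similarity -> loss
    if pred_code is not None:              # affine codes only
        loss = loss + F.mse_loss(pred_code, true_code)
    return loss
```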
The optimization order is crucial because the disparity between predicted and actual values varies across encoders, and changes to different codes affect the image to different degrees; e.g., changes to the " + }, + { + "bbox": [ + 130, + 474, + 481, + 641 + ], + "type": "inline_equation", + "content": "B^{shape}" + }, + { + "bbox": [ + 130, + 474, + 481, + 641 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 474, + 481, + 641 + ], + "type": "inline_equation", + "content": "B^{app}" + }, + { + "bbox": [ + 130, + 474, + 481, + 641 + ], + "type": "text", + "content": " codes, which mostly control background generation, have a larger impact on overall pixel values. Prioritizing the optimization of codes with significant disparity and a high potential for changing pixel values tends to yield superior results in our experiments. Hence, we propose an automated round-robin optimization algorithm (Algorithm 1) that sequentially optimizes each code based on the image reconstructed in each round." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "content": "Algorithm 1 adds multiple minor disturbances to each code and calculates the loss between the images reconstructed before and after the disturbance
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 187, + 198, + 481, + 213 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 198, + 481, + 213 + ], + "spans": [ + { + "bbox": [ + 187, + 198, + 481, + 213 + ], + "type": "interline_equation", + "content": "\\delta \\mathcal {L} (w) = \\mathcal {L} (G (W - \\{w \\}, w + \\delta w, \\theta), I) - \\mathcal {L} (G (W, \\theta), I), \\tag {8}", + "image_path": "ee7db10f6d20cac1d51de184df747cbbefac0cff59c0df4f15b45a9292c33dda.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 230, + 230, + 481, + 255 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 230, + 481, + 255 + ], + "spans": [ + { + "bbox": [ + 230, + 230, + 481, + 255 + ], + "type": "interline_equation", + "content": "\\operatorname {r a n k} _ {-} \\operatorname {l i s t} = F _ {\\operatorname {r a n k}} (\\delta \\mathcal {L} (w), \\frac {\\delta \\mathcal {L} (w)}{\\delta w}), \\tag {9}", + "image_path": "a3ca1dd2317419663dbeffa502afc242efebc92525e9a7ff312327fb679a44b9.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 257, + 480, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 257, + 480, + 294 + ], + "spans": [ + { + "bbox": [ + 131, + 257, + 480, + 294 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 257, + 480, + 294 + ], + "type": "inline_equation", + "content": "w \\in W" + }, + { + "bbox": [ + 131, + 257, + 480, + 294 + ], + "type": "text", + "content": " is one of the codes and " + }, + { + "bbox": [ + 131, + 257, + 480, + 294 + ], + "type": "inline_equation", + "content": "\\delta w" + }, + { + "bbox": [ + 131, + 257, + 480, + 294 + ], + "type": "text", + "content": " represents the minor disturbance of " + }, + { + "bbox": [ + 131, + 257, + 480, + 294 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 131, + 257, + 480, + 294 + ], + "type": "text", + "content": ". For the rotation angle " + }, + { + "bbox": [ + 131, + 257, + 480, + 294 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 131, + 257, + 480, + 294 + ], + "type": "text", + "content": ", we have found that adding a depth loss can accelerate its optimization. Thus, the loss " + }, + { + "bbox": [ + 131, + 257, + 480, + 294 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 131, + 257, + 480, + 294 + ], + "type": "text", + "content": " during optimization can be expressed as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 214, + 304, + 481, + 317 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 304, + 481, + 317 + ], + "spans": [ + { + "bbox": [ + 214, + 304, + 481, + 317 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {o p t}} = \\lambda_ {1} L _ {2} + \\lambda_ {2} L _ {\\text {l p i p s}} + \\lambda_ {3} L _ {\\text {i d}} + \\lambda_ {4} L _ {\\text {d e e p}}. \\tag {10}", + "image_path": "47e4d5c2c21689f587cb3f44218d7ec99d048d6b3ef3f90bd46cdd1d85dcca3c.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 131, + 326, + 480, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 326, + 480, + 350 + ], + "spans": [ + { + "bbox": [ + 131, + 326, + 480, + 350 + ], + "type": "text", + "content": "This optimization method allows for more precise tuning of the codes for more accurate reconstruction and editing of the images." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 369, + 248, + 383 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 369, + 248, + 383 + ], + "spans": [ + { + "bbox": [ + 132, + 369, + 248, + 383 + ], + "type": "text", + "content": "5 Implementation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 131, + 396, + 482, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 396, + 482, + 564 + ], + "spans": [ + { + "bbox": [ + 131, + 396, + 482, + 564 + ], + "type": "text", + "content": "Neural Inversion Encoder. The first part of our encoder uses ResNet50 to extract features. In the second part, we downsample the extracted features (512-dimensional) and the input RGB image (3-dimensional) together. The two features are added together through skip connections, as shown in Figure 5. In the downsampling module, we use a 2D convolution with a kernel of 3 and a stride of 1, and the LeakyReLU activation function, to obtain a 256-dimensional intermediate feature. For object shape/appearance attributes, the output dimension is 256, and we use four Fully Connected Layers " + }, + { + "bbox": [ + 131, + 396, + 482, + 564 + ], + "type": "inline_equation", + "content": "\\{4\\times FCL(256,256)\\}" + }, + { + "bbox": [ + 131, + 396, + 482, + 564 + ], + "type": "text", + "content": " to get the codes. For background shape/appearance attributes, the output dimension is 128, we use " + }, + { + "bbox": [ + 131, + 396, + 482, + 564 + ], + "type": "inline_equation", + "content": "\\{FCL(256,128) + 3\\times FCL(128,128)\\}" + }, + { + "bbox": [ + 131, + 396, + 482, + 564 + ], + "type": "text", + "content": " to get the codes. For object scale/translation attributes, the output dimension is 3, and we use the network " + }, + { + "bbox": [ + 131, + 396, + 482, + 564 + ], + "type": "inline_equation", + "content": "\\{FCL(2^i,2^{i - 1}) + FCL(8,3),i = 8,\\dots ,4\\}" + }, + { + "bbox": [ + 131, + 396, + 482, + 564 + ], + "type": "text", + "content": " to get the codes. For camera pose and rotation attributes, the output dimension is 1, and we use a similar network " + }, + { + "bbox": [ + 131, + 396, + 482, + 564 + ], + "type": "inline_equation", + "content": "\\{FCL(2^i,2^{i - 1}) + FCL(8,1),i = 8,\\dots ,4\\}" + }, + { + "bbox": [ + 131, + 396, + 482, + 564 + ], + "type": "text", + "content": " to get the codes." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 581, + 482, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 581, + 482, + 667 + ], + "spans": [ + { + "bbox": [ + 131, + 581, + 482, + 667 + ], + "type": "text", + "content": "Training and Optimization are carried out on a single NVIDIA A100 SXM GPU with 40GB of memory, using the Adam optimizer. The initial learning rate is set to " + }, + { + "bbox": [ + 131, + 581, + 482, + 667 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 131, + 581, + 482, + 667 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 581, + 482, + 667 + ], + "type": "inline_equation", + "content": "10^{-3}" + }, + { + "bbox": [ + 131, + 581, + 482, + 667 + ], + "type": "text", + "content": ", respectively. Encoder training employs a batch size of 50. Each encoder took about 12 hours to train, and optimizing a single image of a complex multi-object scene took about 1 minute. For rotation features, it is difficult for the encoder to make accurate predictions for some images. 
Therefore, we uniformly sampled 20 values in the range of " + }, + { + "bbox": [ + 131, + 581, + 482, + 667 + ], + "type": "inline_equation", + "content": "[0, 360^{\\circ}]" + }, + { + "bbox": [ + 131, + 581, + 482, + 667 + ], + "type": "text", + "content": " for the" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "type": "text", + "content": "H. Li et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": "rotation parameters with large deviations. We selected the value that minimizes the loss in Equation 7 as the initial value for the optimization stage." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 140, + 480, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 140, + 480, + 200 + ], + "spans": [ + { + "bbox": [ + 130, + 140, + 480, + 200 + ], + "type": "text", + "content": "For LPIPS loss, we employ a pre-trained AlexNet [20]. For ID calculation, we employ a pre-trained Arcface [8] model in human face datasets and a pre-trained ResNet-50 [32] model in the car dataset. For depth loss, we use the pre-trained Dense Prediction Transformer model. We set " + }, + { + "bbox": [ + 130, + 140, + 480, + 200 + ], + "type": "inline_equation", + "content": "\\lambda_1 = 1" + }, + { + "bbox": [ + 130, + 140, + 480, + 200 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 140, + 480, + 200 + ], + "type": "inline_equation", + "content": "\\lambda_2 = 0.8" + }, + { + "bbox": [ + 130, + 140, + 480, + 200 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 140, + 480, + 200 + ], + "type": "inline_equation", + "content": "\\lambda_3 = 0.2" + }, + { + "bbox": [ + 130, + 140, + 480, + 200 + ], + "type": "text", + "content": " in Equation 7, as well as in Equation 10, in which " + }, + { + "bbox": [ + 130, + 140, + 480, + 200 + ], + "type": "inline_equation", + "content": "\\lambda_4 = 1" + }, + { + "bbox": [ + 130, + 140, + 480, + 200 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 131, + 218, + 224, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 218, + 224, + 232 + ], + "spans": [ + { + "bbox": [ + 131, + 218, + 224, + 232 + ], + "type": "text", + "content": "6 Experiment" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 243, + 480, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 243, + 480, + 375 + ], + "spans": [ + { + "bbox": [ + 130, + 243, + 480, + 375 + ], + "type": "text", + "content": "Datasets. To obtain the true values of the 3D information in GIRAFFE for stable training performance, we use the pre-trained model of GIRAFFE on CompCars [39] and Clevr [15] dataset to generate training datasets. 
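The rotation initialization just described amounts to a small grid search; the following is a sketch under assumed names (W, G, loss_fn, and the key "obj0_r"), with the rotation parameterized directly in degrees over [0, 360].

```python
# Sketch: sample 20 evenly spaced rotations, render each, and keep the one
# minimizing the Eq. 7 loss as the starting point for optimization.
import torch

def init_rotation(W, G, I, loss_fn, key="obj0_r", n=20):
    best_r, best_loss = 0.0, float("inf")
    for r in torch.linspace(0.0, 360.0, n):    # parameterization assumed
        with torch.no_grad():
            W[key].fill_(float(r))
            loss = loss_fn(G(W), I).item()
        if loss < best_loss:
            best_r, best_loss = float(r), loss
    with torch.no_grad():
        W[key].fill_(best_r)                   # initial value for optimization
    return best_r
```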
For testing datasets, we also use GIRAFFE to generate images for multi-car datasets denoted as " + }, + { + "bbox": [ + 130, + 243, + 480, + 375 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 130, + 243, + 480, + 375 + ], + "type": "text", + "content": "-CompCars (CompCars is a single-car image dataset), and we use the original Clevr dataset as the multi-geometry dataset (Clevr is a simulated dataset that renders images of multiple geometric objects). We follow the code setup in GIRAFFE. For CompCars, we use all the codes from Equation 5. For Clevr, we fix the rotation, scale, and camera pose codes of the objects. For experiments on facial data, we use the FFHQ [17] dataset for training and the CelebA-HQ [16] dataset for testing." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 133, + 396, + 216, + 425 + ], + "blocks": [ + { + "bbox": [ + 133, + 396, + 216, + 425 + ], + "lines": [ + { + "bbox": [ + 133, + 396, + 216, + 425 + ], + "spans": [ + { + "bbox": [ + 133, + 396, + 216, + 425 + ], + "type": "image", + "image_path": "6f4059d17e2a50ecd260ff620931b98b5225119e028eda9ef6e7d56ee1724817.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 138, + 426, + 211, + 434 + ], + "lines": [ + { + "bbox": [ + 138, + 426, + 211, + 434 + ], + "spans": [ + { + "bbox": [ + 138, + 426, + 211, + 434 + ], + "type": "text", + "content": "(a) Input, Co-R, Pre-R" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 221, + 396, + 303, + 423 + ], + "blocks": [ + { + "bbox": [ + 221, + 396, + 303, + 423 + ], + "lines": [ + { + "bbox": [ + 221, + 396, + 303, + 423 + ], + "spans": [ + { + "bbox": [ + 221, + 396, + 303, + 423 + ], + "type": "image", + "image_path": "99cedf4108539a0b8425f9c827c841ba85f659e75a0135cdbebade4b06e78bcf.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 235, + 425, + 290, + 434 + ], + "lines": [ + { + "bbox": [ + 235, + 425, + 290, + 434 + ], + "spans": [ + { + "bbox": [ + 235, + 425, + 290, + 434 + ], + "type": "text", + "content": "(b) Edit Shape" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 310, + 396, + 391, + 423 + ], + "blocks": [ + { + "bbox": [ + 310, + 396, + 391, + 423 + ], + "lines": [ + { + "bbox": [ + 310, + 396, + 391, + 423 + ], + "spans": [ + { + "bbox": [ + 310, + 396, + 391, + 423 + ], + "type": "image", + "image_path": "2275654dea41191fe4c6f5b09c9caf68899b269a5ad0f36e65b38deaf7be23c9.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 399, + 396, + 480, + 423 + ], + "blocks": [ + { + "bbox": [ + 399, + 396, + 480, + 423 + ], + "lines": [ + { + "bbox": [ + 399, + 396, + 480, + 423 + ], + "spans": [ + { + "bbox": [ + 399, + 396, + 480, + 423 + ], + "type": "image", + "image_path": "991a6fe46aa4683eb0fc5677e2cb36c4256517c60149ed10c0964379cb3e60d4.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 133, + 435, + 214, + 462 + ], + "blocks": [ + { + "bbox": [ + 133, + 435, + 214, + 462 + ], + "lines": [ + { + "bbox": [ + 133, + 435, + 214, + 462 + ], + "spans": [ + { + "bbox": [ + 133, + 435, + 214, + 462 + ], + "type": "image", + "image_path": 
"1520efaca4395ba6c7de916da78ec3f9597ecc58c0f54a579f92b0e2fe7c4ea2.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 136, + 464, + 212, + 472 + ], + "lines": [ + { + "bbox": [ + 136, + 464, + 212, + 472 + ], + "spans": [ + { + "bbox": [ + 136, + 464, + 212, + 472 + ], + "type": "text", + "content": "(e) Edit Bg Appearance" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 220, + 435, + 302, + 461 + ], + "blocks": [ + { + "bbox": [ + 220, + 435, + 302, + 461 + ], + "lines": [ + { + "bbox": [ + 220, + 435, + 302, + 461 + ], + "spans": [ + { + "bbox": [ + 220, + 435, + 302, + 461 + ], + "type": "image", + "image_path": "854b3f8d8dc6fb483a3dc03416093069dad5fba978b55fdfe9443411d6019faa.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 236, + 463, + 287, + 472 + ], + "lines": [ + { + "bbox": [ + 236, + 463, + 287, + 472 + ], + "spans": [ + { + "bbox": [ + 236, + 463, + 287, + 472 + ], + "type": "text", + "content": "(f) Edit Scale" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 131, + 481, + 479, + 503 + ], + "lines": [ + { + "bbox": [ + 131, + 481, + 479, + 503 + ], + "spans": [ + { + "bbox": [ + 131, + 481, + 479, + 503 + ], + "type": "text", + "content": "Fig. 6: Single-object editing on G-CompCars dataset. Co-R: coarse reconstruction. Pre-R: precise reconstruction." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 309, + 435, + 389, + 461 + ], + "blocks": [ + { + "bbox": [ + 315, + 425, + 388, + 434 + ], + "lines": [ + { + "bbox": [ + 315, + 425, + 388, + 434 + ], + "spans": [ + { + "bbox": [ + 315, + 425, + 388, + 434 + ], + "type": "text", + "content": "(c) Edit Appearance" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 309, + 435, + 389, + 461 + ], + "lines": [ + { + "bbox": [ + 309, + 435, + 389, + 461 + ], + "spans": [ + { + "bbox": [ + 309, + 435, + 389, + 461 + ], + "type": "image", + "image_path": "51d3b0a65b9e7a72938464344889d7e345b16f5ad7796e65afef93d5677635ff.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 463, + 386, + 472 + ], + "lines": [ + { + "bbox": [ + 313, + 463, + 386, + 472 + ], + "spans": [ + { + "bbox": [ + 313, + 463, + 386, + 472 + ], + "type": "text", + "content": "(g) Edit Translation" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 397, + 435, + 477, + 461 + ], + "blocks": [ + { + "bbox": [ + 406, + 425, + 473, + 434 + ], + "lines": [ + { + "bbox": [ + 406, + 425, + 473, + 434 + ], + "spans": [ + { + "bbox": [ + 406, + 425, + 473, + 434 + ], + "type": "text", + "content": "(d) Edit Bg Shape" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 397, + 435, + 477, + 461 + ], + "lines": [ + { + "bbox": [ + 397, + 435, + 477, + 461 + ], + "spans": [ + { + "bbox": [ + 397, + 435, + 477, + 461 + ], + "type": "image", + "image_path": "205f9aba677d97b4083e3d4808322dcf56d9c73bef73c32dd668c810580242f3.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 405, + 463, + 469, + 472 + ], + "lines": [ + { + "bbox": [ + 405, + 463, + 469, + 472 + ], + "spans": [ + { + "bbox": [ + 405, + 463, + 
469, + 472 + ], + "type": "text", + "content": "(h) Edit Rotation" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 133, + 548, + 214, + 575 + ], + "blocks": [ + { + "bbox": [ + 133, + 548, + 214, + 575 + ], + "lines": [ + { + "bbox": [ + 133, + 548, + 214, + 575 + ], + "spans": [ + { + "bbox": [ + 133, + 548, + 214, + 575 + ], + "type": "image", + "image_path": "3cc160d66d58ca39b5f80d2bfb80544b37ef9d4662e2bb7e6a8edf439e79d762.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 137, + 577, + 211, + 586 + ], + "lines": [ + { + "bbox": [ + 137, + 577, + 211, + 586 + ], + "spans": [ + { + "bbox": [ + 137, + 577, + 211, + 586 + ], + "type": "text", + "content": "(a) Input, Co-R, Pre-R" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 211, + 594, + 400, + 606 + ], + "lines": [ + { + "bbox": [ + 211, + 594, + 400, + 606 + ], + "spans": [ + { + "bbox": [ + 211, + 594, + 400, + 606 + ], + "type": "text", + "content": "Fig. 7: Single-object editing on Clevr dataset." + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 221, + 548, + 302, + 575 + ], + "blocks": [ + { + "bbox": [ + 221, + 548, + 302, + 575 + ], + "lines": [ + { + "bbox": [ + 221, + 548, + 302, + 575 + ], + "spans": [ + { + "bbox": [ + 221, + 548, + 302, + 575 + ], + "type": "image", + "image_path": "68531de72840eeea6c399798738f89dc129896dba3dad36f2ae4d8f928278637.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 224, + 577, + 299, + 586 + ], + "lines": [ + { + "bbox": [ + 224, + 577, + 299, + 586 + ], + "spans": [ + { + "bbox": [ + 224, + 577, + 299, + 586 + ], + "type": "text", + "content": "(b) Edit Appearance" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 309, + 548, + 389, + 575 + ], + "blocks": [ + { + "bbox": [ + 309, + 548, + 389, + 575 + ], + "lines": [ + { + "bbox": [ + 309, + 548, + 389, + 575 + ], + "spans": [ + { + "bbox": [ + 309, + 548, + 389, + 575 + ], + "type": "image", + "image_path": "eabe5ff5c158e1947d8b1c338ba50b7a1cb724d2803459bd876c224c7fde9cbe.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 577, + 386, + 585 + ], + "lines": [ + { + "bbox": [ + 313, + 577, + 386, + 585 + ], + "spans": [ + { + "bbox": [ + 313, + 577, + 386, + 585 + ], + "type": "text", + "content": "(c) Edit Translation" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 397, + 548, + 477, + 575 + ], + "blocks": [ + { + "bbox": [ + 397, + 548, + 477, + 575 + ], + "lines": [ + { + "bbox": [ + 397, + 548, + 477, + 575 + ], + "spans": [ + { + "bbox": [ + 397, + 548, + 477, + 575 + ], + "type": "image", + "image_path": "b996cbc5a8c079f6534fb10408582ad709d37d8611b2d01b7fd2aba80f8bdb99.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 409, + 576, + 465, + 586 + ], + "lines": [ + { + "bbox": [ + 409, + 576, + 465, + 586 + ], + "spans": [ + { + "bbox": [ + 409, + 576, + 465, + 586 + ], + "type": "text", + "content": "(d) Add Object" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 29 + }, + { + "bbox": [ + 131, + 629, + 479, + 666 + 
], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 629, + 479, + 666 + ], + "spans": [ + { + "bbox": [ + 131, + 629, + 479, + 666 + ], + "type": "text", + "content": "Baselines. In the comparative experiments for our Neural Inversion Encoder, we benchmarked encoder-based inversion methods such as e4e [34] and pSp [31], which use the 2D GAN StyleGAN2 [18] as the generator, and E3DGE [21] and" + } + ] + } + ], + "index": 32 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "type": "text", + "content": "3D-GOI" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 114, + 214, + 142 + ], + "blocks": [ + { + "bbox": [ + 133, + 114, + 214, + 142 + ], + "lines": [ + { + "bbox": [ + 133, + 114, + 214, + 142 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 214, + 142 + ], + "type": "image", + "image_path": "60418f3e4c648b49e1839e33d12ec2215c8f6326c07842f1b820aa250aeeb2dc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 221, + 114, + 303, + 140 + ], + "blocks": [ + { + "bbox": [ + 221, + 114, + 303, + 140 + ], + "lines": [ + { + "bbox": [ + 221, + 114, + 303, + 140 + ], + "spans": [ + { + "bbox": [ + 221, + 114, + 303, + 140 + ], + "type": "image", + "image_path": "8f49759684059e939ed6d6e095d79b4d02d30b2e94163562e85e1596b75ffa22.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 310, + 114, + 392, + 140 + ], + "blocks": [ + { + "bbox": [ + 310, + 114, + 392, + 140 + ], + "lines": [ + { + "bbox": [ + 310, + 114, + 392, + 140 + ], + "spans": [ + { + "bbox": [ + 310, + 114, + 392, + 140 + ], + "type": "image", + "image_path": "7c83539bfa72b0b4fdb6826c942e43d6fe0154ee19229562f323f0583a8cf073.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 399, + 114, + 480, + 140 + ], + "blocks": [ + { + "bbox": [ + 399, + 114, + 480, + 140 + ], + "lines": [ + { + "bbox": [ + 399, + 114, + 480, + 140 + ], + "spans": [ + { + "bbox": [ + 399, + 114, + 480, + 140 + ], + "type": "image", + "image_path": "fd6d779db6c870b4d676f86dfc5e4ead06a0388acf5301039204102ff65f96bf.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 133, + 152, + 214, + 178 + ], + "blocks": [ + { + "bbox": [ + 137, + 143, + 211, + 152 + ], + "lines": [ + { + "bbox": [ + 137, + 143, + 211, + 152 + ], + "spans": [ + { + "bbox": [ + 137, + 143, + 211, + 152 + ], + "type": "text", + "content": "(a) Input, Co-R, Pre-R" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 133, + 152, + 214, + 178 + ], + "lines": [ + { + "bbox": [ + 133, + 152, + 214, + 178 + ], + "spans": [ + { + "bbox": [ + 133, + 152, + 214, + 178 + ], + "type": "image", + "image_path": 
"dbf24119eba1f34bb7ef704e4d4da88ca7727f295b605a11be74ed20ec2084e5.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 136, + 180, + 212, + 189 + ], + "lines": [ + { + "bbox": [ + 136, + 180, + 212, + 189 + ], + "spans": [ + { + "bbox": [ + 136, + 180, + 212, + 189 + ], + "type": "text", + "content": "(e) Edit Bg Appearance" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 221, + 152, + 302, + 178 + ], + "blocks": [ + { + "bbox": [ + 235, + 142, + 290, + 151 + ], + "lines": [ + { + "bbox": [ + 235, + 142, + 290, + 151 + ], + "spans": [ + { + "bbox": [ + 235, + 142, + 290, + 151 + ], + "type": "text", + "content": "(b) Edit Shape" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 221, + 152, + 302, + 178 + ], + "lines": [ + { + "bbox": [ + 221, + 152, + 302, + 178 + ], + "spans": [ + { + "bbox": [ + 221, + 152, + 302, + 178 + ], + "type": "image", + "image_path": "6645c84379197c8d8e3416110783a0b32f58bf6519325d6af307a62a3706559c.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 236, + 179, + 287, + 188 + ], + "lines": [ + { + "bbox": [ + 236, + 179, + 287, + 188 + ], + "spans": [ + { + "bbox": [ + 236, + 179, + 287, + 188 + ], + "type": "text", + "content": "(f) Edit Scale" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 309, + 152, + 389, + 178 + ], + "blocks": [ + { + "bbox": [ + 315, + 142, + 389, + 151 + ], + "lines": [ + { + "bbox": [ + 315, + 142, + 389, + 151 + ], + "spans": [ + { + "bbox": [ + 315, + 142, + 389, + 151 + ], + "type": "text", + "content": "(c) Edit Appearance" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 309, + 152, + 389, + 178 + ], + "lines": [ + { + "bbox": [ + 309, + 152, + 389, + 178 + ], + "spans": [ + { + "bbox": [ + 309, + 152, + 389, + 178 + ], + "type": "image", + "image_path": "30abf75e354c3a950a29c3e762c8eecdb9c73444e124f66bd307e777d3260acc.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 180, + 386, + 188 + ], + "lines": [ + { + "bbox": [ + 313, + 180, + 386, + 188 + ], + "spans": [ + { + "bbox": [ + 313, + 180, + 386, + 188 + ], + "type": "text", + "content": "(g) Edit Translation" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 396, + 152, + 477, + 178 + ], + "blocks": [ + { + "bbox": [ + 406, + 142, + 473, + 151 + ], + "lines": [ + { + "bbox": [ + 406, + 142, + 473, + 151 + ], + "spans": [ + { + "bbox": [ + 406, + 142, + 473, + 151 + ], + "type": "text", + "content": "(d) Edit Bg Shape" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 396, + 152, + 477, + 178 + ], + "lines": [ + { + "bbox": [ + 396, + 152, + 477, + 178 + ], + "spans": [ + { + "bbox": [ + 396, + 152, + 477, + 178 + ], + "type": "image", + "image_path": "10537903b1e9d923080b4ea771ba87384e6ae7365ed3818ed3f2bb257ab97396.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 405, + 180, + 469, + 188 + ], + "lines": [ + { + "bbox": [ + 405, + 180, + 469, + 188 + ], + "spans": [ + { + "bbox": [ + 405, + 180, + 469, + 188 + ], + "type": "text", + "content": "(h) Edit Rotation" + } + ] + } + ], + "index": 17, + 
"angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 133, + 220, + 214, + 248 + ], + "blocks": [ + { + "bbox": [ + 196, + 198, + 415, + 209 + ], + "lines": [ + { + "bbox": [ + 196, + 198, + 415, + 209 + ], + "spans": [ + { + "bbox": [ + 196, + 198, + 415, + 209 + ], + "type": "text", + "content": "Fig. 8: Multi-object editing on " + }, + { + "bbox": [ + 196, + 198, + 415, + 209 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 196, + 198, + 415, + 209 + ], + "type": "text", + "content": "-CompCars dataset." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 133, + 220, + 214, + 248 + ], + "lines": [ + { + "bbox": [ + 133, + 220, + 214, + 248 + ], + "spans": [ + { + "bbox": [ + 133, + 220, + 214, + 248 + ], + "type": "image", + "image_path": "30e8fdff9c7e5516a6c5826bbb04fed4356d36de6a496ad5cb52a286212a90d7.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 137, + 250, + 211, + 258 + ], + "lines": [ + { + "bbox": [ + 137, + 250, + 211, + 258 + ], + "spans": [ + { + "bbox": [ + 137, + 250, + 211, + 258 + ], + "type": "text", + "content": "(a) Input, Co-R, Pre-R" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 212, + 266, + 400, + 277 + ], + "lines": [ + { + "bbox": [ + 212, + 266, + 400, + 277 + ], + "spans": [ + { + "bbox": [ + 212, + 266, + 400, + 277 + ], + "type": "text", + "content": "Fig. 9: Multi-object editing on Clevr dataset." + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 221, + 220, + 302, + 247 + ], + "blocks": [ + { + "bbox": [ + 221, + 220, + 302, + 247 + ], + "lines": [ + { + "bbox": [ + 221, + 220, + 302, + 247 + ], + "spans": [ + { + "bbox": [ + 221, + 220, + 302, + 247 + ], + "type": "image", + "image_path": "5772291549f34b570d7b1c23b37d84d16e2f02bb13a33c5416c854eb724a3f42.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 224, + 248, + 299, + 257 + ], + "lines": [ + { + "bbox": [ + 224, + 248, + 299, + 257 + ], + "spans": [ + { + "bbox": [ + 224, + 248, + 299, + 257 + ], + "type": "text", + "content": "(b) Edit Appearance" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 309, + 220, + 390, + 247 + ], + "blocks": [ + { + "bbox": [ + 309, + 220, + 390, + 247 + ], + "lines": [ + { + "bbox": [ + 309, + 220, + 390, + 247 + ], + "spans": [ + { + "bbox": [ + 309, + 220, + 390, + 247 + ], + "type": "image", + "image_path": "a2f421f252ce2c58fae785455f661cc17a7b338c61852deaf059ca50e22bc62f.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 248, + 386, + 257 + ], + "lines": [ + { + "bbox": [ + 313, + 248, + 386, + 257 + ], + "spans": [ + { + "bbox": [ + 313, + 248, + 386, + 257 + ], + "type": "text", + "content": "(c) Edit Translation" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 396, + 220, + 478, + 248 + ], + "blocks": [ + { + "bbox": [ + 396, + 220, + 478, + 248 + ], + "lines": [ + { + "bbox": [ + 396, + 220, + 478, + 248 + ], + "spans": [ + { + "bbox": [ + 396, + 220, + 478, + 248 + ], + "type": "image", + "image_path": "8890e6aac8a10fca25e28769f159a166a9cbef09bea98748aa1ea62c4712be46.jpg" + } + ] + } + ], + 
"index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 397, + 249, + 477, + 258 + ], + "lines": [ + { + "bbox": [ + 397, + 249, + 477, + 258 + ], + "spans": [ + { + "bbox": [ + 397, + 249, + 477, + 258 + ], + "type": "text", + "content": "(d) Add/Remove Objects" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "bbox": [ + 130, + 300, + 480, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 300, + 480, + 337 + ], + "spans": [ + { + "bbox": [ + 130, + 300, + 480, + 337 + ], + "type": "text", + "content": "TriplaneNet [5] that employ the 3D GAN EG3D [7] as the generator, on the generator of GIRAFFE. Additionally, we compared our encoder on StyleGAN2 with SOTA inversion methods HyperStyle [2] and HFGI [35] for StyleGAN2." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 133, + 338, + 479, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 338, + 479, + 361 + ], + "spans": [ + { + "bbox": [ + 133, + 338, + 479, + 361 + ], + "type": "text", + "content": "Metrics. We use Mean Squared Error (MSE), perceptual similarity loss (LPIPS) [41], and identity similarity (ID) to measure the quality of image reconstruction." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 131, + 378, + 288, + 389 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 378, + 288, + 389 + ], + "spans": [ + { + "bbox": [ + 131, + 378, + 288, + 389 + ], + "type": "text", + "content": "6.1 3D GAN Omni-Inversion" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 130, + 396, + 482, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 396, + 482, + 540 + ], + "spans": [ + { + "bbox": [ + 130, + 396, + 482, + 540 + ], + "type": "text", + "content": "Single-object Multifaceted Editing. In Figure 6 and Figure 7, (a) depict the original images, the coarsely reconstructed images produced by the Neural Inversion Encoder, and the precisely reconstructed images obtained via round-robin optimization. As Figure 7 shows, the simple scene structure of the Clevr dataset allows us to achieve remarkably accurate results using only the encoder (Co-Recon). However, for car images in Figure 6, predicting precise codes using the encoder only becomes challenging, necessitating the employment of the round-robin optimization algorithm to refine the code values for precise reconstruction (Pre-Recon). Figure 6 (b)-(h) and Figure 7 (b)-(d) show the editing results for different codes. As noted in Section 4.3, moving an object forward and increasing its scale yield similar results. Please refer to the Supplementary Material 3.1 for more results like camera pose and shape editing." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 130, + 557, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 557, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 557, + 482, + 666 + ], + "type": "text", + "content": "Multi-object Multifaceted Editing. 
We notice that the prediction for some object parameters (" + }, + { + "bbox": [ + 130, + 557, + 482, + 666 + ], + "type": "inline_equation", + "content": "O^{shape}" + }, + { + "bbox": [ + 130, + 557, + 482, + 666 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 557, + 482, + 666 + ], + "type": "inline_equation", + "content": "O^{app}" + }, + { + "bbox": [ + 130, + 557, + 482, + 666 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 557, + 482, + 666 + ], + "type": "inline_equation", + "content": "O^s" + }, + { + "bbox": [ + 130, + 557, + 482, + 666 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 557, + 482, + 666 + ], + "type": "inline_equation", + "content": "O^t" + }, + { + "bbox": [ + 130, + 557, + 482, + 666 + ], + "type": "text", + "content": ") are quite accurate. However, the prediction for the background codes deviates significantly. We speculate this is due to the significant differences in segmentation image input to the background encoder between multi-object scenes and single-object scenes. Therefore, background reconstruction requires further optimization. Figure 8 and Figure 9 depict the multifaceted editing outcomes for two cars and multiple Clevr objects, respectively. The images show individual edits of two objects in the left and middle images and collective edits at the right images in Figure 8 (b-c) and (f-h)." + } + ] + } + ], + "index": 32 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "type": "text", + "content": "H. Li et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 147, + 114, + 288, + 213 + ], + "blocks": [ + { + "bbox": [ + 147, + 114, + 288, + 213 + ], + "lines": [ + { + "bbox": [ + 147, + 114, + 288, + 213 + ], + "spans": [ + { + "bbox": [ + 147, + 114, + 288, + 213 + ], + "type": "image", + "image_path": "504bf2bc8d429d65185a3c4f35d5eb0acfd376058e512e0f50c38b713f8d7545.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 133, + 214, + 301, + 238 + ], + "lines": [ + { + "bbox": [ + 133, + 214, + 301, + 238 + ], + "spans": [ + { + "bbox": [ + 133, + 214, + 301, + 238 + ], + "type": "text", + "content": "(a) Reconstruction results of different GAN inversion encoders using the generator of GI-RAFFE." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 310, + 114, + 477, + 213 + ], + "blocks": [ + { + "bbox": [ + 310, + 114, + 477, + 213 + ], + "lines": [ + { + "bbox": [ + 310, + 114, + 477, + 213 + ], + "spans": [ + { + "bbox": [ + 310, + 114, + 477, + 213 + ], + "type": "image", + "image_path": "c70a34891e1e695367e4d50bc62492f070229c74e463a0cb8fc5b240263bee74.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 214, + 477, + 238 + ], + "lines": [ + { + "bbox": [ + 310, + 214, + 477, + 238 + ], + "spans": [ + { + "bbox": [ + 310, + 214, + 477, + 238 + ], + "type": "text", + "content": "(b) Reconstruction results of different GAN inversion encoders using the generator of StyleGAN2." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 165, + 248, + 446, + 259 + ], + "lines": [ + { + "bbox": [ + 165, + 248, + 446, + 259 + ], + "spans": [ + { + "bbox": [ + 165, + 248, + 446, + 259 + ], + "type": "text", + "content": "Fig. 10: Reconstruction quality of different GAN inversion encoders." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 282, + 480, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 282, + 480, + 342 + ], + "spans": [ + { + "bbox": [ + 130, + 282, + 480, + 342 + ], + "type": "text", + "content": "As shown in Figure 8, the predictive discrepancy between the car's background and rotation angle on the left is considerable, requiring adjustments through the round-robin optimization. As illustrated in Figure 1, 2D/3D GAN inversion methods can not inverse multi-object scenes. More images pertaining to multi-object editing can be found in Supplementary Material 3.2." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 359, + 428, + 371 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 359, + 428, + 371 + ], + "spans": [ + { + "bbox": [ + 130, + 359, + 428, + 371 + ], + "type": "text", + "content": "6.2 Comparison Experiment of Neural Inversion Encoder" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 378, + 480, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 378, + 480, + 497 + ], + "spans": [ + { + "bbox": [ + 130, + 378, + 480, + 497 + ], + "type": "text", + "content": "For fair comparison and to eliminate the impact of the generator on the quality of the inverted image generation, we trained the encoders from the baseline methods by connecting them to the GIRAFFE generator using our Neural Inversion Encoder training approach and compared them with our Neural Inversion Encoder. At the same time, we also connected our encoder to StyleGAN2 and compared it with inversion methods based on StyleGAN2, thereby demonstrating the efficiency of our encoder design. Table 1 and Figure 10 quantitatively and qualitatively displays the comparison results on both the GIRAFFE and StyleGAN2 generators respectively. The results show that our Neural Inversion Encoder consistently outperforms baseline methods." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 515, + 237, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 515, + 237, + 526 + ], + "spans": [ + { + "bbox": [ + 131, + 515, + 237, + 526 + ], + "type": "text", + "content": "6.3 Ablation Study" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 533, + 480, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 480, + 616 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 480, + 616 + ], + "type": "text", + "content": "We conducted ablation experiments separately for the proposed Neural Inversion Encoder and the Round-robin Optimization algorithm. Table 2 displays the average ablation results of the Neural Inversion Encoder on various attribute codes, where NIB refers to Neural Inversion Block (the second part of the encoder) and MLP is the final part of the encoder. The results clearly show that our encoder structure is extremely effective and can predict code values more accurately. Please find the complete results in the Supplementary Material." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 617, + 480, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 617, + 480, + 652 + ], + "spans": [ + { + "bbox": [ + 130, + 617, + 480, + 652 + ], + "type": "text", + "content": "For the Round-robin optimization algorithm, we compared it with three fixed optimization order algorithms on both single-object and multi-object scenarios. The three fixed sequences are as follows:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 146, + 652, + 416, + 666 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 652, + 416, + 666 + ], + "spans": [ + { + "bbox": [ + 146, + 652, + 416, + 666 + ], + "type": "interline_equation", + "content": "\\text {O r d e r 1}: B ^ {\\text {s h a p e}}, B ^ {\\text {a p p}}, \\left\\{O _ {i} ^ {r}, O _ {i} ^ {t}, O _ {i} ^ {s} \\right\\} _ {i = 1} ^ {N}, \\left\\{O _ {i} ^ {\\text {s h a p e}}, O _ {i} ^ {\\text {a p p}} \\right\\} _ {i = 1} ^ {N}, C", + "image_path": "8ba839c9f89095f9c2539b3b31947c7b4144ca7b76ee03f200ef6e6f7f3593ca.jpg" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "type": "text", + "content": "3D-GOI" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 138, + 156, + 474, + 259 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 482, + 148 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 482, + 148 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 482, + 148 + ], + "type": "text", + "content": "Table 1: Reconstruction quality of different GAN inversion encoders using the generator of GIRAFFE and StyleGAN2. 
" + }, + { + "bbox": [ + 132, + 114, + 482, + 148 + ], + "type": "inline_equation", + "content": "\\downarrow" + }, + { + "bbox": [ + 132, + 114, + 482, + 148 + ], + "type": "text", + "content": " indicates the lower the better and " + }, + { + "bbox": [ + 132, + 114, + 482, + 148 + ], + "type": "inline_equation", + "content": "\\uparrow" + }, + { + "bbox": [ + 132, + 114, + 482, + 148 + ], + "type": "text", + "content": " indicates the higher the better." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 138, + 156, + 474, + 259 + ], + "lines": [ + { + "bbox": [ + 138, + 156, + 474, + 259 + ], + "spans": [ + { + "bbox": [ + 138, + 156, + 474, + 259 + ], + "type": "table", + "html": "
MethodGIRAFFE for GeneratorStyleGAN2 for Generator
MSE ↓LPIPS ↓ID↑MSE ↓LPIPS ↓ID↑
e4e [34]0.0310.3060.8670.0520.2000.502
pSp [31]0.0310.3010.8770.0340.1720.561
HyperStyle [2]---0.0190.0910.766
HFGI [35]---0.0230.1240.705
TriplaneNet [5]0.0290.2960.870---
E3DGE [21]0.0310.2990.881---
3D-GOI(Ours)0.0240.2620.8970.0170.0980.769
", + "image_path": "5ef36d58de3181decad7c356f1b493d30a66f7157ab31ef1615b443dd0a0da8e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 161, + 312, + 298, + 360 + ], + "blocks": [ + { + "bbox": [ + 153, + 281, + 305, + 303 + ], + "lines": [ + { + "bbox": [ + 153, + 281, + 305, + 303 + ], + "spans": [ + { + "bbox": [ + 153, + 281, + 305, + 303 + ], + "type": "text", + "content": "Table 2: Ablation Study of the Neural Inversion Encoder." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 161, + 312, + 298, + 360 + ], + "lines": [ + { + "bbox": [ + 161, + 312, + 298, + 360 + ], + "spans": [ + { + "bbox": [ + 161, + 312, + 298, + 360 + ], + "type": "table", + "html": "
MethodMSE ↓LPIPS↓ID ↑
w/o NIB0.0230.2880.856
w/o MLP0.0150.1830.878
3D-GOI0.0100.1410.906
", + "image_path": "02531c2937d8b2595e5729007540c1d8415c9eda1bf4426c7e17f7d287e9b1c0.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 318, + 312, + 451, + 371 + ], + "blocks": [ + { + "bbox": [ + 312, + 270, + 461, + 304 + ], + "lines": [ + { + "bbox": [ + 312, + 270, + 461, + 304 + ], + "spans": [ + { + "bbox": [ + 312, + 270, + 461, + 304 + ], + "type": "text", + "content": "Table 3: The quantitative metrics of ablation study of the Round-robin Optimization algorithm." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 312, + 451, + 371 + ], + "lines": [ + { + "bbox": [ + 318, + 312, + 451, + 371 + ], + "spans": [ + { + "bbox": [ + 318, + 312, + 451, + 371 + ], + "type": "table", + "html": "
MethodMSE ↓LPIPS ↓ID↑
Order10.0160.1840.923
Order20.0190.2290.913
Order30.0190.2210.911
3D-GOI0.0080.1280.938
", + "image_path": "69f844ca47ea53092cb1e4c1240f2daae35dd82d06fa0e2cfbed2ecdc456ccda.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 147, + 394, + 417, + 408 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 394, + 417, + 408 + ], + "spans": [ + { + "bbox": [ + 147, + 394, + 417, + 408 + ], + "type": "interline_equation", + "content": "O r d e r 2: \\left\\{O _ {i} ^ {r}, O _ {i} ^ {t}, O _ {i} ^ {s} \\right\\} _ {i = 1} ^ {N}, \\left\\{O _ {i} ^ {\\text {s h a p e}}, O _ {i} ^ {\\text {a p p}} \\right\\} _ {i = 1} ^ {N}, B ^ {\\text {s h a p e}}, B ^ {\\text {a p p}}, C", + "image_path": "ebfc151ffefa632288e5d86502eb8c87c24060ef2072b68794b5b4fa1a29fe64.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 147, + 408, + 416, + 422 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 408, + 416, + 422 + ], + "spans": [ + { + "bbox": [ + 147, + 408, + 416, + 422 + ], + "type": "interline_equation", + "content": "O r d e r 3: C, \\{O _ {i} ^ {s h a p e}, O _ {i} ^ {a p p} \\} _ {i = 1} ^ {N}, \\{O _ {i} ^ {r}, O _ {i} ^ {t}, O _ {i} ^ {s} \\} _ {i = 1} ^ {N}, B ^ {s h a p e}, B ^ {a p p}", + "image_path": "434b7922637eec3bd32224deb5766eac739aee4c093856c09182beaa1402e36e.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 422, + 482, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 422, + 482, + 518 + ], + "spans": [ + { + "bbox": [ + 130, + 422, + 482, + 518 + ], + "type": "inline_equation", + "content": "\\{\\}_{i=1}^{N}" + }, + { + "bbox": [ + 130, + 422, + 482, + 518 + ], + "type": "text", + "content": " indicates that the elements inside " + }, + { + "bbox": [ + 130, + 422, + 482, + 518 + ], + "type": "inline_equation", + "content": "\\{\\}" + }, + { + "bbox": [ + 130, + 422, + 482, + 518 + ], + "type": "text", + "content": " are arranged in sequence from 1 to N. There are many possible sequence combinations, and here we chose the three with the best results for demonstration. As Table 3 shows, our method achieves the best results on all metrics, demonstrating the effectiveness of our Round-robin optimization algorithm. As mentioned in Section 4.4, optimizing features like the background first can enhance the optimization. Hence, Order1 performs much better than Order2 and Order3. Please see the Supplementary Material 3.5 for qualitative comparisons of these four methods on images." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 540, + 220, + 553 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 540, + 220, + 553 + ], + "spans": [ + { + "bbox": [ + 132, + 540, + 220, + 553 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 569, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 569, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 569, + 482, + 666 + ], + "type": "text", + "content": "This paper introduces a 3D GAN inversion method, 3D-GOI, that enables multifaceted editing of scenes containing multiple objects. By using a segmentation approach to separate objects and background, then carrying out a coarse estimation followed by a precise optimization, 3D-GOI can accurately obtain the codes of the image. These codes are then used for multifaceted editing. 
To the best of our knowledge, 3D-GOI is the first method to attempt multi-object & multifaceted editing. We anticipate that 3D-GOI holds immense potential for future applications in fields such as VR/AR, and the Metaverse." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "type": "text", + "content": "H. Li et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 143, + 480, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 143, + 480, + 167 + ], + "spans": [ + { + "bbox": [ + 132, + 143, + 480, + 167 + ], + "type": "text", + "content": "This work was supported by the National Key Research and Development Program of China (2022YFB3105405, 2021YFC3300502)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 190, + 197, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 190, + 197, + 201 + ], + "spans": [ + { + "bbox": [ + 133, + 190, + 197, + 201 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 217, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 138, + 217, + 481, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 217, + 481, + 251 + ], + "spans": [ + { + "bbox": [ + 138, + 217, + 481, + 251 + ], + "type": "text", + "content": "1. Abdal, R., Qin, Y., Wonka, P.: Image2stylegan: How to embed images into the stylegan latent space? In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4432-4441 (2019)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 251, + 481, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 251, + 481, + 285 + ], + "spans": [ + { + "bbox": [ + 138, + 251, + 481, + 285 + ], + "type": "text", + "content": "2. Alaluf, Y., Tov, O., Mokady, R., Gal, R., Bermano, A.: Hyperstyle: Stylegan inversion with hypernetworks for real image editing. In: Proceedings of the IEEE/CVF conference on computer Vision and pattern recognition. pp. 18511-18521 (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 285, + 480, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 285, + 480, + 307 + ], + "spans": [ + { + "bbox": [ + 138, + 285, + 480, + 307 + ], + "type": "text", + "content": "3. Arad Hudson, D., Zitnick, L.: Compositional transformers for scene generation. 
Advances in Neural Information Processing Systems 34, 9506-9520 (2021)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 308, + 481, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 308, + 481, + 341 + ], + "spans": [ + { + "bbox": [ + 138, + 308, + 481, + 341 + ], + "type": "text", + "content": "4. Bau, D., Zhu, J.Y., Wulff, J., Peebles, W., Strobelt, H., Zhou, B., Torralba, A.: Seeing what a gan cannot generate. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4502-4511 (2019)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 342, + 481, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 342, + 481, + 363 + ], + "spans": [ + { + "bbox": [ + 138, + 342, + 481, + 363 + ], + "type": "text", + "content": "5. Bhattacharai, A.R., Nießner, M., Sevastopolsky, A.: Triplanenet: An encoder for eg3d inversion. arXiv preprint arXiv:2303.13497 (2023)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 364, + 481, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 364, + 481, + 385 + ], + "spans": [ + { + "bbox": [ + 138, + 364, + 481, + 385 + ], + "type": "text", + "content": "6. Brock, A., Donahue, J., Simonyan, K.: Large scale gan training for high fidelity natural image synthesis. arXiv preprint arXiv:1809.11096 (2018)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 386, + 481, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 386, + 481, + 430 + ], + "spans": [ + { + "bbox": [ + 138, + 386, + 481, + 430 + ], + "type": "text", + "content": "7. Chan, E.R., Lin, C.Z., Chan, M.A., Nagano, K., Pan, B., De Mello, S., Gallo, O., Guibas, L.J., Tremblay, J., Khamis, S., et al.: Efficient geometry-aware 3d generative adversarial networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 16123-16133 (2022)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 431, + 481, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 431, + 481, + 464 + ], + "spans": [ + { + "bbox": [ + 138, + 431, + 481, + 464 + ], + "type": "text", + "content": "8. Deng, J., Guo, J., Xue, N., Zafeiriou, S.: Arcface: Additive angular margin loss for deep face recognition. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 4690-4699 (2019)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 464, + 481, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 464, + 481, + 497 + ], + "spans": [ + { + "bbox": [ + 138, + 464, + 481, + 497 + ], + "type": "text", + "content": "9. Deng, Y., Wang, B., Shum, H.Y.: Learning detailed radiance manifolds for high-fidelity and 3d-consistent portrait synthesis from monocular image. arXiv preprint arXiv:2211.13901 (2022)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 498, + 481, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 498, + 481, + 531 + ], + "spans": [ + { + "bbox": [ + 138, + 498, + 481, + 531 + ], + "type": "text", + "content": "10. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial networks. 
Communications of the ACM 63(11), 139–144 (2020)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 532, + 481, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 532, + 481, + 564 + ], + "spans": [ + { + "bbox": [ + 138, + 532, + 481, + 564 + ], + "type": "text", + "content": "1. He, K., Fan, H., Wu, Y., Xie, S., Girshick, R.: Momentum contrast for unsupervised visual representation learning. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 9729-9738 (2020)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 565, + 481, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 565, + 481, + 597 + ], + "spans": [ + { + "bbox": [ + 138, + 565, + 481, + 597 + ], + "type": "text", + "content": "2. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 599, + 481, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 599, + 481, + 620 + ], + "spans": [ + { + "bbox": [ + 138, + 599, + 481, + 620 + ], + "type": "text", + "content": "3. Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. Advances in neural information processing systems 33, 6840-6851 (2020)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 138, + 621, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 621, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 621, + 481, + 665 + ], + "type": "text", + "content": "4. Huh, M., Zhang, R., Zhu, J.Y., Paris, S., Hertzmann, A.: Transforming and projecting images into class-conditional generative networks. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part II 16. pp. 17–34. Springer (2020)" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "type": "text", + "content": "3D-GOI" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 117, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 133, + 117, + 480, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 117, + 480, + 161 + ], + "spans": [ + { + "bbox": [ + 133, + 117, + 480, + 161 + ], + "type": "text", + "content": "15. Johnson, J., Hariharan, B., Van Der Maaten, L., Fei-Fei, L., Lawrence Zitnick, C., Girshick, R.: Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 
2901-2910 (2017)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 162, + 480, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 162, + 480, + 184 + ], + "spans": [ + { + "bbox": [ + 133, + 162, + 480, + 184 + ], + "type": "text", + "content": "16. Karras, T., Aila, T., Laine, S., Lehtinen, J.: Progressive growing of gans for improved quality, stability, and variation. arXiv preprint arXiv:1710.10196 (2017)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 134, + 185, + 480, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 185, + 480, + 217 + ], + "spans": [ + { + "bbox": [ + 134, + 185, + 480, + 217 + ], + "type": "text", + "content": "17. Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 4401-4410 (2019)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 134, + 218, + 480, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 218, + 480, + 251 + ], + "spans": [ + { + "bbox": [ + 134, + 218, + 480, + 251 + ], + "type": "text", + "content": "18. Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., Aila, T.: Analyzing and improving the image quality of stylegan. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8110-8119 (2020)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 134, + 251, + 480, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 251, + 480, + 284 + ], + "spans": [ + { + "bbox": [ + 134, + 251, + 480, + 284 + ], + "type": "text", + "content": "19. Ko, J., Cho, K., Choi, D., Ryoo, K., Kim, S.: 3d gan inversion with pose optimization. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 2967-2976 (2023)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 134, + 285, + 480, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 285, + 480, + 307 + ], + "spans": [ + { + "bbox": [ + 134, + 285, + 480, + 307 + ], + "type": "text", + "content": "20. Krizhevsky, A., Sutskever, I., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. Communications of the ACM 60(6), 84-90 (2017)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 134, + 308, + 480, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 308, + 480, + 341 + ], + "spans": [ + { + "bbox": [ + 134, + 308, + 480, + 341 + ], + "type": "text", + "content": "21. Lan, Y., Meng, X., Yang, S., Loy, C.C., Dai, B.: Self-supervised geometry-aware encoder for style-based 3d gan inversion. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 20940-20949 (2023)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 134, + 342, + 480, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 342, + 480, + 373 + ], + "spans": [ + { + "bbox": [ + 134, + 342, + 480, + 373 + ], + "type": "text", + "content": "22. Li, H., Shi, H., Zhang, W., Wu, W., Liao, Y., Wang, L., Lee, L.h., Zhou, P.: Dreamscene: 3d gaussian-based text-to-3d scene generation via formation pattern sampling. 
arXiv preprint arXiv:2404.03575 (2024)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 134, + 375, + 480, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 375, + 480, + 407 + ], + "spans": [ + { + "bbox": [ + 134, + 375, + 480, + 407 + ], + "type": "text", + "content": "23. Lin, Y., Bai, H., Li, S., Lu, H., Lin, X., Xiong, H., Wang, L.: Componerf: Text-guided multi-object compositional nerf with editable 3d scene layout. arXiv preprint arXiv:2303.13843 (2023)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 134, + 409, + 480, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 409, + 480, + 441 + ], + "spans": [ + { + "bbox": [ + 134, + 409, + 480, + 441 + ], + "type": "text", + "content": "24. Metzer, G., Richardson, E., Patashnik, O., Giryes, R., Cohen-Or, D.: Latentnerf for shape-guided generation of 3d shapes and textures. arXiv preprint arXiv:2211.07600 (2022)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 134, + 442, + 480, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 442, + 480, + 475 + ], + "spans": [ + { + "bbox": [ + 134, + 442, + 480, + 475 + ], + "type": "text", + "content": "25. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM 65(1), 99-106 (2021)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 134, + 475, + 480, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 475, + 480, + 508 + ], + "spans": [ + { + "bbox": [ + 134, + 475, + 480, + 508 + ], + "type": "text", + "content": "26. Nguyen-Phuoc, T., Li, C., Theis, L., Richardt, C., Yang, Y.L.: Hologan: Unsupervised learning of 3d representations from natural images. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 7588-7597 (2019)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 134, + 509, + 480, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 509, + 480, + 542 + ], + "spans": [ + { + "bbox": [ + 134, + 509, + 480, + 542 + ], + "type": "text", + "content": "27. Nguyen-Phuoc, T.H., Richardt, C., Mai, L., Yang, Y., Mitra, N.: Blockgan: Learning 3d object-aware scene representations from unlabelled images. Advances in neural information processing systems 33, 6767–6778 (2020)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 134, + 543, + 480, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 543, + 480, + 575 + ], + "spans": [ + { + "bbox": [ + 134, + 543, + 480, + 575 + ], + "type": "text", + "content": "28. Niemeyer, M., Geiger, A.: Giraffe: Representing scenes as compositional generative neural feature fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11453-11464 (2021)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 134, + 576, + 480, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 576, + 480, + 597 + ], + "spans": [ + { + "bbox": [ + 134, + 576, + 480, + 597 + ], + "type": "text", + "content": "29. Perarnau, G., Van De Weijer, J., Raducanu, B., Álvarez, J.M.: Invertible conditional gans for image editing. 
arXiv preprint arXiv:1611.06355 (2016)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 134, + 599, + 480, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 599, + 480, + 620 + ], + "spans": [ + { + "bbox": [ + 134, + 599, + 480, + 620 + ], + "type": "text", + "content": "30. Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988 (2022)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 134, + 621, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 621, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 134, + 621, + 480, + 665 + ], + "type": "text", + "content": "31. Richardson, E., Alaluf, Y., Patashnik, O., Nitzan, Y., Azar, Y., Shapiro, S., Cohen-Or, D.: Encoding in style: a stylegan encoder for image-to-image translation. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 2287-2296 (2021)" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "type": "text", + "content": "H. Li et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 555 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "type": "text", + "content": "32. Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, M., et al.: Imagenet large scale visual recognition challenge. International journal of computer vision 115, 211-252 (2015)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 150, + 482, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 150, + 482, + 182 + ], + "spans": [ + { + "bbox": [ + 130, + 150, + 482, + 182 + ], + "type": "text", + "content": "33. Schwarz, K., Liao, Y., Niemeyer, M., Geiger, A.: Graf: Generative radiance fields for 3d-aware image synthesis. Advances in Neural Information Processing Systems 33, 20154-20166 (2020)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 182, + 482, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 182, + 482, + 215 + ], + "spans": [ + { + "bbox": [ + 130, + 182, + 482, + 215 + ], + "type": "text", + "content": "34. Tov, O., Alaluf, Y., Nitzan, Y., Patashnik, O., Cohen-Or, D.: Designing an encoder for stylegan image manipulation. ACM Transactions on Graphics (TOG) 40(4), 1-14 (2021)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 215, + 482, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 215, + 482, + 248 + ], + "spans": [ + { + "bbox": [ + 130, + 215, + 482, + 248 + ], + "type": "text", + "content": "35. 
Wang, T., Zhang, Y., Fan, Y., Wang, J., Chen, Q.: High-fidelity gan inversion for image attribute editing. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11379-11388 (2022)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 248, + 482, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 248, + 482, + 281 + ], + "spans": [ + { + "bbox": [ + 130, + 248, + 482, + 281 + ], + "type": "text", + "content": "36. Wei, T., Chen, D., Zhou, W., Liao, J., Zhang, W., Yuan, L., Hua, G., Yu, N.: E2style: Improve the efficiency and effectiveness of stylegan inversion. IEEE Transactions on Image Processing 31, 3267-3280 (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 281, + 482, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 281, + 482, + 304 + ], + "spans": [ + { + "bbox": [ + 130, + 281, + 482, + 304 + ], + "type": "text", + "content": "37. Xie, J., Ouyang, H., Piao, J., Lei, C., Chen, Q.: High-fidelity 3d gan inversion by pseudo-multi-view optimization. arXiv preprint arXiv:2211.15662 (2022)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 304, + 482, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 304, + 482, + 346 + ], + "spans": [ + { + "bbox": [ + 130, + 304, + 482, + 346 + ], + "type": "text", + "content": "38. Yang, H., Zhang, Z., Yan, S., Huang, H., Ma, C., Zheng, Y., Bajaj, C., Huang, Q.: Scene synthesis via uncertainty-driven attribute synchronization. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5630-5640 (2021)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 347, + 482, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 347, + 482, + 380 + ], + "spans": [ + { + "bbox": [ + 130, + 347, + 482, + 380 + ], + "type": "text", + "content": "39. Yang, J., Li, H.: Dense, accurate optical flow estimation with piecewise parametric model. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1019-1027 (2015)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 380, + 482, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 380, + 482, + 413 + ], + "spans": [ + { + "bbox": [ + 130, + 380, + 482, + 413 + ], + "type": "text", + "content": "40. Yin, F., Zhang, Y., Wang, X., Wang, T., Li, X., Gong, Y., Fan, Y., Cun, X., Shan, Y., Oztireli, C., et al.: 3d gan inversion with facial symmetry prior. arXiv preprint arXiv:2211.16927 (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 413, + 482, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 413, + 482, + 445 + ], + "spans": [ + { + "bbox": [ + 130, + 413, + 482, + 445 + ], + "type": "text", + "content": "41. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 586-595 (2018)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 446, + 482, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 446, + 482, + 479 + ], + "spans": [ + { + "bbox": [ + 130, + 446, + 482, + 479 + ], + "type": "text", + "content": "42. Zhu, J., Shen, Y., Zhao, D., Zhou, B.: In-domain gan inversion for real image editing. 
In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XVII 16. pp. 592-608. Springer (2020)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 479, + 482, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 479, + 482, + 522 + ], + "spans": [ + { + "bbox": [ + 130, + 479, + 482, + 522 + ], + "type": "text", + "content": "43. Zhu, J.Y., Krahenbihl, P., Shechtman, E., Efros, A.A.: Generative visual manipulation on the natural image manifold. In: Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V 14. pp. 597-613. Springer (2016)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 130, + 522, + 482, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 522, + 482, + 555 + ], + "spans": [ + { + "bbox": [ + 130, + 522, + 482, + 555 + ], + "type": "text", + "content": "44. Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: Proceedings of the IEEE international conference on computer vision. pp. 2223-2232 (2017)" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 413, + 91, + 447, + 100 + ], + "type": "text", + "content": "3D-GOI" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3DEgo_ 3D Editing on the Go!/58ba5dca-f01b-44f3-bd07-614e4a4b113d_content_list.json b/2024/3DEgo_ 3D Editing on the Go!/58ba5dca-f01b-44f3-bd07-614e4a4b113d_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..56d64f5d3a10d1c71a8edc574eafe71a591af871 --- /dev/null +++ b/2024/3DEgo_ 3D Editing on the Go!/58ba5dca-f01b-44f3-bd07-614e4a4b113d_content_list.json @@ -0,0 +1,1834 @@ +[ + { + "type": "text", + "text": "3DEgo: 3D Editing on the Go!", + "text_level": 1, + "bbox": [ + 239, + 48, + 755, + 76 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Umar Khalid $^{1,*}$ , Hasan Iqbal $^{2,*}$ , Azib Farooq $^{3}$ , Jing Hua $^{2}$ , and Chen Chen $^{1}$", + "bbox": [ + 99, + 110, + 898, + 149 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 University of Central Florida, Orlando, FL, USA", + "bbox": [ + 244, + 165, + 750, + 183 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{2}$ Department of Computer Science, Wayne State University, Detroit, MI, USA", + "$^{3}$ Department of Computer Science and Software Engineering, Miami University, Oxford, OH, USA" + ], + "bbox": [ + 99, + 183, + 893, + 235 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. We introduce 3DEgo to address a novel problem of directly synthesizing photorealistic 3D scenes from monocular videos guided by textual prompts. 
Conventional methods construct a text-conditioned 3D scene through a three-stage process, involving pose estimation using Structure-from-Motion (SfM) libraries like COLMAP, initializing the 3D model with unedited images, and iteratively updating the dataset with edited images to achieve a 3D scene with text fidelity. Our framework streamlines the conventional multi-stage 3D editing process into a single-stage workflow by overcoming the reliance on COLMAP and eliminating the cost of model initialization. We apply a diffusion model to edit video frames prior to 3D scene creation by incorporating our designed noise blender module for enhancing multi-view editing consistency, a step that does not require additional training or fine-tuning of T2I diffusion models. 3DEgo utilizes 3D Gaussian Splatting to create 3D scenes from the multi-view consistent edited frames, capitalizing on the inherent temporal continuity and explicit point cloud data. 3DEgo demonstrates remarkable editing precision, speed, and adaptability across a variety of video sources, as validated by extensive evaluations on six datasets, including our own prepared GS25 dataset. Project Page: https://3dego.github.io/", + "bbox": [ + 142, + 281, + 849, + 639 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: Gaussian Splatting $\\cdot$ 3D Editing $\\cdot$ Cross-View Consistency", + "bbox": [ + 142, + 655, + 849, + 673 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 77, + 705, + 309, + 723 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In the pursuit of constructing photo-realistic 3D scenes from monocular video sources, it is a common practice to use the Structure-from-Motion (SfM) library, COLMAP [40] for camera pose estimation. This step is critical for aligning frames extracted from the video, thereby facilitating the subsequent process of 3D scene reconstruction. To further edit these constructed 3D scenes, a meticulous process of frame-by-frame editing based on textual prompts is often employed [52]. Recent works, such as IN2N [11], estimate poses from frames using SfM [40] to initially train an unedited 3D scene. Upon initializing a 3D model, the training dataset is iteratively updated by adding edited images at a consistent rate", + "bbox": [ + 72, + 744, + 920, + 920 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Equal Contribution", + "bbox": [ + 82, + 928, + 297, + 947 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "of editing. This process of iterative dataset update demands significant computational resources and time. Due to challenges with initial edit consistency, IN2N [11] training necessitates the continuous addition of edited images to the dataset over a significantly large number of iterations. This issue stems from the inherent limitations present in Text-to-Image (T2I) diffusion models [4, 37], where achieving prompt-consistent edits across multiple images—especially those capturing the same scene—proves to be a formidable task [7, 19]. Such inconsistencies significantly undermine the effectiveness of 3D scene modifications, particularly when these altered frames are leveraged to generate unique views.", + "bbox": [ + 72, + 55, + 920, + 232 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we address a novel problem of efficiently reconstructing 3D scenes directly from monocular videos without using COLMAP [40] aligned with the editing textual prompt. 
Specifically, we apply a diffusion model [4] to edit every frame of a given monocular video before creating a 3D scene. To address the challenge of consistent editing across all the frames, we introduce a novel noise blender module, which ensures each new edited view is conditioned upon its adjacent, previously edited views. This is achieved by calculating a weighted average of image-conditional noise estimations such that closer frames exert greater influence on the edit", + "bbox": [ + 75, + 258, + 421, + 666 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "ing outcome. Our editing strategy utilizes the IP2P [4] 2D editing diffusion model, which effectively employs both conditional and unconditional noise prediction. Consequently, our method achieves multi-view consistency without the necessity for extra training or fine-tuning, unlike prior approaches [7,27,46]. For 3D scene synthesis based on the edited views, our framework utilizes the Gaussian Splatting (GS) [17] technique, capitalizing on the temporal continuity of video data and the explicit representation of point clouds. Originally designed to work with pre-computed camera poses, 3D Gaussian Splatting presents us with the possibility to synthesize views and construct edited 3D scenes from monocular videos without the need for SfM pre-processing, overcoming one of NeRF's significant limitations [25].", + "bbox": [ + 72, + 666, + 920, + 882 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our method grows the 3D Gaussians of the scene continuously, from the edited frames, as the camera moves, eliminating the need for pre-computed camera poses and 3D model initialization on original un-edited frames to identify", + "bbox": [ + 72, + 889, + 920, + 949 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/edadeb4abbe79bc0073ff9e608a0767dd4aa50121b0dcd79be63c1ddeb6aee09.jpg", + "image_caption": [ + "Fig. 1: Our method, 3D Ego, streamlines the 3D editing process by merging a three-stage workflow into a singular, comprehensive framework. This efficiency is achieved by bypassing the need for COLMAP [40] for pose initialization and avoiding the initialization of the model with unedited images, unlike other existing approaches [7,11,19]." + ], + "image_footnote": [], + "bbox": [ + 457, + 282, + 895, + 482 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 77, + 16, + 94, + 30 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "U. Khalid et al.", + "bbox": [ + 159, + 14, + 314, + 30 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/469efc091e8d0632b4ee1c1082241badb787621bb72045fb697b819647255300.jpg", + "image_caption": [ + "Fig. 2: 3D Ego offers rapid, accurate, and adaptable 3D editing, bypassing the need for original 3D scene initialization and COLMAP poses. This ensures compatibility with videos from any source, including casual smartphone captures like the Van 360-degree scene. The above results identify three cases challenging for IN2N [11], where our method can convert a monocular video into customized 3D scenes using a streamlined, single-stage reconstruction process." + ], + "image_footnote": [], + "bbox": [ + 167, + 66, + 830, + 299 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "an affine transformation that maps the 3D Gaussians from frame $i$ to accurately render the pixels in frame $i + 1$ . 
Hence, our method 3DEgo condenses a three-stage 3D editing process into a single-stage, unified and efficient framework as shown in Figure 1. Our contributions are as follows:", + "bbox": [ + 72, + 469, + 924, + 547 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We tackle the novel challenge of directly transforming monocular videos into 3D scenes guided by editing text prompts, circumventing conventional 3D editing pipelines.", + "- We introduce a unique auto-regressive editing technique that enhances multiview consistency across edited views, seamlessly integrating with pre-trained diffusion models without the need for additional fine-tuning.", + "- We propose a COLMAP-free method using 3D Gaussian splatting for reconstructing 3D scenes from casually captured videos. This technique leverages the video's continuous time sequence for pose estimation and scene development, bypassing traditional SfM dependencies.", + "- We present an advanced technique for converting 2D masks into 3D space, enhancing editing accuracy through Pyramidal Gaussian Scoring (PGS), ensuring more stable and detailed refinement.", + "- Through extensive evaluations on six datasets—including our custom GS25 and others like IN2N, Mip-NeRF, NeRFstudio Dataset, Tanks & Temples, and CO3D-V2—we demonstrate our method's enhanced editing precision and efficiency, particularly with 360-degree and casually recorded videos, as illustrated in Fig. 2." + ], + "bbox": [ + 89, + 569, + 920, + 930 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "3DEgo: 3D Editing on the Go!", + "bbox": [ + 535, + 14, + 840, + 35 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 900, + 16, + 920, + 30 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 79, + 53, + 329, + 73 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A growing body of research is exploring diffusion models for text-driven image editing, introducing techniques that allow for precise modifications based on user-provided instructions [30,35,37,39]. While some approaches require explicit before-and-after captions [12] or specialized training [38], making them less accessible to non-experts, IP2P [4] simplifies the process by enabling direct textual edits on images, making advanced editing tools more widely accessible.", + "bbox": [ + 79, + 97, + 917, + 214 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Recently, diffusion models have also been employed for 3D editing, focusing on altering the geometry and appearance of 3D scenes [1,4,10,13,16,18,22-24, 26,28,31,43,44,48,49].", + "bbox": [ + 79, + 216, + 917, + 274 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Traditional NeRF representations, however, pose significant challenges for precise editing due to their implicit nature, leading to difficulties in localizing edits within a scene. Earlier efforts have mainly achieved global transformations [6, 14, 29, 45, 47, 51], with object-centric editing remaining a challenge. IN2N [11] introduced user-friendly text-based editing, though it might affect the entire scene. Recent studies [7, 19, 52] have attempted to tackle local editing and multi-view consistency challenges within the IN2N framework [11]. Yet, no existing approaches in the literature offer pose-free capabilities, nor can they create a text-conditioned 3D scene from arbitrary video footage. 
Nevertheless, existing 3D editing methods [11, 52] universally necessitate Structure-from-Motion (SfM) preprocessing. Recent studies like Nope-NeRF [3], BARF [25], and SC-NeRF [15] have introduced methodologies for pose optimization and calibration concurrent with the training of (unedited) NeRF.", + "bbox": [ + 77, + 276, + 920, + 526 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this study, we present a novel method for constructing 3D scenes directly from textual prompts, utilizing monocular video frames without dependence on COLMAP poses [40], thus addressing unique challenges. Given the complexities NeRF's implicit nature introduces to simultaneous 3D reconstruction and camera registration, our approach leverages the advanced capabilities of 3D Gaussian Splatting (3DGS) [17] alongside a pre-trained 2D editing diffusion model for efficient 3D model creation.", + "bbox": [ + 77, + 528, + 917, + 663 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 79, + 697, + 239, + 718 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given a sequence of unposed images alongside camera intrinsics, we aim to recover the camera poses in sync with the edited frames and reconstruct a photorealistic 3D scene conditioned on the textual prompt.", + "bbox": [ + 79, + 743, + 917, + 800 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Preliminaries", + "text_level": 1, + "bbox": [ + 79, + 834, + 305, + 852 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In the domain of 3D scene modeling, 3D Gaussian splatting [17] emerges as a notable method. The method's strength lies in its succinct Gaussian representation coupled with an effective differential rendering technique, facilitating real-time, high-fidelity visualization. This approach models a 3D environment", + "bbox": [ + 79, + 869, + 917, + 947 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 79, + 17, + 94, + 29 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "U. Khalid et al.", + "bbox": [ + 159, + 14, + 314, + 30 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "using a collection of point-based 3D Gaussians, denoted as $\\mathcal{H}$ where each Gaussian $h = \\{\\mu, \\Sigma, c, \\alpha\\}$ . Here, $\\mu \\in \\mathbb{R}^3$ specifies the Gaussian's center location, $\\Sigma \\in \\mathbb{R}^{3 \\times 3}$ is the covariance matrix capturing the Gaussian's shape, $c \\in \\mathbb{R}^3$ is the color vector in RGB format represented in the three degrees of spherical harmonics (SH) coefficients, and $\\alpha \\in \\mathbb{R}$ denotes the Gaussian's opacity level. To optimize the parameters of 3D Gaussians to represent the scene, we need to render them into images in a differentiable manner. 
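To make this parameterization concrete before turning to rendering, here is a minimal sketch of the per-Gaussian record $h = \{\mu, \Sigma, c, \alpha\}$; the class and field names are illustrative only (real 3DGS implementations store these attributes as batched tensors, not one object per point):

```python
from dataclasses import dataclass
import torch

@dataclass
class Gaussian3D:
    """One point-based 3D Gaussian h = {mu, Sigma, c, alpha}."""
    mu: torch.Tensor     # (3,) center location
    Sigma: torch.Tensor  # (3, 3) covariance capturing the Gaussian's shape
    c: torch.Tensor      # RGB color, stored via degree-3 SH coefficients
    alpha: torch.Tensor  # scalar opacity
```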
The rendering is achieved by approximating the projection of a 3D Gaussian along the depth dimension into pixel coordinates expressed as:", + "bbox": [ + 72, + 55, + 922, + 232 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nC = \\sum_ {p \\in \\mathcal {P}} c _ {p} \\tau_ {p} \\prod_ {k = 1} ^ {p - 1} (1 - \\alpha_ {k}), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 355, + 252, + 917, + 308 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathcal{P}$ are ordered points overlapping the pixel, and $\\tau_{p} = \\alpha_{p}e^{-\\frac{1}{2}(x_{p})^{T}\\Sigma^{-1}(x_{p})}$ quantifies the Gaussian's contribution to a specific image pixel, with $x_{p}$ measuring the distance from the pixel to the center of the $p$ -th Gaussian. In the original 3DGS, initial Gaussian parameters are refined to fit the scene, guided by ground truth poses obtained using SfM. Through differential rendering, the Gaussians' parameters, including position $\\mu$ , shape $\\Sigma$ , color $c$ , and opacity $\\alpha$ , are adjusted using a photometric loss function.", + "bbox": [ + 72, + 321, + 924, + 463 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 Multi-View Consistent 2D Editing", + "text_level": 1, + "bbox": [ + 75, + 497, + 566, + 518 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In the first step, we perform 2D editing with key editing areas (KEA) based on the user-provided video, $V$ , and editing prompt, $\\mathcal{T}$ .", + "bbox": [ + 72, + 538, + 518, + 616 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "From the given video $V$ , we extract frames $\\{f_1, f_2, \\ldots, f_N\\}$ . Analyzing the textual prompt $\\mathcal{T}$ with a Large Language Model $\\mathcal{L}$ identifies key editing attributes $\\{A_1, A_2, \\ldots, A_k\\}$ , essential for editing, expressed as $\\mathcal{L}(\\mathcal{T}) \\to \\{A_1, A_2, \\ldots, A_k\\}$ . Utilizing these attributes, a segmentation model $\\mathcal{S}$ delineates editing regions in each frame $f_i$ by generating a mask $M_i$ with KEA marked as 1, and others as 0. The segmentation operation is defined as, $\\mathcal{S}(f_i, \\{A_1, A_2, \\ldots, A_k\\}) \\to M_i$ , $\\forall i \\in \\{1, \\ldots, N\\}$ . Subsequently, a 2D diffusion model $\\mathcal{E}$ selectively edits these regions in $f_i$ , as defined by $M_i$ , producing edited frames $\\{E_1, E_2, \\ldots, E_N\\}$ under guidance from $\\mathcal{T}$ , such that $\\mathcal{E}(f_i, M_i) \\to E_i$ .", + "bbox": [ + 72, + 617, + 523, + 949 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/74d84f2ece68fed6d31ab66173b672131851507fd70639dd1cb54b4f868e5623.jpg", + "image_caption": [ + "Fig. 3: Autoregressive Editing. At each denoising step, the model predicts $w + 1$ separate noises, which are then unified via weighted noise blender (Eq. 4) to predict $\\varepsilon_{\\theta}(e_t,f,\\mathcal{T},W)$ ." + ], + "image_footnote": [], + "bbox": [ + 544, + 586, + 893, + 785 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "3DEgo: 3D Editing on the Go!", + "bbox": [ + 535, + 14, + 840, + 34 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 903, + 16, + 917, + 30 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Consistent Multi-View2D Editing. As discussed above, differing from IN2N [11] that incorporates edited images gradually over several training iterations, our approach involves editing the entire dataset at once before the training starts. 
We desire that 1) each edited frame $E_{i}$ follows the editing prompt $\mathcal{T}$ , 2) each edited frame retains the original image's semantic content, and 3) the edited images $\{E_1,E_2,\dots ,E_N\}$ are consistent with one another.", + "bbox": [ + 72, + 55, + 920, + 170 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(i) Multi-view Consistent Mask. As $S$ does not guarantee consistent masks across the views of a casually recorded monocular video, we utilize a zero-shot point tracker [34] to ensure uniform mask generation across the views. The procedure starts by identifying query points in the initial video frame using the ground truth mask. Query points are extracted from these ground truth masks employing the K-Medoids [32] sampling method, which uses the cluster centers from K-Medoids clustering as query points. This approach guarantees comprehensive coverage of the object's various sections and enhances resilience to noise and outliers.", + "bbox": [ + 72, + 172, + 917, + 344 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(ii) Autoregressive Editing. To address the issue of preserving consistency across multiple views, we employ an autoregressive method that edits frames in sequence, with IP2P [4] editing restricted to the Key Editing Areas (KEA) delineated by the relevant masks. Instead of editing each frame independently from just the input images - a process that can vary significantly between adjacent images - we integrate an autoregressive editing technique in which the frame to be edited is conditioned on already edited adjacent frames.", + "bbox": [ + 72, + 347, + 917, + 482 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As discussed above, we incorporate IP2P [4] as the 2D editing diffusion model. The standard noise prediction from IP2P's backbone, which includes both conditional and unconditional editing, is given as,", + "bbox": [ + 72, + 482, + 917, + 541 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\varepsilon} _ {\\theta} \\left(e _ {t}, f, \\mathcal {T}\\right) = \\varepsilon_ {\\theta} \\left(e _ {t}, \\varnothing_ {f}, \\varnothing_ {\\mathcal {T}}\\right) + s _ {f} \\left(\\varepsilon_ {\\theta} \\left(e _ {t}, f, \\varnothing_ {\\mathcal {T}}\\right) - \\varepsilon_ {\\theta} \\left(e _ {t}, \\varnothing_ {f}, \\varnothing_ {\\mathcal {T}}\\right)\\right) + s _ {\\mathcal {T}} \\left(\\varepsilon_ {\\theta} \\left(e _ {t}, f, \\mathcal {T}\\right) - \\varepsilon_ {\\theta} \\left(e _ {t}, f, \\varnothing_ {\\mathcal {T}}\\right)\\right) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 101, + 557, + 915, + 573 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $s_f$ and $s_{\\mathcal{T}}$ are the image and textual prompt guidance scales. We enhance the noise estimation process with our autoregressive editing framework. Consider a set of $w$ views, represented as $W = \\{E_n\\}_{n=1}^w$ . Our goal is to model the distribution of the $i$ -th view image by utilizing its $w$ adjacent, already edited views. To achieve this, we calculate the image-conditional noise estimations $\\varepsilon_{\\theta}(e_t, E_n, \\varnothing_{\\mathcal{T}})$ across all frames in $W$ . 
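Before the formal definition of the weighted average that follows (Eq. 3), here is a minimal sketch of the noise blending with exponential-decay weights; `blend_noise` and its arguments are hypothetical names, and a production system would call this inside the diffusion denoising loop:

```python
import torch

def blend_noise(noise_preds: torch.Tensor, decay: float = 0.9) -> torch.Tensor:
    """Blend image-conditional noise estimates from w edited neighbors (Eq. 3).

    noise_preds: (w, C, H, W) estimates eps_theta(e_t, E_n, null_T), ordered so
    the last entry is the neighbor closest to the frame being edited.
    decay: lambda_d in (0, 1) controlling how fast influence falls off.
    """
    w = noise_preds.shape[0]
    # beta_n = decay**(w - n) for n = 1..w: the closest neighbor dominates.
    beta = decay ** torch.arange(w - 1, -1, -1, dtype=noise_preds.dtype)
    beta = beta / beta.sum()  # normalize so the betas sum to 1
    return (beta.view(-1, 1, 1, 1) * noise_preds).sum(dim=0)
```

For example, with w = 3 and decay 0.9 the normalized weights come out to roughly (0.30, 0.33, 0.37), so the immediately preceding edited frame contributes the most, as the surrounding text requires.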
The equation to compute the weighted average $\bar{\varepsilon}_{\theta}$ of the noise estimates from all edited frames within $W$ , employing $\beta$ as the weight for each frame, is delineated as follows:", + "bbox": [ + 72, + 586, + 920, + 741 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\bar {\\varepsilon} _ {\\theta} \\left(e _ {t}, \\varnothing_ {\\mathcal {T}}, W\\right) = \\sum_ {n = 1} ^ {w} \\beta_ {n} \\varepsilon_ {\\theta} ^ {n} \\left(e _ {t}, E _ {n}, \\varnothing_ {\\mathcal {T}}\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 302, + 754, + 915, + 801 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Here, $E_{n}$ represents the $n$ -th edited frame within $W$ , and $\\beta_{n}$ is the weight assigned to the $n$ -th frame's noise estimate. The condition that the sum of all $\\beta$ values over $w$ frames equals 1 is given as $\\sum_{n=1}^{w} \\beta_{n} = 1$ , which ensures that the weighted average is normalized. As we perform 2D editing without any pose priors, our weight parameter $\\beta$ is independent of the angle offset between the frame to be edited, $f_{n}$ , and the already edited frames in $W$ . To assign weight parameters with exponential decay, ensuring the closest frame receives the highest", + "bbox": [ + 72, + 811, + 920, + 949 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 77, + 16, + 94, + 30 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "U. Khalid et al.", + "bbox": [ + 159, + 14, + 314, + 30 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "weight, we can use an exponential decay function for the weight $\\beta_{n}$ of the $n$ -th frame in $W$ . By employing a decay factor $\\lambda_{d}$ ( $0 < \\lambda_{d} < 1$ ), the weight of each frame decreases exponentially as its distance from the target frame increases. The weight $\\beta_{n}$ for the $n$ -th frame is defined as $\\beta_{n} = \\lambda_{d}^{w - n}$ . This ensures that the edited frame $E_n$ closest to the target frame $f$ (i.e., $n = w$ ) receives the highest weight. To ensure the weights sum to 1, each weight is normalized by dividing by the sum of all weights, $\\beta_{n} = \\frac{\\lambda_{d}^{w - n}}{\\sum_{j = 1}^{w}\\lambda_{d}^{w - j}}$ . This normalization guarantees that the sum of $\\beta_{n}$ across all $n$ equals 1, adhering to the constraint $\\sum_{n = 1}^{w}\\beta_{n} = 1$ .", + "bbox": [ + 77, + 53, + 917, + 219 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Our editing path is determined by the sequence of frames from the captured video. Therefore, during the editing of frame $f_{n}$ , we incorporate the previous $w$ edited frames into the set $W$ , assigning the highest weight $\\beta$ to $E_{n - 1}$ . Using Eq. 2 and Eq. 
3, we define our score estimation function as following:", + "bbox": [ + 77, + 219, + 917, + 297 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\varepsilon_ {\\theta} \\left(e _ {t}, f, \\mathcal {T}, W\\right) = \\gamma_ {f} \\tilde {\\varepsilon} _ {\\theta} \\left(e _ {t}, f, \\mathcal {T}\\right) + \\gamma_ {E} \\bar {\\varepsilon} _ {\\theta} \\left(e _ {t}, \\varnothing_ {\\mathcal {T}}, W\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 237, + 310, + 912, + 328 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\gamma_{f}$ is a hyperparameter that determines the influence of the original frame undergoing editing on the noise estimation, and $\\gamma_{E}$ represents the significance of the noise estimation from adjacent edited views.", + "bbox": [ + 77, + 341, + 917, + 400 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3 3D Scene Reconstruction", + "text_level": 1, + "bbox": [ + 79, + 424, + 450, + 443 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "After multi-view consistent 2D editing is achieved across all frames of the given video, $V$ , we leverage the edited frames $E_{i}$ and their corresponding masks $M_{i}$ to construct a 3D scene without any SfM pose initialization. Due to the explicit nature of 3DGS [17], determining the camera poses is essentially equivalent to estimating the transformation of a collection of 3D Gaussian points. Next, we will begin by introducing an extra Gaussian parameter for precise local editing. Subsequently, we will explore relative pose estimation through incremental frame inclusion. Lastly, we will examine the scene expansion, alongside a discussion on the losses integrated into our global optimization strategy.", + "bbox": [ + 77, + 453, + 917, + 630 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3D Gaussians Parameterization for Precise Editing. Projecting KEA (see Section 3.2) into 3D Gaussians, $\\mathcal{H}$ , using $M$ for KEA identity assignment, is essential for accurate editing. Therefore, we introduce a vector, $m$ associated with the Gaussian point, $h = \\{\\mu, \\Sigma, c, \\alpha, m\\}$ in the 3D Gaussian set, $\\mathcal{H}_i$ of the $i_{th}$ frame. The parameter $m$ is a learnable vector of length 2 corresponding to the number of labels in the segmentation map, $M$ . We optimize the newly introduced parameter $m$ to represent KEA identity during training. However, unlike the view-dependent Gaussian parameters, the KEA Identity remains uniform across different rendering views. Gaussian KEA identity ensures the continuous monitoring of each Gaussian's categorization as they evolve, thereby enabling the selective application of gradients, and the exclusive rendering of targeted objects, markedly enhancing processing efficiency in intricate scenes.", + "bbox": [ + 77, + 653, + 917, + 889 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Next, we delve into the training pipeline inspired by [3,8] in detail which consists of two stages: (i) Relative Pose Estimation, and (ii) Global 3D Scene Expansion.", + "bbox": [ + 77, + 889, + 917, + 947 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "3DEgo: 3D Editing on the Go!", + "bbox": [ + 535, + 14, + 837, + 32 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 903, + 16, + 917, + 30 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Per Frame View Initialization. 
To begin the training process, we randomly pick a specific frame, denoted as $E_{i}$ . We then employ a pre-trained monocular depth estimator, symbolized by $\mathcal{D}$ , to derive the depth map $D_{i}$ for $E_{i}$ . Utilizing $D_{i}$ , which provides strong geometric cues independent of camera parameters, we initialize 3DGS with points extracted from monocular depth through camera intrinsics and orthogonal projection. This initialization step involves learning a set of 3D Gaussians $\mathcal{H}_i$ to minimize the photometric discrepancy between the rendered frame and the current frame $E_{i}$ . The photometric loss, $\mathcal{L}_{rgb}$ , optimizes the conventional 3D Gaussian parameters, including color $c$ , covariance $\Sigma$ , mean $\mu$ , and opacity $\alpha$ . However, to initialize the KEA identity and adjust $m$ for the 3D Gaussians, merely relying on $\mathcal{L}_{rgb}$ is insufficient. Hence, we propose the KEA loss, denoted as $\mathcal{L}_{KEA}$ , which incorporates the 2D mask $M_{i}$ corresponding to $E_{i}$ . We learn the KEA identity of each Gaussian point during training by applying the KEA loss $\mathcal{L}_{KEA}$ . Overall, the 3D Gaussian optimization is defined as,", + "bbox": [ + 75, + 55, + 922, + 330 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {H} _ {i} ^ {*} = \\arg \\min _ {c, \\Sigma , \\mu , \\alpha} \\mathcal {L} _ {r g b} (\\mathcal {R} (\\mathcal {H} _ {i}), E _ {i}) + \\arg \\min _ {m} \\mathcal {L} _ {K E A} (\\mathcal {R} (\\mathcal {H} _ {i}), M _ {i}), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 171, + 347, + 915, + 377 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\mathcal{R}$ signifies the 3DGS rendering function. The photometric loss $\\mathcal{L}_{rgb}$ , as introduced in [17], is a blend of $\\mathcal{L}_1$ and D-SSIM losses:", + "bbox": [ + 75, + 386, + 917, + 424 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {r g b} = (1 - \\gamma) \\mathcal {L} _ {1} + \\gamma \\mathcal {L} _ {\\mathrm {D} - \\text {S S I M}}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 442, + 915, + 461 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "$\\mathcal{L}_{KEA}$ has two components, (i) a 2D Binary Cross-Entropy Loss and (ii) a 3D Jensen-Shannon Divergence (JSD) Loss, and is defined as,", + "bbox": [ + 75, + 478, + 917, + 517 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {K E A} = \\lambda_ {B C E} \\mathcal {L} _ {B C E} + \\lambda_ {J S D} \\mathcal {L} _ {J S D} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 314, + 539, + 915, + 559 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Let $\\mathcal{N}$ be the total number of pixels in the mask $M$ , and $\\mathcal{X}$ represent the set of all pixels. 
We calculate the binary cross-entropy loss $\mathcal{L}_{BCE}$ as follows,", + "bbox": [ + 75, + 567, + 917, + 608 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {B C E} = - \\frac {1}{\\mathcal {N}} \\sum_ {x \\in \\mathcal {X}} \\left[ M _ {i} (x) \\log \\left(\\mathcal {R} \\left(\\mathcal {H} _ {i}, m\\right) (x)\\right) + \\left(1 - M _ {i} (x)\\right) \\log \\left(1 - \\mathcal {R} \\left(\\mathcal {H} _ {i}, m\\right) (x)\\right) \\right] \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 108, + 639, + 915, + 679 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $M_i(x)$ is the value of the ground truth mask at pixel $x$ , indicating whether the pixel belongs to the foreground (1) or the background (0). The sum computes the total loss over all pixels, and the division by $\\mathcal{N}$ normalizes the loss, making it independent of the image size. A rendering operation, denoted as $\\mathcal{R}(\\mathcal{H}_i, m)(x)$ , produces $m_{\\mathcal{R}}$ for a given pixel $x$ , which represents the weighted sum of the vector $m$ values for the overlapping Gaussians associated with that pixel. Here, $m$ and $m_{\\mathcal{R}}$ both have a dimensionality of 2, which is intentionally kept the same as the number of classes in the mask labels. We apply the softmax function to $m_{\\mathcal{R}}$ to extract the KEA identity, given as $\\mathrm{KEA\\ Identity} = \\mathrm{softmax}(m_{\\mathcal{R}})$ . The softmax output is interpreted as either 0, indicating a position outside the KEA, or 1, denoting a location within the KEA.", + "bbox": [ + 75, + 695, + 917, + 908 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To enhance the accuracy of Gaussian KEA identity assignment, we also introduce an unsupervised 3D Regularization Loss to directly influence the learning", + "bbox": [ + 75, + 910, + 917, + 947 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 77, + 16, + 94, + 29 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "U. Khalid et al.", + "bbox": [ + 159, + 14, + 314, + 30 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "of the Identity vector $m$ . This 3D Regularization Loss utilizes spatial consistency in 3D, ensuring that the Identity vectors $m$ of the $Z$ nearest 3D Gaussians are similar in feature space. Specifically, we employ a symmetrical and bounded loss based on the Jensen-Shannon Divergence,", + "bbox": [ + 75, + 55, + 920, + 134 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {J S D}} = \\frac {1}{2 Y Z} \\sum_ {y = 1} ^ {Y} \\sum_ {z = 1} ^ {Z} \\left[ S \\left(m _ {y}\\right) \\log \\left(\\frac {2 S \\left(m _ {y}\\right)}{S \\left(m _ {y}\\right) + S \\left(m _ {z} ^ {\\prime}\\right)}\\right) + S \\left(m _ {z} ^ {\\prime}\\right) \\log \\left(\\frac {2 S \\left(m _ {z} ^ {\\prime}\\right)}{S \\left(m _ {y}\\right) + S \\left(m _ {z} ^ {\\prime}\\right)}\\right) \\right] \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 164, + 915, + 208 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Here, $S$ indicates the softmax function, and $m_z'$ represents the $z$ -th Identity vector from the $Z$ nearest neighbors in 3D space.", + "bbox": [ + 77, + 222, + 917, + 261 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Relative Pose Initialization. Next, the relative camera pose is estimated for each new frame added to the training scheme. 
$\\mathcal{H}_i^*$ is transformed via a learnable SE-3 affine transformation $\\mathcal{M}_i$ to the subsequent frame $i + 1$ , where $\\mathcal{H}_{i + 1} = \\mathcal{M}_i\\odot \\mathcal{H}_i$ . Optimizing transformation $\\mathcal{M}_i$ entails minimizing the photometric loss between the rendered image and the next frame $E_{i + 1}$ ,", + "bbox": [ + 77, + 261, + 917, + 360 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {M} _ {i} ^ {*} = \\arg \\min _ {\\mathcal {M} _ {i}} \\mathcal {L} _ {r g b} \\left(\\mathcal {R} \\left(\\mathcal {M} _ {i} \\odot \\mathcal {H} _ {i}\\right), E _ {i + 1}\\right), \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 285, + 375, + 912, + 400 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this optimization step, we keep the attributes of $\\mathcal{H}_i^*$ fixed to distinguish camera motion from other Gaussian transformations such as pruning, densification, and self-rotation. Applying the above 3DGS initialization to sequential image pairs enables inferring relative poses across frames. However, accumulated pose errors could adversely affect the optimization of a global scene. To tackle this challenge, we propose the gradual, sequential expansion of the 3DGS.", + "bbox": [ + 77, + 414, + 917, + 531 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Gradual 3D Scene Expansion. As illustrated above, beginning with frame $E_{i}$ , we initiate with a collection of 3D Gaussian points, setting the camera pose to an orthogonal configuration. Then, we calculate the relative camera pose between frames $E_{i}$ and $E_{i+1}$ . After estimating the relative camera poses, we propose to expand the 3DGS scene. This all-inclusive 3DGS optimization refines the collection of 3D Gaussian points, including all attributes, across $I$ iterations, taking the calculated relative pose and the two observed frames as inputs. With the availability of the next frame $E_{i+2}$ after $I$ iterations, we repeat the above procedure: estimating the relative pose between $E_{i+1}$ and $E_{i+2}$ , and expanding the scene with all-inclusive 3DGS.", + "bbox": [ + 77, + 557, + 917, + 752 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To perform all-inclusive 3DGS optimization, we increase the density of the Gaussians currently under reconstruction as new frames are introduced. Following [17], we identify candidates for densification by evaluating the average magnitude of position gradients in view-space. To focus densification on these yet-to-be-observed areas, we enhance the density of the universal 3DGS every $I$ step, synchronized with the rate of new frame addition. We continue to expand the 3D Gaussian points until the conclusion of the input sequence. Through the repetitive application of both frame-relative pose estimation and all-inclusive scene expansion, 3D Gaussians evolve from an initial partial point cloud to a complete point cloud that encapsulates the entire scene over the sequence. 
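Stepping back to the relative-pose step (Eq. 10) that drives this expansion, a compact sketch may help: the Gaussian attributes stay frozen while a 6-DoF transform is fit by photometric loss. The `render` callable, the axis-angle parameterization via `torch.matrix_exp`, and the plain L1 stand-in for $\mathcal{L}_{rgb}$ are my assumptions, not the paper's implementation:

```python
import torch

def estimate_relative_pose(gaussians, E_next, render, steps: int = 200):
    """Fit a learnable SE(3) transform M_i so the frozen Gaussians H_i*
    render the next frame E_{i+1} (Eq. 10)."""
    omega = torch.zeros(3, requires_grad=True)  # axis-angle rotation params
    t = torch.zeros(3, requires_grad=True)      # translation params
    opt = torch.optim.Adam([omega, t], lr=1e-3)
    for _ in range(steps):
        # Build the skew-symmetric hat(omega); matrix_exp maps it to a rotation.
        O = torch.zeros(3, 3)
        O[0, 1], O[0, 2], O[1, 2] = -omega[2], omega[1], -omega[0]
        R = torch.matrix_exp(O - O.T)
        rendered = render(gaussians, R, t)       # differentiable 3DGS render
        loss = (rendered - E_next).abs().mean()  # L1 stand-in for L_rgb (Eq. 6)
        opt.zero_grad()
        loss.backward()
        opt.step()
    return R.detach(), t.detach()
```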
In", + "bbox": [ + 77, + 754, + 917, + 947 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "3DEgo: 3D Editing on the Go!", + "bbox": [ + 535, + 14, + 837, + 32 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 900, + 16, + 917, + 30 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "our global optimization stage, we still utilize the $\\mathcal{L}_{KEA}$ loss as new Gaussians are added during densification.", + "bbox": [ + 77, + 55, + 917, + 92 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Pyramidal Feature Scoring. While our 2D consistent editing approach, detailed in Section 3.2, addresses various editing discrepancies, to rectify any residual inconsistencies in 2D editing, we introduce a pyramidal feature scoring method tailored for Gaussians in Key Editing Areas (KEA) identified with an identity of 1. This method begins by capturing the attributes of all Gaussians marked with KEA identity equal to 1 during initialization, establishing them as anchor points. With each densification step, these anchors are updated to mirror the present attributes of the Gaussians. Throughout the training phase, an intrapoint cloud loss, $\\mathcal{L}_{ipc}$ is utilized to compare the anchor state with the Gaussians' current state, maintaining that the Gaussians remain closely aligned with their initial anchors. $\\mathcal{L}_{ipc}$ is defined as the weighted mean square error (MSE) between the anchor Gaussian and current Gaussian parameters with the older Gaussians getting higher weightage.", + "bbox": [ + 77, + 95, + 917, + 346 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Regularizing Estimated Pose. Further, to optimize the estimated relative pose between subsequent Gaussian set, we introduce point cloud loss, $\\mathcal{L}_{pc}$ similar as in [3]. While we expand the scene, $\\mathcal{L}_{ipc}$ limits the deviation of the Gaussian parameters while $\\mathcal{L}_{pc}$ regularizes the all-inclusive pose estimation.", + "bbox": [ + 77, + 347, + 917, + 424 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {p c} = D _ {\\text {C h a m f e r}} \\left(\\mathcal {M} _ {i} ^ {*} \\mathcal {H} _ {i} ^ {*}, \\mathcal {H} _ {i + 1} ^ {*}\\right) \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 334, + 440, + 917, + 461 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Given two Gaussians, $h_i$ and $h_j$ , each characterized by multiple parameters encapsulated in their parameter vectors $\\pmb{\\theta}_i$ and $\\pmb{\\theta}_j$ respectively, the Chamfer distance $D_{\\mathrm{Chamfer}}$ between $h_i$ and $h_j$ can be formulated as:", + "bbox": [ + 77, + 476, + 917, + 534 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\nD _ {\\text {C h a m f e r}} \\left(h _ {i}, h _ {j}\\right) = \\sum_ {p \\in \\boldsymbol {\\theta} _ {i}} \\min _ {q \\in \\boldsymbol {\\theta} _ {j}} \\| p - q \\| ^ {2} + \\sum_ {q \\in \\boldsymbol {\\theta} _ {j}} \\min _ {p \\in \\boldsymbol {\\theta} _ {i}} \\| q - p \\| ^ {2} \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 198, + 551, + 917, + 593 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This equation calculates the Chamfer distance by summing the squared Euclidean distances from each parameter in $h_i$ to its closest counterpart in $h_j$ , and vice versa, thereby quantifying the similarity between the two Gaussians across all included parameters such as color, opacity, etc. 
Combining all the loss components results in the total loss function during scene expansion,", + "bbox": [ + 77, + 601, + 917, + 699 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {T} = \\lambda_ {r g b} \\mathcal {L} _ {r g b} + \\lambda_ {K E A} \\mathcal {L} _ {K E A} + \\lambda_ {i p c} \\mathcal {L} _ {i p c} + \\lambda_ {p c} \\mathcal {L} _ {p c} \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 232, + 718, + 917, + 739 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "where $\\lambda_{rgb}$ , $\\lambda_{KEA}$ , $\\lambda_{ipc}$ and $\\lambda_{pc}$ act as weighting factors for the respective loss terms.", + "bbox": [ + 77, + 747, + 917, + 783 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4 Evaluation", + "text_level": 1, + "bbox": [ + 77, + 816, + 280, + 835 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.1 Implementation Details", + "text_level": 1, + "bbox": [ + 77, + 856, + 430, + 876 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In our approach, we employ PyTorch [33] for the development, specifically focusing on 3D Gaussian splatting. GPT-3.5 Turbo [5] is used for identifying the editing attributes to identify the KEA. For segmentation purposes, SAM [20] is", + "bbox": [ + 77, + 889, + 917, + 947 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 79, + 16, + 104, + 30 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "U. Khalid et al.", + "bbox": [ + 159, + 14, + 314, + 30 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/0351ef6feaffe92abe2ec50b202730e2d32ee18fc22a8fc970fbb0d8a29b1e7b.jpg", + "image_caption": [ + "Fig. 4: Qualitative comparison of our method with the IN2N [11] over two separate scenes. When the editing prompt requests \"Give the wheels Blue Color and Make the recyclebins brown,\" IN2N [11] inadvertently alters the complete van color to blue as well, instead of just changing the tire color. It must be noted that IN2N [11] uses poses from COLMAP, while 3DEgo estimates poses while constructing the 3D scene." + ], + "image_footnote": [], + "bbox": [ + 145, + 66, + 849, + 300 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "used to generate the masks based on the key editing attributes identifying the KIA. For zero-shot point tracking, we employ a point-tracker as proposed in [34]. The editing tasks are facilitated by the Instruct Pix2Pix [4] 2D diffusion model by incorporating the masks to limit the editing within KEA. Additional details are in supplementary material.", + "bbox": [ + 72, + 443, + 917, + 541 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.2 Baseline and Datasets", + "text_level": 1, + "bbox": [ + 75, + 567, + 411, + 586 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We carry out experiments across a variety of public datasets as well as our prepared GS25 dataset.", + "bbox": [ + 72, + 600, + 917, + 635 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "GS25 Dataset comprises 25 casually captured monocular videos using mobile phones for comprehensive 3D scene analysis. This approach ensures the dataset's utility in exploring and enhancing 360-degree real-world scene reconstruction technologies. 
To further assess the efficacy of the proposed 3D editing framework, we also", + "bbox": [ + 72, + 637, + 435, + 830 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "conducted comparisons across 5 public datasets: (i) IN2N [11], (ii) Mip-NeRF [2], (iii) NeRFstudio Dataset [42], (iv) Tanks & Temples [21] and (v) CO3D-V2 [36]. We specifically validate the robustness of our approach on the CO3D dataset, which comprises thousands of object-centric videos. In our study, we introduce a unique problem, making direct comparisons with prior research challenging. Nonetheless, to assess the robustness of our method, we contrast it with", + "bbox": [ + 72, + 830, + 920, + 949 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/760066c9f9ca3ceb60d2dd9260c07ed26cb6923817580a08fb52fd64f6ccc417.jpg", + "table_caption": [ + "Table 1: Average runtime efficiency across 25 edits from the GS25 dataset (Approx. minutes)." + ], + "table_footnote": [], + "table_body": "
MethodCOLMAPModel InitializationScene Editing
Instruct-N2N [11]13min22min250min
OursXX25min
", + "bbox": [ + 457, + 731, + 917, + 783 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "3DEgo: 3D Editing on the Go!", + "bbox": [ + 535, + 14, + 840, + 35 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 893, + 16, + 915, + 29 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/925c3495878c636a1fbf26474f4b49f3010100b61042a60bafa656e3454642a8.jpg", + "image_caption": [ + "Original 3DGS" + ], + "image_footnote": [], + "bbox": [ + 145, + 56, + 377, + 211 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/62102cc1824d996f42819b4d55fa360d73db2b34f855d4b9a0d4cc6849991742.jpg", + "image_caption": [ + "Gaussian Grouping", + "Fig. 5: Our approach surpasses Gaussian Grouping [50] in 3D object elimination across different scenes from GS25 and Tanks & Temple datasets. 3DEgo is capable of eliminating substantial objects like statues from the entire scene while significantly minimizing artifacts and avoiding a blurred background." + ], + "image_footnote": [], + "bbox": [ + 377, + 56, + 612, + 211 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/1b563c3ada12b3a9f0e10367c47480bca2fe84223fa16856a1b990cfeafe00c2.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 612, + 56, + 847, + 211 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "state-of-the-art (SOTA) 3D editing techniques that rely on poses derived from COLMAP. Additionally, we present quantitative evaluations alongside pose-free 3D reconstruction approaches, specifically NoPeNeRF [3], and BARF [25]. In the pose-free comparison, we substitute only our 3D scene reconstruction component with theirs while maintaining our original editing framework unchanged. We present a time-cost analysis in Table 1 that underscores the rapid text-conditioned 3D reconstruction capabilities of 3DEgo.", + "bbox": [ + 72, + 354, + 917, + 491 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.3 Qualitative Evaluation", + "text_level": 1, + "bbox": [ + 75, + 523, + 418, + 541 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "As demonstrated in Figure 4, our method demonstrates exceptional prowess in local editing, enabling precise modifications within specific regions of a 3D scene without affecting the overall integrity. Our method also excels in multi-attribute editing, seamlessly combining changes across color, texture, and geometry within a single coherent edit. We also evaluate our method for the object removal task. The goal of 3D object removal is to eliminate an object from a 3D environment, potentially leaving behind voids due to the lack of observational", + "bbox": [ + 72, + 559, + 920, + 695 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/4e6b52468bedfb8ac8b76509fcd467b09868f63d817765b67e780b1350a16e3a.jpg", + "table_caption": [ + "Table 2: Comparing With Pose-known Methods. Quantitative evaluation of 200 edits across GS25, IN2N, Mip-NeRF, NeRFstudio, Tanks & Temples, and CO3D-V2 datasets against the methods that incorporate COLMAP poses. The top-performing results are emphasized in bold." + ], + "table_footnote": [], + "table_body": "
DatasetsDreamEditorIN2NOurs
CTIS↑CDCR↑E-PSNR↑CTIS↑CDCR↑E-PSNR↑CTIS↑CDCR↑E-PSNR↑
GS25 (Ours)0.1550.88622.7500.1420.89223.1300.1690.92523.660
Mip-NeRF0.1490.89623.9200.1640.91722.1700.1750.90124.250
NeRFstudio0.1560.90323.6700.1710.90925.1300.1630.93124.990
CO3D-V20.1740.91524.8800.1630.92425.1800.1790.93626.020
IN2N0.1670.92124.7800.1790.91026.5100.1830.92526.390
Tanks & Temples0.1500.89623.9700.1700.90123.1100.1640.91524.190
", + "bbox": [ + 84, + 817, + 912, + 944 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 79, + 16, + 104, + 30 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "U. Khalid et al.", + "bbox": [ + 159, + 14, + 314, + 30 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/1083c5f08b329a7f464636523f22a3edf554067018c5a7d3de1f0ed7c7317721.jpg", + "table_caption": [ + "Table 3: Comparing With Pose-Unknown Methods. Quantitative analysis of 200 edits applied to six datasets, comparing methods proposed for NeRF reconstruction without known camera poses. The top-performing results are emphasized in bold." + ], + "table_footnote": [], + "table_body": "
DatasetsBARF [25]Nope-NeRF [3]Ours
CTIS↑CDCR↑E-PSNR↑CTIS↑CDCR↑E-PSNR↑CTIS↑CDCR↑E-PSNR↑
GS25 (Ours)0.1390.79720.4780.1280.75319.6600.1690.92523.660
Mip-NeRF0.1340.80621.3320.1470.82018.7990.1750.90124.250
NeRFstudio0.1400.81320.1160.1380.77321.3600.1630.93124.990
CO3D-V20.1570.82021.1480.1290.82417.9710.1790.93626.020
IN2N0.1500.82922.0920.1610.81822.6040.1830.92526.390
Tanks & Temples0.1350.80621.5730.1570.81020.9040.1640.91524.190
", + "bbox": [ + 82, + 123, + 912, + 248 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "data. For the object removal task, we identify and remove the regions based on the 2D mask, $M$ . Subsequently, we focus on inpainting these \"invisible regions\" in the original 2D frames using LAMA [41]. In Figure 5, we demonstrate our 3DEgo's effectiveness in object removal compared to Gaussian Grouping. Our method's reconstruction output notably surpasses that of Gaussian Grouping [50] in terms of retaining spatial accuracy and ensuring consistency across multiple views.", + "bbox": [ + 72, + 287, + 922, + 424 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.4 Quantitative Evaluation", + "text_level": 1, + "bbox": [ + 75, + 453, + 438, + 474 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In our quantitative analysis, we employ three key metrics: CLIP Text-Image Direction Similarity (CTIS) [9], CLIP Direction Consistency Score (CDCR) [11], and Edit PSNR (EPSNR). We perform 200 edits across the six datasets listed above. We present quantitative comparisons with COLMAP-based 3D editing techniques in Table 2. Additionally, we extend our evaluation by integrating pose-free 3D reconstruction methods into our pipeline, with the performance outcomes detailed in Table 3.", + "bbox": [ + 72, + 487, + 476, + 759 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5 Ablations", + "text_level": 1, + "bbox": [ + 75, + 790, + 268, + 811 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To assess the influence of different elements within our framework, we em", + "bbox": [ + 72, + 830, + 476, + 869 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "ploy PSNR, SSIM, and LPIPS metrics across several configurations. Given that images undergo editing before the training of a 3D model, our focus is on determining the effect of various losses on the model's rendering quality. The outcomes are documented in Table 4, showcasing IP2P+COLMAP as the baseline, where", + "bbox": [ + 72, + 871, + 922, + 949 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/79af30484e8ab9c49609aed1c8f35a64f4e7afcdbc1eacb37ba9f96cfa954664.jpg", + "image_caption": [ + "Fig.6: Our method, 3D Ego achieves precise editing without using any SfM poses. To construct the IP2P+COLMAP 3D scene, we train nefacto [42] model on IP2P [4] edited frames." + ], + "image_footnote": [], + "bbox": [ + 532, + 533, + 871, + 671 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "3DEgo: 3D Editing on the Go!", + "bbox": [ + 535, + 14, + 840, + 34 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 893, + 16, + 917, + 30 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "images are edited using the standard IP2P approach [4] and COLMAP-derived poses are utilized for 3D scene construction.", + "bbox": [ + 72, + 55, + 920, + 92 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Although the $\\mathrm{IP2P + COLMAP}$ setup demonstrates limited textual fidelity due to editing inconsistencies (see Figure 6), we are only interested in the rendering quality in this analysis to ascertain our approach's effectiveness. Table 4 illustrates the effects of different optimization hyperparameters on the global scene expansion. 
The findings reveal that excluding $\mathcal{L}_{KEA}$ in the scene expansion process minimally affects ren", + "bbox": [ + 72, + 95, + 479, + 346 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "dering quality. On the other hand, excluding $\mathcal{L}_{ipc}$ leaves densification unconstrained, resulting in the inferior rendering quality reported in Table 4.", + "bbox": [ + 72, + 346, + 506, + 386 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/5bd7ca0f1f5513f5d6f14d719a3dc067ec42dcd561ad2c10aca81becb2c984eb.jpg", + "table_caption": [ + "Table 4: Ablation study results on GS25 dataset." + ], + "table_footnote": [], + "table_body": "<table>
MethodPSNR↑SSIM↑LPIPS↓
Ours27.860.900.18
IP2P+COLMAP23.870.790.23
Ours w/o LKEA26.730.880.19
Ours w/o Lipc22.460.0.780.24
Ours w/o Lpc25.180.840.20
", + "bbox": [ + 510, + 188, + 910, + 305 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "6 Limitation", + "text_level": 1, + "bbox": [ + 75, + 414, + 280, + 435 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Our approach depends on the pretrained IP2P model [4], which has inherent limitations, especially evident in specific scenarios. For instance, Figure 7 shows the challenge with the prompt \"Make the car golden and give wheels blue color\". Unlike IN2N [11], which introduces unspecific color changes on the van's windows. Our method offers more targeted editing but falls short of generating ideal results due to IP2P's limitations in handling precise editing tas", + "bbox": [ + 72, + 453, + 479, + 705 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "7 Conclusion", + "text_level": 1, + "bbox": [ + 75, + 734, + 288, + 754 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "3DEgo marks a pivotal advancement in 3D scene reconstruction from monocular videos, eliminating the need for conventional pose estimation methods and model initialization. Our method integrates frame-by-frame editing with advanced consistency techniques to efficiently generate photorealistic 3D scenes directly from textual prompts. Demonstrated across multiple datasets, our approach showcases superior editing speed, precision, and flexibility. 3DEgo not only simplifies the 3D editing process but also broadens the scope for creative content generation from readily available video sources. This work lays the groundwork for future innovations in accessible and intuitive 3D content creation tools.", + "bbox": [ + 72, + 773, + 920, + 947 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/3f096e8aea33f2dc404290312cea3cb32bce9fe044e4fe46daac12b6c3cfcdfc.jpg", + "image_caption": [ + "Original 3D Model", + "Fig. 7: Due to the limitations of the IP2P model, our method inadvertently alters the colors of the van's windows, which is not the desired outcome." + ], + "image_footnote": [], + "bbox": [ + 496, + 497, + 704, + 546 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/f5aefd096304432b44d46e7031178743d1ed19d68f9ed2aa66c3a3e7fc7a8f7b.jpg", + "image_caption": [ + "\"Make the car golden and give wheels blue color\"" + ], + "image_footnote": [], + "bbox": [ + 704, + 497, + 917, + 544 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 79, + 16, + 104, + 30 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "U. Khalid et al.", + "bbox": [ + 159, + 14, + 314, + 30 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgement", + "text_level": 1, + "bbox": [ + 77, + 53, + 338, + 76 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This work was partially supported by the NSF under Grant Numbers OAC-1910469 and OAC-2311245.", + "bbox": [ + 75, + 92, + 920, + 131 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 77, + 162, + 237, + 183 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Bao, C., Zhang, Y., Yang, B., Fan, T., Yang, Z., Bao, H., Zhang, G., Cui, Z.: Sine: Semantic-driven image-based nerf editing with prior-guided editing field. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 20919-20929 (2023)", + "2. 
Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mipnerf 360: Unbounded anti-aliased neural radiance fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5470-5479 (2022)", + "3. Bian, W., Wang, Z., Li, K., Bian, J.W., Prisacariu, V.A.: Nope-nerf: Optimising neural radiance field with no pose prior. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4160-4169 (2023)", + "4. Brooks, T., Holynski, A., Efros, A.A.: Instructpix2pix: Learning to follow image editing instructions. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 18392-18402 (2023)", + "5. Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J.D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al.: Language models are few-shot learners. Advances in neural information processing systems 33, 1877-1901 (2020)", + "6. Chiang, P.Z., Tsai, M.S., Tseng, H.Y., Lai, W.S., Chiu, W.C.: Stylizing 3d scene via implicit representation and hypernetwork. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 1475-1484 (2022)", + "7. Dong, J., Wang, Y.X.: Vica-nerf: View-consistency-aware 3d editing of neural radiance fields. Advances in Neural Information Processing Systems 36 (2024)", + "8. Fu, Y., Liu, S., Kulkarni, A., Kautz, J., Efros, A.A., Wang, X.: Colmap-free 3d gaussian splatting (2023), https://arxiv.org/abs/2312.07504", + "9. Gal, R., Patashnik, O., Maron, H., Chechik, G., Cohen-Or, D.: Stylegan-nada: Clipguided domain adaptation of image generators. arXiv preprint arXiv:2108.00946 (2021)", + "10. Gao, W., Aigerman, N., Groueix, T., Kim, V.G., Hanocka, R.: Textdeformer: Geometry manipulation using text guidance. arXiv preprint arXiv:2304.13348 (2023)", + "11. Haque, A., Tancik, M., Efros, A.A., Holynski, A., Kanazawa, A.: Instruct-nerf2nerf: Editing 3d scenes with instructions. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 19740-19750 (2023)", + "12. Hertz, A., Mokady, R., Tenenbaum, J., Aberman, K., Pritch, Y., Cohen-Or, D.: Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626 (2022)", + "13. Hong, F., Zhang, M., Pan, L., Cai, Z., Yang, L., Liu, Z.: Avatarclip: Zero-shot text-driven generation and animation of 3d avatars. ACM Transactions on Graphics (TOG) 41(4), 1-19 (2022)", + "14. Huang, Y.H., He, Y., Yuan, Y.J., Lai, Y.K., Gao, L.: Stylizednerf: consistent 3d scene stylization as stylized nerf via 2d-3d mutual learning. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 18342-18352 (2022)" + ], + "bbox": [ + 79, + 201, + 920, + 947 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "3DEgo: 3D Editing on the Go!", + "bbox": [ + 535, + 14, + 840, + 34 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 893, + 16, + 917, + 30 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "15. Jeong, Y., Ahn, S., Choy, C., Anandkumar, A., Cho, M., Park, J.: Self-calibrating neural radiance fields. In: ICCV (2021)", + "16. Karim, N., Khalid, U., Iqbal, H., Hua, J., Chen, C.: Free-editor: Zero-shot text-driven 3d scene editing. arXiv preprint arXiv:2312.13663 (2023)", + "17. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. 
ACM Transactions on Graphics (ToG) 42(4), 1-14 (2023)", + "18. Khalid, U., Iqbal, H., Karim, N., Hua, J., Chen, C.: Latentedirector: Text driven local editing of 3d scenes. arXiv preprint arXiv:2312.09313 (2023)", + "19. Kim, S., Lee, K., Choi, J.S., Jeong, J., Sohn, K., Shin, J.: Collaborative score distillation for consistent visual editing. In: Thirty-seventh Conference on Neural Information Processing Systems (2023), https://openreview.net/forum?id=0tEjORCGFD", + "20. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., et al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023)", + "21. Knapitsch, A., Park, J., Zhou, Q.Y., Koltun, V.: Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Transactions on Graphics (2017)", + "22. Kobayashi, S., Matsumoto, E., Sitzmann, V.: Decomposing nerf for editing via feature field distillation. arXiv preprint arXiv:2205.15585 (2022)", + "23. Li, Y., Lin, Z.H., Forsyth, D., Huang, J.B., Wang, S.: Climatenerf: Physically-based neural rendering for extreme climate synthesis. arXiv e-prints pp. arXiv-2211 (2022)", + "24. Li, Y., Dou, Y., Shi, Y., Lei, Y., Chen, X., Zhang, Y., Zhou, P., Ni, B.: Focaldreamer: Text-driven 3d editing via focal-fusion assembly. arXiv preprint arXiv:2308.10608 (2023)", + "25. Lin, C.H., Ma, W.C., Torralba, A., Lucey, S.: Barf: Bundle-adjusting neural radiance fields. In: ICCV (2021)", + "26. Liu, H.K., Shen, I., Chen, B.Y., et al.: Nerf-in: Free-form nerf inpainting with rgb-d priors. arXiv preprint arXiv:2206.04901 (2022)", + "27. Long, X., Guo, Y.C., Lin, C., Liu, Y., Dou, Z., Liu, L., Ma, Y., Zhang, S.H., Habermann, M., Theobalt, C., et al.: Wonder3d: Single image to 3d using cross-domain diffusion. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9970-9980 (2024)", + "28. Michel, O., Bar-On, R., Liu, R., et al.: Text2mesh: Text-driven neural stylization for meshes. In: CVPR 2022. pp. 13492-13502 (2022)", + "29. Nguyen-Phuoc, T., Liu, F., Xiao, L.: Snerf: stylized neural implicit representations for 3d scenes. arXiv preprint arXiv:2207.02363 (2022)", + "30. Nichol, A., Dhariwal, P., Ramesh, A., Shyam, P., Mishkin, P., McGrew, B., Sutskever, I., Chen, M.: Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741 (2021)", + "31. Noguchi, A., Sun, X., Lin, S., Harada, T.: Neural articulated radiance field. In: ICCV 2021. pp. 5762-5772 (2021)", + "32. Park, H.S., Jun, C.H.: A simple and fast algorithm for k-medoids clustering. Expert systems with applications 36(2), 3336-3341 (2009)", + "33. Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., et al.: Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems 32 (2019)", + "34. Rajic, F., Ke, L., Tai, Y.W., Tang, C.K., Danelljan, M., Yu, F.: Segment anything meets point tracking. arXiv preprint arXiv:2307.01197 (2023)" + ], + "bbox": [ + 79, + 56, + 917, + 947 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 79, + 16, + 104, + 30 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "U. Khalid et al.", + "bbox": [ + 157, + 14, + 314, + 30 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "35. 
Ramesh, A., Dhariwal, P., Nichol, A., Chu, C., Chen, M.: Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125 (2022)", + "36. Reizenstein, J., Shapovalov, R., Henzler, P., Sbordone, L., Labatut, P., Novotny, D.: Common objects in 3d: Large-scale learning and evaluation of real-life 3d category reconstruction. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10901-10911 (2021)", + "37. Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: CVPR 2022. pp. 10684-10695 (2022)", + "38. Ruiz, N., Li, Y., Jampani, V., Pritch, Y., Rubinstein, M., Aberman, K.: Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 22500-22510 (2023)", + "39. Sahara, C., Chan, W., Saxena, S.e.a.: Photorealistic text-to-image diffusion models with deep language understanding. NeurIPS 2022 35, 36479-36494 (2022)", + "40. Schonberger, J.L., Frahm, J.M.: Structure-from-motion revisited. In: CVPR (2016)", + "41. Suvorov, R., Logacheva, E., Mashikhin, A., Remizova, A., Ashukha, A., Silvestrov, A., Kong, N., Goka, H., Park, K., Lempitsky, V.: Resolution-robust large mask inpainting with fourier convolutions. In: Proceedings of the IEEE/CVF winter conference on applications of computer vision. pp. 2149-2159 (2022)", + "42. Tancik, M., Weber, E., Ng, E., Li, R., Yi, B., Wang, T., Kristoffersen, A., Austin, J., Salahi, K., Ahuja, A., et al.: Nerfstudio: A modular framework for neural radiance field development. In: ACM SIGGRAPH 2023 Conference Proceedings. pp. 1-12 (2023)", + "43. Tschernezki, V., Laina, I., Larlus, D., Vedaldi, A.: Neural feature fusion fields: 3d distillation of self-supervised 2d image representations. In: 2022 International Conference on 3D Vision (3DV). pp. 443-453. IEEE (2022)", + "44. Wang, C., Chai, M., He, M., et al.: Clip-nerf: Text-and-image driven manipulation of neural radiance fields. In: CVPR 2022. pp. 3835-3844 (2022)", + "45. Wang, C., Jiang, R., Chai, M., He, M., Chen, D., Liao, J.: Nerf-art: Text-driven neural radiance fields stylization. IEEE Transactions on Visualization and Computer Graphics (2023)", + "46. Weng, H., Yang, T., Wang, J., Li, Y., Zhang, T., Chen, C., Zhang, L.: Consistent123: Improve consistency for one image to 3d object synthesis. arXiv preprint arXiv:2310.08092 (2023)", + "47. Wu, Q., Tan, J., Xu, K.: Palettenerf: Palette-based color editing for nerfs. arXiv preprint arXiv:2212.12871 (2022)", + "48. Xu, T., Harada, T.: Deforming radiance fields with cages. In: Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXIII. pp. 159-175. Springer (2022)", + "49. Yang, B., Bao, C., Zeng, J., Bao, H., Zhang, Y., Cui, Z., Zhang, G.: Neumesh: Learning disentangled neural mesh-based implicit field for geometry and texture editing. In: European Conference on Computer Vision. pp. 597-614. Springer (2022)", + "50. Ye, M., Danelljan, M., Yu, F., Ke, L.: Gaussian grouping: Segment and edit anything in 3d scenes. arXiv preprint arXiv:2312.00732 (2023)", + "51. Zhang, K., Kolkin, N., Bi, S., Luan, F., Xu, Z., Shechtman, E., Snavely, N.: Arf: Artistic radiance fields. In: European Conference on Computer Vision. pp. 717-733. 
Springer (2022)" + ], + "bbox": [ + 75, + 56, + 917, + 949 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "3DEgo: 3D Editing on the Go!", + "bbox": [ + 535, + 14, + 840, + 34 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 893, + 16, + 917, + 30 + ], + "page_idx": 16 + }, + { + "type": "ref_text", + "text": "52. Zhuang, J., Wang, C., Lin, L., Liu, L., Li, G.: Dreameditor: Text-driven 3d scene editing with neural fields. In: SIGGRAPH Asia 2023 Conference Papers. pp. 1-10 (2023)", + "bbox": [ + 77, + 56, + 920, + 110 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 79, + 16, + 104, + 30 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "U. Khalid et al.", + "bbox": [ + 159, + 14, + 314, + 30 + ], + "page_idx": 17 + } +] \ No newline at end of file diff --git a/2024/3DEgo_ 3D Editing on the Go!/58ba5dca-f01b-44f3-bd07-614e4a4b113d_model.json b/2024/3DEgo_ 3D Editing on the Go!/58ba5dca-f01b-44f3-bd07-614e4a4b113d_model.json new file mode 100644 index 0000000000000000000000000000000000000000..092bbf1efde7ff962e918dfa7817a0b155f1d1a7 --- /dev/null +++ b/2024/3DEgo_ 3D Editing on the Go!/58ba5dca-f01b-44f3-bd07-614e4a4b113d_model.json @@ -0,0 +1,2524 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.24, + 0.05, + 0.756, + 0.078 + ], + "angle": 0, + "content": "3DEgo: 3D Editing on the Go!" + }, + { + "type": "text", + "bbox": [ + 0.101, + 0.111, + 0.9, + 0.151 + ], + "angle": 0, + "content": "Umar Khalid\\(^{1,*}\\), Hasan Iqbal\\(^{2,*}\\), Azib Farooq\\(^{3}\\), Jing Hua\\(^{2}\\), and Chen Chen\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.166, + 0.751, + 0.184 + ], + "angle": 0, + "content": "1 University of Central Florida, Orlando, FL, USA" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.184, + 0.888, + 0.202 + ], + "angle": 0, + "content": "\\(^{2}\\) Department of Computer Science, Wayne State University, Detroit, MI, USA" + }, + { + "type": "text", + "bbox": [ + 0.101, + 0.203, + 0.895, + 0.237 + ], + "angle": 0, + "content": "\\(^{3}\\) Department of Computer Science and Software Engineering, Miami University, Oxford, OH, USA" + }, + { + "type": "list", + "bbox": [ + 0.101, + 0.184, + 0.895, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.144, + 0.282, + 0.851, + 0.64 + ], + "angle": 0, + "content": "Abstract. We introduce 3DEgo to address a novel problem of directly synthesizing photorealistic 3D scenes from monocular videos guided by textual prompts. Conventional methods construct a text-conditioned 3D scene through a three-stage process, involving pose estimation using Structure-from-Motion (SfM) libraries like COLMAP, initializing the 3D model with unedited images, and iteratively updating the dataset with edited images to achieve a 3D scene with text fidelity. Our framework streamlines the conventional multi-stage 3D editing process into a single-stage workflow by overcoming the reliance on COLMAP and eliminating the cost of model initialization. We apply a diffusion model to edit video frames prior to 3D scene creation by incorporating our designed noise blender module for enhancing multi-view editing consistency, a step that does not require additional training or fine-tuning of T2I diffusion models. 3DEgo utilizes 3D Gaussian Splatting to create 3D scenes from the multi-view consistent edited frames, capitalizing on the inherent temporal continuity and explicit point cloud data. 
3DEgo demonstrates remarkable editing precision, speed, and adaptability across a variety of video sources, as validated by extensive evaluations on six datasets, including our own prepared GS25 dataset. Project Page: https://3dego.github.io/" + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.656, + 0.851, + 0.674 + ], + "angle": 0, + "content": "Keywords: Gaussian Splatting \\(\\cdot\\) 3D Editing \\(\\cdot\\) Cross-View Consistency" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.706, + 0.311, + 0.725 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.745, + 0.922, + 0.921 + ], + "angle": 0, + "content": "In the pursuit of constructing photo-realistic 3D scenes from monocular video sources, it is a common practice to use the Structure-from-Motion (SfM) library, COLMAP [40] for camera pose estimation. This step is critical for aligning frames extracted from the video, thereby facilitating the subsequent process of 3D scene reconstruction. To further edit these constructed 3D scenes, a meticulous process of frame-by-frame editing based on textual prompts is often employed [52]. Recent works, such as IN2N [11], estimate poses from frames using SfM [40] to initially train an unedited 3D scene. Upon initializing a 3D model, the training dataset is iteratively updated by adding edited images at a consistent rate" + }, + { + "type": "page_footnote", + "bbox": [ + 0.084, + 0.93, + 0.298, + 0.948 + ], + "angle": 0, + "content": "* Equal Contribution" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.079, + 0.017, + 0.096, + 0.031 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.16, + 0.016, + 0.315, + 0.032 + ], + "angle": 0, + "content": "U. Khalid et al." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.056, + 0.921, + 0.234 + ], + "angle": 0, + "content": "of editing. This process of iterative dataset update demands significant computational resources and time. Due to challenges with initial edit consistency, IN2N [11] training necessitates the continuous addition of edited images to the dataset over a significantly large number of iterations. This issue stems from the inherent limitations present in Text-to-Image (T2I) diffusion models [4, 37], where achieving prompt-consistent edits across multiple images—especially those capturing the same scene—proves to be a formidable task [7, 19]. Such inconsistencies significantly undermine the effectiveness of 3D scene modifications, particularly when these altered frames are leveraged to generate unique views." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.26, + 0.423, + 0.667 + ], + "angle": 0, + "content": "In this work, we address a novel problem of efficiently reconstructing 3D scenes directly from monocular videos without using COLMAP [40] aligned with the editing textual prompt. Specifically, we apply a diffusion model [4] to edit every frame of a given monocular video before creating a 3D scene. To address the challenge of consistent editing across all the frames, we introduce a novel noise blender module, which ensures each new edited view is conditioned upon its adjacent, previously edited views. This is achieved by calculating a weighted average of image-conditional noise estimations such that closer frames exert greater influence on the edit" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.668, + 0.921, + 0.884 + ], + "angle": 0, + "content": "ing outcome. 
Our editing strategy utilizes the IP2P [4] 2D editing diffusion model, which effectively employs both conditional and unconditional noise prediction. Consequently, our method achieves multi-view consistency without the necessity for extra training or fine-tuning, unlike prior approaches [7,27,46]. For 3D scene synthesis based on the edited views, our framework utilizes the Gaussian Splatting (GS) [17] technique, capitalizing on the temporal continuity of video data and the explicit representation of point clouds. Originally designed to work with pre-computed camera poses, 3D Gaussian Splatting makes it possible to synthesize views and construct edited 3D scenes from monocular videos without the need for SfM pre-processing, overcoming one of NeRF's significant limitations [25]." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.891, + 0.922, + 0.951 + ], + "angle": 0, + "content": "Our method grows the 3D Gaussians of the scene continuously, from the edited frames, as the camera moves, eliminating the need for pre-computed camera poses and 3D model initialization on original un-edited frames to identify" + }, + { + "type": "image", + "bbox": [ + 0.458, + 0.283, + 0.896, + 0.484 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.434, + 0.496, + 0.92, + 0.624 + ], + "angle": 0, + "content": "Fig. 1: Our method, 3DEgo, streamlines the 3D editing process by merging a three-stage workflow into a singular, comprehensive framework. This efficiency is achieved by bypassing the need for COLMAP [40] for pose initialization and avoiding the initialization of the model with unedited images, unlike other existing approaches [7,11,19]." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.536, + 0.016, + 0.842, + 0.036 + ], + "angle": 0, + "content": "3DEgo: 3D Editing on the Go!" + }, + { + "type": "page_number", + "bbox": [ + 0.903, + 0.017, + 0.922, + 0.032 + ], + "angle": 0, + "content": "3" + }, + { + "type": "image", + "bbox": [ + 0.168, + 0.068, + 0.831, + 0.3 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.319, + 0.926, + 0.427 + ], + "angle": 0, + "content": "Fig. 2: 3DEgo offers rapid, accurate, and adaptable 3D editing, bypassing the need for original 3D scene initialization and COLMAP poses. This ensures compatibility with videos from any source, including casual smartphone captures like the Van 360-degree scene. The above results identify three cases challenging for IN2N [11], where our method can convert a monocular video into customized 3D scenes using a streamlined, single-stage reconstruction process." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.47, + 0.926, + 0.549 + ], + "angle": 0, + "content": "an affine transformation that maps the 3D Gaussians from frame \\(i\\) to accurately render the pixels in frame \\(i + 1\\). Hence, our method 3DEgo condenses a three-stage 3D editing process into a single-stage, unified and efficient framework as shown in Figure 1. Our contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.57, + 0.92, + 0.628 + ], + "angle": 0, + "content": "- We tackle the novel challenge of directly transforming monocular videos into 3D scenes guided by editing text prompts, circumventing conventional 3D editing pipelines."
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.631, + 0.92, + 0.688 + ], + "angle": 0, + "content": "- We introduce a unique auto-regressive editing technique that enhances multiview consistency across edited views, seamlessly integrating with pre-trained diffusion models without the need for additional fine-tuning." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.691, + 0.92, + 0.769 + ], + "angle": 0, + "content": "- We propose a COLMAP-free method using 3D Gaussian splatting for reconstructing 3D scenes from casually captured videos. This technique leverages the video's continuous time sequence for pose estimation and scene development, bypassing traditional SfM dependencies." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.771, + 0.918, + 0.829 + ], + "angle": 0, + "content": "- We present an advanced technique for converting 2D masks into 3D space, enhancing editing accuracy through Pyramidal Gaussian Scoring (PGS), ensuring more stable and detailed refinement." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.831, + 0.922, + 0.931 + ], + "angle": 0, + "content": "- Through extensive evaluations on six datasets—including our custom GS25 and others like IN2N, Mip-NeRF, NeRFstudio Dataset, Tanks & Temples, and CO3D-V2—we demonstrate our method's enhanced editing precision and efficiency, particularly with 360-degree and casually recorded videos, as illustrated in Fig. 2." + }, + { + "type": "list", + "bbox": [ + 0.09, + 0.57, + 0.922, + 0.931 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.08, + 0.018, + 0.095, + 0.03 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.16, + 0.016, + 0.315, + 0.032 + ], + "angle": 0, + "content": "U. Khalid et al." + }, + { + "type": "title", + "bbox": [ + 0.081, + 0.054, + 0.331, + 0.074 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.099, + 0.919, + 0.215 + ], + "angle": 0, + "content": "A growing body of research is exploring diffusion models for text-driven image editing, introducing techniques that allow for precise modifications based on user-provided instructions [30,35,37,39]. While some approaches require explicit before-and-after captions [12] or specialized training [38], making them less accessible to non-experts, IP2P [4] simplifies the process by enabling direct textual edits on images, making advanced editing tools more widely accessible." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.217, + 0.918, + 0.275 + ], + "angle": 0, + "content": "Recently, diffusion models have also been employed for 3D editing, focusing on altering the geometry and appearance of 3D scenes [1,4,10,13,16,18,22-24, 26,28,31,43,44,48,49]." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.277, + 0.921, + 0.528 + ], + "angle": 0, + "content": "Traditional NeRF representations, however, pose significant challenges for precise editing due to their implicit nature, leading to difficulties in localizing edits within a scene. Earlier efforts have mainly achieved global transformations [6, 14, 29, 45, 47, 51], with object-centric editing remaining a challenge. IN2N [11] introduced user-friendly text-based editing, though it might affect the entire scene. Recent studies [7, 19, 52] have attempted to tackle local editing and multi-view consistency challenges within the IN2N framework [11]. 
Yet, no existing approaches in the literature offer pose-free capabilities, nor can they create a text-conditioned 3D scene from arbitrary video footage. Indeed, existing 3D editing methods [11, 52] universally necessitate Structure-from-Motion (SfM) preprocessing. Recent studies like Nope-NeRF [3], BARF [25], and SC-NeRF [15] have introduced methodologies for pose optimization and calibration concurrent with the training of (unedited) NeRF." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.53, + 0.92, + 0.664 + ], + "angle": 0, + "content": "In this study, we present a novel method for constructing 3D scenes directly from textual prompts, utilizing monocular video frames without dependence on COLMAP poses [40], thus addressing unique challenges. Given the complexities NeRF's implicit nature introduces to simultaneous 3D reconstruction and camera registration, our approach leverages the advanced capabilities of 3D Gaussian Splatting (3DGS) [17] alongside a pre-trained 2D editing diffusion model for efficient 3D model creation." + }, + { + "type": "title", + "bbox": [ + 0.081, + 0.699, + 0.241, + 0.719 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.744, + 0.918, + 0.801 + ], + "angle": 0, + "content": "Given a sequence of unposed images alongside camera intrinsics, we aim to recover the camera poses in sync with the edited frames and reconstruct a photorealistic 3D scene conditioned on the textual prompt." + }, + { + "type": "title", + "bbox": [ + 0.081, + 0.835, + 0.306, + 0.853 + ], + "angle": 0, + "content": "3.1 Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.871, + 0.918, + 0.948 + ], + "angle": 0, + "content": "In the domain of 3D scene modeling, 3D Gaussian splatting [17] emerges as a notable method. The method's strength lies in its succinct Gaussian representation coupled with an effective differential rendering technique, facilitating real-time, high-fidelity visualization. This approach models a 3D environment
The rendering is achieved by approximating the projection of a 3D Gaussian along the depth dimension into pixel coordinates expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.253, + 0.92, + 0.309 + ], + "angle": 0, + "content": "\\[\nC = \\sum_ {p \\in \\mathcal {P}} c _ {p} \\tau_ {p} \\prod_ {k = 1} ^ {p - 1} (1 - \\alpha_ {k}), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.322, + 0.926, + 0.465 + ], + "angle": 0, + "content": "where \\(\\mathcal{P}\\) are ordered points overlapping the pixel, and \\(\\tau_{p} = \\alpha_{p}e^{-\\frac{1}{2}(x_{p})^{T}\\Sigma^{-1}(x_{p})}\\) quantifies the Gaussian's contribution to a specific image pixel, with \\(x_{p}\\) measuring the distance from the pixel to the center of the \\(p\\)-th Gaussian. In the original 3DGS, initial Gaussian parameters are refined to fit the scene, guided by ground truth poses obtained using SfM. Through differential rendering, the Gaussians' parameters, including position \\(\\mu\\), shape \\(\\Sigma\\), color \\(c\\), and opacity \\(\\alpha\\), are adjusted using a photometric loss function." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.498, + 0.567, + 0.52 + ], + "angle": 0, + "content": "3.2 Multi-View Consistent 2D Editing" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.539, + 0.52, + 0.617 + ], + "angle": 0, + "content": "In the first step, we perform 2D editing with key editing areas (KEA) based on the user-provided video, \\(V\\), and editing prompt, \\(\\mathcal{T}\\)." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.619, + 0.524, + 0.951 + ], + "angle": 0, + "content": "From the given video \\(V\\), we extract frames \\(\\{f_1, f_2, \\ldots, f_N\\}\\). Analyzing the textual prompt \\(\\mathcal{T}\\) with a Large Language Model \\(\\mathcal{L}\\) identifies key editing attributes \\(\\{A_1, A_2, \\ldots, A_k\\}\\), essential for editing, expressed as \\(\\mathcal{L}(\\mathcal{T}) \\to \\{A_1, A_2, \\ldots, A_k\\}\\). Utilizing these attributes, a segmentation model \\(\\mathcal{S}\\) delineates editing regions in each frame \\(f_i\\) by generating a mask \\(M_i\\) with KEA marked as 1, and others as 0. The segmentation operation is defined as, \\(\\mathcal{S}(f_i, \\{A_1, A_2, \\ldots, A_k\\}) \\to M_i\\), \\(\\forall i \\in \\{1, \\ldots, N\\}\\). Subsequently, a 2D diffusion model \\(\\mathcal{E}\\) selectively edits these regions in \\(f_i\\), as defined by \\(M_i\\), producing edited frames \\(\\{E_1, E_2, \\ldots, E_N\\}\\) under guidance from \\(\\mathcal{T}\\), such that \\(\\mathcal{E}(f_i, M_i) \\to E_i\\)." + }, + { + "type": "image", + "bbox": [ + 0.545, + 0.587, + 0.895, + 0.786 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.535, + 0.802, + 0.924, + 0.895 + ], + "angle": 0, + "content": "Fig. 3: Autoregressive Editing. At each denoising step, the model predicts \\( w + 1 \\) separate noises, which are then unified via weighted noise blender (Eq. 4) to predict \\( \\varepsilon_{\\theta}(e_t,f,\\mathcal{T},W) \\)." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.079, + 0.017, + 0.096, + 0.032 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.16, + 0.016, + 0.315, + 0.032 + ], + "angle": 0, + "content": "U. Khalid et al." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.056, + 0.921, + 0.171 + ], + "angle": 0, + "content": "Consistent Multi-View 2D Editing. 
As discussed above, differing from IN2N [11], which incorporates edited images gradually over several training iterations, our approach involves editing the entire dataset at once before the training starts. We desire that 1) each edited frame \\( E_{i} \\) follows the editing prompt \\( \\mathcal{T} \\), 2) each edit retains the original image's semantic content, and 3) the edited images \\( \\{E_1,E_2,\\dots ,E_N\\} \\) are consistent with each other." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.173, + 0.92, + 0.346 + ], + "angle": 0, + "content": "(i) Multi-view Consistent Mask. As \\( S \\) does not guarantee consistent masks across the views of a casually recorded monocular video, we utilize a zero-shot point tracker [34] to ensure uniform mask generation across the views. The procedure starts by identifying query points in the initial video frame using the ground truth mask. Query points are extracted from these ground truth masks employing the K-Medoids [32] sampling method. This method utilizes the cluster centers from K-Medoids clustering as query points. This approach guarantees comprehensive coverage of the object's various sections and enhances resilience to noise and outliers." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.348, + 0.92, + 0.483 + ], + "angle": 0, + "content": "(ii) Autoregressive Editing. To address the issue of preserving consistency across multiple views, we employ an autoregressive method that edits frames in sequence, with IP2P [4] editing restricted to the Key Editing Areas (KEA) as delineated by the relevant masks. Instead of editing each frame independently from just the input images, a process that can vary significantly between adjacent images, we integrate an autoregressive editing technique where the frame to be edited is conditioned on already edited adjacent frames." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.484, + 0.919, + 0.543 + ], + "angle": 0, + "content": "As discussed above, we incorporate IP2P [4] as a 2D editing diffusion model. The standard noise prediction from IP2P's backbone that includes both conditional and unconditional editing is given as," + }, + { + "type": "equation", + "bbox": [ + 0.103, + 0.558, + 0.916, + 0.575 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\varepsilon} _ {\\theta} \\left(e _ {t}, f, \\mathcal {T}\\right) = \\varepsilon_ {\\theta} \\left(e _ {t}, \\varnothing_ {f}, \\varnothing_ {\\mathcal {T}}\\right) + s _ {f} \\left(\\varepsilon_ {\\theta} \\left(e _ {t}, f, \\varnothing_ {\\mathcal {T}}\\right) - \\varepsilon_ {\\theta} \\left(e _ {t}, \\varnothing_ {f}, \\varnothing_ {\\mathcal {T}}\\right)\\right) + s _ {\\mathcal {T}} \\left(\\varepsilon_ {\\theta} \\left(e _ {t}, f, \\mathcal {T}\\right) - \\varepsilon_ {\\theta} \\left(e _ {t}, f, \\varnothing_ {\\mathcal {T}}\\right)\\right) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.588, + 0.922, + 0.743 + ], + "angle": 0, + "content": "where \\( s_f \\) and \\( s_{\\mathcal{T}} \\) are the image and textual prompt guidance scales. We suggest enhancing the noise estimation process with our autoregressive training framework. Consider a set of \\( w \\) views, represented as \\( W = \\{E_n\\}_{n=1}^w \\). Our goal is to model the distribution of the \\( i \\)-th view image by utilizing its \\( w \\) adjacent, already edited views. To achieve this, we calculate image-conditional noise estimation, \\( \\varepsilon_{\\theta}(e_t, E, \\varnothing_{\\mathcal{T}}) \\) across all frames in \\( W \\). 
The equation to compute the weighted average \\( \\bar{\\varepsilon}_{\\theta} \\) of the noise estimates from all edited frames within \\( W \\), employing \\( \\beta \\) as the weight for each frame, is delineated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.303, + 0.755, + 0.916, + 0.802 + ], + "angle": 0, + "content": "\\[\n\\bar {\\varepsilon} _ {\\theta} \\left(e _ {t}, \\varnothing_ {\\mathcal {T}}, W\\right) = \\sum_ {n = 1} ^ {w} \\beta_ {n} \\varepsilon_ {\\theta} ^ {n} \\left(e _ {t}, E _ {n}, \\varnothing_ {\\mathcal {T}}\\right) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.813, + 0.922, + 0.95 + ], + "angle": 0, + "content": "Here, \\( E_{n} \\) represents the \\( n \\)-th edited frame within \\( W \\), and \\( \\beta_{n} \\) is the weight assigned to the \\( n \\)-th frame's noise estimate. The condition that the sum of all \\( \\beta \\) values over \\( w \\) frames equals 1 is given as \\( \\sum_{n=1}^{w} \\beta_{n} = 1 \\). This ensures that the weighted average is normalized. As we perform 2D editing without any pose priors, our weight parameter \\( \\beta \\) is independent of the angle offset between the frame to be edited, \\( f_{n} \\), and the already edited frames in \\( W \\). To assign weight parameters with exponential decay, ensuring the closest frame receives the highest" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.536, + 0.015, + 0.84, + 0.033 + ], + "angle": 0, + "content": "3DEgo: 3D Editing on the Go!" + }, + { + "type": "page_number", + "bbox": [ + 0.904, + 0.017, + 0.92, + 0.031 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.054, + 0.92, + 0.221 + ], + "angle": 0, + "content": "weight, we can use an exponential decay function for the weight \\(\\beta_{n}\\) of the \\(n\\)-th frame in \\(W\\). By employing a decay factor \\(\\lambda_{d}\\) (\\(0 < \\lambda_{d} < 1\\)), the weight of each frame decreases exponentially as its distance from the target frame increases. The weight \\(\\beta_{n}\\) for the \\(n\\)-th frame is defined as, \\(\\beta_{n} = \\lambda_{d}^{w - n}\\). This ensures that the edited frame closest to the target frame \\(f\\) (\\(n = w\\)) receives the highest weight. To ensure the weights sum to 1, each weight is normalized by dividing by the sum of all weights, \\(\\beta_{n} = \\frac{\\lambda_{d}^{w - n}}{\\sum_{j = 1}^{w}\\lambda_{d}^{w - j}}\\). This normalization guarantees the sum of \\(\\beta_{n}\\) across all \\(n\\) equals 1, adhering to the constraint \\(\\sum_{n = 1}^{w}\\beta_{n} = 1\\)." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.221, + 0.919, + 0.298 + ], + "angle": 0, + "content": "Our editing path is determined by the sequence of frames from the captured video. Therefore, during the editing of frame \\( f_{n} \\), we incorporate the previous \\( w \\) edited frames into the set \\( W \\), assigning the highest weight \\( \\beta \\) to \\( E_{n - 1} \\). Using Eq. 2 and Eq. 
3, we define our score estimation function as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.238, + 0.312, + 0.915, + 0.33 + ], + "angle": 0, + "content": "\\[\n\\varepsilon_ {\\theta} \\left(e _ {t}, f, \\mathcal {T}, W\\right) = \\gamma_ {f} \\tilde {\\varepsilon} _ {\\theta} \\left(e _ {t}, f, \\mathcal {T}\\right) + \\gamma_ {E} \\bar {\\varepsilon} _ {\\theta} \\left(e _ {t}, \\varnothing_ {\\mathcal {T}}, W\\right) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.342, + 0.919, + 0.401 + ], + "angle": 0, + "content": "where \\(\\gamma_{f}\\) is a hyperparameter that determines the influence of the original frame undergoing editing on the noise estimation, and \\(\\gamma_{E}\\) represents the significance of the noise estimation from adjacent edited views." + }, + { + "type": "title", + "bbox": [ + 0.08, + 0.426, + 0.451, + 0.445 + ], + "angle": 0, + "content": "3.3 3D Scene Reconstruction" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.455, + 0.92, + 0.631 + ], + "angle": 0, + "content": "After multi-view consistent 2D editing is achieved across all frames of the given video, \\( V \\), we leverage the edited frames \\( E_{i} \\) and their corresponding masks \\( M_{i} \\) to construct a 3D scene without any SfM pose initialization. Due to the explicit nature of 3DGS [17], determining the camera poses is essentially equivalent to estimating the transformation of a collection of 3D Gaussian points. Next, we will begin by introducing an extra Gaussian parameter for precise local editing. Subsequently, we will explore relative pose estimation through incremental frame inclusion. Lastly, we will examine the scene expansion, alongside a discussion on the losses integrated into our global optimization strategy." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.655, + 0.92, + 0.89 + ], + "angle": 0, + "content": "3D Gaussian Parameterization for Precise Editing. Projecting KEA (see Section 3.2) into 3D Gaussians, \\(\\mathcal{H}\\), using \\(M\\) for KEA identity assignment, is essential for accurate editing. Therefore, we introduce a vector \\(m\\) associated with each Gaussian point, \\(h = \\{\\mu, \\Sigma, c, \\alpha, m\\}\\) in the 3D Gaussian set, \\(\\mathcal{H}_i\\) of the \\(i_{th}\\) frame. The parameter \\(m\\) is a learnable vector of length 2 corresponding to the number of labels in the segmentation map, \\(M\\). We optimize the newly introduced parameter \\(m\\) to represent KEA identity during training. However, unlike the view-dependent Gaussian parameters, the KEA Identity remains uniform across different rendering views. Gaussian KEA identity ensures the continuous monitoring of each Gaussian's categorization as they evolve, thereby enabling the selective application of gradients, and the exclusive rendering of targeted objects, markedly enhancing processing efficiency in intricate scenes." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.89, + 0.919, + 0.949 + ], + "angle": 0, + "content": "Next, we delve into the training pipeline, inspired by [3,8], which consists of two stages: (i) Relative Pose Estimation, and (ii) Global 3D Scene Expansion." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.079, + 0.017, + 0.096, + 0.03 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.16, + 0.016, + 0.315, + 0.032 + ], + "angle": 0, + "content": "U. Khalid et al." 
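To make the weighted noise blending of Eqs. 3-4 concrete, the following is a minimal PyTorch-style sketch, not the authors' implementation: the function name blend_noise, the tensor layout, and the default values chosen for \(\lambda_d\), \(\gamma_f\), and \(\gamma_E\) are all illustrative assumptions.

```python
import torch

def blend_noise(eps_frame, eps_adjacent, lambda_d=0.5, gamma_f=0.7, gamma_E=0.3):
    """Hypothetical sketch of the weighted noise blender (Eqs. 3-4).

    eps_frame:    IP2P noise estimate for the frame being edited (Eq. 2), shape (C, H, W).
    eps_adjacent: image-conditional noise estimates for the w previously edited
                  frames E_1..E_w, ordered oldest first, shape (w, C, H, W).
    """
    w = eps_adjacent.shape[0]
    # beta_n = lambda_d^(w - n) for n = 1..w: exponential decay, so the most
    # recently edited (closest) frame, n = w, receives the largest weight.
    n = torch.arange(1, w + 1, dtype=eps_adjacent.dtype)
    beta = lambda_d ** (w - n)
    beta = beta / beta.sum()  # normalize so that sum_n beta_n = 1
    # Eq. 3: weighted average of the adjacent frames' noise estimates.
    eps_bar = (beta.view(w, 1, 1, 1) * eps_adjacent).sum(dim=0)
    # Eq. 4: blend with the current frame's own text- and image-guided estimate.
    return gamma_f * eps_frame + gamma_E * eps_bar
```

For example, with \(w = 3\) and \(\lambda_d = 0.5\), the normalized weights come out to roughly (0.143, 0.286, 0.571), so the immediately preceding edited frame dominates the blend.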
+ }, + { + "type": "text", + "bbox": [ + 0.076, + 0.056, + 0.924, + 0.331 + ], + "angle": 0, + "content": "Per Frame View Initialization. To begin the training process, , we randomly pick a specific frame, denoted as \\( E_{i} \\). We then employ a pre-trained monocular depth estimator, symbolized by \\( \\mathcal{D} \\), to derive the depth map \\( D_{i} \\) for \\( E_{i} \\). Utilizing \\( D_{i} \\), which provides strong geometric cues independent of camera parameters, we initialize 3DGS with points extracted from monocular depth through camera intrinsics and orthogonal projection. This initialization step involves learning a set of 3D Gaussians \\( \\mathcal{H}_i \\) to minimize the photometric discrepancy between the rendered and current frames \\( E_{i} \\). The photometric loss, \\( \\mathcal{L}_{rgb} \\), optimize the conventional 3D Gaussian parameters including color \\( c \\), covariance \\( \\Sigma \\), mean \\( \\mu \\), and opacity \\( \\alpha \\). However, to initiate the KEA identity and adjust \\( m_g \\) for 3D Gaussians, merely relying on \\( \\mathcal{L}_{rgb} \\) is insufficient. Hence, we propose the KEA loss, denoted as \\( \\mathcal{L}_{KEA} \\), which encompasses the 2D mask \\( M_{i} \\) corresponding to \\( E_{i} \\). We learn the KEA identity of each Gaussian point during training by applying \\( \\mathcal{L}_{KEA} \\) loss \\( (\\mathcal{L}_{KEA}) \\). Overall, 3D Gaussian optimization is defined as," + }, + { + "type": "equation", + "bbox": [ + 0.173, + 0.349, + 0.917, + 0.378 + ], + "angle": 0, + "content": "\\[\n\\mathcal {H} _ {i} ^ {*} = \\arg \\min _ {c, \\Sigma , \\mu , \\alpha} \\mathcal {L} _ {r g b} (\\mathcal {R} (\\mathcal {H} _ {i}), E _ {i}) + \\arg \\min _ {m} \\mathcal {L} _ {K E A} (\\mathcal {R} (\\mathcal {H} _ {i}), M _ {i}), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.387, + 0.92, + 0.426 + ], + "angle": 0, + "content": "where \\(\\mathcal{R}\\) signifies the 3DGS rendering function. The photometric loss \\(\\mathcal{L}_{rgb}\\) as introduced in [17] is a blend of \\(\\mathcal{L}_1\\) and D-SSIM losses:" + }, + { + "type": "equation", + "bbox": [ + 0.342, + 0.443, + 0.917, + 0.463 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {r g b} = (1 - \\gamma) \\mathcal {L} _ {1} + \\gamma \\mathcal {L} _ {\\mathrm {D} - \\text {S S I M}}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.479, + 0.92, + 0.518 + ], + "angle": 0, + "content": "\\(\\mathcal{L}_{KEA}\\) has two components to it. (i) 2D Binary Cross-Entropy Loss, and (ii) 3D Jensen-Shannon Divergence (JSD) Loss, and is defined as," + }, + { + "type": "equation", + "bbox": [ + 0.317, + 0.541, + 0.917, + 0.56 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {K E A} = \\lambda_ {B C E} \\mathcal {L} _ {B C E} + \\lambda_ {J S D} \\mathcal {L} _ {J S D} \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.569, + 0.919, + 0.609 + ], + "angle": 0, + "content": "Let \\(\\mathcal{N}\\) be the total number of pixels in the \\(M\\), and \\(\\mathcal{X}\\) represent the set of all pixels. 
We calculate the binary cross-entropy loss \\(\\mathcal{L}_{BCE}\\) as follows," + }, + { + "type": "equation", + "bbox": [ + 0.109, + 0.64, + 0.917, + 0.68 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {B C E} = - \\frac {1}{\\mathcal {N}} \\sum_ {x \\in \\mathcal {X}} \\left[ M _ {i} (x) \\log \\left(\\mathcal {R} \\left(\\mathcal {H} _ {i}, m\\right) (x)\\right) + \\left(1 - M _ {i} (x)\\right) \\log \\left(1 - \\mathcal {R} \\left(\\mathcal {H} _ {i}, m\\right) (x)\\right) \\right] \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.696, + 0.92, + 0.909 + ], + "angle": 0, + "content": "where \\( M(x) \\) is the value of the ground truth mask at pixel \\( x \\), indicating whether the pixel belongs to the foreground (1) or the background (0). The sum computes the total loss over all pixels, and the division by \\( \\mathcal{N} \\) normalizes the loss, making it independent of the image size. A rendering operation, denoted as \\( \\mathcal{R}(\\mathcal{H}_i, m)(x) \\), produces \\( m_{\\mathcal{R}} \\) for a given pixel \\( x \\), which represents the weighted sum of the vector \\( m \\) values for the overlapping Gaussians associated with that pixel. Here, \\( m \\) and \\( m_{\\mathcal{R}} \\) both have a dimensionality of 2, which is intentionally kept the same as the number of classes in mask labels. We apply the softmax function on \\( m_{\\mathcal{R}} \\) to extract the KEA identity, given as \\( \\text{KEA Identity} = \\mathrm{softmax}(m_{\\mathcal{R}}) \\). The softmax output is interpreted as either 0, indicating a position outside the KEA, or 1, denoting a location within the KEA." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.911, + 0.919, + 0.949 + ], + "angle": 0, + "content": "To enhance the accuracy of Gaussian KEA identity assignment, we also introduce an unsupervised 3D Regularization Loss to directly influence the learning" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.536, + 0.015, + 0.84, + 0.033 + ], + "angle": 0, + "content": "3DEgo: 3D Editing on the Go!" + }, + { + "type": "page_number", + "bbox": [ + 0.903, + 0.017, + 0.919, + 0.031 + ], + "angle": 0, + "content": "9" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.056, + 0.921, + 0.136 + ], + "angle": 0, + "content": "of Identity vector \\( m \\). This 3D Regularization Loss utilizes spatial consistency in 3D, ensuring that the Identity vectors \\( m \\) of the top \\( k \\)-nearest 3D Gaussians are similar in feature space. Specifically, we employ a symmetrical and bounded loss based on the Jensen-Shannon Divergence," + }, + { + "type": "equation", + "bbox": [ + 0.101, + 0.165, + 0.916, + 0.209 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {J S D}} = \\frac {1}{2 Y Z} \\sum_ {y = 1} ^ {Y} \\sum_ {z = 1} ^ {Z} \\left[ S \\left(m _ {y}\\right) \\log \\left(\\frac {2 S \\left(m _ {y}\\right)}{S \\left(m _ {y}\\right) + S \\left(m _ {z} ^ {\\prime}\\right)}\\right) + S \\left(m _ {z} ^ {\\prime}\\right) \\log \\left(\\frac {2 S \\left(m _ {z} ^ {\\prime}\\right)}{S \\left(m _ {y}\\right) + S \\left(m _ {z} ^ {\\prime}\\right)}\\right) \\right] \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.223, + 0.919, + 0.262 + ], + "angle": 0, + "content": "Here, \\( S \\) indicates the softmax function, and \\( m_z' \\) represents the \\( z^{th} \\) Identity vector from the \\( Z \\) nearest neighbors in 3D space." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.263, + 0.92, + 0.361 + ], + "angle": 0, + "content": "Relative Pose Initialization. 
Next, the relative camera pose is estimated for each new frame added to the training scheme. \\(\\mathcal{H}_i^*\\) is transformed via a learnable SE-3 affine transformation \\(\\mathcal{M}_i\\) to the subsequent frame \\(i + 1\\), where \\(\\mathcal{H}_{i + 1} = \\mathcal{M}_i\\odot \\mathcal{H}_i\\). Optimizing transformation \\(\\mathcal{M}_i\\) entails minimizing the photometric loss between the rendered image and the next frame \\(E_{i + 1}\\)," + }, + { + "type": "equation", + "bbox": [ + 0.286, + 0.376, + 0.914, + 0.401 + ], + "angle": 0, + "content": "\\[\n\\mathcal {M} _ {i} ^ {*} = \\arg \\min _ {\\mathcal {M} _ {i}} \\mathcal {L} _ {r g b} \\left(\\mathcal {R} \\left(\\mathcal {M} _ {i} \\odot \\mathcal {H} _ {i}\\right), E _ {i + 1}\\right), \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.415, + 0.92, + 0.533 + ], + "angle": 0, + "content": "In this optimization step, we keep the attributes of \\(\\mathcal{H}_i^*\\) fixed to distinguish camera motion from other Gaussian transformations such as pruning, densification, and self-rotation. Applying the above 3DGS initialization to sequential image pairs enables inferring relative poses across frames. However, accumulated pose errors could adversely affect the optimization of a global scene. To tackle this challenge, we propose the gradual, sequential expansion of the 3DGS." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.559, + 0.92, + 0.753 + ], + "angle": 0, + "content": "Gradual 3D Scene Expansion. As illustrated above, beginning with frame \\( E_{i} \\), we initiate with a collection of 3D Gaussian points, setting the camera pose to an orthogonal configuration. Then, we calculate the relative camera pose between frames \\( E_{i} \\) and \\( E_{i+1} \\). After estimating the relative camera poses, we propose to expand the 3DGS scene. This all-inclusive 3DGS optimization refines the collection of 3D Gaussian points, including all attributes, across \\( I \\) iterations, taking the calculated relative pose and the two observed frames as inputs. With the availability of the next frame \\( E_{i+2} \\) after \\( I \\) iterations, we repeat the above procedure: estimating the relative pose between \\( E_{i+1} \\) and \\( E_{i+2} \\), and expanding the scene with all-inclusive 3DGS." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.755, + 0.92, + 0.949 + ], + "angle": 0, + "content": "To perform all-inclusive 3DGS optimization, we increase the density of the Gaussians currently under reconstruction as new frames are introduced. Following [17], we identify candidates for densification by evaluating the average magnitude of position gradients in view-space. To focus densification on these yet-to-be-observed areas, we enhance the density of the universal 3DGS every \\(I\\) step, synchronized with the rate of new frame addition. We continue to expand the 3D Gaussian points until the conclusion of the input sequence. Through the repetitive application of both frame-relative pose estimation and all-inclusive scene expansion, 3D Gaussians evolve from an initial partial point cloud to a complete point cloud that encapsulates the entire scene over the sequence. In" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.08, + 0.017, + 0.106, + 0.032 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.16, + 0.016, + 0.315, + 0.032 + ], + "angle": 0, + "content": "U. Khalid et al." 
+ }, + { + "type": "text", + "bbox": [ + 0.079, + 0.056, + 0.919, + 0.094 + ], + "angle": 0, + "content": "our global optimization stage, we still utilize the \\(\\mathcal{L}_{KEA}\\) loss as new Gaussians are added during densification." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.096, + 0.92, + 0.347 + ], + "angle": 0, + "content": "Pyramidal Feature Scoring. While our 2D consistent editing approach, detailed in Section 3.2, addresses various editing discrepancies, to rectify any residual inconsistencies in 2D editing, we introduce a pyramidal feature scoring method tailored for Gaussians in Key Editing Areas (KEA) identified with an identity of 1. This method begins by capturing the attributes of all Gaussians marked with KEA identity equal to 1 during initialization, establishing them as anchor points. With each densification step, these anchors are updated to mirror the present attributes of the Gaussians. Throughout the training phase, an intrapoint cloud loss, \\(\\mathcal{L}_{ipc}\\) is utilized to compare the anchor state with the Gaussians' current state, maintaining that the Gaussians remain closely aligned with their initial anchors. \\(\\mathcal{L}_{ipc}\\) is defined as the weighted mean square error (MSE) between the anchor Gaussian and current Gaussian parameters with the older Gaussians getting higher weightage." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.348, + 0.92, + 0.426 + ], + "angle": 0, + "content": "Regularizing Estimated Pose. Further, to optimize the estimated relative pose between subsequent Gaussian set, we introduce point cloud loss, \\(\\mathcal{L}_{pc}\\) similar as in [3]. While we expand the scene, \\(\\mathcal{L}_{ipc}\\) limits the deviation of the Gaussian parameters while \\(\\mathcal{L}_{pc}\\) regularizes the all-inclusive pose estimation." + }, + { + "type": "equation", + "bbox": [ + 0.336, + 0.442, + 0.919, + 0.463 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {p c} = D _ {\\text {C h a m f e r}} \\left(\\mathcal {M} _ {i} ^ {*} \\mathcal {H} _ {i} ^ {*}, \\mathcal {H} _ {i + 1} ^ {*}\\right) \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.477, + 0.919, + 0.536 + ], + "angle": 0, + "content": "Given two Gaussians, \\( h_i \\) and \\( h_j \\), each characterized by multiple parameters encapsulated in their parameter vectors \\( \\pmb{\\theta}_i \\) and \\( \\pmb{\\theta}_j \\) respectively, the Chamfer distance \\( D_{\\mathrm{Chamfer}} \\) between \\( h_i \\) and \\( h_j \\) can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.2, + 0.552, + 0.919, + 0.595 + ], + "angle": 0, + "content": "\\[\nD _ {\\text {C h a m f e r}} \\left(h _ {i}, h _ {j}\\right) = \\sum_ {p \\in \\boldsymbol {\\theta} _ {i}} \\min _ {q \\in \\boldsymbol {\\theta} _ {j}} \\| p - q \\| ^ {2} + \\sum_ {q \\in \\boldsymbol {\\theta} _ {j}} \\min _ {p \\in \\boldsymbol {\\theta} _ {i}} \\| q - p \\| ^ {2} \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.602, + 0.92, + 0.7 + ], + "angle": 0, + "content": "This equation calculates the Chamfer distance by summing the squared Euclidean distances from each parameter in \\( h_i \\) to its closest counterpart in \\( h_j \\), and vice versa, thereby quantifying the similarity between the two Gaussians across all included parameters such as color, opacity, etc. 
Combining all the loss components results in the total loss function during scene expansion," + }, + { + "type": "equation", + "bbox": [ + 0.233, + 0.719, + 0.919, + 0.74 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {T} = \\lambda_ {r g b} \\mathcal {L} _ {r g b} + \\lambda_ {K E A} \\mathcal {L} _ {K E A} + \\lambda_ {i p c} \\mathcal {L} _ {i p c} + \\lambda_ {p c} \\mathcal {L} _ {p c} \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.748, + 0.919, + 0.785 + ], + "angle": 0, + "content": "where \\(\\lambda_{rgb}\\), \\(\\lambda_{KEA}\\), \\(\\lambda_{ipc}\\) and \\(\\lambda_{pc}\\) act as weighting factors for the respective loss terms." + }, + { + "type": "title", + "bbox": [ + 0.079, + 0.817, + 0.283, + 0.837 + ], + "angle": 0, + "content": "4 Evaluation" + }, + { + "type": "title", + "bbox": [ + 0.079, + 0.858, + 0.432, + 0.877 + ], + "angle": 0, + "content": "4.1 Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.89, + 0.92, + 0.949 + ], + "angle": 0, + "content": "In our approach, we employ PyTorch [33] for the development, specifically focusing on 3D Gaussian splatting. GPT-3.5 Turbo [5] is used to identify the key editing attributes that define the KEA. For segmentation purposes, SAM [20] is" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.536, + 0.015, + 0.842, + 0.036 + ], + "angle": 0, + "content": "3DEgo: 3D Editing on the Go!" + }, + { + "type": "page_number", + "bbox": [ + 0.894, + 0.017, + 0.917, + 0.03 + ], + "angle": 0, + "content": "11" + }, + { + "type": "image", + "bbox": [ + 0.147, + 0.068, + 0.852, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.318, + 0.922, + 0.41 + ], + "angle": 0, + "content": "Fig. 4: Qualitative comparison of our method with the IN2N [11] over two separate scenes. When the editing prompt requests \"Give the wheels Blue Color and Make the recyclebins brown,\" IN2N [11] inadvertently alters the entire van's color to blue as well, instead of just changing the tire color. It must be noted that IN2N [11] uses poses from COLMAP, while 3DEgo estimates poses while constructing the 3D scene." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.444, + 0.92, + 0.542 + ], + "angle": 0, + "content": "used to generate the masks based on the key editing attributes identifying the KEA. For zero-shot point tracking, we employ a point-tracker as proposed in [34]. The editing tasks are facilitated by the Instruct Pix2Pix [4] 2D diffusion model by incorporating the masks to limit the editing to the KEA. Additional details are in the supplementary material." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.569, + 0.414, + 0.587 + ], + "angle": 0, + "content": "4.2 Baseline and Datasets" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.6, + 0.918, + 0.637 + ], + "angle": 0, + "content": "We carry out experiments across a variety of public datasets as well as our prepared GS25 dataset." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.638, + 0.437, + 0.831 + ], + "angle": 0, + "content": "GS25 Dataset comprises 25 casually captured monocular videos using mobile phones for comprehensive 3D scene analysis. This approach ensures the dataset's utility in exploring and enhancing 360-degree real-world scene reconstruction technologies. 
To further assess the efficacy of the proposed 3D editing framework, we also" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.832, + 0.922, + 0.95 + ], + "angle": 0, + "content": "conducted comparisons across 5 public datasets: (i) IN2N [11], (ii) Mip-NeRF [2], (iii) NeRFstudio Dataset [42], (iv) Tanks & Temples [21] and (v) CO3D-V2 [36]. We specifically validate the robustness of our approach on the CO3D dataset, which comprises thousands of object-centric videos. In our study, we introduce a unique problem, making direct comparisons with prior research challenging. Nonetheless, to assess the robustness of our method, we contrast it with" + }, + { + "type": "table_caption", + "bbox": [ + 0.453, + 0.696, + 0.919, + 0.731 + ], + "angle": 0, + "content": "Table 1: Average runtime efficiency across 25 edits from the GS25 dataset (Approx. minutes)." + }, + { + "type": "table", + "bbox": [ + 0.46, + 0.733, + 0.92, + 0.784 + ], + "angle": 0, + "content": "
Method | COLMAP | Model Initialization | Scene Editing
Instruct-N2N [11] | 13 min | 22 min | 250 min
Ours | X | X | 25 min
" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.08, + 0.017, + 0.105, + 0.031 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.16, + 0.016, + 0.315, + 0.032 + ], + "angle": 0, + "content": "U. Khalid et al." + }, + { + "type": "image", + "bbox": [ + 0.147, + 0.057, + 0.379, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.219, + 0.216, + 0.31, + 0.228 + ], + "angle": 0, + "content": "Original 3DGS" + }, + { + "type": "image", + "bbox": [ + 0.38, + 0.057, + 0.613, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.437, + 0.215, + 0.562, + 0.227 + ], + "angle": 0, + "content": "Gaussian Grouping" + }, + { + "type": "image", + "bbox": [ + 0.615, + 0.057, + 0.848, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.715, + 0.215, + 0.75, + 0.226 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.243, + 0.921, + 0.314 + ], + "angle": 0, + "content": "Fig. 5: Our approach surpasses Gaussian Grouping [50] in 3D object elimination across different scenes from GS25 and Tanks & Temple datasets. 3DEgo is capable of eliminating substantial objects like statues from the entire scene while significantly minimizing artifacts and avoiding a blurred background." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.355, + 0.92, + 0.492 + ], + "angle": 0, + "content": "state-of-the-art (SOTA) 3D editing techniques that rely on poses derived from COLMAP. Additionally, we present quantitative evaluations alongside pose-free 3D reconstruction approaches, specifically NoPeNeRF [3], and BARF [25]. In the pose-free comparison, we substitute only our 3D scene reconstruction component with theirs while maintaining our original editing framework unchanged. We present a time-cost analysis in Table 1 that underscores the rapid text-conditioned 3D reconstruction capabilities of 3DEgo." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.524, + 0.42, + 0.543 + ], + "angle": 0, + "content": "4.3 Qualitative Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.56, + 0.922, + 0.696 + ], + "angle": 0, + "content": "As demonstrated in Figure 4, our method demonstrates exceptional prowess in local editing, enabling precise modifications within specific regions of a 3D scene without affecting the overall integrity. Our method also excels in multi-attribute editing, seamlessly combining changes across color, texture, and geometry within a single coherent edit. We also evaluate our method for the object removal task. The goal of 3D object removal is to eliminate an object from a 3D environment, potentially leaving behind voids due to the lack of observational" + }, + { + "type": "table_caption", + "bbox": [ + 0.075, + 0.731, + 0.92, + 0.802 + ], + "angle": 0, + "content": "Table 2: Comparing With Pose-known Methods. Quantitative evaluation of 200 edits across GS25, IN2N, Mip-NeRF, NeRFstudio, Tanks & Temples, and CO3D-V2 datasets against the methods that incorporate COLMAP poses. The top-performing results are emphasized in bold." + }, + { + "type": "table", + "bbox": [ + 0.085, + 0.818, + 0.915, + 0.945 + ], + "angle": 0, + "content": "
Datasets | DreamEditor | IN2N | Ours
 | CTIS↑ | CDCR↑ | E-PSNR↑ | CTIS↑ | CDCR↑ | E-PSNR↑ | CTIS↑ | CDCR↑ | E-PSNR↑
GS25 (Ours) | 0.155 | 0.886 | 22.750 | 0.142 | 0.892 | 23.130 | 0.169 | 0.925 | 23.660
Mip-NeRF | 0.149 | 0.896 | 23.920 | 0.164 | 0.917 | 22.170 | 0.175 | 0.901 | 24.250
NeRFstudio | 0.156 | 0.903 | 23.670 | 0.171 | 0.909 | 25.130 | 0.163 | 0.931 | 24.990
CO3D-V2 | 0.174 | 0.915 | 24.880 | 0.163 | 0.924 | 25.180 | 0.179 | 0.936 | 26.020
IN2N | 0.167 | 0.921 | 24.780 | 0.179 | 0.910 | 26.510 | 0.183 | 0.925 | 26.390
Tanks & Temples | 0.150 | 0.896 | 23.970 | 0.170 | 0.901 | 23.110 | 0.164 | 0.915 | 24.190
" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.536, + 0.015, + 0.842, + 0.035 + ], + "angle": 0, + "content": "3DEgo: 3D Editing on the Go!" + }, + { + "type": "page_number", + "bbox": [ + 0.894, + 0.017, + 0.92, + 0.032 + ], + "angle": 0, + "content": "13" + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.054, + 0.924, + 0.109 + ], + "angle": 0, + "content": "Table 3: Comparing With Pose-Unknown Methods. Quantitative analysis of 200 edits applied to six datasets, comparing methods proposed for NeRF reconstruction without known camera poses. The top-performing results are emphasized in bold." + }, + { + "type": "table", + "bbox": [ + 0.084, + 0.124, + 0.913, + 0.25 + ], + "angle": 0, + "content": "
Datasets | BARF [25] | Nope-NeRF [3] | Ours
 | CTIS↑ | CDCR↑ | E-PSNR↑ | CTIS↑ | CDCR↑ | E-PSNR↑ | CTIS↑ | CDCR↑ | E-PSNR↑
GS25 (Ours) | 0.139 | 0.797 | 20.478 | 0.128 | 0.753 | 19.660 | 0.169 | 0.925 | 23.660
Mip-NeRF | 0.134 | 0.806 | 21.332 | 0.147 | 0.820 | 18.799 | 0.175 | 0.901 | 24.250
NeRFstudio | 0.140 | 0.813 | 20.116 | 0.138 | 0.773 | 21.360 | 0.163 | 0.931 | 24.990
CO3D-V2 | 0.157 | 0.820 | 21.148 | 0.129 | 0.824 | 17.971 | 0.179 | 0.936 | 26.020
IN2N | 0.150 | 0.829 | 22.092 | 0.161 | 0.818 | 22.604 | 0.183 | 0.925 | 26.390
Tanks & Temples | 0.135 | 0.806 | 21.573 | 0.157 | 0.810 | 20.904 | 0.164 | 0.915 | 24.190
" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.289, + 0.924, + 0.425 + ], + "angle": 0, + "content": "data. For the object removal task, we identify and remove the regions based on the 2D mask, \\( M \\). Subsequently, we focus on inpainting these \"invisible regions\" in the original 2D frames using LAMA [41]. In Figure 5, we demonstrate our 3DEgo's effectiveness in object removal compared to Gaussian Grouping. Our method's reconstruction output notably surpasses that of Gaussian Grouping [50] in terms of retaining spatial accuracy and ensuring consistency across multiple views." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.455, + 0.44, + 0.475 + ], + "angle": 0, + "content": "4.4 Quantitative Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.488, + 0.479, + 0.76 + ], + "angle": 0, + "content": "In our quantitative analysis, we employ three key metrics: CLIP Text-Image Direction Similarity (CTIS) [9], CLIP Direction Consistency Score (CDCR) [11], and Edit PSNR (EPSNR). We perform 200 edits across the six datasets listed above. We present quantitative comparisons with COLMAP-based 3D editing techniques in Table 2. Additionally, we extend our evaluation by integrating pose-free 3D reconstruction methods into our pipeline, with the performance outcomes detailed in Table 3." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.791, + 0.271, + 0.812 + ], + "angle": 0, + "content": "5 Ablations" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.832, + 0.479, + 0.871 + ], + "angle": 0, + "content": "To assess the influence of different elements within our framework, we em" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.872, + 0.924, + 0.95 + ], + "angle": 0, + "content": "ploy PSNR, SSIM, and LPIPS metrics across several configurations. Given that images undergo editing before the training of a 3D model, our focus is on determining the effect of various losses on the model's rendering quality. The outcomes are documented in Table 4, showcasing IP2P+COLMAP as the baseline, where" + }, + { + "type": "image", + "bbox": [ + 0.533, + 0.534, + 0.873, + 0.673 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.493, + 0.686, + 0.922, + 0.777 + ], + "angle": 0, + "content": "Fig.6: Our method, 3D Ego achieves precise editing without using any SfM poses. To construct the IP2P+COLMAP 3D scene, we train nefacto [42] model on IP2P [4] edited frames." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.08, + 0.017, + 0.105, + 0.031 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.16, + 0.016, + 0.315, + 0.032 + ], + "angle": 0, + "content": "U. Khalid et al." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.056, + 0.921, + 0.094 + ], + "angle": 0, + "content": "images are edited using the standard IP2P approach [4] and COLMAP-derived poses are utilized for 3D scene construction." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.096, + 0.481, + 0.347 + ], + "angle": 0, + "content": "Although the \\(\\mathrm{IP2P + COLMAP}\\) setup demonstrates limited textual fidelity due to editing inconsistencies (see Figure 6), we are only interested in the rendering quality in this analysis to ascertain our approach's effectiveness. Table 4 illustrates the effects of different optimization hyperparameters on the global scene expansion. 
The findings reveal that excluding \\(\\mathcal{L}_{KEA}\\) in the scene expansion process minimally affects ren" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.347, + 0.508, + 0.387 + ], + "angle": 0, + "content": "dering quality. On the other hand, excluding \\(\\mathcal{L}_{ipc}\\) destabilizes densification, resulting in the inferior rendering quality reported in Table 4." + }, + { + "type": "table_caption", + "bbox": [ + 0.494, + 0.152, + 0.92, + 0.186 + ], + "angle": 0, + "content": "Table 4: Ablation study results on GS25 dataset." + }, + { + "type": "table", + "bbox": [ + 0.513, + 0.189, + 0.911, + 0.306 + ], + "angle": 0, + "content": "
Method | PSNR↑ | SSIM↑ | LPIPS↓
Ours | 27.86 | 0.90 | 0.18
IP2P+COLMAP | 23.87 | 0.79 | 0.23
Ours w/o L_KEA | 26.73 | 0.88 | 0.19
Ours w/o L_ipc | 22.46 | 0.78 | 0.24
Ours w/o L_pc | 25.18 | 0.84 | 0.20
" + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.415, + 0.282, + 0.436 + ], + "angle": 0, + "content": "6 Limitation" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.455, + 0.48, + 0.707 + ], + "angle": 0, + "content": "Our approach depends on the pretrained IP2P model [4], which has inherent limitations, especially evident in specific scenarios. For instance, Figure 7 shows the challenge with the prompt \"Make the car golden and give wheels blue color\". Unlike IN2N [11], which introduces unspecific color changes on the van's windows. Our method offers more targeted editing but falls short of generating ideal results due to IP2P's limitations in handling precise editing tas" + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.735, + 0.289, + 0.756 + ], + "angle": 0, + "content": "7 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.774, + 0.922, + 0.948 + ], + "angle": 0, + "content": "3DEgo marks a pivotal advancement in 3D scene reconstruction from monocular videos, eliminating the need for conventional pose estimation methods and model initialization. Our method integrates frame-by-frame editing with advanced consistency techniques to efficiently generate photorealistic 3D scenes directly from textual prompts. Demonstrated across multiple datasets, our approach showcases superior editing speed, precision, and flexibility. 3DEgo not only simplifies the 3D editing process but also broadens the scope for creative content generation from readily available video sources. This work lays the groundwork for future innovations in accessible and intuitive 3D content creation tools." + }, + { + "type": "image", + "bbox": [ + 0.497, + 0.498, + 0.705, + 0.547 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.557, + 0.547, + 0.638, + 0.555 + ], + "angle": 0, + "content": "Original 3D Model" + }, + { + "type": "image", + "bbox": [ + 0.706, + 0.499, + 0.919, + 0.546 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.716, + 0.547, + 0.915, + 0.556 + ], + "angle": 0, + "content": "\"Make the car golden and give wheels blue color\"" + }, + { + "type": "image_caption", + "bbox": [ + 0.493, + 0.572, + 0.92, + 0.643 + ], + "angle": 0, + "content": "Fig. 7: Due to the limitations of the IP2P model, our method inadvertently alters the colors of the van's windows, which is not the desired outcome." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.536, + 0.015, + 0.842, + 0.035 + ], + "angle": 0, + "content": "3DEgo: 3D Editing on the Go!" + }, + { + "type": "page_number", + "bbox": [ + 0.894, + 0.017, + 0.92, + 0.032 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.054, + 0.341, + 0.077 + ], + "angle": 0, + "content": "Acknowledgement" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.094, + 0.921, + 0.133 + ], + "angle": 0, + "content": "This work was partially supported by the NSF under Grant Numbers OAC-1910469 and OAC-2311245." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.163, + 0.238, + 0.184 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.202, + 0.92, + 0.274 + ], + "angle": 0, + "content": "1. Bao, C., Zhang, Y., Yang, B., Fan, T., Yang, Z., Bao, H., Zhang, G., Cui, Z.: Sine: Semantic-driven image-based nerf editing with prior-guided editing field. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
20919-20929 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.275, + 0.921, + 0.345 + ], + "angle": 0, + "content": "2. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mipnerf 360: Unbounded anti-aliased neural radiance fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5470-5479 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.346, + 0.921, + 0.399 + ], + "angle": 0, + "content": "3. Bian, W., Wang, Z., Li, K., Bian, J.W., Prisacariu, V.A.: Nope-nerf: Optimising neural radiance field with no pose prior. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4160-4169 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.4, + 0.921, + 0.451 + ], + "angle": 0, + "content": "4. Brooks, T., Holynski, A., Efros, A.A.: Instructpix2pix: Learning to follow image editing instructions. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 18392-18402 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.452, + 0.921, + 0.505 + ], + "angle": 0, + "content": "5. Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J.D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al.: Language models are few-shot learners. Advances in neural information processing systems 33, 1877-1901 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.505, + 0.921, + 0.558 + ], + "angle": 0, + "content": "6. Chiang, P.Z., Tsai, M.S., Tseng, H.Y., Lai, W.S., Chiu, W.C.: Stylizing 3d scene via implicit representation and hypernetwork. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 1475-1484 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.559, + 0.921, + 0.594 + ], + "angle": 0, + "content": "7. Dong, J., Wang, Y.X.: Vica-nerf: View-consistency-aware 3d editing of neural radiance fields. Advances in Neural Information Processing Systems 36 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.595, + 0.921, + 0.629 + ], + "angle": 0, + "content": "8. Fu, Y., Liu, S., Kulkarni, A., Kautz, J., Efros, A.A., Wang, X.: Colmap-free 3d gaussian splatting (2023), https://arxiv.org/abs/2312.07504" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.63, + 0.921, + 0.682 + ], + "angle": 0, + "content": "9. Gal, R., Patashnik, O., Maron, H., Chechik, G., Cohen-Or, D.: Stylegan-nada: Clipguided domain adaptation of image generators. arXiv preprint arXiv:2108.00946 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.683, + 0.92, + 0.718 + ], + "angle": 0, + "content": "10. Gao, W., Aigerman, N., Groueix, T., Kim, V.G., Hanocka, R.: Textdeformer: Geometry manipulation using text guidance. arXiv preprint arXiv:2304.13348 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.719, + 0.92, + 0.771 + ], + "angle": 0, + "content": "11. Haque, A., Tancik, M., Efros, A.A., Holynski, A., Kanazawa, A.: Instruct-nerf2nerf: Editing 3d scenes with instructions. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 19740-19750 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.772, + 0.92, + 0.824 + ], + "angle": 0, + "content": "12. Hertz, A., Mokady, R., Tenenbaum, J., Aberman, K., Pritch, Y., Cohen-Or, D.: Prompt-to-prompt image editing with cross attention control. 
arXiv preprint arXiv:2208.01626 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.825, + 0.92, + 0.877 + ], + "angle": 0, + "content": "13. Hong, F., Zhang, M., Pan, L., Cai, Z., Yang, L., Liu, Z.: Avatarclip: Zero-shot text-driven generation and animation of 3d avatars. ACM Transactions on Graphics (TOG) 41(4), 1-19 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.878, + 0.92, + 0.949 + ], + "angle": 0, + "content": "14. Huang, Y.H., He, Y., Yuan, Y.J., Lai, Y.K., Gao, L.: Stylizednerf: consistent 3d scene stylization as stylized nerf via 2d-3d mutual learning. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 18342-18352 (2022)" + }, + { + "type": "list", + "bbox": [ + 0.081, + 0.202, + 0.921, + 0.949 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.08, + 0.017, + 0.106, + 0.032 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.159, + 0.016, + 0.315, + 0.032 + ], + "angle": 0, + "content": "U. Khalid et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.057, + 0.918, + 0.093 + ], + "angle": 0, + "content": "15. Jeong, Y., Ahn, S., Choy, C., Anandkumar, A., Cho, M., Park, J.: Self-calibrating neural radiance fields. In: ICCV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.093, + 0.918, + 0.129 + ], + "angle": 0, + "content": "16. Karim, N., Khalid, U., Iqbal, H., Hua, J., Chen, C.: Free-editor: Zero-shot text-driven 3d scene editing. arXiv preprint arXiv:2312.13663 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.129, + 0.918, + 0.18 + ], + "angle": 0, + "content": "17. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (ToG) 42(4), 1-14 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.18, + 0.918, + 0.215 + ], + "angle": 0, + "content": "18. Khalid, U., Iqbal, H., Karim, N., Hua, J., Chen, C.: Latentedirector: Text driven local editing of 3d scenes. arXiv preprint arXiv:2312.09313 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.215, + 0.918, + 0.285 + ], + "angle": 0, + "content": "19. Kim, S., Lee, K., Choi, J.S., Jeong, J., Sohn, K., Shin, J.: Collaborative score distillation for consistent visual editing. In: Thirty-seventh Conference on Neural Information Processing Systems (2023), https://openreview.net/forum?id=0tEjORCGFD" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.285, + 0.918, + 0.338 + ], + "angle": 0, + "content": "20. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., et al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.338, + 0.918, + 0.373 + ], + "angle": 0, + "content": "21. Knapitsch, A., Park, J., Zhou, Q.Y., Koltun, V.: Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Transactions on Graphics (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.373, + 0.918, + 0.407 + ], + "angle": 0, + "content": "22. Kobayashi, S., Matsumoto, E., Sitzmann, V.: Decomposing nerf for editing via feature field distillation. arXiv preprint arXiv:2205.15585 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.407, + 0.918, + 0.46 + ], + "angle": 0, + "content": "23. Li, Y., Lin, Z.H., Forsyth, D., Huang, J.B., Wang, S.: Climatenerf: Physically-based neural rendering for extreme climate synthesis. 
arXiv e-prints pp. arXiv-2211 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.46, + 0.918, + 0.513 + ], + "angle": 0, + "content": "24. Li, Y., Dou, Y., Shi, Y., Lei, Y., Chen, X., Zhang, Y., Zhou, P., Ni, B.: Focaldreamer: Text-driven 3d editing via focal-fusion assembly. arXiv preprint arXiv:2308.10608 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.513, + 0.918, + 0.546 + ], + "angle": 0, + "content": "25. Lin, C.H., Ma, W.C., Torralba, A., Lucey, S.: Barf: Bundle-adjusting neural radiance fields. In: ICCV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.546, + 0.918, + 0.582 + ], + "angle": 0, + "content": "26. Liu, H.K., Shen, I., Chen, B.Y., et al.: Nerf-in: Free-form nerf inpainting with rgb-d priors. arXiv preprint arXiv:2206.04901 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.582, + 0.918, + 0.653 + ], + "angle": 0, + "content": "27. Long, X., Guo, Y.C., Lin, C., Liu, Y., Dou, Z., Liu, L., Ma, Y., Zhang, S.H., Habermann, M., Theobalt, C., et al.: Wonder3d: Single image to 3d using cross-domain diffusion. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9970-9980 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.653, + 0.918, + 0.687 + ], + "angle": 0, + "content": "28. Michel, O., Bar-On, R., Liu, R., et al.: Text2mesh: Text-driven neural stylization for meshes. In: CVPR 2022. pp. 13492-13502 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.687, + 0.918, + 0.722 + ], + "angle": 0, + "content": "29. Nguyen-Phuoc, T., Liu, F., Xiao, L.: Snerf: stylized neural implicit representations for 3d scenes. arXiv preprint arXiv:2207.02363 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.722, + 0.918, + 0.774 + ], + "angle": 0, + "content": "30. Nichol, A., Dhariwal, P., Ramesh, A., Shyam, P., Mishkin, P., McGrew, B., Sutskever, I., Chen, M.: Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.774, + 0.918, + 0.809 + ], + "angle": 0, + "content": "31. Noguchi, A., Sun, X., Lin, S., Harada, T.: Neural articulated radiance field. In: ICCV 2021. pp. 5762-5772 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.809, + 0.918, + 0.844 + ], + "angle": 0, + "content": "32. Park, H.S., Jun, C.H.: A simple and fast algorithm for k-medoids clustering. Expert systems with applications 36(2), 3336-3341 (2009)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.844, + 0.918, + 0.914 + ], + "angle": 0, + "content": "33. Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., et al.: Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems 32 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.914, + 0.918, + 0.949 + ], + "angle": 0, + "content": "34. Rajic, F., Ke, L., Tai, Y.W., Tang, C.K., Danelljan, M., Yu, F.: Segment anything meets point tracking. arXiv preprint arXiv:2307.01197 (2023)" + }, + { + "type": "list", + "bbox": [ + 0.08, + 0.057, + 0.918, + 0.949 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.536, + 0.015, + 0.842, + 0.035 + ], + "angle": 0, + "content": "3DEgo: 3D Editing on the Go!" 
+ }, + { + "type": "page_number", + "bbox": [ + 0.894, + 0.017, + 0.92, + 0.032 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.057, + 0.92, + 0.111 + ], + "angle": 0, + "content": "35. Ramesh, A., Dhariwal, P., Nichol, A., Chu, C., Chen, M.: Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.112, + 0.92, + 0.183 + ], + "angle": 0, + "content": "36. Reizenstein, J., Shapovalov, R., Henzler, P., Sbordone, L., Labatut, P., Novotny, D.: Common objects in 3d: Large-scale learning and evaluation of real-life 3d category reconstruction. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10901-10911 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.183, + 0.92, + 0.236 + ], + "angle": 0, + "content": "37. Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: CVPR 2022. pp. 10684-10695 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.236, + 0.92, + 0.307 + ], + "angle": 0, + "content": "38. Ruiz, N., Li, Y., Jampani, V., Pritch, Y., Rubinstein, M., Aberman, K.: Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 22500-22510 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.308, + 0.918, + 0.343 + ], + "angle": 0, + "content": "39. Sahara, C., Chan, W., Saxena, S.e.a.: Photorealistic text-to-image diffusion models with deep language understanding. NeurIPS 2022 35, 36479-36494 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.343, + 0.918, + 0.361 + ], + "angle": 0, + "content": "40. Schonberger, J.L., Frahm, J.M.: Structure-from-motion revisited. In: CVPR (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.361, + 0.92, + 0.432 + ], + "angle": 0, + "content": "41. Suvorov, R., Logacheva, E., Mashikhin, A., Remizova, A., Ashukha, A., Silvestrov, A., Kong, N., Goka, H., Park, K., Lempitsky, V.: Resolution-robust large mask inpainting with fourier convolutions. In: Proceedings of the IEEE/CVF winter conference on applications of computer vision. pp. 2149-2159 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.432, + 0.92, + 0.503 + ], + "angle": 0, + "content": "42. Tancik, M., Weber, E., Ng, E., Li, R., Yi, B., Wang, T., Kristoffersen, A., Austin, J., Salahi, K., Ahuja, A., et al.: Nerfstudio: A modular framework for neural radiance field development. In: ACM SIGGRAPH 2023 Conference Proceedings. pp. 1-12 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.504, + 0.92, + 0.557 + ], + "angle": 0, + "content": "43. Tschernezki, V., Laina, I., Larlus, D., Vedaldi, A.: Neural feature fusion fields: 3d distillation of self-supervised 2d image representations. In: 2022 International Conference on 3D Vision (3DV). pp. 443-453. IEEE (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.557, + 0.918, + 0.592 + ], + "angle": 0, + "content": "44. Wang, C., Chai, M., He, M., et al.: Clip-nerf: Text-and-image driven manipulation of neural radiance fields. In: CVPR 2022. pp. 3835-3844 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.592, + 0.92, + 0.646 + ], + "angle": 0, + "content": "45. Wang, C., Jiang, R., Chai, M., He, M., Chen, D., Liao, J.: Nerf-art: Text-driven neural radiance fields stylization. 
IEEE Transactions on Visualization and Computer Graphics (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.646, + 0.92, + 0.699 + ], + "angle": 0, + "content": "46. Weng, H., Yang, T., Wang, J., Li, Y., Zhang, T., Chen, C., Zhang, L.: Consistent123: Improve consistency for one image to 3d object synthesis. arXiv preprint arXiv:2310.08092 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.7, + 0.92, + 0.735 + ], + "angle": 0, + "content": "47. Wu, Q., Tan, J., Xu, K.: Palettenerf: Palette-based color editing for nerfs. arXiv preprint arXiv:2212.12871 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.735, + 0.92, + 0.789 + ], + "angle": 0, + "content": "48. Xu, T., Harada, T.: Deforming radiance fields with cages. In: Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXIII. pp. 159-175. Springer (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.789, + 0.92, + 0.86 + ], + "angle": 0, + "content": "49. Yang, B., Bao, C., Zeng, J., Bao, H., Zhang, Y., Cui, Z., Zhang, G.: Neumesh: Learning disentangled neural mesh-based implicit field for geometry and texture editing. In: European Conference on Computer Vision. pp. 597-614. Springer (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.86, + 0.918, + 0.896 + ], + "angle": 0, + "content": "50. Ye, M., Danelljan, M., Yu, F., Ke, L.: Gaussian grouping: Segment and edit anything in 3d scenes. arXiv preprint arXiv:2312.00732 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.896, + 0.92, + 0.95 + ], + "angle": 0, + "content": "51. Zhang, K., Kolkin, N., Bi, S., Luan, F., Xu, Z., Shechtman, E., Snavely, N.: Arf: Artistic radiance fields. In: European Conference on Computer Vision. pp. 717-733. Springer (2022)" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.057, + 0.92, + 0.95 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.08, + 0.017, + 0.105, + 0.031 + ], + "angle": 0, + "content": "18" + }, + { + "type": "header", + "bbox": [ + 0.16, + 0.016, + 0.315, + 0.032 + ], + "angle": 0, + "content": "U. Khalid et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.057, + 0.922, + 0.112 + ], + "angle": 0, + "content": "52. Zhuang, J., Wang, C., Lin, L., Liu, L., Li, G.: Dreameditor: Text-driven 3d scene editing with neural fields. In: SIGGRAPH Asia 2023 Conference Papers. pp. 1-10 (2023)" + } + ] +] \ No newline at end of file diff --git a/2024/3DEgo_ 3D Editing on the Go!/58ba5dca-f01b-44f3-bd07-614e4a4b113d_origin.pdf b/2024/3DEgo_ 3D Editing on the Go!/58ba5dca-f01b-44f3-bd07-614e4a4b113d_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d03a4fef821054ddcb43f525d6b646cef30b7b06 --- /dev/null +++ b/2024/3DEgo_ 3D Editing on the Go!/58ba5dca-f01b-44f3-bd07-614e4a4b113d_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af0b838119f7e8303ebf7be2368360d0d7898ff3b12757834462f3f89b902bd3 +size 3247202 diff --git a/2024/3DEgo_ 3D Editing on the Go!/full.md b/2024/3DEgo_ 3D Editing on the Go!/full.md new file mode 100644 index 0000000000000000000000000000000000000000..41521d63282ef33a5ae9cb1b61c326abf39a8f73 --- /dev/null +++ b/2024/3DEgo_ 3D Editing on the Go!/full.md @@ -0,0 +1,332 @@ +# 3DEgo: 3D Editing on the Go! 
+ +Umar Khalid $^{1,*}$ , Hasan Iqbal $^{2,*}$ , Azib Farooq $^{3}$ , Jing Hua $^{2}$ , and Chen Chen $^{1}$ + +$^{1}$ University of Central Florida, Orlando, FL, USA + +$^{2}$ Department of Computer Science, Wayne State University, Detroit, MI, USA +$^{3}$ Department of Computer Science and Software Engineering, Miami University, Oxford, OH, USA + +Abstract. We introduce 3DEgo to address a novel problem of directly synthesizing photorealistic 3D scenes from monocular videos guided by textual prompts. Conventional methods construct a text-conditioned 3D scene through a three-stage process, involving pose estimation using Structure-from-Motion (SfM) libraries like COLMAP, initializing the 3D model with unedited images, and iteratively updating the dataset with edited images to achieve a 3D scene with text fidelity. Our framework streamlines the conventional multi-stage 3D editing process into a single-stage workflow by overcoming the reliance on COLMAP and eliminating the cost of model initialization. We apply a diffusion model to edit video frames prior to 3D scene creation by incorporating our designed noise blender module for enhancing multi-view editing consistency, a step that does not require additional training or fine-tuning of T2I diffusion models. 3DEgo utilizes 3D Gaussian Splatting to create 3D scenes from the multi-view consistent edited frames, capitalizing on the inherent temporal continuity and explicit point cloud data. 3DEgo demonstrates remarkable editing precision, speed, and adaptability across a variety of video sources, as validated by extensive evaluations on six datasets, including our own prepared GS25 dataset. Project Page: https://3dego.github.io/ + +Keywords: Gaussian Splatting $\cdot$ 3D Editing $\cdot$ Cross-View Consistency + +# 1 Introduction + +In the pursuit of constructing photo-realistic 3D scenes from monocular video sources, it is a common practice to use the Structure-from-Motion (SfM) library COLMAP [40] for camera pose estimation. This step is critical for aligning frames extracted from the video, thereby facilitating the subsequent process of 3D scene reconstruction. To further edit these constructed 3D scenes, a meticulous process of frame-by-frame editing based on textual prompts is often employed [52]. Recent works, such as IN2N [11], estimate poses from frames using SfM [40] to initially train an unedited 3D scene. Upon initializing a 3D model, the training dataset is iteratively updated by adding edited images at a consistent rate of editing. This process of iterative dataset update demands significant computational resources and time. Due to challenges with initial edit consistency, IN2N [11] training necessitates the continuous addition of edited images to the dataset over a significantly large number of iterations. This issue stems from the inherent limitations present in Text-to-Image (T2I) diffusion models [4, 37], where achieving prompt-consistent edits across multiple images—especially those capturing the same scene—proves to be a formidable task [7, 19]. Such inconsistencies significantly undermine the effectiveness of 3D scene modifications, particularly when these altered frames are leveraged to generate unique views. + +In this work, we address a novel problem: efficiently reconstructing 3D scenes that are aligned with an editing textual prompt directly from monocular videos, without using COLMAP [40]. Specifically, we apply a diffusion model [4] to edit every frame of a given monocular video before creating a 3D scene.
To address the challenge of consistent editing across all the frames, we introduce a novel noise blender module, which ensures that each newly edited view is conditioned upon its adjacent, previously edited views. This is achieved by calculating a weighted average of image-conditional noise estimations such that closer frames exert greater influence on the editing outcome. Our editing strategy utilizes the IP2P [4] 2D editing diffusion model, which effectively employs both conditional and unconditional noise prediction. Consequently, our method achieves multi-view consistency without the necessity for extra training or fine-tuning, unlike prior approaches [7,27,46]. For 3D scene synthesis based on the edited views, our framework utilizes the Gaussian Splatting (GS) [17] technique, capitalizing on the temporal continuity of video data and the explicit representation of point clouds. Although originally designed to work with pre-computed camera poses, 3D Gaussian Splatting presents us with the possibility to synthesize views and construct edited 3D scenes from monocular videos without the need for SfM pre-processing, overcoming one of NeRF's significant limitations [25]. + +Our method grows the 3D Gaussians of the scene continuously from the edited frames as the camera moves, eliminating the need for pre-computed camera poses and for 3D model initialization on the original unedited frames; instead, we identify an affine transformation that maps the 3D Gaussians from frame $i$ to accurately render the pixels in frame $i + 1$. Hence, our method 3DEgo condenses a three-stage 3D editing process into a single-stage, unified, and efficient framework, as shown in Figure 1. + +![](images/edadeb4abbe79bc0073ff9e608a0767dd4aa50121b0dcd79be63c1ddeb6aee09.jpg) +Fig. 1: Our method, 3DEgo, streamlines the 3D editing process by merging a three-stage workflow into a singular, comprehensive framework. This efficiency is achieved by bypassing the need for COLMAP [40] for pose initialization and avoiding the initialization of the model with unedited images, unlike other existing approaches [7,11,19]. + +![](images/469efc091e8d0632b4ee1c1082241badb787621bb72045fb697b819647255300.jpg) +Fig. 2: 3DEgo offers rapid, accurate, and adaptable 3D editing, bypassing the need for original 3D scene initialization and COLMAP poses. This ensures compatibility with videos from any source, including casual smartphone captures like the Van 360-degree scene. The above results identify three cases challenging for IN2N [11], where our method can convert a monocular video into customized 3D scenes using a streamlined, single-stage reconstruction process. + +Our contributions are as follows: + +- We tackle the novel challenge of directly transforming monocular videos into 3D scenes guided by editing text prompts, circumventing conventional 3D editing pipelines. +- We introduce a unique auto-regressive editing technique that enhances multi-view consistency across edited views, seamlessly integrating with pre-trained diffusion models without the need for additional fine-tuning. +- We propose a COLMAP-free method using 3D Gaussian splatting for reconstructing 3D scenes from casually captured videos. This technique leverages the video's continuous time sequence for pose estimation and scene development, bypassing traditional SfM dependencies. +- We present an advanced technique for converting 2D masks into 3D space, enhancing editing accuracy through Pyramidal Gaussian Scoring (PGS), ensuring more stable and detailed refinement.
+- Through extensive evaluations on six datasets—including our custom GS25 and others like IN2N, Mip-NeRF, NeRFstudio Dataset, Tanks & Temples, and CO3D-V2—we demonstrate our method's enhanced editing precision and efficiency, particularly with 360-degree and casually recorded videos, as illustrated in Fig. 2. + +# 2 Related Work + +A growing body of research is exploring diffusion models for text-driven image editing, introducing techniques that allow for precise modifications based on user-provided instructions [30,35,37,39]. While some approaches require explicit before-and-after captions [12] or specialized training [38], making them less accessible to non-experts, IP2P [4] simplifies the process by enabling direct textual edits on images, making advanced editing tools more widely accessible. + +Recently, diffusion models have also been employed for 3D editing, focusing on altering the geometry and appearance of 3D scenes [1,4,10,13,16,18,22-24, 26,28,31,43,44,48,49]. + +Traditional NeRF representations, however, pose significant challenges for precise editing due to their implicit nature, leading to difficulties in localizing edits within a scene. Earlier efforts have mainly achieved global transformations [6, 14, 29, 45, 47, 51], with object-centric editing remaining a challenge. IN2N [11] introduced user-friendly text-based editing, though it might affect the entire scene. Recent studies [7, 19, 52] have attempted to tackle local editing and multi-view consistency challenges within the IN2N framework [11]. Yet, no existing approaches in the literature offer pose-free capabilities, nor can they create a text-conditioned 3D scene from arbitrary video footage. Nevertheless, existing 3D editing methods [11, 52] universally necessitate Structure-from-Motion (SfM) preprocessing. Recent studies like Nope-NeRF [3], BARF [25], and SC-NeRF [15] have introduced methodologies for pose optimization and calibration concurrent with the training of (unedited) NeRF. + +In this study, we present a novel method for constructing 3D scenes directly from textual prompts, utilizing monocular video frames without dependence on COLMAP poses [40], thus addressing unique challenges. Given the complexities NeRF's implicit nature introduces to simultaneous 3D reconstruction and camera registration, our approach leverages the advanced capabilities of 3D Gaussian Splatting (3DGS) [17] alongside a pre-trained 2D editing diffusion model for efficient 3D model creation. + +# 3 Method + +Given a sequence of unposed images alongside camera intrinsics, we aim to recover the camera poses in sync with the edited frames and reconstruct a photorealistic 3D scene conditioned on the textual prompt. + +# 3.1 Preliminaries + +In the domain of 3D scene modeling, 3D Gaussian splatting [17] emerges as a notable method. The method's strength lies in its succinct Gaussian representation coupled with an effective differential rendering technique, facilitating real-time, high-fidelity visualization. This approach models a 3D environment + +using a collection of point-based 3D Gaussians, denoted as $\mathcal{H}$ where each Gaussian $h = \{\mu, \Sigma, c, \alpha\}$ . Here, $\mu \in \mathbb{R}^3$ specifies the Gaussian's center location, $\Sigma \in \mathbb{R}^{3 \times 3}$ is the covariance matrix capturing the Gaussian's shape, $c \in \mathbb{R}^3$ is the color vector in RGB format represented in the three degrees of spherical harmonics (SH) coefficients, and $\alpha \in \mathbb{R}$ denotes the Gaussian's opacity level. 
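For concreteness, the following is a minimal PyTorch sketch of the per-Gaussian parameter set $h = \{\mu, \Sigma, c, \alpha\}$ and of the front-to-back blending used by the rendering equation introduced next (Eq. 1). The names `GaussianCloud` and `composite_pixel` are illustrative choices, not the 3DGS [17] API; the covariance is kept in the usual factored scale-rotation form so it stays positive semi-definite during optimization.

```python
import torch


def quat_to_rotmat(q: torch.Tensor) -> torch.Tensor:
    """Convert unit quaternions (n, 4), ordered (w, x, y, z), to rotation matrices."""
    w, x, y, z = q.unbind(-1)
    return torch.stack([
        1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y),
        2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x),
        2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y),
    ], dim=-1).reshape(*q.shape[:-1], 3, 3)


class GaussianCloud:
    """Illustrative container for the per-Gaussian parameters h = {mu, Sigma, c, alpha}."""

    def __init__(self, n: int, sh_degree: int = 3):
        self.mu = torch.zeros(n, 3, requires_grad=True)            # centers mu
        self.log_scale = torch.zeros(n, 3, requires_grad=True)     # S = diag(exp(.))
        rot = torch.zeros(n, 4); rot[:, 0] = 1.0                   # identity quaternion
        self.rot = rot.requires_grad_(True)
        self.sh = torch.zeros(n, (sh_degree + 1) ** 2, 3,          # color c as degree-3
                              requires_grad=True)                  # SH coefficients
        self.alpha_logit = torch.zeros(n, requires_grad=True)      # opacity via sigmoid

    def covariance(self) -> torch.Tensor:
        # Sigma = R S S^T R^T remains positive semi-definite during optimization.
        R = quat_to_rotmat(torch.nn.functional.normalize(self.rot, dim=-1))
        M = R @ torch.diag_embed(self.log_scale.exp())
        return M @ M.transpose(1, 2)                               # (n, 3, 3)


def composite_pixel(colors: torch.Tensor, taus: torch.Tensor,
                    alphas: torch.Tensor) -> torch.Tensor:
    """Per-pixel blending C = sum_p c_p * tau_p * prod_{k<p} (1 - alpha_k) (Eq. 1),
    with the contributing Gaussians ordered front to back along the depth axis."""
    ones = torch.ones_like(alphas[:1])
    transmittance = torch.cumprod(torch.cat([ones, 1.0 - alphas[:-1]]), dim=0)
    return (colors * (taus * transmittance).unsqueeze(-1)).sum(dim=0)
```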
To optimize the parameters of the 3D Gaussians to represent the scene, we need to render them into images in a differentiable manner. The rendering is achieved by approximating the projection of a 3D Gaussian along the depth dimension into pixel coordinates, expressed as: + +$$ C = \sum_{p \in \mathcal{P}} c_p \tau_p \prod_{k = 1}^{p - 1} (1 - \alpha_k), \tag{1} $$ + +where $\mathcal{P}$ is the ordered set of points overlapping the pixel, and $\tau_{p} = \alpha_{p}e^{-\frac{1}{2}(x_{p})^{T}\Sigma^{-1}(x_{p})}$ quantifies the Gaussian's contribution to a specific image pixel, with $x_{p}$ measuring the distance from the pixel to the center of the $p$-th Gaussian. In the original 3DGS, initial Gaussian parameters are refined to fit the scene, guided by ground-truth poses obtained using SfM. Through differentiable rendering, the Gaussians' parameters, including position $\mu$, shape $\Sigma$, color $c$, and opacity $\alpha$, are adjusted using a photometric loss function. + +# 3.2 Multi-View Consistent 2D Editing + +In the first step, we perform 2D editing with key editing areas (KEA) based on the user-provided video $V$ and editing prompt $\mathcal{T}$. + +From the given video $V$, we extract frames $\{f_1, f_2, \ldots, f_N\}$. Analyzing the textual prompt $\mathcal{T}$ with a Large Language Model $\mathcal{L}$ identifies the key editing attributes $\{A_1, A_2, \ldots, A_k\}$ essential for editing, expressed as $\mathcal{L}(\mathcal{T}) \to \{A_1, A_2, \ldots, A_k\}$. Utilizing these attributes, a segmentation model $\mathcal{S}$ delineates the editing regions in each frame $f_i$ by generating a mask $M_i$ with KEA marked as 1 and all other pixels as 0. The segmentation operation is defined as $\mathcal{S}(f_i, \{A_1, A_2, \ldots, A_k\}) \to M_i$, $\forall i \in \{1, \ldots, N\}$. Subsequently, a 2D diffusion model $\mathcal{E}$ selectively edits these regions in $f_i$, as defined by $M_i$, producing edited frames $\{E_1, E_2, \ldots, E_N\}$ under guidance from $\mathcal{T}$, such that $\mathcal{E}(f_i, M_i) \to E_i$. + +![](images/74d84f2ece68fed6d31ab66173b672131851507fd70639dd1cb54b4f868e5623.jpg) +Fig. 3: Autoregressive Editing. At each denoising step, the model predicts $w + 1$ separate noises, which are then unified via the weighted noise blender (Eq. 4) to predict $\varepsilon_{\theta}(e_t,f,\mathcal{T},W)$. + +Consistent Multi-View 2D Editing. As discussed above, differing from IN2N [11], which incorporates edited images gradually over several training iterations, our approach edits the entire dataset at once before training starts. We desire that 1) each edited frame $E_{i}$ follows the editing prompt $\mathcal{T}$, 2) the edits retain the original images' semantic content, and 3) the edited images $\{E_1,E_2,\dots ,E_N\}$ are consistent with one another. + +(i) Multi-view Consistent Mask. As $\mathcal{S}$ does not guarantee consistent masks across the views of a casually recorded monocular video, we utilize a zero-shot point tracker [34] to ensure uniform mask generation across the views. The procedure starts by identifying query points in the initial video frame using the ground-truth mask. Query points are extracted from these ground-truth masks employing the K-Medoids [32] sampling method, which utilizes the cluster centers from K-Medoids clustering as query points. This approach guarantees comprehensive coverage of the object's various sections and enhances resilience to noise and outliers.
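As an illustration of this query-point selection, the sketch below extracts K-Medoids centers from a first-frame binary mask. Because a medoid is always an actual mask pixel, the selected points are guaranteed to lie on the object, which is what provides the coverage and outlier robustness noted above. This is a naive alternating K-Medoids written for clarity (the cited algorithm [32] is a faster variant), and `mask_query_points` is a hypothetical helper name.

```python
import numpy as np


def mask_query_points(mask: np.ndarray, k: int = 8, iters: int = 20, seed: int = 0):
    """Pick k query points inside a binary mask as K-Medoids cluster centers.

    mask: (H, W) array of {0, 1}; assumes at least k foreground pixels.
    Returns a (k, 2) integer array of (row, col) pixel coordinates.
    """
    rng = np.random.default_rng(seed)
    pts = np.argwhere(mask > 0).astype(np.float64)      # (P, 2) foreground pixels
    medoids = pts[rng.choice(len(pts), size=k, replace=False)]
    for _ in range(iters):
        d = np.linalg.norm(pts[:, None, :] - medoids[None, :, :], axis=-1)
        assign = d.argmin(axis=1)                       # nearest medoid per pixel
        for j in range(k):
            cluster = pts[assign == j]
            if len(cluster) == 0:
                continue
            # Medoid = cluster member minimizing total distance to its cluster.
            # O(|cluster|^2); fine for a sketch, subsample for very large masks.
            dc = np.linalg.norm(cluster[:, None, :] - cluster[None, :, :], axis=-1)
            medoids[j] = cluster[dc.sum(axis=1).argmin()]
    return medoids.astype(int)
```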
+ +(ii) Autoregressive Editing. To address the issue of preserving consistency across multiple views, we employ an autoregressive method that edits frames in sequence, with IP2P [4] editing restricted to the Key Editing Areas (KEA) delineated by the corresponding masks. Instead of editing each frame independently from just the input images, a process that can vary significantly between adjacent images, we integrate an autoregressive editing technique in which the frame to be edited is conditioned on already-edited adjacent frames. + +As discussed above, we incorporate IP2P [4] as the 2D editing diffusion model. The standard noise prediction from IP2P's backbone, which includes both conditional and unconditional editing, is given as, + +$$ \tilde{\varepsilon}_{\theta}(e_t, f, \mathcal{T}) = \varepsilon_{\theta}(e_t, \varnothing_f, \varnothing_{\mathcal{T}}) + s_f\left(\varepsilon_{\theta}(e_t, f, \varnothing_{\mathcal{T}}) - \varepsilon_{\theta}(e_t, \varnothing_f, \varnothing_{\mathcal{T}})\right) + s_{\mathcal{T}}\left(\varepsilon_{\theta}(e_t, f, \mathcal{T}) - \varepsilon_{\theta}(e_t, f, \varnothing_{\mathcal{T}})\right) \tag{2} $$ + +where $s_f$ and $s_{\mathcal{T}}$ are the image and textual prompt guidance scales. We enhance this noise estimation process with our autoregressive editing framework. Consider a set of $w$ views, represented as $W = \{E_n\}_{n=1}^w$. Our goal is to model the distribution of the $i$-th view image by utilizing its $w$ adjacent, already-edited views. To achieve this, we calculate the image-conditional noise estimation $\varepsilon_{\theta}(e_t, E, \varnothing_{\mathcal{T}})$ across all frames in $W$. The weighted average $\bar{\varepsilon}_{\theta}$ of the noise estimates from all edited frames within $W$, employing $\beta$ as the weight for each frame, is computed as follows: + +$$ \bar{\varepsilon}_{\theta}(e_t, \varnothing_{\mathcal{T}}, W) = \sum_{n = 1}^{w} \beta_n \varepsilon_{\theta}^{n}(e_t, E_n, \varnothing_{\mathcal{T}}) \tag{3} $$ + +Here, $E_{n}$ represents the $n$-th edited frame within $W$, and $\beta_{n}$ is the weight assigned to the $n$-th frame's noise estimate. The constraint that the $\beta$ values over the $w$ frames sum to 1, $\sum_{n=1}^{w} \beta_{n} = 1$, ensures that the weighted average is normalized. As we perform 2D editing without any pose priors, our weight parameter $\beta$ is independent of the angle offset between the frame to be edited, $f_{n}$, and the already-edited frames in $W$. To assign weights with exponential decay, ensuring that the closest frame receives the highest weight, we use an exponential decay function for the weight $\beta_{n}$ of the $n$-th frame in $W$. With a decay factor $\lambda_{d}$ ($0 < \lambda_{d} < 1$), the weight of each frame decreases exponentially as its distance from the target frame increases: $\beta_{n} = \lambda_{d}^{w - n}$, so the edited frame closest to the target frame $f$ receives the highest weight. To make the weights sum to 1, each weight is normalized by the sum of all weights, $\beta_{n} = \frac{\lambda_{d}^{w - n}}{\sum_{j = 1}^{w}\lambda_{d}^{w - j}}$.
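A minimal sketch of this weighted noise blending (Eq. 3), assuming the $w$ image-conditional noise predictions are stacked with the frame nearest the target last; `blend_adjacent_noise` is a hypothetical helper and the default `lambda_d` is only an example value:

```python
import torch


def blend_adjacent_noise(noise_preds: torch.Tensor, lambda_d: float = 0.8) -> torch.Tensor:
    """Weighted average of image-conditional noise estimates (Eq. 3).

    noise_preds: (w, C, H, W) predictions conditioned on the w previously
    edited frames in W, ordered so that index w-1 is the frame nearest the
    target. Weights decay exponentially with distance and are normalized.
    """
    w = noise_preds.shape[0]
    # beta_n proportional to lambda_d**(w - n): nearest frame -> largest weight.
    beta = lambda_d ** torch.arange(w - 1, -1, -1, dtype=noise_preds.dtype)
    beta = beta / beta.sum()                      # enforce sum(beta) == 1
    return (beta.view(-1, 1, 1, 1) * noise_preds).sum(dim=0)
```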
This normalization guarantees that the sum of $\beta_{n}$ across all $n$ equals 1, adhering to the constraint $\sum_{n = 1}^{w}\beta_{n} = 1$. + +Our editing path is determined by the sequence of frames from the captured video. Therefore, during the editing of frame $f_{n}$, we incorporate the previous $w$ edited frames into the set $W$, assigning the highest weight $\beta$ to $E_{n - 1}$. Using Eq. 2 and Eq. 3, we define our score estimation function as follows: + +$$ \varepsilon_{\theta}(e_t, f, \mathcal{T}, W) = \gamma_f \tilde{\varepsilon}_{\theta}(e_t, f, \mathcal{T}) + \gamma_E \bar{\varepsilon}_{\theta}(e_t, \varnothing_{\mathcal{T}}, W) \tag{4} $$ + +where $\gamma_{f}$ is a hyperparameter that determines the influence of the original frame undergoing editing on the noise estimation, and $\gamma_{E}$ represents the significance of the noise estimation from adjacent edited views. + +# 3.3 3D Scene Reconstruction + +After multi-view consistent 2D editing is achieved across all frames of the given video $V$, we leverage the edited frames $E_{i}$ and their corresponding masks $M_{i}$ to construct a 3D scene without any SfM pose initialization. Due to the explicit nature of 3DGS [17], determining the camera poses is essentially equivalent to estimating the transformation of a collection of 3D Gaussian points. Next, we begin by introducing an extra Gaussian parameter for precise local editing. Subsequently, we explore relative pose estimation through incremental frame inclusion. Lastly, we examine the scene expansion, alongside a discussion of the losses integrated into our global optimization strategy. + +3D Gaussians Parameterization for Precise Editing. Projecting KEA (see Section 3.2) into the 3D Gaussians $\mathcal{H}$, using $M$ for KEA identity assignment, is essential for accurate editing. Therefore, we introduce a vector $m$ associated with each Gaussian point, $h = \{\mu, \Sigma, c, \alpha, m\}$, in the 3D Gaussian set $\mathcal{H}_i$ of the $i$-th frame. The parameter $m$ is a learnable vector of length 2, corresponding to the number of labels in the segmentation map $M$. We optimize the newly introduced parameter $m$ to represent the KEA identity during training. However, unlike the view-dependent Gaussian parameters, the KEA identity remains uniform across different rendering views. The Gaussian KEA identity ensures the continuous monitoring of each Gaussian's categorization as they evolve, thereby enabling the selective application of gradients and the exclusive rendering of targeted objects, markedly enhancing processing efficiency in intricate scenes. + +Next, we delve into the training pipeline, inspired by [3,8], which consists of two stages: (i) Relative Pose Estimation, and (ii) Global 3D Scene Expansion. + +Per Frame View Initialization. To begin the training process, we randomly pick a specific frame, denoted as $E_{i}$. We then employ a pre-trained monocular depth estimator, symbolized by $\mathcal{D}$, to derive the depth map $D_{i}$ for $E_{i}$. Utilizing $D_{i}$, which provides strong geometric cues independent of camera parameters, we initialize the 3DGS with points extracted from monocular depth through the camera intrinsics and an orthogonal projection. This initialization step involves learning a set of 3D Gaussians $\mathcal{H}_i$ that minimizes the photometric discrepancy between the rendered frame and the current frame $E_{i}$.
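The sketch below shows one plausible reading of this depth-based seeding: a standard pinhole back-projection with the first camera fixed at an identity pose, producing the initial Gaussian centers $\mu$. `init_points_from_depth` is a hypothetical helper, not the authors' code, and the paper's exact use of an orthogonal projection may differ:

```python
import torch


def init_points_from_depth(depth: torch.Tensor, K: torch.Tensor) -> torch.Tensor:
    """Back-project a monocular depth map into an initial 3D point set.

    depth: (H, W) predicted depth for frame E_i; K: (3, 3) camera intrinsics.
    The first camera is placed at the origin (identity pose), so points are
    expressed directly in its frame. Returns (H*W, 3) candidate centers.
    """
    H, W = depth.shape
    v, u = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
    pix = torch.stack([u, v, torch.ones_like(u)], dim=-1).reshape(-1, 3).float()
    rays = pix @ torch.linalg.inv(K).T          # pixel -> normalized camera rays
    return rays * depth.reshape(-1, 1)          # scale each ray by its depth
```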
The photometric loss $\mathcal{L}_{rgb}$ optimizes the conventional 3D Gaussian parameters, including color $c$, covariance $\Sigma$, mean $\mu$, and opacity $\alpha$. However, merely relying on $\mathcal{L}_{rgb}$ is insufficient to initialize the KEA identity and adjust $m$ for the 3D Gaussians. Hence, we propose the KEA loss, denoted $\mathcal{L}_{KEA}$, which incorporates the 2D mask $M_{i}$ corresponding to $E_{i}$; the KEA identity of each Gaussian point is learned during training by applying $\mathcal{L}_{KEA}$. Overall, the 3D Gaussian optimization is defined as, + +$$ \mathcal{H}_{i}^{*} = \arg\min_{c, \Sigma, \mu, \alpha} \mathcal{L}_{rgb}(\mathcal{R}(\mathcal{H}_{i}), E_{i}) + \arg\min_{m} \mathcal{L}_{KEA}(\mathcal{R}(\mathcal{H}_{i}), M_{i}), \tag{5} $$ + +where $\mathcal{R}$ signifies the 3DGS rendering function. The photometric loss $\mathcal{L}_{rgb}$, as introduced in [17], is a blend of $\mathcal{L}_1$ and D-SSIM losses: + +$$ \mathcal{L}_{rgb} = (1 - \gamma)\mathcal{L}_{1} + \gamma\mathcal{L}_{\mathrm{D\text{-}SSIM}}, \tag{6} $$ + +$\mathcal{L}_{KEA}$ has two components, (i) a 2D binary cross-entropy (BCE) loss and (ii) a 3D Jensen-Shannon divergence (JSD) loss, and is defined as, + +$$ \mathcal{L}_{KEA} = \lambda_{BCE}\mathcal{L}_{BCE} + \lambda_{JSD}\mathcal{L}_{JSD} \tag{7} $$ + +Let $\mathcal{N}$ be the total number of pixels in the mask $M$, and let $\mathcal{X}$ represent the set of all pixels. We calculate the binary cross-entropy loss $\mathcal{L}_{BCE}$ as follows, + +$$ \mathcal{L}_{BCE} = -\frac{1}{\mathcal{N}} \sum_{x \in \mathcal{X}} \left[ M_{i}(x) \log\left(\mathcal{R}(\mathcal{H}_{i}, m)(x)\right) + \left(1 - M_{i}(x)\right) \log\left(1 - \mathcal{R}(\mathcal{H}_{i}, m)(x)\right) \right] \tag{8} $$ + +where $M(x)$ is the value of the ground truth mask at pixel $x$, indicating whether the pixel belongs to the foreground (1) or the background (0). The sum computes the total loss over all pixels, and the division by $\mathcal{N}$ normalizes the loss, making it independent of the image size. A rendering operation, denoted $\mathcal{R}(\mathcal{H}_i, m)(x)$, produces $m_{\mathcal{R}}$ for a given pixel $x$, which represents the weighted sum of the vector $m$ values of the overlapping Gaussians associated with that pixel. Here, $m$ and $m_{\mathcal{R}}$ both have a dimensionality of 2, intentionally kept the same as the number of classes in the mask labels. We apply the softmax function on $m_{\mathcal{R}}$ to extract the KEA identity, given as $\text{KEA Identity} = \mathrm{softmax}(m_{\mathcal{R}})$. The softmax output is interpreted as either 0, indicating a position outside the KEA, or 1, denoting a location within the KEA.
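A hedged sketch of the 2D term of $\mathcal{L}_{KEA}$ (Eq. 8), assuming the identity vectors $m$ are alpha-blended into a per-pixel map $m_{\mathcal{R}}$ with the same compositing as color; `kea_bce_loss` is a hypothetical name, and channel 1 of the softmax is read as the in-KEA probability, matching the interpretation above:

```python
import torch
import torch.nn.functional as F


def kea_bce_loss(m_rendered: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    """2D BCE term of L_KEA (Eq. 8) on the rendered identity map.

    m_rendered: (H, W, 2) per-pixel alpha-blended identity vectors m_R;
    mask: (H, W) ground-truth KEA mask with 1 inside the editing region.
    Mean reduction reproduces the 1/N normalization over all pixels.
    """
    p_inside = F.softmax(m_rendered, dim=-1)[..., 1].clamp(1e-6, 1 - 1e-6)
    return F.binary_cross_entropy(p_inside, mask.float())
```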
+ +To enhance the accuracy of the Gaussian KEA identity assignment, we also introduce an unsupervised 3D regularization loss that directly influences the learning of the identity vector $m$. This loss exploits spatial consistency in 3D, ensuring that the identity vectors $m$ of the top $k$-nearest 3D Gaussians are similar in feature space. Specifically, we employ a symmetrical and bounded loss based on the Jensen-Shannon Divergence, + +$$ \mathcal{L}_{\mathrm{JSD}} = \frac{1}{2YZ} \sum_{y = 1}^{Y} \sum_{z = 1}^{Z} \left[ S(m_{y}) \log\left(\frac{2S(m_{y})}{S(m_{y}) + S(m_{z}')}\right) + S(m_{z}') \log\left(\frac{2S(m_{z}')}{S(m_{y}) + S(m_{z}')}\right) \right] \tag{9} $$ + +Here, $S$ indicates the softmax function, and $m_z'$ represents the $z$-th identity vector from the $Z$ nearest neighbors in 3D space. + +Relative Pose Initialization. Next, the relative camera pose is estimated for each new frame added to the training scheme. $\mathcal{H}_i^*$ is transformed via a learnable SE(3) affine transformation $\mathcal{M}_i$ to the subsequent frame $i + 1$, where $\mathcal{H}_{i + 1} = \mathcal{M}_i \odot \mathcal{H}_i$. Optimizing the transformation $\mathcal{M}_i$ entails minimizing the photometric loss between the rendered image and the next frame $E_{i + 1}$, + +$$ \mathcal{M}_{i}^{*} = \arg\min_{\mathcal{M}_{i}} \mathcal{L}_{rgb}\left(\mathcal{R}(\mathcal{M}_{i} \odot \mathcal{H}_{i}), E_{i + 1}\right), \tag{10} $$ + +In this optimization step, we keep the attributes of $\mathcal{H}_i^*$ fixed to distinguish camera motion from other Gaussian transformations such as pruning, densification, and self-rotation; a toy version of this step is sketched below. Applying the above 3DGS initialization to sequential image pairs enables inferring relative poses across frames. However, accumulated pose errors could adversely affect the optimization of a global scene. To tackle this challenge, we propose the gradual, sequential expansion of the 3DGS. + +Gradual 3D Scene Expansion. As illustrated above, beginning with frame $E_{i}$, we initiate with a collection of 3D Gaussian points, setting the camera pose to an orthogonal configuration. Then, we calculate the relative camera pose between frames $E_{i}$ and $E_{i+1}$. After estimating the relative camera poses, we expand the 3DGS scene. This all-inclusive 3DGS optimization refines the collection of 3D Gaussian points, including all attributes, across $I$ iterations, taking the calculated relative pose and the two observed frames as inputs. With the availability of the next frame $E_{i+2}$ after $I$ iterations, we repeat the above procedure: estimating the relative pose between $E_{i+1}$ and $E_{i+2}$, and expanding the scene with all-inclusive 3DGS. + +To perform all-inclusive 3DGS optimization, we increase the density of the Gaussians currently under reconstruction as new frames are introduced. Following [17], we identify candidates for densification by evaluating the average magnitude of position gradients in view-space. To focus densification on newly observed, not-yet-reconstructed areas, we enhance the density of the universal 3DGS every $I$ steps, synchronized with the rate of new frame addition. We continue to expand the 3D Gaussian points until the conclusion of the input sequence. Through the repetitive application of both frame-relative pose estimation and all-inclusive scene expansion, the 3D Gaussians evolve from an initial partial point cloud to a complete point cloud that encapsulates the entire scene over the sequence. In our global optimization stage, we still utilize the $\mathcal{L}_{KEA}$ loss as new Gaussians are added during densification.
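The toy relative-pose sketch (Eq. 10): all Gaussian attributes are frozen and only a learnable SE(3) transform is fit by gradient descent. The `render_fn` and `gaussians.mu` interfaces are assumptions, the axis-angle parameterization is one convenient choice, and the photometric objective is simplified to an L1 term instead of the full $\mathcal{L}_{rgb}$ blend of Eq. 6:

```python
import torch


def estimate_relative_pose(render_fn, gaussians, next_frame,
                           steps: int = 200, lr: float = 1e-3):
    """Fit a learnable SE(3) transform M_i so that the transformed Gaussian
    centers re-render the next frame; render_fn(mu) -> (H, W, 3) image."""
    omega = torch.zeros(3, requires_grad=True)     # axis-angle rotation
    t = torch.zeros(3, requires_grad=True)         # translation
    opt = torch.optim.Adam([omega, t], lr=lr)
    for _ in range(steps):
        opt.zero_grad()
        # Rodrigues' formula: R = I + sin(theta) K + (1 - cos(theta)) K^2
        theta = omega.norm() + 1e-8
        kx, ky, kz = (omega / theta).unbind(0)
        zero = torch.zeros((), dtype=omega.dtype)
        K = torch.stack([
            torch.stack([zero, -kz, ky]),
            torch.stack([kz, zero, -kx]),
            torch.stack([-ky, kx, zero]),
        ])
        R = torch.eye(3) + torch.sin(theta) * K + (1 - torch.cos(theta)) * (K @ K)
        # Detach the centers: only the pose moves, all attributes stay frozen.
        mu_new = gaussians.mu.detach() @ R.T + t
        loss = (render_fn(mu_new) - next_frame).abs().mean()   # photometric L1
        loss.backward()
        opt.step()
    return omega.detach(), t.detach()
```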
+ +Pyramidal Feature Scoring. Our multi-view consistent 2D editing approach, detailed in Section 3.2, resolves most editing discrepancies; to rectify any residual inconsistencies, we introduce a pyramidal feature scoring method tailored to Gaussians in Key Editing Areas (KEA), i.e., those whose identity equals 1. This method begins by capturing the attributes of all Gaussians marked with KEA identity equal to 1 during initialization, establishing them as anchor points. With each densification step, these anchors are updated to mirror the present attributes of the Gaussians. Throughout the training phase, an intra-point cloud loss $\mathcal{L}_{ipc}$ is utilized to compare the anchor state with the Gaussians' current state, ensuring that the Gaussians remain closely aligned with their initial anchors. $\mathcal{L}_{ipc}$ is defined as the weighted mean squared error (MSE) between the anchor and current Gaussian parameters, with older Gaussians receiving higher weights. + +Regularizing Estimated Pose. Further, to optimize the estimated relative pose between subsequent Gaussian sets, we introduce a point cloud loss $\mathcal{L}_{pc}$, similar to [3]. While we expand the scene, $\mathcal{L}_{ipc}$ limits the deviation of the Gaussian parameters, while $\mathcal{L}_{pc}$ regularizes the all-inclusive pose estimation. + +$$ \mathcal{L}_{pc} = D_{\text{Chamfer}}\left(\mathcal{M}_{i}^{*}\mathcal{H}_{i}^{*}, \mathcal{H}_{i + 1}^{*}\right) \tag{11} $$ + +Given two Gaussians $h_i$ and $h_j$, each characterized by multiple parameters encapsulated in their parameter vectors $\boldsymbol{\theta}_i$ and $\boldsymbol{\theta}_j$ respectively, the Chamfer distance $D_{\mathrm{Chamfer}}$ between $h_i$ and $h_j$ can be formulated as: + +$$ D_{\text{Chamfer}}(h_{i}, h_{j}) = \sum_{p \in \boldsymbol{\theta}_{i}} \min_{q \in \boldsymbol{\theta}_{j}} \| p - q \|^{2} + \sum_{q \in \boldsymbol{\theta}_{j}} \min_{p \in \boldsymbol{\theta}_{i}} \| q - p \|^{2} \tag{12} $$ + +This equation calculates the Chamfer distance by summing the squared Euclidean distances from each parameter in $h_i$ to its closest counterpart in $h_j$, and vice versa, thereby quantifying the similarity between the two Gaussians across all included parameters such as color, opacity, etc. Combining all the loss components results in the total loss function during scene expansion, + +$$ \mathcal{L}_{T} = \lambda_{rgb}\mathcal{L}_{rgb} + \lambda_{KEA}\mathcal{L}_{KEA} + \lambda_{ipc}\mathcal{L}_{ipc} + \lambda_{pc}\mathcal{L}_{pc} \tag{13} $$ + +where $\lambda_{rgb}$, $\lambda_{KEA}$, $\lambda_{ipc}$, and $\lambda_{pc}$ act as weighting factors for the respective loss terms.
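For reference, a compact sketch of the Chamfer regularizer (Eq. 12) over stacked parameter vectors and of the total objective (Eq. 13); the $\lambda$ weights here are placeholders, as the paper does not state the values used:

```python
import torch


def chamfer_distance(theta_i: torch.Tensor, theta_j: torch.Tensor) -> torch.Tensor:
    """Symmetric Chamfer distance of Eq. (12) between two parameter sets.

    theta_i: (P, d), theta_j: (Q, d) -- one row per Gaussian, with position,
    color, opacity, etc. concatenated into a d-dimensional vector.
    """
    d2 = torch.cdist(theta_i, theta_j).pow(2)            # (P, Q) squared distances
    return d2.min(dim=1).values.sum() + d2.min(dim=0).values.sum()


def total_loss(l_rgb, l_kea, l_ipc, l_pc, lam=(1.0, 0.1, 0.1, 0.1)):
    """Eq. (13); the lambda values are illustrative, not the paper's settings."""
    return lam[0] * l_rgb + lam[1] * l_kea + lam[2] * l_ipc + lam[3] * l_pc
```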
+ +# 4 Evaluation + +# 4.1 Implementation Details + +In our approach, we employ PyTorch [33] for the development, specifically focusing on 3D Gaussian splatting. GPT-3.5 Turbo [5] is used to identify the key editing attributes that define the KEA. For segmentation purposes, SAM [20] is used to generate the masks based on the key editing attributes identifying the KEA. For zero-shot point tracking, we employ a point tracker as proposed in [34]. The editing tasks are facilitated by the Instruct Pix2Pix [4] 2D diffusion model by incorporating the masks to limit the editing within the KEA. Additional details are in the supplementary material. + +![](images/0351ef6feaffe92abe2ec50b202730e2d32ee18fc22a8fc970fbb0d8a29b1e7b.jpg) +Fig. 4: Qualitative comparison of our method with IN2N [11] over two separate scenes. When the editing prompt requests "Give the wheels Blue Color and Make the recyclebins brown," IN2N [11] inadvertently alters the complete van color to blue as well, instead of just changing the tire color. It must be noted that IN2N [11] uses poses from COLMAP, while 3DEgo estimates poses while constructing the 3D scene. + +# 4.2 Baseline and Datasets + +We carry out experiments across a variety of public datasets as well as our prepared GS25 dataset. + +The GS25 dataset comprises 25 monocular videos casually captured with mobile phones for comprehensive 3D scene analysis. This ensures the dataset's utility in exploring and enhancing 360-degree real-world scene reconstruction technologies. To further assess the efficacy of the proposed 3D editing framework, we also conducted comparisons across 5 public datasets: (i) IN2N [11], (ii) Mip-NeRF [2], (iii) NeRFstudio Dataset [42], (iv) Tanks & Temples [21], and (v) CO3D-V2 [36]. We specifically validate the robustness of our approach on the CO3D dataset, which comprises thousands of object-centric videos. In our study, we introduce a unique problem, making direct comparisons with prior research challenging. Nonetheless, to assess the robustness of our method, we contrast it with + +Table 1: Average runtime efficiency across 25 edits from the GS25 dataset (approx. minutes). + +
| Method | COLMAP | Model Initialization | Scene Editing |
|---|---|---|---|
| Instruct-N2N [11] | 13 min | 22 min | 250 min |
| Ours | X | X | 25 min |
+ +![](images/925c3495878c636a1fbf26474f4b49f3010100b61042a60bafa656e3454642a8.jpg) +Original 3DGS + +![](images/62102cc1824d996f42819b4d55fa360d73db2b34f855d4b9a0d4cc6849991742.jpg) +Gaussian Grouping +Fig. 5: Our approach surpasses Gaussian Grouping [50] in 3D object elimination across different scenes from GS25 and Tanks & Temple datasets. 3DEgo is capable of eliminating substantial objects like statues from the entire scene while significantly minimizing artifacts and avoiding a blurred background. + +![](images/1b563c3ada12b3a9f0e10367c47480bca2fe84223fa16856a1b990cfeafe00c2.jpg) +Ours + +state-of-the-art (SOTA) 3D editing techniques that rely on poses derived from COLMAP. Additionally, we present quantitative evaluations alongside pose-free 3D reconstruction approaches, specifically NoPeNeRF [3], and BARF [25]. In the pose-free comparison, we substitute only our 3D scene reconstruction component with theirs while maintaining our original editing framework unchanged. We present a time-cost analysis in Table 1 that underscores the rapid text-conditioned 3D reconstruction capabilities of 3DEgo. + +# 4.3 Qualitative Evaluation + +As demonstrated in Figure 4, our method demonstrates exceptional prowess in local editing, enabling precise modifications within specific regions of a 3D scene without affecting the overall integrity. Our method also excels in multi-attribute editing, seamlessly combining changes across color, texture, and geometry within a single coherent edit. We also evaluate our method for the object removal task. The goal of 3D object removal is to eliminate an object from a 3D environment, potentially leaving behind voids due to the lack of observational + +Table 2: Comparing With Pose-known Methods. Quantitative evaluation of 200 edits across GS25, IN2N, Mip-NeRF, NeRFstudio, Tanks & Temples, and CO3D-V2 datasets against the methods that incorporate COLMAP poses. The top-performing results are emphasized in bold. + +
| Datasets | DreamEditor (CTIS↑ / CDCR↑ / E-PSNR↑) | IN2N (CTIS↑ / CDCR↑ / E-PSNR↑) | Ours (CTIS↑ / CDCR↑ / E-PSNR↑) |
|---|---|---|---|
| GS25 (Ours) | 0.155 / 0.886 / 22.750 | 0.142 / 0.892 / 23.130 | **0.169** / **0.925** / **23.660** |
| Mip-NeRF | 0.149 / 0.896 / 23.920 | 0.164 / **0.917** / 22.170 | **0.175** / 0.901 / **24.250** |
| NeRFstudio | 0.156 / 0.903 / 23.670 | **0.171** / 0.909 / **25.130** | 0.163 / **0.931** / 24.990 |
| CO3D-V2 | 0.174 / 0.915 / 24.880 | 0.163 / 0.924 / 25.180 | **0.179** / **0.936** / **26.020** |
| IN2N | 0.167 / 0.921 / 24.780 | 0.179 / 0.910 / **26.510** | **0.183** / **0.925** / 26.390 |
| Tanks & Temples | 0.150 / 0.896 / 23.970 | **0.170** / 0.901 / 23.110 | 0.164 / **0.915** / **24.190** |
+ +Table 3: Comparing With Pose-Unknown Methods. Quantitative analysis of 200 edits applied to six datasets, comparing methods proposed for NeRF reconstruction without known camera poses. The top-performing results are emphasized in bold. + +
| Datasets | BARF [25] (CTIS↑ / CDCR↑ / E-PSNR↑) | Nope-NeRF [3] (CTIS↑ / CDCR↑ / E-PSNR↑) | Ours (CTIS↑ / CDCR↑ / E-PSNR↑) |
|---|---|---|---|
| GS25 (Ours) | 0.139 / 0.797 / 20.478 | 0.128 / 0.753 / 19.660 | **0.169 / 0.925 / 23.660** |
| Mip-NeRF | 0.134 / 0.806 / 21.332 | 0.147 / 0.820 / 18.799 | **0.175 / 0.901 / 24.250** |
| NeRFstudio | 0.140 / 0.813 / 20.116 | 0.138 / 0.773 / 21.360 | **0.163 / 0.931 / 24.990** |
| CO3D-V2 | 0.157 / 0.820 / 21.148 | 0.129 / 0.824 / 17.971 | **0.179 / 0.936 / 26.020** |
| IN2N | 0.150 / 0.829 / 22.092 | 0.161 / 0.818 / 22.604 | **0.183 / 0.925 / 26.390** |
| Tanks & Temples | 0.135 / 0.806 / 21.573 | 0.157 / 0.810 / 20.904 | **0.164 / 0.915 / 24.190** |
+ +data. For the object removal task, we identify and remove the regions based on the 2D mask, $M$. Subsequently, we focus on inpainting these "invisible regions" in the original 2D frames using LAMA [41]. In Figure 5, we demonstrate 3DEgo's effectiveness in object removal compared to Gaussian Grouping. Our method's reconstruction output notably surpasses that of Gaussian Grouping [50] in terms of retaining spatial accuracy and ensuring consistency across multiple views. + +# 4.4 Quantitative Evaluation + +In our quantitative analysis, we employ three key metrics: CLIP Text-Image Direction Similarity (CTIS) [9], CLIP Direction Consistency Score (CDCR) [11], and Edit PSNR (E-PSNR). We perform 200 edits across the six datasets listed above. We present quantitative comparisons with COLMAP-based 3D editing techniques in Table 2. Additionally, we extend our evaluation by integrating pose-free 3D reconstruction methods into our pipeline, with the performance outcomes detailed in Table 3. + +# 5 Ablations + +To assess the influence of different elements within our framework, we employ PSNR, SSIM, and LPIPS metrics across several configurations. Given that images undergo editing before the training of a 3D model, our focus is on determining the effect of various losses on the model's rendering quality. The outcomes are documented in Table 4, showcasing IP2P+COLMAP as the baseline, where images are edited using the standard IP2P approach [4] and COLMAP-derived poses are utilized for 3D scene construction. + +![](images/79af30484e8ab9c49609aed1c8f35a64f4e7afcdbc1eacb37ba9f96cfa954664.jpg) +Fig. 6: Our method, 3DEgo, achieves precise editing without using any SfM poses. To construct the IP2P+COLMAP 3D scene, we train a nerfacto [42] model on IP2P [4] edited frames. + +Although the $\mathrm{IP2P + COLMAP}$ setup demonstrates limited textual fidelity due to editing inconsistencies (see Figure 6), we are only interested in the rendering quality in this analysis to ascertain our approach's effectiveness. Table 4 illustrates the effects of different optimization hyperparameters on the global scene expansion. The findings reveal that excluding $\mathcal{L}_{KEA}$ in the scene expansion process minimally affects rendering quality. On the other hand, excluding $\mathcal{L}_{ipc}$ allows the Gaussian parameters to drift during densification, resulting in the inferior rendering quality reported in Table 4. + +Table 4: Ablation study results on GS25 dataset. + +
| Method | PSNR↑ | SSIM↑ | LPIPS↓ |
|---|---|---|---|
| Ours | 27.86 | 0.90 | 0.18 |
| IP2P+COLMAP | 23.87 | 0.79 | 0.23 |
| Ours w/o $\mathcal{L}_{KEA}$ | 26.73 | 0.88 | 0.19 |
| Ours w/o $\mathcal{L}_{ipc}$ | 22.46 | 0.78 | 0.24 |
| Ours w/o $\mathcal{L}_{pc}$ | 25.18 | 0.84 | 0.20 |
+ +# 6 Limitation + +Our approach depends on the pretrained IP2P model [4], which has inherent limitations, especially evident in specific scenarios. For instance, Figure 7 shows the challenge with the prompt "Make the car golden and give wheels blue color". Unlike IN2N [11], which introduces unspecific color changes on the van's windows, our method offers more targeted editing but still falls short of generating ideal results due to IP2P's limitations in handling precise editing tasks. + +# 7 Conclusion + +3DEgo marks a pivotal advancement in 3D scene reconstruction from monocular videos, eliminating the need for conventional pose estimation methods and model initialization. Our method integrates frame-by-frame editing with advanced consistency techniques to efficiently generate photorealistic 3D scenes directly from textual prompts. Demonstrated across multiple datasets, our approach showcases superior editing speed, precision, and flexibility. 3DEgo not only simplifies the 3D editing process but also broadens the scope for creative content generation from readily available video sources. This work lays the groundwork for future innovations in accessible and intuitive 3D content creation tools. + +![](images/3f096e8aea33f2dc404290312cea3cb32bce9fe044e4fe46daac12b6c3cfcdfc.jpg) +Original 3D Model +Fig. 7: Due to the limitations of the IP2P model, our method inadvertently alters the colors of the van's windows, which is not the desired outcome. + +![](images/f5aefd096304432b44d46e7031178743d1ed19d68f9ed2aa66c3a3e7fc7a8f7b.jpg) +"Make the car golden and give wheels blue color" + +# Acknowledgement + +This work was partially supported by the NSF under Grant Numbers OAC-1910469 and OAC-2311245. + +# References + +1. Bao, C., Zhang, Y., Yang, B., Fan, T., Yang, Z., Bao, H., Zhang, G., Cui, Z.: Sine: Semantic-driven image-based nerf editing with prior-guided editing field. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 20919-20929 (2023) +2. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mipnerf 360: Unbounded anti-aliased neural radiance fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5470-5479 (2022) +3. Bian, W., Wang, Z., Li, K., Bian, J.W., Prisacariu, V.A.: Nope-nerf: Optimising neural radiance field with no pose prior. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4160-4169 (2023) +4. Brooks, T., Holynski, A., Efros, A.A.: Instructpix2pix: Learning to follow image editing instructions. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 18392-18402 (2023) +5. Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J.D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al.: Language models are few-shot learners. Advances in neural information processing systems 33, 1877-1901 (2020) +6. Chiang, P.Z., Tsai, M.S., Tseng, H.Y., Lai, W.S., Chiu, W.C.: Stylizing 3d scene via implicit representation and hypernetwork. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 1475-1484 (2022) +7. Dong, J., Wang, Y.X.: Vica-nerf: View-consistency-aware 3d editing of neural radiance fields. Advances in Neural Information Processing Systems 36 (2024) +8. Fu, Y., Liu, S., Kulkarni, A., Kautz, J., Efros, A.A., Wang, X.: Colmap-free 3d gaussian splatting (2023), https://arxiv.org/abs/2312.07504 +9.
Gal, R., Patashnik, O., Maron, H., Chechik, G., Cohen-Or, D.: Stylegan-nada: Clip-guided domain adaptation of image generators. arXiv preprint arXiv:2108.00946 (2021) +10. Gao, W., Aigerman, N., Groueix, T., Kim, V.G., Hanocka, R.: Textdeformer: Geometry manipulation using text guidance. arXiv preprint arXiv:2304.13348 (2023) +11. Haque, A., Tancik, M., Efros, A.A., Holynski, A., Kanazawa, A.: Instruct-nerf2nerf: Editing 3d scenes with instructions. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 19740-19750 (2023) +12. Hertz, A., Mokady, R., Tenenbaum, J., Aberman, K., Pritch, Y., Cohen-Or, D.: Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626 (2022) +13. Hong, F., Zhang, M., Pan, L., Cai, Z., Yang, L., Liu, Z.: Avatarclip: Zero-shot text-driven generation and animation of 3d avatars. ACM Transactions on Graphics (TOG) 41(4), 1-19 (2022) +14. Huang, Y.H., He, Y., Yuan, Y.J., Lai, Y.K., Gao, L.: Stylizednerf: Consistent 3d scene stylization as stylized nerf via 2d-3d mutual learning. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 18342-18352 (2022) + +15. Jeong, Y., Ahn, S., Choy, C., Anandkumar, A., Cho, M., Park, J.: Self-calibrating neural radiance fields. In: ICCV (2021) +16. Karim, N., Khalid, U., Iqbal, H., Hua, J., Chen, C.: Free-editor: Zero-shot text-driven 3d scene editing. arXiv preprint arXiv:2312.13663 (2023) +17. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (TOG) 42(4), 1-14 (2023) +18. Khalid, U., Iqbal, H., Karim, N., Hua, J., Chen, C.: Latenteditor: Text driven local editing of 3d scenes. arXiv preprint arXiv:2312.09313 (2023) +19. Kim, S., Lee, K., Choi, J.S., Jeong, J., Sohn, K., Shin, J.: Collaborative score distillation for consistent visual editing. In: Thirty-seventh Conference on Neural Information Processing Systems (2023), https://openreview.net/forum?id=0tEjORCGFD +20. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., et al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023) +21. Knapitsch, A., Park, J., Zhou, Q.Y., Koltun, V.: Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Transactions on Graphics (2017) +22. Kobayashi, S., Matsumoto, E., Sitzmann, V.: Decomposing nerf for editing via feature field distillation. arXiv preprint arXiv:2205.15585 (2022) +23. Li, Y., Lin, Z.H., Forsyth, D., Huang, J.B., Wang, S.: Climatenerf: Physically-based neural rendering for extreme climate synthesis. arXiv e-prints pp. arXiv-2211 (2022) +24. Li, Y., Dou, Y., Shi, Y., Lei, Y., Chen, X., Zhang, Y., Zhou, P., Ni, B.: Focaldreamer: Text-driven 3d editing via focal-fusion assembly. arXiv preprint arXiv:2308.10608 (2023) +25. Lin, C.H., Ma, W.C., Torralba, A., Lucey, S.: Barf: Bundle-adjusting neural radiance fields. In: ICCV (2021) +26. Liu, H.K., Shen, I., Chen, B.Y., et al.: Nerf-in: Free-form nerf inpainting with rgb-d priors. arXiv preprint arXiv:2206.04901 (2022) +27. Long, X., Guo, Y.C., Lin, C., Liu, Y., Dou, Z., Liu, L., Ma, Y., Zhang, S.H., Habermann, M., Theobalt, C., et al.: Wonder3d: Single image to 3d using cross-domain diffusion. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9970-9980 (2024) +28. Michel, O., Bar-On, R., Liu, R., et al.: Text2mesh: Text-driven neural stylization for meshes. In: CVPR 2022.
pp. 13492-13502 (2022) +29. Nguyen-Phuoc, T., Liu, F., Xiao, L.: Snerf: Stylized neural implicit representations for 3d scenes. arXiv preprint arXiv:2207.02363 (2022) +30. Nichol, A., Dhariwal, P., Ramesh, A., Shyam, P., Mishkin, P., McGrew, B., Sutskever, I., Chen, M.: Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741 (2021) +31. Noguchi, A., Sun, X., Lin, S., Harada, T.: Neural articulated radiance field. In: ICCV 2021. pp. 5762-5772 (2021) +32. Park, H.S., Jun, C.H.: A simple and fast algorithm for k-medoids clustering. Expert Systems with Applications 36(2), 3336-3341 (2009) +33. Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., et al.: Pytorch: An imperative style, high-performance deep learning library. Advances in Neural Information Processing Systems 32 (2019) +34. Rajic, F., Ke, L., Tai, Y.W., Tang, C.K., Danelljan, M., Yu, F.: Segment anything meets point tracking. arXiv preprint arXiv:2307.01197 (2023) + +35. Ramesh, A., Dhariwal, P., Nichol, A., Chu, C., Chen, M.: Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125 (2022) +36. Reizenstein, J., Shapovalov, R., Henzler, P., Sbordone, L., Labatut, P., Novotny, D.: Common objects in 3d: Large-scale learning and evaluation of real-life 3d category reconstruction. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10901-10911 (2021) +37. Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: CVPR 2022. pp. 10684-10695 (2022) +38. Ruiz, N., Li, Y., Jampani, V., Pritch, Y., Rubinstein, M., Aberman, K.: Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 22500-22510 (2023) +39. Saharia, C., Chan, W., Saxena, S., et al.: Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems 35, 36479-36494 (2022) +40. Schonberger, J.L., Frahm, J.M.: Structure-from-motion revisited. In: CVPR (2016) +41. Suvorov, R., Logacheva, E., Mashikhin, A., Remizova, A., Ashukha, A., Silvestrov, A., Kong, N., Goka, H., Park, K., Lempitsky, V.: Resolution-robust large mask inpainting with fourier convolutions. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 2149-2159 (2022) +42. Tancik, M., Weber, E., Ng, E., Li, R., Yi, B., Wang, T., Kristoffersen, A., Austin, J., Salahi, K., Ahuja, A., et al.: Nerfstudio: A modular framework for neural radiance field development. In: ACM SIGGRAPH 2023 Conference Proceedings. pp. 1-12 (2023) +43. Tschernezki, V., Laina, I., Larlus, D., Vedaldi, A.: Neural feature fusion fields: 3d distillation of self-supervised 2d image representations. In: 2022 International Conference on 3D Vision (3DV). pp. 443-453. IEEE (2022) +44. Wang, C., Chai, M., He, M., et al.: Clip-nerf: Text-and-image driven manipulation of neural radiance fields. In: CVPR 2022. pp. 3835-3844 (2022) +45. Wang, C., Jiang, R., Chai, M., He, M., Chen, D., Liao, J.: Nerf-art: Text-driven neural radiance fields stylization. IEEE Transactions on Visualization and Computer Graphics (2023) +46. Weng, H., Yang, T., Wang, J., Li, Y., Zhang, T., Chen, C., Zhang, L.: Consistent123: Improve consistency for one image to 3d object synthesis. arXiv preprint arXiv:2310.08092 (2023) +47.
Wu, Q., Tan, J., Xu, K.: Palettenerf: Palette-based color editing for nerfs. arXiv preprint arXiv:2212.12871 (2022) +48. Xu, T., Harada, T.: Deforming radiance fields with cages. In: Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXIII. pp. 159-175. Springer (2022) +49. Yang, B., Bao, C., Zeng, J., Bao, H., Zhang, Y., Cui, Z., Zhang, G.: Neumesh: Learning disentangled neural mesh-based implicit field for geometry and texture editing. In: European Conference on Computer Vision. pp. 597-614. Springer (2022) +50. Ye, M., Danelljan, M., Yu, F., Ke, L.: Gaussian grouping: Segment and edit anything in 3d scenes. arXiv preprint arXiv:2312.00732 (2023) +51. Zhang, K., Kolkin, N., Bi, S., Luan, F., Xu, Z., Shechtman, E., Snavely, N.: Arf: Artistic radiance fields. In: European Conference on Computer Vision. pp. 717-733. Springer (2022) + +52. Zhuang, J., Wang, C., Lin, L., Liu, L., Li, G.: Dreameditor: Text-driven 3d scene editing with neural fields. In: SIGGRAPH Asia 2023 Conference Papers. pp. 1-10 (2023) \ No newline at end of file diff --git a/2024/3DEgo_ 3D Editing on the Go!/images.zip b/2024/3DEgo_ 3D Editing on the Go!/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..5288ff737558b948bf7a56edd7567e06d2a56c76 --- /dev/null +++ b/2024/3DEgo_ 3D Editing on the Go!/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04b43147010da193dd0af50e24d5349ab988219ed47f8e70229588e88f192f92 +size 480848 diff --git a/2024/3DEgo_ 3D Editing on the Go!/layout.json b/2024/3DEgo_ 3D Editing on the Go!/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..32fd3533752787ca695645f914a1a78399919e59 --- /dev/null +++ b/2024/3DEgo_ 3D Editing on the Go!/layout.json @@ -0,0 +1,11472 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 99, + 30, + 312, + 47 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 30, + 312, + 47 + ], + "spans": [ + { + "bbox": [ + 99, + 30, + 312, + 47 + ], + "type": "text", + "content": "3DEgo: 3D Editing on the Go!" 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 41, + 68, + 371, + 92 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 68, + 371, + 92 + ], + "spans": [ + { + "bbox": [ + 41, + 68, + 371, + 92 + ], + "type": "text", + "content": "Umar Khalid" + }, + { + "bbox": [ + 41, + 68, + 371, + 92 + ], + "type": "inline_equation", + "content": "^{1,*}" + }, + { + "bbox": [ + 41, + 68, + 371, + 92 + ], + "type": "text", + "content": ", Hasan Iqbal" + }, + { + "bbox": [ + 41, + 68, + 371, + 92 + ], + "type": "inline_equation", + "content": "^{2,*}" + }, + { + "bbox": [ + 41, + 68, + 371, + 92 + ], + "type": "text", + "content": ", Azib Farooq" + }, + { + "bbox": [ + 41, + 68, + 371, + 92 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 41, + 68, + 371, + 92 + ], + "type": "text", + "content": ", Jing Hua" + }, + { + "bbox": [ + 41, + 68, + 371, + 92 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 41, + 68, + 371, + 92 + ], + "type": "text", + "content": ", and Chen Chen" + }, + { + "bbox": [ + 41, + 68, + 371, + 92 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 101, + 102, + 310, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 101, + 102, + 310, + 113 + ], + "spans": [ + { + "bbox": [ + 101, + 102, + 310, + 113 + ], + "type": "text", + "content": "1 University of Central Florida, Orlando, FL, USA" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 41, + 113, + 369, + 145 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 42, + 113, + 366, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 113, + 366, + 124 + ], + "spans": [ + { + "bbox": [ + 42, + 113, + 366, + 124 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 42, + 113, + 366, + 124 + ], + "type": "text", + "content": " Department of Computer Science, Wayne State University, Detroit, MI, USA" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 41, + 124, + 369, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 124, + 369, + 145 + ], + "spans": [ + { + "bbox": [ + 41, + 124, + 369, + 145 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 41, + 124, + 369, + 145 + ], + "type": "text", + "content": " Department of Computer Science and Software Engineering, Miami University, Oxford, OH, USA" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 59, + 173, + 351, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 173, + 351, + 393 + ], + "spans": [ + { + "bbox": [ + 59, + 173, + 351, + 393 + ], + "type": "text", + "content": "Abstract. We introduce 3DEgo to address a novel problem of directly synthesizing photorealistic 3D scenes from monocular videos guided by textual prompts. Conventional methods construct a text-conditioned 3D scene through a three-stage process, involving pose estimation using Structure-from-Motion (SfM) libraries like COLMAP, initializing the 3D model with unedited images, and iteratively updating the dataset with edited images to achieve a 3D scene with text fidelity. Our framework streamlines the conventional multi-stage 3D editing process into a single-stage workflow by overcoming the reliance on COLMAP and eliminating the cost of model initialization. 
We apply a diffusion model to edit video frames prior to 3D scene creation by incorporating our designed noise blender module for enhancing multi-view editing consistency, a step that does not require additional training or fine-tuning of T2I diffusion models. 3DEgo utilizes 3D Gaussian Splatting to create 3D scenes from the multi-view consistent edited frames, capitalizing on the inherent temporal continuity and explicit point cloud data. 3DEgo demonstrates remarkable editing precision, speed, and adaptability across a variety of video sources, as validated by extensive evaluations on six datasets, including our own prepared GS25 dataset. Project Page: https://3dego.github.io/" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 59, + 403, + 351, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 403, + 351, + 414 + ], + "spans": [ + { + "bbox": [ + 59, + 403, + 351, + 414 + ], + "type": "text", + "content": "Keywords: Gaussian Splatting " + }, + { + "bbox": [ + 59, + 403, + 351, + 414 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 59, + 403, + 351, + 414 + ], + "type": "text", + "content": " 3D Editing " + }, + { + "bbox": [ + 59, + 403, + 351, + 414 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 59, + 403, + 351, + 414 + ], + "type": "text", + "content": " Cross-View Consistency" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 32, + 434, + 128, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 434, + 128, + 445 + ], + "spans": [ + { + "bbox": [ + 32, + 434, + 128, + 445 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 30, + 458, + 380, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 458, + 380, + 566 + ], + "spans": [ + { + "bbox": [ + 30, + 458, + 380, + 566 + ], + "type": "text", + "content": "In the pursuit of constructing photo-realistic 3D scenes from monocular video sources, it is a common practice to use the Structure-from-Motion (SfM) library, COLMAP [40] for camera pose estimation. This step is critical for aligning frames extracted from the video, thereby facilitating the subsequent process of 3D scene reconstruction. To further edit these constructed 3D scenes, a meticulous process of frame-by-frame editing based on textual prompts is often employed [52]. Recent works, such as IN2N [11], estimate poses from frames using SfM [40] to initially train an unedited 3D scene. Upon initializing a 3D model, the training dataset is iteratively updated by adding edited images at a consistent rate" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 34, + 571, + 123, + 583 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 34, + 571, + 123, + 583 + ], + "spans": [ + { + "bbox": [ + 34, + 571, + 123, + 583 + ], + "type": "text", + "content": "* Equal Contribution" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 30, + 34, + 380, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 34, + 380, + 143 + ], + "spans": [ + { + "bbox": [ + 30, + 34, + 380, + 143 + ], + "type": "text", + "content": "of editing. This process of iterative dataset update demands significant computational resources and time. 
Due to challenges with initial edit consistency, IN2N [11] training necessitates the continuous addition of edited images to the dataset over a significantly large number of iterations. This issue stems from the inherent limitations present in Text-to-Image (T2I) diffusion models [4, 37], where achieving prompt-consistent edits across multiple images—especially those capturing the same scene—proves to be a formidable task [7, 19]. Such inconsistencies significantly undermine the effectiveness of 3D scene modifications, particularly when these altered frames are leveraged to generate unique views." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 31, + 159, + 174, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 31, + 159, + 174, + 410 + ], + "spans": [ + { + "bbox": [ + 31, + 159, + 174, + 410 + ], + "type": "text", + "content": "In this work, we address a novel problem of efficiently reconstructing 3D scenes directly from monocular videos without using COLMAP [40] aligned with the editing textual prompt. Specifically, we apply a diffusion model [4] to edit every frame of a given monocular video before creating a 3D scene. To address the challenge of consistent editing across all the frames, we introduce a novel noise blender module, which ensures each new edited view is conditioned upon its adjacent, previously edited views. This is achieved by calculating a weighted average of image-conditional noise estimations such that closer frames exert greater influence on the edit" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 30, + 410, + 380, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 410, + 380, + 543 + ], + "spans": [ + { + "bbox": [ + 30, + 410, + 380, + 543 + ], + "type": "text", + "content": "ing outcome. Our editing strategy utilizes the IP2P [4] 2D editing diffusion model, which effectively employs both conditional and unconditional noise prediction. Consequently, our method achieves multi-view consistency without the necessity for extra training or fine-tuning, unlike prior approaches [7,27,46]. For 3D scene synthesis based on the edited views, our framework utilizes the Gaussian Splatting (GS) [17] technique, capitalizing on the temporal continuity of video data and the explicit representation of point clouds. Originally designed to work with pre-computed camera poses, 3D Gaussian Splatting presents us with the possibility to synthesize views and construct edited 3D scenes from monocular videos without the need for SfM pre-processing, overcoming one of NeRF's significant limitations [25]." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 30, + 547, + 380, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 547, + 380, + 584 + ], + "spans": [ + { + "bbox": [ + 30, + 547, + 380, + 584 + ], + "type": "text", + "content": "Our method grows the 3D Gaussians of the scene continuously, from the edited frames, as the camera moves, eliminating the need for pre-computed camera poses and 3D model initialization on original un-edited frames to identify" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 189, + 174, + 370, + 297 + ], + "blocks": [ + { + "bbox": [ + 189, + 174, + 370, + 297 + ], + "lines": [ + { + "bbox": [ + 189, + 174, + 370, + 297 + ], + "spans": [ + { + "bbox": [ + 189, + 174, + 370, + 297 + ], + "type": "image", + "image_path": "edadeb4abbe79bc0073ff9e608a0767dd4aa50121b0dcd79be63c1ddeb6aee09.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 179, + 305, + 379, + 383 + ], + "lines": [ + { + "bbox": [ + 179, + 305, + 379, + 383 + ], + "spans": [ + { + "bbox": [ + 179, + 305, + 379, + 383 + ], + "type": "text", + "content": "Fig. 1: Our method, 3D Ego, streamlines the 3D editing process by merging a three-stage workflow into a singular, comprehensive framework. This efficiency is achieved by bypassing the need for COLMAP [40] for pose initialization and avoiding the initialization of the model with unedited images, unlike other existing approaches [7,11,19]." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 32, + 10, + 39, + 19 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 10, + 39, + 19 + ], + "spans": [ + { + "bbox": [ + 32, + 10, + 39, + 19 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 9, + 130, + 19 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 9, + 130, + 19 + ], + "spans": [ + { + "bbox": [ + 66, + 9, + 130, + 19 + ], + "type": "text", + "content": "U. Khalid et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 41, + 343, + 184 + ], + "blocks": [ + { + "bbox": [ + 69, + 41, + 343, + 184 + ], + "lines": [ + { + "bbox": [ + 69, + 41, + 343, + 184 + ], + "spans": [ + { + "bbox": [ + 69, + 41, + 343, + 184 + ], + "type": "image", + "image_path": "469efc091e8d0632b4ee1c1082241badb787621bb72045fb697b819647255300.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 30, + 196, + 382, + 262 + ], + "lines": [ + { + "bbox": [ + 30, + 196, + 382, + 262 + ], + "spans": [ + { + "bbox": [ + 30, + 196, + 382, + 262 + ], + "type": "text", + "content": "Fig. 2: 3D Ego offers rapid, accurate, and adaptable 3D editing, bypassing the need for original 3D scene initialization and COLMAP poses. This ensures compatibility with videos from any source, including casual smartphone captures like the Van 360-degree scene. The above results identify three cases challenging for IN2N [11], where our method can convert a monocular video into customized 3D scenes using a streamlined, single-stage reconstruction process." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 30, + 289, + 382, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 289, + 382, + 337 + ], + "spans": [ + { + "bbox": [ + 30, + 289, + 382, + 337 + ], + "type": "text", + "content": "an affine transformation that maps the 3D Gaussians from frame " + }, + { + "bbox": [ + 30, + 289, + 382, + 337 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 30, + 289, + 382, + 337 + ], + "type": "text", + "content": " to accurately render the pixels in frame " + }, + { + "bbox": [ + 30, + 289, + 382, + 337 + ], + "type": "inline_equation", + "content": "i + 1" + }, + { + "bbox": [ + 30, + 289, + 382, + 337 + ], + "type": "text", + "content": ". Hence, our method 3DEgo condenses a three-stage 3D editing process into a single-stage, unified and efficient framework as shown in Figure 1. Our contributions are as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 37, + 350, + 380, + 572 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 37, + 350, + 379, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 37, + 350, + 379, + 386 + ], + "spans": [ + { + "bbox": [ + 37, + 350, + 379, + 386 + ], + "type": "text", + "content": "- We tackle the novel challenge of directly transforming monocular videos into 3D scenes guided by editing text prompts, circumventing conventional 3D editing pipelines." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 37, + 388, + 379, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 37, + 388, + 379, + 423 + ], + "spans": [ + { + "bbox": [ + 37, + 388, + 379, + 423 + ], + "type": "text", + "content": "- We introduce a unique auto-regressive editing technique that enhances multiview consistency across edited views, seamlessly integrating with pre-trained diffusion models without the need for additional fine-tuning." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 37, + 424, + 379, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 37, + 424, + 379, + 472 + ], + "spans": [ + { + "bbox": [ + 37, + 424, + 379, + 472 + ], + "type": "text", + "content": "- We propose a COLMAP-free method using 3D Gaussian splatting for reconstructing 3D scenes from casually captured videos. This technique leverages the video's continuous time sequence for pose estimation and scene development, bypassing traditional SfM dependencies." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 37, + 474, + 379, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 37, + 474, + 379, + 509 + ], + "spans": [ + { + "bbox": [ + 37, + 474, + 379, + 509 + ], + "type": "text", + "content": "- We present an advanced technique for converting 2D masks into 3D space, enhancing editing accuracy through Pyramidal Gaussian Scoring (PGS), ensuring more stable and detailed refinement." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 37, + 511, + 380, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 37, + 511, + 380, + 572 + ], + "spans": [ + { + "bbox": [ + 37, + 511, + 380, + 572 + ], + "type": "text", + "content": "- Through extensive evaluations on six datasets—including our custom GS25 and others like IN2N, Mip-NeRF, NeRFstudio Dataset, Tanks & Temples, and CO3D-V2—we demonstrate our method's enhanced editing precision and efficiency, particularly with 360-degree and casually recorded videos, as illustrated in Fig. 2." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 221, + 9, + 347, + 22 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 9, + 347, + 22 + ], + "spans": [ + { + "bbox": [ + 221, + 9, + 347, + 22 + ], + "type": "text", + "content": "3DEgo: 3D Editing on the Go!" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 372, + 10, + 380, + 19 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 372, + 10, + 380, + 19 + ], + "spans": [ + { + "bbox": [ + 372, + 10, + 380, + 19 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 33, + 33, + 136, + 45 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 33, + 136, + 45 + ], + "spans": [ + { + "bbox": [ + 33, + 33, + 136, + 45 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 33, + 60, + 379, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 60, + 379, + 132 + ], + "spans": [ + { + "bbox": [ + 33, + 60, + 379, + 132 + ], + "type": "text", + "content": "A growing body of research is exploring diffusion models for text-driven image editing, introducing techniques that allow for precise modifications based on user-provided instructions [30,35,37,39]. While some approaches require explicit before-and-after captions [12] or specialized training [38], making them less accessible to non-experts, IP2P [4] simplifies the process by enabling direct textual edits on images, making advanced editing tools more widely accessible." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 33, + 133, + 379, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 133, + 379, + 169 + ], + "spans": [ + { + "bbox": [ + 33, + 133, + 379, + 169 + ], + "type": "text", + "content": "Recently, diffusion models have also been employed for 3D editing, focusing on altering the geometry and appearance of 3D scenes [1,4,10,13,16,18,22-24, 26,28,31,43,44,48,49]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 32, + 170, + 380, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 170, + 380, + 324 + ], + "spans": [ + { + "bbox": [ + 32, + 170, + 380, + 324 + ], + "type": "text", + "content": "Traditional NeRF representations, however, pose significant challenges for precise editing due to their implicit nature, leading to difficulties in localizing edits within a scene. Earlier efforts have mainly achieved global transformations [6, 14, 29, 45, 47, 51], with object-centric editing remaining a challenge. IN2N [11] introduced user-friendly text-based editing, though it might affect the entire scene. 
Recent studies [7, 19, 52] have attempted to tackle local editing and multi-view consistency challenges within the IN2N framework [11]. Yet, no existing approaches in the literature offer pose-free capabilities, nor can they create a text-conditioned 3D scene from arbitrary video footage. Nevertheless, existing 3D editing methods [11, 52] universally necessitate Structure-from-Motion (SfM) preprocessing. Recent studies like Nope-NeRF [3], BARF [25], and SC-NeRF [15] have introduced methodologies for pose optimization and calibration concurrent with the training of (unedited) NeRF." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 32, + 325, + 379, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 325, + 379, + 408 + ], + "spans": [ + { + "bbox": [ + 32, + 325, + 379, + 408 + ], + "type": "text", + "content": "In this study, we present a novel method for constructing 3D scenes directly from textual prompts, utilizing monocular video frames without dependence on COLMAP poses [40], thus addressing unique challenges. Given the complexities NeRF's implicit nature introduces to simultaneous 3D reconstruction and camera registration, our approach leverages the advanced capabilities of 3D Gaussian Splatting (3DGS) [17] alongside a pre-trained 2D editing diffusion model for efficient 3D model creation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 33, + 429, + 99, + 442 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 429, + 99, + 442 + ], + "spans": [ + { + "bbox": [ + 33, + 429, + 99, + 442 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 33, + 457, + 379, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 457, + 379, + 492 + ], + "spans": [ + { + "bbox": [ + 33, + 457, + 379, + 492 + ], + "type": "text", + "content": "Given a sequence of unposed images alongside camera intrinsics, we aim to recover the camera poses in sync with the edited frames and reconstruct a photorealistic 3D scene conditioned on the textual prompt." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 33, + 513, + 126, + 524 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 513, + 126, + 524 + ], + "spans": [ + { + "bbox": [ + 33, + 513, + 126, + 524 + ], + "type": "text", + "content": "3.1 Preliminaries" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 33, + 535, + 379, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 535, + 379, + 583 + ], + "spans": [ + { + "bbox": [ + 33, + 535, + 379, + 583 + ], + "type": "text", + "content": "In the domain of 3D scene modeling, 3D Gaussian splatting [17] emerges as a notable method. The method's strength lies in its succinct Gaussian representation coupled with an effective differential rendering technique, facilitating real-time, high-fidelity visualization. This approach models a 3D environment" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 33, + 11, + 39, + 18 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 11, + 39, + 18 + ], + "spans": [ + { + "bbox": [ + 33, + 11, + 39, + 18 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 9, + 130, + 19 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 9, + 130, + 19 + ], + "spans": [ + { + "bbox": [ + 66, + 9, + 130, + 19 + ], + "type": "text", + "content": "U. Khalid et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 30, + 34, + 381, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 34, + 381, + 143 + ], + "spans": [ + { + "bbox": [ + 30, + 34, + 381, + 143 + ], + "type": "text", + "content": "using a collection of point-based 3D Gaussians, denoted as " + }, + { + "bbox": [ + 30, + 34, + 381, + 143 + ], + "type": "inline_equation", + "content": "\\mathcal{H}" + }, + { + "bbox": [ + 30, + 34, + 381, + 143 + ], + "type": "text", + "content": " where each Gaussian " + }, + { + "bbox": [ + 30, + 34, + 381, + 143 + ], + "type": "inline_equation", + "content": "h = \\{\\mu, \\Sigma, c, \\alpha\\}" + }, + { + "bbox": [ + 30, + 34, + 381, + 143 + ], + "type": "text", + "content": ". Here, " + }, + { + "bbox": [ + 30, + 34, + 381, + 143 + ], + "type": "inline_equation", + "content": "\\mu \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 30, + 34, + 381, + 143 + ], + "type": "text", + "content": " specifies the Gaussian's center location, " + }, + { + "bbox": [ + 30, + 34, + 381, + 143 + ], + "type": "inline_equation", + "content": "\\Sigma \\in \\mathbb{R}^{3 \\times 3}" + }, + { + "bbox": [ + 30, + 34, + 381, + 143 + ], + "type": "text", + "content": " is the covariance matrix capturing the Gaussian's shape, " + }, + { + "bbox": [ + 30, + 34, + 381, + 143 + ], + "type": "inline_equation", + "content": "c \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 30, + 34, + 381, + 143 + ], + "type": "text", + "content": " is the color vector in RGB format represented in the three degrees of spherical harmonics (SH) coefficients, and " + }, + { + "bbox": [ + 30, + 34, + 381, + 143 + ], + "type": "inline_equation", + "content": "\\alpha \\in \\mathbb{R}" + }, + { + "bbox": [ + 30, + 34, + 381, + 143 + ], + "type": "text", + "content": " denotes the Gaussian's opacity level. To optimize the parameters of 3D Gaussians to represent the scene, we need to render them into images in a differentiable manner. 
The rendering is achieved by approximating the projection of a 3D Gaussian along the depth dimension into pixel coordinates expressed as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 147, + 155, + 379, + 190 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 155, + 379, + 190 + ], + "spans": [ + { + "bbox": [ + 147, + 155, + 379, + 190 + ], + "type": "interline_equation", + "content": "C = \\sum_ {p \\in \\mathcal {P}} c _ {p} \\tau_ {p} \\prod_ {k = 1} ^ {p - 1} (1 - \\alpha_ {k}), \\tag {1}", + "image_path": "db084b96dc69e89078a0919bf811973d15be4e9fb0a5bcaf67e56c9737bef2b9.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "spans": [ + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "text", + "content": " are ordered points overlapping the pixel, and " + }, + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "inline_equation", + "content": "\\tau_{p} = \\alpha_{p}e^{-\\frac{1}{2}(x_{p})^{T}\\Sigma^{-1}(x_{p})}" + }, + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "text", + "content": " quantifies the Gaussian's contribution to a specific image pixel, with " + }, + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "inline_equation", + "content": "x_{p}" + }, + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "text", + "content": " measuring the distance from the pixel to the center of the " + }, + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "text", + "content": "-th Gaussian. In the original 3DGS, initial Gaussian parameters are refined to fit the scene, guided by ground truth poses obtained using SfM. Through differential rendering, the Gaussians' parameters, including position " + }, + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "text", + "content": ", shape " + }, + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "inline_equation", + "content": "\\Sigma" + }, + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "text", + "content": ", color " + }, + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "text", + "content": ", and opacity " + }, + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 30, + 198, + 382, + 285 + ], + "type": "text", + "content": ", are adjusted using a photometric loss function." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 31, + 306, + 234, + 319 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 31, + 306, + 234, + 319 + ], + "spans": [ + { + "bbox": [ + 31, + 306, + 234, + 319 + ], + "type": "text", + "content": "3.2 Multi-View Consistent 2D Editing" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 30, + 331, + 214, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 331, + 214, + 379 + ], + "spans": [ + { + "bbox": [ + 30, + 331, + 214, + 379 + ], + "type": "text", + "content": "In the first step, we perform 2D editing with key editing areas (KEA) based on the user-provided video, " + }, + { + "bbox": [ + 30, + 331, + 214, + 379 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 30, + 331, + 214, + 379 + ], + "type": "text", + "content": ", and editing prompt, " + }, + { + "bbox": [ + 30, + 331, + 214, + 379 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 30, + 331, + 214, + 379 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "spans": [ + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": "From the given video " + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": ", we extract frames " + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "inline_equation", + "content": "\\{f_1, f_2, \\ldots, f_N\\}" + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": ". Analyzing the textual prompt " + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": " with a Large Language Model " + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": " identifies key editing attributes " + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "inline_equation", + "content": "\\{A_1, A_2, \\ldots, A_k\\}" + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": ", essential for editing, expressed as " + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\mathcal{T}) \\to \\{A_1, A_2, \\ldots, A_k\\}" + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": ". Utilizing these attributes, a segmentation model " + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "inline_equation", + "content": "\\mathcal{S}" + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": " delineates editing regions in each frame " + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "inline_equation", + "content": "f_i" + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": " by generating a mask " + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "inline_equation", + "content": "M_i" + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": " with KEA marked as 1, and others as 0. 
The segmentation operation is defined as, " + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "inline_equation", + "content": "\\mathcal{S}(f_i, \\{A_1, A_2, \\ldots, A_k\\}) \\to M_i" + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "inline_equation", + "content": "\\forall i \\in \\{1, \\ldots, N\\}" + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": ". Subsequently, a 2D diffusion model " + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "inline_equation", + "content": "\\mathcal{E}" + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": " selectively edits these regions in " + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "inline_equation", + "content": "f_i" + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": ", as defined by " + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "inline_equation", + "content": "M_i" + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": ", producing edited frames " + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "inline_equation", + "content": "\\{E_1, E_2, \\ldots, E_N\\}" + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": " under guidance from " + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": ", such that " + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "inline_equation", + "content": "\\mathcal{E}(f_i, M_i) \\to E_i" + }, + { + "bbox": [ + 30, + 380, + 216, + 584 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 225, + 361, + 369, + 483 + ], + "blocks": [ + { + "bbox": [ + 225, + 361, + 369, + 483 + ], + "lines": [ + { + "bbox": [ + 225, + 361, + 369, + 483 + ], + "spans": [ + { + "bbox": [ + 225, + 361, + 369, + 483 + ], + "type": "image", + "image_path": "74d84f2ece68fed6d31ab66173b672131851507fd70639dd1cb54b4f868e5623.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 220, + 493, + 381, + 550 + ], + "lines": [ + { + "bbox": [ + 220, + 493, + 381, + 550 + ], + "spans": [ + { + "bbox": [ + 220, + 493, + 381, + 550 + ], + "type": "text", + "content": "Fig. 3: Autoregressive Editing. At each denoising step, the model predicts " + }, + { + "bbox": [ + 220, + 493, + 381, + 550 + ], + "type": "inline_equation", + "content": "w + 1" + }, + { + "bbox": [ + 220, + 493, + 381, + 550 + ], + "type": "text", + "content": " separate noises, which are then unified via weighted noise blender (Eq. 4) to predict " + }, + { + "bbox": [ + 220, + 493, + 381, + 550 + ], + "type": "inline_equation", + "content": "\\varepsilon_{\\theta}(e_t,f,\\mathcal{T},W)" + }, + { + "bbox": [ + 220, + 493, + 381, + 550 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 221, + 9, + 347, + 21 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 9, + 347, + 21 + ], + "spans": [ + { + "bbox": [ + 221, + 9, + 347, + 21 + ], + "type": "text", + "content": "3DEgo: 3D Editing on the Go!" 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 373, + 10, + 379, + 19 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 10, + 379, + 19 + ], + "spans": [ + { + "bbox": [ + 373, + 10, + 379, + 19 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 30, + 34, + 380, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 34, + 380, + 105 + ], + "spans": [ + { + "bbox": [ + 30, + 34, + 380, + 105 + ], + "type": "text", + "content": "Consistent Multi-View2D Editing. As discussed above, differing from IN2N [11] that incorporates edited images gradually over several training iterations, our approach involves editing the entire dataset at once before the training starts. We desire 1) each edited frame, " + }, + { + "bbox": [ + 30, + 34, + 380, + 105 + ], + "type": "inline_equation", + "content": "E_{i}" + }, + { + "bbox": [ + 30, + 34, + 380, + 105 + ], + "type": "text", + "content": " follows the editing prompt, " + }, + { + "bbox": [ + 30, + 34, + 380, + 105 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 30, + 34, + 380, + 105 + ], + "type": "text", + "content": ", 2) retain the original images' semantic content, and 3) the edited images, " + }, + { + "bbox": [ + 30, + 34, + 380, + 105 + ], + "type": "inline_equation", + "content": "\\{E_1,E_2,\\dots ,E_N\\}" + }, + { + "bbox": [ + 30, + 34, + 380, + 105 + ], + "type": "text", + "content": " are consistent with each other." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 30, + 106, + 379, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 106, + 379, + 212 + ], + "spans": [ + { + "bbox": [ + 30, + 106, + 379, + 212 + ], + "type": "text", + "content": "(i) Multi-view Consistent Mask. As " + }, + { + "bbox": [ + 30, + 106, + 379, + 212 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 30, + 106, + 379, + 212 + ], + "type": "text", + "content": " doesn't guarantee consistent masks across the views of a casually recorded monocular video, we utilize a zero-shot point tracker [34] to ensure uniform mask generation across the views. The procedure starts by identifying query points in the initial video frame using the ground truth mask. Query points are extracted from these ground truth masks employing the K-Medoids [32] sampling method. This method utilizes the cluster centers from K-Medoids clustering as query points. This approach guarantees comprehensive coverage of the object's various sections and enhances resilience to noise and outliers." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 30, + 214, + 379, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 214, + 379, + 297 + ], + "spans": [ + { + "bbox": [ + 30, + 214, + 379, + 297 + ], + "type": "text", + "content": "(ii)Autoregressive Editing. To address the issue of preserving consistency across multiple views, we employ an autoregressive method that edits frames in sequence, with IP2P [4] editing restricted to the Key Editing Areas (KEA) as delineated by the relevant masks. Instead of editing each frame independently from just the input images - a process that can vary significantly between adjacent images - we integrate an autoregressive editing technique where the frame to be edited is conditioned on already edited adjacent frames." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 30, + 297, + 379, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 297, + 379, + 333 + ], + "spans": [ + { + "bbox": [ + 30, + 297, + 379, + 333 + ], + "type": "text", + "content": "As discussed above, we incorporate IP2P [4] as a 2D editing diffusion model. The standard noise prediction from IP2P's backbone that includes both conditional and unconditional editing is given as," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 42, + 343, + 378, + 353 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 343, + 378, + 353 + ], + "spans": [ + { + "bbox": [ + 42, + 343, + 378, + 353 + ], + "type": "interline_equation", + "content": "\\tilde {\\varepsilon} _ {\\theta} \\left(e _ {t}, f, \\mathcal {T}\\right) = \\varepsilon_ {\\theta} \\left(e _ {t}, \\varnothing_ {f}, \\varnothing_ {\\mathcal {T}}\\right) + s _ {f} \\left(\\varepsilon_ {\\theta} \\left(e _ {t}, f, \\varnothing_ {\\mathcal {T}}\\right) - \\varepsilon_ {\\theta} \\left(e _ {t}, \\varnothing_ {f}, \\varnothing_ {\\mathcal {T}}\\right)\\right) + s _ {\\mathcal {T}} \\left(\\varepsilon_ {\\theta} \\left(e _ {t}, f, \\mathcal {T}\\right) - \\varepsilon_ {\\theta} \\left(e _ {t}, f, \\varnothing_ {\\mathcal {T}}\\right)\\right) \\tag {2}", + "image_path": "e4e52ec90ddee52a55fa150a73ac281ef64390302aa11c8c5cccf8fb99393a2b.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 30, + 361, + 380, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 361, + 380, + 456 + ], + "spans": [ + { + "bbox": [ + 30, + 361, + 380, + 456 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 30, + 361, + 380, + 456 + ], + "type": "inline_equation", + "content": "s_f" + }, + { + "bbox": [ + 30, + 361, + 380, + 456 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 30, + 361, + 380, + 456 + ], + "type": "inline_equation", + "content": "s_{\\mathcal{T}}" + }, + { + "bbox": [ + 30, + 361, + 380, + 456 + ], + "type": "text", + "content": " are image and textual prompt guidance scale. We suggest enhancing the noise estimation process with our autoregressive training framework. Consider a set of " + }, + { + "bbox": [ + 30, + 361, + 380, + 456 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 30, + 361, + 380, + 456 + ], + "type": "text", + "content": " views, represented as " + }, + { + "bbox": [ + 30, + 361, + 380, + 456 + ], + "type": "inline_equation", + "content": "W = \\{E_n\\}_{n=1}^w" + }, + { + "bbox": [ + 30, + 361, + 380, + 456 + ], + "type": "text", + "content": ". Our goal is to model the distribution of the " + }, + { + "bbox": [ + 30, + 361, + 380, + 456 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 30, + 361, + 380, + 456 + ], + "type": "text", + "content": "-th view image by utilizing its " + }, + { + "bbox": [ + 30, + 361, + 380, + 456 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 30, + 361, + 380, + 456 + ], + "type": "text", + "content": " adjacent, already edited views. 
To achieve this, we calculate the image-conditional noise estimate $\varepsilon_{\theta}(e_t, E, \varnothing_{\mathcal{T}})$ across all frames in $W$. The weighted average $\bar{\varepsilon}_{\theta}$ of the noise estimates from all edited frames within $W$, with $\beta$ as the per-frame weight, is computed as follows:

\bar{\varepsilon}_{\theta}\left(e_{t}, \varnothing_{\mathcal{T}}, W\right) = \sum_{n=1}^{w} \beta_{n}\, \varepsilon_{\theta}^{n}\left(e_{t}, E_{n}, \varnothing_{\mathcal{T}}\right) \tag{3}

Here, $E_n$ denotes the $n$-th edited frame within $W$, and $\beta_n$ is the weight assigned to the $n$-th frame's noise estimate. The weights satisfy $\sum_{n=1}^{w} \beta_n = 1$, which ensures that the weighted average is normalized. As we perform 2D editing without any pose priors, the weight $\beta$ is independent of the angular offset between the frame being edited, $f_n$, and the already edited frames in $W$. To give the closest frame the highest weight, we use an exponential decay function for the weight $\beta_n$ of the $n$-th frame in $W$. With a decay factor $\lambda_d$ ($0 < \lambda_d < 1$), the weight of each frame decreases exponentially as its distance from the target frame increases: $\beta_n = \lambda_d^{w-n}$. This ensures that the edited frame $E$ closest to the target frame $f$ (i.e., $n = w$) receives the highest weight. To make the weights sum to 1, each weight is normalized by the sum of all weights, $\beta_n = \frac{\lambda_d^{w-n}}{\sum_{j=1}^{w} \lambda_d^{w-j}}$, which guarantees the constraint $\sum_{n=1}^{w} \beta_n = 1$.
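The decayed, normalized weights and the averaging of Eq. 3 are straightforward to express in code. The following is a minimal PyTorch sketch under our own assumptions about tensor shapes (latent-space noise estimates stacked along the first axis); it illustrates the weighting scheme and is not the authors' implementation:

```python
import torch

def decay_weights(w: int, lambda_d: float = 0.8) -> torch.Tensor:
    """beta_n = lambda_d^(w - n) / sum_j lambda_d^(w - j), for n = 1..w."""
    n = torch.arange(1, w + 1, dtype=torch.float32)
    raw = lambda_d ** (w - n)          # closest frame (n = w) gets raw weight 1
    return raw / raw.sum()             # normalize so the weights sum to 1

def weighted_noise_average(noise_estimates: torch.Tensor,
                           lambda_d: float = 0.8) -> torch.Tensor:
    """noise_estimates: (w, C, H, W) stack of eps_theta^n(e_t, E_n, null_T)."""
    beta = decay_weights(noise_estimates.shape[0], lambda_d)
    return torch.einsum("n,nchw->chw", beta, noise_estimates)

# Example: average noise estimates from a window of 5 edited frames.
eps = torch.randn(5, 4, 64, 64)        # hypothetical latent-space estimates
eps_bar = weighted_noise_average(eps)  # (4, 64, 64)
```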
Our editing path is determined by the sequence of frames from the captured video. Therefore, while editing frame $f_n$, we incorporate the previous $w$ edited frames into the set $W$, assigning the highest weight $\beta$ to $E_{n-1}$. Using Eq. 2 and Eq. 3, we define our score estimation function as follows:

\varepsilon_{\theta}\left(e_{t}, f, \mathcal{T}, W\right) = \gamma_{f}\, \tilde{\varepsilon}_{\theta}\left(e_{t}, f, \mathcal{T}\right) + \gamma_{E}\, \bar{\varepsilon}_{\theta}\left(e_{t}, \varnothing_{\mathcal{T}}, W\right) \tag{4}

where $\gamma_f$ is a hyperparameter that determines the influence of the original frame undergoing editing on the noise estimation, and $\gamma_E$ controls the contribution of the noise estimates from adjacent edited views.
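A sketch of the blend in Eq. 4, assuming the text-guided estimate $\tilde{\varepsilon}_{\theta}$ of Eq. 2 (not shown in this excerpt) is given; $\gamma_f$ and $\gamma_E$ values are placeholders:

```python
import torch

def combined_score(eps_guided: torch.Tensor,   # eps~_theta(e_t, f, T), from Eq. 2
                   eps_bar: torch.Tensor,      # eps-bar_theta(e_t, null_T, W), Eq. 3
                   gamma_f: float, gamma_e: float) -> torch.Tensor:
    # eps_theta(e_t, f, T, W) = gamma_f * eps~ + gamma_E * eps-bar
    return gamma_f * eps_guided + gamma_e * eps_bar

# Example usage with illustrative shapes and placeholder hyperparameters.
eps_tilde = torch.randn(4, 64, 64)
eps_bar = torch.randn(4, 64, 64)
eps_combined = combined_score(eps_tilde, eps_bar, gamma_f=0.7, gamma_e=0.3)
```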
3.3 3D Scene Reconstruction

After multi-view consistent 2D editing is achieved across all frames of the given video $V$, we leverage the edited frames $E_i$ and their corresponding masks $M_i$ to construct a 3D scene without any SfM pose initialization. Due to the explicit nature of 3DGS [17], determining the camera poses is essentially equivalent to estimating the transformation of a collection of 3D Gaussian points. We begin by introducing an extra Gaussian parameter for precise local editing, then explore relative pose estimation through incremental frame inclusion, and finally examine scene expansion, together with the losses integrated into our global optimization strategy.

3D Gaussians Parameterization for Precise Editing. Projecting the KEA (see Section 3.2) into the 3D Gaussians $\mathcal{H}$, using $M$ for KEA identity assignment, is essential for accurate editing. Therefore, we introduce a vector $m$ associated with each Gaussian point, $h = \{\mu, \Sigma, c, \alpha, m\}$, in the 3D Gaussian set $\mathcal{H}_i$ of the $i$-th frame. The parameter $m$ is a learnable vector of length 2, matching the number of labels in the segmentation map $M$. We optimize $m$ to represent the KEA identity during training. Unlike the view-dependent Gaussian parameters, however, the KEA identity remains uniform across different rendering views. The Gaussian KEA identity allows each Gaussian's categorization to be tracked continuously as the Gaussians evolve, enabling the selective application of gradients and the exclusive rendering of targeted objects, which markedly improves processing efficiency in intricate scenes.

Next, we detail the training pipeline, inspired by [3,8], which consists of two stages: (i) relative pose estimation, and (ii) global 3D scene expansion.
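As a concrete picture of the augmented parameterization $h = \{\mu, \Sigma, c, \alpha, m\}$, here is a minimal PyTorch sketch of ours (not the released code) that stores a diagonal covariance for simplicity and exposes the view-independent identity:

```python
import torch
import torch.nn as nn

class EditableGaussians(nn.Module):
    """3D Gaussians augmented with a 2-dim learnable identity vector m."""
    def __init__(self, num_points: int):
        super().__init__()
        self.mu = nn.Parameter(torch.zeros(num_points, 3))         # means
        self.log_scale = nn.Parameter(torch.zeros(num_points, 3))  # diag covariance (log)
        self.color = nn.Parameter(torch.rand(num_points, 3))       # RGB
        self.opacity = nn.Parameter(torch.zeros(num_points, 1))    # pre-sigmoid alpha
        self.m = nn.Parameter(torch.zeros(num_points, 2))          # KEA identity logits

    def kea_identity(self) -> torch.Tensor:
        # Hard, view-independent identity per Gaussian: 1 inside the KEA, 0 outside.
        return torch.softmax(self.m, dim=-1).argmax(dim=-1)
```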
Per Frame View Initialization. To begin the training process, we randomly pick a specific frame, denoted as $E_i$. We then employ a pre-trained monocular depth estimator $\mathcal{D}$ to derive the depth map $D_i$ for $E_i$. Utilizing $D_i$, which provides strong geometric cues independent of camera parameters, we initialize 3DGS with points extracted from the monocular depth through camera intrinsics and orthogonal projection. This initialization step learns a set of 3D Gaussians $\mathcal{H}_i$ that minimizes the photometric discrepancy between the rendered frame and the current frame $E_i$. The photometric loss $\mathcal{L}_{rgb}$ optimizes the conventional 3D Gaussian parameters: color $c$, covariance $\Sigma$, mean $\mu$, and opacity $\alpha$. However, to initialize the KEA identity and adjust $m$ for the 3D Gaussians, relying on $\mathcal{L}_{rgb}$ alone is insufficient. Hence, we propose the KEA loss $\mathcal{L}_{KEA}$, which incorporates the 2D mask $M_i$ corresponding to $E_i$; applying $\mathcal{L}_{KEA}$ during training teaches each Gaussian point its KEA identity. Overall, the 3D Gaussian optimization is defined as

\mathcal{H}_{i}^{*} = \arg\min_{c, \Sigma, \mu, \alpha} \mathcal{L}_{rgb}(\mathcal{R}(\mathcal{H}_{i}), E_{i}) + \arg\min_{m} \mathcal{L}_{KEA}(\mathcal{R}(\mathcal{H}_{i}), M_{i}), \tag{5}

where $\mathcal{R}$ denotes the 3DGS rendering function. The photometric loss $\mathcal{L}_{rgb}$, as introduced in [17], is a blend of $\mathcal{L}_1$ and D-SSIM losses:

\mathcal{L}_{rgb} = (1 - \gamma)\, \mathcal{L}_{1} + \gamma\, \mathcal{L}_{\mathrm{D\text{-}SSIM}}, \tag{6}
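Eq. 6 admits a compact implementation. The sketch below uses a box-filtered SSIM for brevity (the original 3DGS work [17] uses a Gaussian window) and takes D-SSIM as $(1 - \mathrm{SSIM})/2$, a common 3DGS convention that we assume here; $\gamma = 0.2$ is likewise a placeholder:

```python
import torch
import torch.nn.functional as F

def ssim(x: torch.Tensor, y: torch.Tensor, win: int = 11) -> torch.Tensor:
    """x, y: (B, C, H, W) images in [0, 1]; returns mean SSIM (box window)."""
    c1, c2 = 0.01 ** 2, 0.03 ** 2
    pad = win // 2
    mu_x = F.avg_pool2d(x, win, 1, pad)
    mu_y = F.avg_pool2d(y, win, 1, pad)
    var_x = F.avg_pool2d(x * x, win, 1, pad) - mu_x ** 2
    var_y = F.avg_pool2d(y * y, win, 1, pad) - mu_y ** 2
    cov = F.avg_pool2d(x * y, win, 1, pad) - mu_x * mu_y
    s = ((2 * mu_x * mu_y + c1) * (2 * cov + c2)) / (
        (mu_x ** 2 + mu_y ** 2 + c1) * (var_x + var_y + c2))
    return s.mean()

def l_rgb(render: torch.Tensor, target: torch.Tensor,
          gamma: float = 0.2) -> torch.Tensor:
    """Eq. 6: (1 - gamma) * L1 + gamma * D-SSIM, with D-SSIM = (1 - SSIM) / 2."""
    l1 = (render - target).abs().mean()
    d_ssim = (1.0 - ssim(render, target)) / 2.0
    return (1.0 - gamma) * l1 + gamma * d_ssim
```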
$\mathcal{L}_{KEA}$ has two components, (i) a 2D binary cross-entropy loss and (ii) a 3D Jensen-Shannon divergence (JSD) loss, and is defined as

\mathcal{L}_{KEA} = \lambda_{BCE}\, \mathcal{L}_{BCE} + \lambda_{JSD}\, \mathcal{L}_{JSD} \tag{7}

Let $\mathcal{N}$ be the total number of pixels in the mask $M$, and let $\mathcal{X}$ represent the set of all pixels. We calculate the binary cross-entropy loss $\mathcal{L}_{BCE}$ as follows:

\mathcal{L}_{BCE} = -\frac{1}{\mathcal{N}} \sum_{x \in \mathcal{X}} \left[ M_{i}(x) \log\left(\mathcal{R}(\mathcal{H}_{i}, m)(x)\right) + \left(1 - M_{i}(x)\right) \log\left(1 - \mathcal{R}(\mathcal{H}_{i}, m)(x)\right) \right] \tag{8}

where $M_i(x)$ is the value of the ground-truth mask at pixel $x$, indicating whether the pixel belongs to the foreground (1) or the background (0). The sum accumulates the loss over all pixels, and the division by $\mathcal{N}$ normalizes it, making the loss independent of the image size. The rendering operation $\mathcal{R}(\mathcal{H}_i, m)(x)$ produces $m_{\mathcal{R}}$ for a given pixel $x$: the weighted sum of the vector $m$ values of the Gaussians overlapping that pixel. Here, $m$ and $m_{\mathcal{R}}$ both have dimensionality 2, intentionally matching the number of classes in the mask labels. We apply the softmax function to $m_{\mathcal{R}}$ to extract the KEA identity, i.e., KEA identity $= \mathrm{softmax}(m_{\mathcal{R}})$. The softmax output is interpreted as either 0, indicating a position outside the KEA, or 1, denoting a location within the KEA.
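Under our reading, the rasterizer alpha-blends the per-Gaussian identity logits into a per-pixel two-channel map $m_{\mathcal{R}}$, and Eq. 8 is a standard BCE on its softmax. A sketch with assumed shapes:

```python
import torch

def l_bce(m_r: torch.Tensor, mask: torch.Tensor,
          eps: float = 1e-6) -> torch.Tensor:
    """m_r: (2, H, W) rendered identity logits; mask: (H, W) in {0, 1}."""
    p_inside = torch.softmax(m_r, dim=0)[1]       # P(pixel lies inside the KEA)
    p_inside = p_inside.clamp(eps, 1.0 - eps)     # numerical safety for log
    m = mask.float()
    loss = -(m * p_inside.log() + (1 - m) * (1 - p_inside).log())
    return loss.mean()                            # mean = sum over pixels / N
```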
To enhance the accuracy of Gaussian KEA identity assignment, we also introduce an unsupervised 3D regularization loss that directly influences the learning of the identity vector $m$. This loss exploits spatial consistency in 3D, encouraging the identity vectors $m$ of the $Z$ nearest 3D Gaussians to be similar in feature space. Specifically, we employ a symmetric and bounded loss based on the Jensen-Shannon divergence,

\mathcal{L}_{\mathrm{JSD}} = \frac{1}{2YZ} \sum_{y=1}^{Y} \sum_{z=1}^{Z} \left[ S(m_{y}) \log\left(\frac{2 S(m_{y})}{S(m_{y}) + S(m_{z}')}\right) + S(m_{z}') \log\left(\frac{2 S(m_{z}')}{S(m_{y}) + S(m_{z}')}\right) \right] \tag{9}

Here, $S$ denotes the softmax function, and $m_z'$ represents the $z$-th identity vector among the $Z$ nearest neighbors in 3D space.
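A brute-force sketch of Eq. 9, assuming $Y$ randomly sampled Gaussians and neighbors found with `torch.cdist` (a KD-tree or CUDA kNN would replace the brute-force search at scale):

```python
import torch

def l_jsd(mu: torch.Tensor, m: torch.Tensor,
          num_samples: int = 256, z: int = 8) -> torch.Tensor:
    """mu: (N, 3) Gaussian means; m: (N, 2) identity logits."""
    idx = torch.randperm(mu.shape[0])[:num_samples]       # Y sampled Gaussians
    d = torch.cdist(mu[idx], mu)                          # (Y, N) distances
    nn_idx = d.topk(z + 1, largest=False).indices[:, 1:]  # drop self-match
    p = torch.softmax(m[idx], dim=-1).unsqueeze(1)        # (Y, 1, 2)
    q = torch.softmax(m[nn_idx], dim=-1)                  # (Y, Z, 2)
    mix = 0.5 * (p + q)
    # Per-pair symmetric term of Eq. 9; mean gives the 1/(2YZ) normalization.
    jsd = 0.5 * ((p * (p / mix).log()).sum(-1) + (q * (q / mix).log()).sum(-1))
    return jsd.mean()
```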
" + }, + { + "bbox": [ + 32, + 161, + 379, + 222 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_i^*" + }, + { + "bbox": [ + 32, + 161, + 379, + 222 + ], + "type": "text", + "content": " is transformed via a learnable SE-3 affine transformation " + }, + { + "bbox": [ + 32, + 161, + 379, + 222 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_i" + }, + { + "bbox": [ + 32, + 161, + 379, + 222 + ], + "type": "text", + "content": " to the subsequent frame " + }, + { + "bbox": [ + 32, + 161, + 379, + 222 + ], + "type": "inline_equation", + "content": "i + 1" + }, + { + "bbox": [ + 32, + 161, + 379, + 222 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 32, + 161, + 379, + 222 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_{i + 1} = \\mathcal{M}_i\\odot \\mathcal{H}_i" + }, + { + "bbox": [ + 32, + 161, + 379, + 222 + ], + "type": "text", + "content": ". Optimizing transformation " + }, + { + "bbox": [ + 32, + 161, + 379, + 222 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_i" + }, + { + "bbox": [ + 32, + 161, + 379, + 222 + ], + "type": "text", + "content": " entails minimizing the photometric loss between the rendered image and the next frame " + }, + { + "bbox": [ + 32, + 161, + 379, + 222 + ], + "type": "inline_equation", + "content": "E_{i + 1}" + }, + { + "bbox": [ + 32, + 161, + 379, + 222 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 118, + 231, + 377, + 246 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 231, + 377, + 246 + ], + "spans": [ + { + "bbox": [ + 118, + 231, + 377, + 246 + ], + "type": "interline_equation", + "content": "\\mathcal {M} _ {i} ^ {*} = \\arg \\min _ {\\mathcal {M} _ {i}} \\mathcal {L} _ {r g b} \\left(\\mathcal {R} \\left(\\mathcal {M} _ {i} \\odot \\mathcal {H} _ {i}\\right), E _ {i + 1}\\right), \\tag {10}", + "image_path": "e88ebf7cdb12c6faf8295d320cb4fdad92ba046c64afb7684ba0c52752974a9d.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 32, + 255, + 379, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 255, + 379, + 327 + ], + "spans": [ + { + "bbox": [ + 32, + 255, + 379, + 327 + ], + "type": "text", + "content": "In this optimization step, we keep the attributes of " + }, + { + "bbox": [ + 32, + 255, + 379, + 327 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_i^*" + }, + { + "bbox": [ + 32, + 255, + 379, + 327 + ], + "type": "text", + "content": " fixed to distinguish camera motion from other Gaussian transformations such as pruning, densification, and self-rotation. Applying the above 3DGS initialization to sequential image pairs enables inferring relative poses across frames. However, accumulated pose errors could adversely affect the optimization of a global scene. To tackle this challenge, we propose the gradual, sequential expansion of the 3DGS." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 32, + 343, + 379, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 343, + 379, + 463 + ], + "spans": [ + { + "bbox": [ + 32, + 343, + 379, + 463 + ], + "type": "text", + "content": "Gradual 3D Scene Expansion. 
Gradual 3D Scene Expansion. As illustrated above, beginning with frame $E_i$, we start from a collection of 3D Gaussian points with the camera pose set to an orthogonal configuration. We then calculate the relative camera pose between frames $E_i$ and $E_{i+1}$. After estimating the relative camera poses, we expand the 3DGS scene. This all-inclusive 3DGS optimization refines the collection of 3D Gaussian points, including all attributes, over $I$ iterations, taking the calculated relative pose and the two observed frames as inputs. Once the next frame $E_{i+2}$ becomes available after $I$ iterations, we repeat the procedure: estimating the relative pose between $E_{i+1}$ and $E_{i+2}$, and expanding the scene with all-inclusive 3DGS.

To perform all-inclusive 3DGS optimization, we increase the density of the Gaussians currently under reconstruction as new frames are introduced. Following [17], we identify candidates for densification by evaluating the average magnitude of position gradients in view space. To focus densification on yet-to-be-observed areas, we densify the universal 3DGS every $I$ steps, synchronized with the rate of new frame addition. We continue to expand the 3D Gaussian points until the end of the input sequence. Through the repeated application of frame-relative pose estimation and all-inclusive scene expansion, the 3D Gaussians evolve from an initial partial point cloud into a complete point cloud that encapsulates the entire scene over the sequence.
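To make the schedule concrete, here is a high-level sketch of the expansion loop. The helper callables are injected to keep the sketch self-contained; their names and signatures are our assumptions, not the authors' API:

```python
from typing import Callable, List

def expand_scene(frames: List, init_from_depth: Callable,
                 estimate_pose: Callable, optimize_all: Callable,
                 densify: Callable, iters_per_frame: int):
    """Gradual 3DGS expansion over a frame sequence (sketch)."""
    gaussians = init_from_depth(frames[0])   # first frame, orthogonal camera
    poses = [None]                           # identity pose for frame 0
    for i in range(len(frames) - 1):
        # 1) Relative pose to the next frame, Gaussians held fixed (Eq. 10).
        poses.append(estimate_pose(gaussians, frames[i + 1]))
        # 2) Densify in step with new-frame arrival, then run the
        #    all-inclusive optimization over all observed frames (Eq. 13).
        gaussians = densify(gaussians)
        gaussians = optimize_all(gaussians, frames[: i + 2], poses,
                                 iters_per_frame)
    return gaussians, poses
```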
In" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 221, + 9, + 346, + 20 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 9, + 346, + 20 + ], + "spans": [ + { + "bbox": [ + 221, + 9, + 346, + 20 + ], + "type": "text", + "content": "3DEgo: 3D Editing on the Go!" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 372, + 10, + 379, + 19 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 372, + 10, + 379, + 19 + ], + "spans": [ + { + "bbox": [ + 372, + 10, + 379, + 19 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 32, + 34, + 379, + 57 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 34, + 379, + 57 + ], + "spans": [ + { + "bbox": [ + 32, + 34, + 379, + 57 + ], + "type": "text", + "content": "our global optimization stage, we still utilize the " + }, + { + "bbox": [ + 32, + 34, + 379, + 57 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{KEA}" + }, + { + "bbox": [ + 32, + 34, + 379, + 57 + ], + "type": "text", + "content": " loss as new Gaussians are added during densification." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 32, + 59, + 379, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 59, + 379, + 213 + ], + "spans": [ + { + "bbox": [ + 32, + 59, + 379, + 213 + ], + "type": "text", + "content": "Pyramidal Feature Scoring. While our 2D consistent editing approach, detailed in Section 3.2, addresses various editing discrepancies, to rectify any residual inconsistencies in 2D editing, we introduce a pyramidal feature scoring method tailored for Gaussians in Key Editing Areas (KEA) identified with an identity of 1. This method begins by capturing the attributes of all Gaussians marked with KEA identity equal to 1 during initialization, establishing them as anchor points. With each densification step, these anchors are updated to mirror the present attributes of the Gaussians. Throughout the training phase, an intrapoint cloud loss, " + }, + { + "bbox": [ + 32, + 59, + 379, + 213 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ipc}" + }, + { + "bbox": [ + 32, + 59, + 379, + 213 + ], + "type": "text", + "content": " is utilized to compare the anchor state with the Gaussians' current state, maintaining that the Gaussians remain closely aligned with their initial anchors. " + }, + { + "bbox": [ + 32, + 59, + 379, + 213 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{ipc}" + }, + { + "bbox": [ + 32, + 59, + 379, + 213 + ], + "type": "text", + "content": " is defined as the weighted mean square error (MSE) between the anchor Gaussian and current Gaussian parameters with the older Gaussians getting higher weightage." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 32, + 214, + 379, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 214, + 379, + 261 + ], + "spans": [ + { + "bbox": [ + 32, + 214, + 379, + 261 + ], + "type": "text", + "content": "Regularizing Estimated Pose. Further, to optimize the estimated relative pose between subsequent Gaussian set, we introduce point cloud loss, " + }, + { + "bbox": [ + 32, + 214, + 379, + 261 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{pc}" + }, + { + "bbox": [ + 32, + 214, + 379, + 261 + ], + "type": "text", + "content": " similar as in [3]. 
Regularizing Estimated Pose. Further, to refine the estimated relative pose between subsequent Gaussian sets, we introduce a point cloud loss $\mathcal{L}_{pc}$, similar to [3]. As we expand the scene, $\mathcal{L}_{ipc}$ limits the deviation of the Gaussian parameters, while $\mathcal{L}_{pc}$ regularizes the all-inclusive pose estimation:

\mathcal{L}_{pc} = D_{\mathrm{Chamfer}}\left(\mathcal{M}_{i}^{*} \mathcal{H}_{i}^{*}, \mathcal{H}_{i+1}^{*}\right) \tag{11}

Given two Gaussians $h_i$ and $h_j$, each characterized by multiple parameters encapsulated in their parameter vectors $\pmb{\theta}_i$ and $\pmb{\theta}_j$ respectively, the Chamfer distance $D_{\mathrm{Chamfer}}$ between $h_i$ and $h_j$ can be formulated as:

D_{\mathrm{Chamfer}}\left(h_{i}, h_{j}\right) = \sum_{p \in \pmb{\theta}_{i}} \min_{q \in \pmb{\theta}_{j}} \|p - q\|^{2} + \sum_{q \in \pmb{\theta}_{j}} \min_{p \in \pmb{\theta}_{i}} \|q - p\|^{2} \tag{12}

This equation computes the Chamfer distance by summing the squared Euclidean distances from each parameter in $h_i$ to its closest counterpart in $h_j$, and vice versa, thereby quantifying the similarity between the two Gaussians across all included parameters such as color, opacity, etc. Combining all the loss components yields the total loss function during scene expansion,

\mathcal{L}_{T} = \lambda_{rgb}\, \mathcal{L}_{rgb} + \lambda_{KEA}\, \mathcal{L}_{KEA} + \lambda_{ipc}\, \mathcal{L}_{ipc} + \lambda_{pc}\, \mathcal{L}_{pc} \tag{13}

where $\lambda_{rgb}$, $\lambda_{KEA}$, $\lambda_{ipc}$, and $\lambda_{pc}$ act as weighting factors for the respective loss terms.
4 Evaluation

4.1 Implementation Details

In our approach, we employ PyTorch [33] for development, focusing on 3D Gaussian splatting. GPT-3.5 Turbo [5] identifies the editing attributes that define the KEA. For segmentation, SAM [20] is used to generate the masks based on the key editing attributes identifying the KEA. For zero-shot point tracking, we employ the point tracker proposed in [34]. The editing tasks are carried out by the Instruct-Pix2Pix [4] 2D diffusion model, with the masks incorporated to limit editing to the KEA. Additional details are in the supplementary material.

Fig. 4: Qualitative comparison of our method with IN2N [11] over two separate scenes. When the editing prompt requests "Give the wheels Blue Color and Make the recyclebins brown," IN2N [11] inadvertently alters the complete van color to blue as well, instead of just changing the tire color. Note that IN2N [11] uses poses from COLMAP, while 3DEgo estimates poses while constructing the 3D scene.

4.2 Baseline and Datasets

We carry out experiments across a variety of public datasets as well as our own GS25 dataset. The GS25 dataset comprises 25 monocular videos casually captured with mobile phones for comprehensive 3D scene analysis, which makes it well suited to exploring and improving 360-degree real-world scene reconstruction. To further assess the efficacy of the proposed 3D editing framework, we also conducted comparisons across five public datasets: (i) IN2N [11], (ii) Mip-NeRF [2], (iii) the NeRFstudio dataset [42], (iv) Tanks & Temples [21], and (v) CO3D-V2 [36]. We specifically validate the robustness of our approach on the CO3D dataset, which comprises thousands of object-centric videos. Our study introduces a unique problem, making direct comparisons with prior research challenging. Nonetheless, to assess the robustness of our method, we contrast it with state-of-the-art (SOTA) 3D editing techniques that rely on poses derived from COLMAP. Additionally, we present quantitative evaluations alongside pose-free 3D reconstruction approaches, specifically NoPe-NeRF [3] and BARF [25]. In the pose-free comparison, we substitute only our 3D scene reconstruction component with theirs while keeping our original editing framework unchanged. We present a time-cost analysis in Table 1, which underscores the rapid text-conditioned 3D reconstruction capabilities of 3DEgo.

Table 1: Average runtime efficiency across 25 edits from the GS25 dataset (approx. minutes).
MethodCOLMAPModel InitializationScene Editing
Instruct-N2N [11]13min22min250min
OursXX25min
", + "image_path": "760066c9f9ca3ceb60d2dd9260c07ed26cb6923817580a08fb52fd64f6ccc417.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 221, + 9, + 347, + 22 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 9, + 347, + 22 + ], + "spans": [ + { + "bbox": [ + 221, + 9, + 347, + 22 + ], + "type": "text", + "content": "3DEgo: 3D Editing on the Go!" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 369, + 10, + 378, + 18 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 10, + 378, + 18 + ], + "spans": [ + { + "bbox": [ + 369, + 10, + 378, + 18 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 35, + 156, + 130 + ], + "blocks": [ + { + "bbox": [ + 60, + 35, + 156, + 130 + ], + "lines": [ + { + "bbox": [ + 60, + 35, + 156, + 130 + ], + "spans": [ + { + "bbox": [ + 60, + 35, + 156, + 130 + ], + "type": "image", + "image_path": "925c3495878c636a1fbf26474f4b49f3010100b61042a60bafa656e3454642a8.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 90, + 132, + 128, + 140 + ], + "lines": [ + { + "bbox": [ + 90, + 132, + 128, + 140 + ], + "spans": [ + { + "bbox": [ + 90, + 132, + 128, + 140 + ], + "type": "text", + "content": "Original 3DGS" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 156, + 35, + 253, + 130 + ], + "blocks": [ + { + "bbox": [ + 156, + 35, + 253, + 130 + ], + "lines": [ + { + "bbox": [ + 156, + 35, + 253, + 130 + ], + "spans": [ + { + "bbox": [ + 156, + 35, + 253, + 130 + ], + "type": "image", + "image_path": "62102cc1824d996f42819b4d55fa360d73db2b34f855d4b9a0d4cc6849991742.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 180, + 132, + 232, + 139 + ], + "lines": [ + { + "bbox": [ + 180, + 132, + 232, + 139 + ], + "spans": [ + { + "bbox": [ + 180, + 132, + 232, + 139 + ], + "type": "text", + "content": "Gaussian Grouping" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 30, + 149, + 380, + 193 + ], + "lines": [ + { + "bbox": [ + 30, + 149, + 380, + 193 + ], + "spans": [ + { + "bbox": [ + 30, + 149, + 380, + 193 + ], + "type": "text", + "content": "Fig. 5: Our approach surpasses Gaussian Grouping [50] in 3D object elimination across different scenes from GS25 and Tanks & Temple datasets. 3DEgo is capable of eliminating substantial objects like statues from the entire scene while significantly minimizing artifacts and avoiding a blurred background." 
4.3 Qualitative Evaluation

As demonstrated in Figure 4, our method shows exceptional prowess in local editing, enabling precise modifications within specific regions of a 3D scene without affecting its overall integrity. It also excels at multi-attribute editing, seamlessly combining changes across color, texture, and geometry within a single coherent edit. We further evaluate our method on the object removal task. The goal of 3D object removal is to eliminate an object from a 3D environment, potentially leaving behind voids due to the lack of observational data. For this task, we identify and remove the regions based on the 2D mask $M$. Subsequently, we inpaint these "invisible regions" in the original 2D frames using LaMa [41]. In Figure 5, we demonstrate 3DEgo's effectiveness in object removal compared to Gaussian Grouping. Our method's reconstruction output notably surpasses that of Gaussian Grouping [50] in retaining spatial accuracy and ensuring consistency across multiple views.

Table 2: Comparing with pose-known methods. Quantitative evaluation of 200 edits across the GS25, IN2N, Mip-NeRF, NeRFstudio, Tanks & Temples, and CO3D-V2 datasets against methods that incorporate COLMAP poses. The top-performing results are emphasized in bold.
DatasetsDreamEditorIN2NOurs
CTIS↑CDCR↑E-PSNR↑CTIS↑CDCR↑E-PSNR↑CTIS↑CDCR↑E-PSNR↑
GS25 (Ours)0.1550.88622.7500.1420.89223.1300.1690.92523.660
Mip-NeRF0.1490.89623.9200.1640.91722.1700.1750.90124.250
NeRFstudio0.1560.90323.6700.1710.90925.1300.1630.93124.990
CO3D-V20.1740.91524.8800.1630.92425.1800.1790.93626.020
IN2N0.1670.92124.7800.1790.91026.5100.1830.92526.390
Tanks & Temples0.1500.89623.9700.1700.90123.1100.1640.91524.190
", + "image_path": "4e6b52468bedfb8ac8b76509fcd467b09868f63d817765b67e780b1350a16e3a.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 33, + 10, + 43, + 19 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 10, + 43, + 19 + ], + "spans": [ + { + "bbox": [ + 33, + 10, + 43, + 19 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 9, + 130, + 19 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 9, + 130, + 19 + ], + "spans": [ + { + "bbox": [ + 66, + 9, + 130, + 19 + ], + "type": "text", + "content": "U. Khalid et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 34, + 76, + 377, + 153 + ], + "blocks": [ + { + "bbox": [ + 31, + 33, + 381, + 67 + ], + "lines": [ + { + "bbox": [ + 31, + 33, + 381, + 67 + ], + "spans": [ + { + "bbox": [ + 31, + 33, + 381, + 67 + ], + "type": "text", + "content": "Table 3: Comparing With Pose-Unknown Methods. Quantitative analysis of 200 edits applied to six datasets, comparing methods proposed for NeRF reconstruction without known camera poses. The top-performing results are emphasized in bold." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 34, + 76, + 377, + 153 + ], + "lines": [ + { + "bbox": [ + 34, + 76, + 377, + 153 + ], + "spans": [ + { + "bbox": [ + 34, + 76, + 377, + 153 + ], + "type": "table", + "html": "
DatasetsBARF [25]Nope-NeRF [3]Ours
CTIS↑CDCR↑E-PSNR↑CTIS↑CDCR↑E-PSNR↑CTIS↑CDCR↑E-PSNR↑
GS25 (Ours)0.1390.79720.4780.1280.75319.6600.1690.92523.660
Mip-NeRF0.1340.80621.3320.1470.82018.7990.1750.90124.250
NeRFstudio0.1400.81320.1160.1380.77321.3600.1630.93124.990
CO3D-V20.1570.82021.1480.1290.82417.9710.1790.93626.020
IN2N0.1500.82922.0920.1610.81822.6040.1830.92526.390
Tanks & Temples0.1350.80621.5730.1570.81020.9040.1640.91524.190
", + "image_path": "1083c5f08b329a7f464636523f22a3edf554067018c5a7d3de1f0ed7c7317721.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 30, + 177, + 381, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 177, + 381, + 261 + ], + "spans": [ + { + "bbox": [ + 30, + 177, + 381, + 261 + ], + "type": "text", + "content": "data. For the object removal task, we identify and remove the regions based on the 2D mask, " + }, + { + "bbox": [ + 30, + 177, + 381, + 261 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 30, + 177, + 381, + 261 + ], + "type": "text", + "content": ". Subsequently, we focus on inpainting these \"invisible regions\" in the original 2D frames using LAMA [41]. In Figure 5, we demonstrate our 3DEgo's effectiveness in object removal compared to Gaussian Grouping. Our method's reconstruction output notably surpasses that of Gaussian Grouping [50] in terms of retaining spatial accuracy and ensuring consistency across multiple views." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 31, + 279, + 181, + 292 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 31, + 279, + 181, + 292 + ], + "spans": [ + { + "bbox": [ + 31, + 279, + 181, + 292 + ], + "type": "text", + "content": "4.4 Quantitative Evaluation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 30, + 300, + 197, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 300, + 197, + 467 + ], + "spans": [ + { + "bbox": [ + 30, + 300, + 197, + 467 + ], + "type": "text", + "content": "In our quantitative analysis, we employ three key metrics: CLIP Text-Image Direction Similarity (CTIS) [9], CLIP Direction Consistency Score (CDCR) [11], and Edit PSNR (EPSNR). We perform 200 edits across the six datasets listed above. We present quantitative comparisons with COLMAP-based 3D editing techniques in Table 2. Additionally, we extend our evaluation by integrating pose-free 3D reconstruction methods into our pipeline, with the performance outcomes detailed in Table 3." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 31, + 486, + 111, + 499 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 31, + 486, + 111, + 499 + ], + "spans": [ + { + "bbox": [ + 31, + 486, + 111, + 499 + ], + "type": "text", + "content": "5 Ablations" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 30, + 511, + 197, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 511, + 197, + 535 + ], + "spans": [ + { + "bbox": [ + 30, + 511, + 197, + 535 + ], + "type": "text", + "content": "To assess the influence of different elements within our framework, we em" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 30, + 536, + 381, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 536, + 381, + 584 + ], + "spans": [ + { + "bbox": [ + 30, + 536, + 381, + 584 + ], + "type": "text", + "content": "ploy PSNR, SSIM, and LPIPS metrics across several configurations. Given that images undergo editing before the training of a 3D model, our focus is on determining the effect of various losses on the model's rendering quality. 
The outcomes are documented in Table 4, showcasing IP2P+COLMAP as the baseline, where" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 220, + 328, + 360, + 413 + ], + "blocks": [ + { + "bbox": [ + 220, + 328, + 360, + 413 + ], + "lines": [ + { + "bbox": [ + 220, + 328, + 360, + 413 + ], + "spans": [ + { + "bbox": [ + 220, + 328, + 360, + 413 + ], + "type": "image", + "image_path": "79af30484e8ab9c49609aed1c8f35a64f4e7afcdbc1eacb37ba9f96cfa954664.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 203, + 421, + 380, + 477 + ], + "lines": [ + { + "bbox": [ + 203, + 421, + 380, + 477 + ], + "spans": [ + { + "bbox": [ + 203, + 421, + 380, + 477 + ], + "type": "text", + "content": "Fig.6: Our method, 3D Ego achieves precise editing without using any SfM poses. To construct the IP2P+COLMAP 3D scene, we train nefacto [42] model on IP2P [4] edited frames." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 221, + 9, + 347, + 21 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 9, + 347, + 21 + ], + "spans": [ + { + "bbox": [ + 221, + 9, + 347, + 21 + ], + "type": "text", + "content": "3DEgo: 3D Editing on the Go!" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 369, + 10, + 379, + 19 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 10, + 379, + 19 + ], + "spans": [ + { + "bbox": [ + 369, + 10, + 379, + 19 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 30, + 34, + 380, + 57 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 34, + 380, + 57 + ], + "spans": [ + { + "bbox": [ + 30, + 34, + 380, + 57 + ], + "type": "text", + "content": "images are edited using the standard IP2P approach [4] and COLMAP-derived poses are utilized for 3D scene construction." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 30, + 59, + 198, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 59, + 198, + 213 + ], + "spans": [ + { + "bbox": [ + 30, + 59, + 198, + 213 + ], + "type": "text", + "content": "Although the " + }, + { + "bbox": [ + 30, + 59, + 198, + 213 + ], + "type": "inline_equation", + "content": "\\mathrm{IP2P + COLMAP}" + }, + { + "bbox": [ + 30, + 59, + 198, + 213 + ], + "type": "text", + "content": " setup demonstrates limited textual fidelity due to editing inconsistencies (see Figure 6), we are only interested in the rendering quality in this analysis to ascertain our approach's effectiveness. Table 4 illustrates the effects of different optimization hyperparameters on the global scene expansion. The findings reveal that excluding " + }, + { + "bbox": [ + 30, + 59, + 198, + 213 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{KEA}" + }, + { + "bbox": [ + 30, + 59, + 198, + 213 + ], + "type": "text", + "content": " in the scene expansion process minimally affects ren" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 30, + 213, + 209, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 213, + 209, + 238 + ], + "spans": [ + { + "bbox": [ + 30, + 213, + 209, + 238 + ], + "type": "text", + "content": "dering quality. 
On the other hand, densification resulting in the inferior" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 211, + 116, + 376, + 188 + ], + "blocks": [ + { + "bbox": [ + 204, + 93, + 379, + 114 + ], + "lines": [ + { + "bbox": [ + 204, + 93, + 379, + 114 + ], + "spans": [ + { + "bbox": [ + 204, + 93, + 379, + 114 + ], + "type": "text", + "content": "Table 4: Ablation study results on GS25 dataset." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 211, + 116, + 376, + 188 + ], + "lines": [ + { + "bbox": [ + 211, + 116, + 376, + 188 + ], + "spans": [ + { + "bbox": [ + 211, + 116, + 376, + 188 + ], + "type": "table", + "html": "
MethodPSNR↑SSIM↑LPIPS↓
Ours27.860.900.18
IP2P+COLMAP23.870.790.23
Ours w/o LKEA26.730.880.19
Ours w/o Lipc22.460.780.24
Ours w/o Lpc25.180.840.20
", + "image_path": "5bd7ca0f1f5513f5d6f14d719a3dc067ec42dcd561ad2c10aca81becb2c984eb.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 31, + 255, + 116, + 268 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 31, + 255, + 116, + 268 + ], + "spans": [ + { + "bbox": [ + 31, + 255, + 116, + 268 + ], + "type": "text", + "content": "6 Limitation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 30, + 279, + 198, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 279, + 198, + 434 + ], + "spans": [ + { + "bbox": [ + 30, + 279, + 198, + 434 + ], + "type": "text", + "content": "Our approach depends on the pretrained IP2P model [4], which has inherent limitations, especially evident in specific scenarios. For instance, Figure 7 shows the challenge with the prompt \"Make the car golden and give wheels blue color\". Unlike IN2N [11], which introduces unspecific color changes on the van's windows. Our method offers more targeted editing but falls short of generating ideal results due to IP2P's limitations in handling precise editing tas" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 31, + 452, + 119, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 31, + 452, + 119, + 464 + ], + "spans": [ + { + "bbox": [ + 31, + 452, + 119, + 464 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 30, + 476, + 380, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 476, + 380, + 583 + ], + "spans": [ + { + "bbox": [ + 30, + 476, + 380, + 583 + ], + "type": "text", + "content": "3DEgo marks a pivotal advancement in 3D scene reconstruction from monocular videos, eliminating the need for conventional pose estimation methods and model initialization. Our method integrates frame-by-frame editing with advanced consistency techniques to efficiently generate photorealistic 3D scenes directly from textual prompts. Demonstrated across multiple datasets, our approach showcases superior editing speed, precision, and flexibility. 3DEgo not only simplifies the 3D editing process but also broadens the scope for creative content generation from readily available video sources. This work lays the groundwork for future innovations in accessible and intuitive 3D content creation tools." + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 205, + 306, + 291, + 336 + ], + "blocks": [ + { + "bbox": [ + 205, + 306, + 291, + 336 + ], + "lines": [ + { + "bbox": [ + 205, + 306, + 291, + 336 + ], + "spans": [ + { + "bbox": [ + 205, + 306, + 291, + 336 + ], + "type": "image", + "image_path": "3f096e8aea33f2dc404290312cea3cb32bce9fe044e4fe46daac12b6c3cfcdfc.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 230, + 336, + 263, + 341 + ], + "lines": [ + { + "bbox": [ + 230, + 336, + 263, + 341 + ], + "spans": [ + { + "bbox": [ + 230, + 336, + 263, + 341 + ], + "type": "text", + "content": "Original 3D Model" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 203, + 351, + 379, + 395 + ], + "lines": [ + { + "bbox": [ + 203, + 351, + 379, + 395 + ], + "spans": [ + { + "bbox": [ + 203, + 351, + 379, + 395 + ], + "type": "text", + "content": "Fig. 7: Due to the limitations of the IP2P model, our method inadvertently alters the colors of the van's windows, which is not the desired outcome." 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 291, + 306, + 379, + 335 + ], + "blocks": [ + { + "bbox": [ + 291, + 306, + 379, + 335 + ], + "lines": [ + { + "bbox": [ + 291, + 306, + 379, + 335 + ], + "spans": [ + { + "bbox": [ + 291, + 306, + 379, + 335 + ], + "type": "image", + "image_path": "f5aefd096304432b44d46e7031178743d1ed19d68f9ed2aa66c3a3e7fc7a8f7b.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 295, + 336, + 377, + 341 + ], + "lines": [ + { + "bbox": [ + 295, + 336, + 377, + 341 + ], + "spans": [ + { + "bbox": [ + 295, + 336, + 377, + 341 + ], + "type": "text", + "content": "\"Make the car golden and give wheels blue color\"" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 33, + 10, + 43, + 19 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 10, + 43, + 19 + ], + "spans": [ + { + "bbox": [ + 33, + 10, + 43, + 19 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 9, + 130, + 19 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 9, + 130, + 19 + ], + "spans": [ + { + "bbox": [ + 66, + 9, + 130, + 19 + ], + "type": "text", + "content": "U. Khalid et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 32, + 33, + 140, + 47 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 33, + 140, + 47 + ], + "spans": [ + { + "bbox": [ + 32, + 33, + 140, + 47 + ], + "type": "text", + "content": "Acknowledgement" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 31, + 57, + 380, + 81 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 31, + 57, + 380, + 81 + ], + "spans": [ + { + "bbox": [ + 31, + 57, + 380, + 81 + ], + "type": "text", + "content": "This work was partially supported by the NSF under Grant Numbers OAC-1910469 and OAC-2311245." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 32, + 100, + 98, + 113 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 100, + 98, + 113 + ], + "spans": [ + { + "bbox": [ + 32, + 100, + 98, + 113 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 33, + 124, + 380, + 583 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 36, + 124, + 379, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 36, + 124, + 379, + 168 + ], + "spans": [ + { + "bbox": [ + 36, + 124, + 379, + 168 + ], + "type": "text", + "content": "1. Bao, C., Zhang, Y., Yang, B., Fan, T., Yang, Z., Bao, H., Zhang, G., Cui, Z.: Sine: Semantic-driven image-based nerf editing with prior-guided editing field. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 20919-20929 (2023)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 36, + 169, + 380, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 36, + 169, + 380, + 212 + ], + "spans": [ + { + "bbox": [ + 36, + 169, + 380, + 212 + ], + "type": "text", + "content": "2. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mipnerf 360: Unbounded anti-aliased neural radiance fields. 
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5470-5479 (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 36, + 212, + 380, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 36, + 212, + 380, + 245 + ], + "spans": [ + { + "bbox": [ + 36, + 212, + 380, + 245 + ], + "type": "text", + "content": "3. Bian, W., Wang, Z., Li, K., Bian, J.W., Prisacariu, V.A.: Nope-nerf: Optimising neural radiance field with no pose prior. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4160-4169 (2023)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 37, + 246, + 380, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 37, + 246, + 380, + 277 + ], + "spans": [ + { + "bbox": [ + 37, + 246, + 380, + 277 + ], + "type": "text", + "content": "4. Brooks, T., Holynski, A., Efros, A.A.: Instructpix2pix: Learning to follow image editing instructions. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 18392-18402 (2023)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 37, + 277, + 380, + 310 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 37, + 277, + 380, + 310 + ], + "spans": [ + { + "bbox": [ + 37, + 277, + 380, + 310 + ], + "type": "text", + "content": "5. Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J.D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al.: Language models are few-shot learners. Advances in neural information processing systems 33, 1877-1901 (2020)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 37, + 310, + 380, + 343 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 37, + 310, + 380, + 343 + ], + "spans": [ + { + "bbox": [ + 37, + 310, + 380, + 343 + ], + "type": "text", + "content": "6. Chiang, P.Z., Tsai, M.S., Tseng, H.Y., Lai, W.S., Chiu, W.C.: Stylizing 3d scene via implicit representation and hypernetwork. In: Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 1475-1484 (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 37, + 343, + 380, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 37, + 343, + 380, + 365 + ], + "spans": [ + { + "bbox": [ + 37, + 343, + 380, + 365 + ], + "type": "text", + "content": "7. Dong, J., Wang, Y.X.: Vica-nerf: View-consistency-aware 3d editing of neural radiance fields. Advances in Neural Information Processing Systems 36 (2024)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 37, + 365, + 380, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 37, + 365, + 380, + 386 + ], + "spans": [ + { + "bbox": [ + 37, + 365, + 380, + 386 + ], + "type": "text", + "content": "8. Fu, Y., Liu, S., Kulkarni, A., Kautz, J., Efros, A.A., Wang, X.: Colmap-free 3d gaussian splatting (2023), https://arxiv.org/abs/2312.07504" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 37, + 387, + 380, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 37, + 387, + 380, + 419 + ], + "spans": [ + { + "bbox": [ + 37, + 387, + 380, + 419 + ], + "type": "text", + "content": "9. Gal, R., Patashnik, O., Maron, H., Chechik, G., Cohen-Or, D.: Stylegan-nada: Clipguided domain adaptation of image generators. 
arXiv preprint arXiv:2108.00946 (2021)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 33, + 420, + 379, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 420, + 379, + 441 + ], + "spans": [ + { + "bbox": [ + 33, + 420, + 379, + 441 + ], + "type": "text", + "content": "10. Gao, W., Aigerman, N., Groueix, T., Kim, V.G., Hanocka, R.: Textdeformer: Geometry manipulation using text guidance. arXiv preprint arXiv:2304.13348 (2023)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 33, + 442, + 379, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 442, + 379, + 474 + ], + "spans": [ + { + "bbox": [ + 33, + 442, + 379, + 474 + ], + "type": "text", + "content": "11. Haque, A., Tancik, M., Efros, A.A., Holynski, A., Kanazawa, A.: Instruct-nerf2nerf: Editing 3d scenes with instructions. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 19740-19750 (2023)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 33, + 474, + 379, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 474, + 379, + 506 + ], + "spans": [ + { + "bbox": [ + 33, + 474, + 379, + 506 + ], + "type": "text", + "content": "12. Hertz, A., Mokady, R., Tenenbaum, J., Aberman, K., Pritch, Y., Cohen-Or, D.: Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626 (2022)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 33, + 507, + 379, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 507, + 379, + 539 + ], + "spans": [ + { + "bbox": [ + 33, + 507, + 379, + 539 + ], + "type": "text", + "content": "13. Hong, F., Zhang, M., Pan, L., Cai, Z., Yang, L., Liu, Z.: Avatarclip: Zero-shot text-driven generation and animation of 3d avatars. ACM Transactions on Graphics (TOG) 41(4), 1-19 (2022)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 33, + 539, + 379, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 539, + 379, + 583 + ], + "spans": [ + { + "bbox": [ + 33, + 539, + 379, + 583 + ], + "type": "text", + "content": "14. Huang, Y.H., He, Y., Yuan, Y.J., Lai, Y.K., Gao, L.: Stylizednerf: consistent 3d scene stylization as stylized nerf via 2d-3d mutual learning. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 18342-18352 (2022)" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 221, + 9, + 347, + 21 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 9, + 347, + 21 + ], + "spans": [ + { + "bbox": [ + 221, + 9, + 347, + 21 + ], + "type": "text", + "content": "3DEgo: 3D Editing on the Go!" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 369, + 10, + 379, + 19 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 10, + 379, + 19 + ], + "spans": [ + { + "bbox": [ + 369, + 10, + 379, + 19 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 33, + 35, + 379, + 583 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 33, + 35, + 379, + 57 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 35, + 379, + 57 + ], + "spans": [ + { + "bbox": [ + 33, + 35, + 379, + 57 + ], + "type": "text", + "content": "15. 
Jeong, Y., Ahn, S., Choy, C., Anandkumar, A., Cho, M., Park, J.: Self-calibrating neural radiance fields. In: ICCV (2021)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 33, + 57, + 379, + 79 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 57, + 379, + 79 + ], + "spans": [ + { + "bbox": [ + 33, + 57, + 379, + 79 + ], + "type": "text", + "content": "16. Karim, N., Khalid, U., Iqbal, H., Hua, J., Chen, C.: Free-editor: Zero-shot text-driven 3d scene editing. arXiv preprint arXiv:2312.13663 (2023)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 33, + 79, + 379, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 79, + 379, + 110 + ], + "spans": [ + { + "bbox": [ + 33, + 79, + 379, + 110 + ], + "type": "text", + "content": "17. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (ToG) 42(4), 1-14 (2023)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 33, + 110, + 379, + 132 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 110, + 379, + 132 + ], + "spans": [ + { + "bbox": [ + 33, + 110, + 379, + 132 + ], + "type": "text", + "content": "18. Khalid, U., Iqbal, H., Karim, N., Hua, J., Chen, C.: Latentedirector: Text driven local editing of 3d scenes. arXiv preprint arXiv:2312.09313 (2023)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 33, + 132, + 379, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 132, + 379, + 175 + ], + "spans": [ + { + "bbox": [ + 33, + 132, + 379, + 175 + ], + "type": "text", + "content": "19. Kim, S., Lee, K., Choi, J.S., Jeong, J., Sohn, K., Shin, J.: Collaborative score distillation for consistent visual editing. In: Thirty-seventh Conference on Neural Information Processing Systems (2023), https://openreview.net/forum?id=0tEjORCGFD" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 33, + 175, + 379, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 175, + 379, + 207 + ], + "spans": [ + { + "bbox": [ + 33, + 175, + 379, + 207 + ], + "type": "text", + "content": "20. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., et al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 33, + 207, + 379, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 207, + 379, + 229 + ], + "spans": [ + { + "bbox": [ + 33, + 207, + 379, + 229 + ], + "type": "text", + "content": "21. Knapitsch, A., Park, J., Zhou, Q.Y., Koltun, V.: Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Transactions on Graphics (2017)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 33, + 229, + 379, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 229, + 379, + 250 + ], + "spans": [ + { + "bbox": [ + 33, + 229, + 379, + 250 + ], + "type": "text", + "content": "22. Kobayashi, S., Matsumoto, E., Sitzmann, V.: Decomposing nerf for editing via feature field distillation. arXiv preprint arXiv:2205.15585 (2022)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 33, + 250, + 379, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 250, + 379, + 282 + ], + "spans": [ + { + "bbox": [ + 33, + 250, + 379, + 282 + ], + "type": "text", + "content": "23. 
Li, Y., Lin, Z.H., Forsyth, D., Huang, J.B., Wang, S.: Climatenerf: Physically-based neural rendering for extreme climate synthesis. arXiv e-prints pp. arXiv-2211 (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 33, + 282, + 379, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 282, + 379, + 315 + ], + "spans": [ + { + "bbox": [ + 33, + 282, + 379, + 315 + ], + "type": "text", + "content": "24. Li, Y., Dou, Y., Shi, Y., Lei, Y., Chen, X., Zhang, Y., Zhou, P., Ni, B.: Focaldreamer: Text-driven 3d editing via focal-fusion assembly. arXiv preprint arXiv:2308.10608 (2023)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 33, + 315, + 379, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 315, + 379, + 335 + ], + "spans": [ + { + "bbox": [ + 33, + 315, + 379, + 335 + ], + "type": "text", + "content": "25. Lin, C.H., Ma, W.C., Torralba, A., Lucey, S.: Barf: Bundle-adjusting neural radiance fields. In: ICCV (2021)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 33, + 335, + 379, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 335, + 379, + 357 + ], + "spans": [ + { + "bbox": [ + 33, + 335, + 379, + 357 + ], + "type": "text", + "content": "26. Liu, H.K., Shen, I., Chen, B.Y., et al.: Nerf-in: Free-form nerf inpainting with rgb-d priors. arXiv preprint arXiv:2206.04901 (2022)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 33, + 357, + 379, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 357, + 379, + 401 + ], + "spans": [ + { + "bbox": [ + 33, + 357, + 379, + 401 + ], + "type": "text", + "content": "27. Long, X., Guo, Y.C., Lin, C., Liu, Y., Dou, Z., Liu, L., Ma, Y., Zhang, S.H., Habermann, M., Theobalt, C., et al.: Wonder3d: Single image to 3d using cross-domain diffusion. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9970-9980 (2024)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 33, + 401, + 379, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 401, + 379, + 422 + ], + "spans": [ + { + "bbox": [ + 33, + 401, + 379, + 422 + ], + "type": "text", + "content": "28. Michel, O., Bar-On, R., Liu, R., et al.: Text2mesh: Text-driven neural stylization for meshes. In: CVPR 2022. pp. 13492-13502 (2022)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 33, + 422, + 379, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 422, + 379, + 444 + ], + "spans": [ + { + "bbox": [ + 33, + 422, + 379, + 444 + ], + "type": "text", + "content": "29. Nguyen-Phuoc, T., Liu, F., Xiao, L.: Snerf: stylized neural implicit representations for 3d scenes. arXiv preprint arXiv:2207.02363 (2022)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 33, + 444, + 379, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 444, + 379, + 476 + ], + "spans": [ + { + "bbox": [ + 33, + 444, + 379, + 476 + ], + "type": "text", + "content": "30. Nichol, A., Dhariwal, P., Ramesh, A., Shyam, P., Mishkin, P., McGrew, B., Sutskever, I., Chen, M.: Glide: Towards photorealistic image generation and editing with text-guided diffusion models. 
arXiv preprint arXiv:2112.10741 (2021)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 33, + 476, + 379, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 476, + 379, + 497 + ], + "spans": [ + { + "bbox": [ + 33, + 476, + 379, + 497 + ], + "type": "text", + "content": "31. Noguchi, A., Sun, X., Lin, S., Harada, T.: Neural articulated radiance field. In: ICCV 2021. pp. 5762-5772 (2021)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 33, + 497, + 379, + 519 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 497, + 379, + 519 + ], + "spans": [ + { + "bbox": [ + 33, + 497, + 379, + 519 + ], + "type": "text", + "content": "32. Park, H.S., Jun, C.H.: A simple and fast algorithm for k-medoids clustering. Expert systems with applications 36(2), 3336-3341 (2009)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 33, + 519, + 379, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 519, + 379, + 562 + ], + "spans": [ + { + "bbox": [ + 33, + 519, + 379, + 562 + ], + "type": "text", + "content": "33. Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., et al.: Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems 32 (2019)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 33, + 562, + 379, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 562, + 379, + 583 + ], + "spans": [ + { + "bbox": [ + 33, + 562, + 379, + 583 + ], + "type": "text", + "content": "34. Rajic, F., Ke, L., Tai, Y.W., Tang, C.K., Danelljan, M., Yu, F.: Segment anything meets point tracking. arXiv preprint arXiv:2307.01197 (2023)" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 33, + 10, + 43, + 19 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 10, + 43, + 19 + ], + "spans": [ + { + "bbox": [ + 33, + 10, + 43, + 19 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 65, + 9, + 130, + 19 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 9, + 130, + 19 + ], + "spans": [ + { + "bbox": [ + 65, + 9, + 130, + 19 + ], + "type": "text", + "content": "U. Khalid et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 31, + 35, + 379, + 584 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 31, + 35, + 379, + 68 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 31, + 35, + 379, + 68 + ], + "spans": [ + { + "bbox": [ + 31, + 35, + 379, + 68 + ], + "type": "text", + "content": "35. Ramesh, A., Dhariwal, P., Nichol, A., Chu, C., Chen, M.: Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125 (2022)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 31, + 68, + 379, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 31, + 68, + 379, + 112 + ], + "spans": [ + { + "bbox": [ + 31, + 68, + 379, + 112 + ], + "type": "text", + "content": "36. Reizenstein, J., Shapovalov, R., Henzler, P., Sbordone, L., Labatut, P., Novotny, D.: Common objects in 3d: Large-scale learning and evaluation of real-life 3d category reconstruction. 
In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10901-10911 (2021)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 32, + 112, + 379, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 112, + 379, + 145 + ], + "spans": [ + { + "bbox": [ + 32, + 112, + 379, + 145 + ], + "type": "text", + "content": "37. Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: CVPR 2022. pp. 10684-10695 (2022)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 32, + 145, + 379, + 188 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 145, + 379, + 188 + ], + "spans": [ + { + "bbox": [ + 32, + 145, + 379, + 188 + ], + "type": "text", + "content": "38. Ruiz, N., Li, Y., Jampani, V., Pritch, Y., Rubinstein, M., Aberman, K.: Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 22500-22510 (2023)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 32, + 189, + 379, + 210 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 189, + 379, + 210 + ], + "spans": [ + { + "bbox": [ + 32, + 189, + 379, + 210 + ], + "type": "text", + "content": "39. Sahara, C., Chan, W., Saxena, S.e.a.: Photorealistic text-to-image diffusion models with deep language understanding. NeurIPS 2022 35, 36479-36494 (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 32, + 210, + 379, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 210, + 379, + 222 + ], + "spans": [ + { + "bbox": [ + 32, + 210, + 379, + 222 + ], + "type": "text", + "content": "40. Schonberger, J.L., Frahm, J.M.: Structure-from-motion revisited. In: CVPR (2016)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 32, + 222, + 379, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 222, + 379, + 265 + ], + "spans": [ + { + "bbox": [ + 32, + 222, + 379, + 265 + ], + "type": "text", + "content": "41. Suvorov, R., Logacheva, E., Mashikhin, A., Remizova, A., Ashukha, A., Silvestrov, A., Kong, N., Goka, H., Park, K., Lempitsky, V.: Resolution-robust large mask inpainting with fourier convolutions. In: Proceedings of the IEEE/CVF winter conference on applications of computer vision. pp. 2149-2159 (2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 32, + 265, + 379, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 265, + 379, + 309 + ], + "spans": [ + { + "bbox": [ + 32, + 265, + 379, + 309 + ], + "type": "text", + "content": "42. Tancik, M., Weber, E., Ng, E., Li, R., Yi, B., Wang, T., Kristoffersen, A., Austin, J., Salahi, K., Ahuja, A., et al.: Nerfstudio: A modular framework for neural radiance field development. In: ACM SIGGRAPH 2023 Conference Proceedings. pp. 1-12 (2023)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 32, + 309, + 379, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 309, + 379, + 342 + ], + "spans": [ + { + "bbox": [ + 32, + 309, + 379, + 342 + ], + "type": "text", + "content": "43. Tschernezki, V., Laina, I., Larlus, D., Vedaldi, A.: Neural feature fusion fields: 3d distillation of self-supervised 2d image representations. In: 2022 International Conference on 3D Vision (3DV). pp. 443-453. 
IEEE (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 32, + 342, + 379, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 342, + 379, + 364 + ], + "spans": [ + { + "bbox": [ + 32, + 342, + 379, + 364 + ], + "type": "text", + "content": "44. Wang, C., Chai, M., He, M., et al.: Clip-nerf: Text-and-image driven manipulation of neural radiance fields. In: CVPR 2022. pp. 3835-3844 (2022)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 32, + 364, + 379, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 364, + 379, + 397 + ], + "spans": [ + { + "bbox": [ + 32, + 364, + 379, + 397 + ], + "type": "text", + "content": "45. Wang, C., Jiang, R., Chai, M., He, M., Chen, D., Liao, J.: Nerf-art: Text-driven neural radiance fields stylization. IEEE Transactions on Visualization and Computer Graphics (2023)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 32, + 397, + 379, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 397, + 379, + 429 + ], + "spans": [ + { + "bbox": [ + 32, + 397, + 379, + 429 + ], + "type": "text", + "content": "46. Weng, H., Yang, T., Wang, J., Li, Y., Zhang, T., Chen, C., Zhang, L.: Consistent123: Improve consistency for one image to 3d object synthesis. arXiv preprint arXiv:2310.08092 (2023)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 32, + 430, + 379, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 430, + 379, + 452 + ], + "spans": [ + { + "bbox": [ + 32, + 430, + 379, + 452 + ], + "type": "text", + "content": "47. Wu, Q., Tan, J., Xu, K.: Palettenerf: Palette-based color editing for nerfs. arXiv preprint arXiv:2212.12871 (2022)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 32, + 452, + 379, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 452, + 379, + 485 + ], + "spans": [ + { + "bbox": [ + 32, + 452, + 379, + 485 + ], + "type": "text", + "content": "48. Xu, T., Harada, T.: Deforming radiance fields with cages. In: Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXIII. pp. 159-175. Springer (2022)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 32, + 485, + 379, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 485, + 379, + 528 + ], + "spans": [ + { + "bbox": [ + 32, + 485, + 379, + 528 + ], + "type": "text", + "content": "49. Yang, B., Bao, C., Zeng, J., Bao, H., Zhang, Y., Cui, Z., Zhang, G.: Neumesh: Learning disentangled neural mesh-based implicit field for geometry and texture editing. In: European Conference on Computer Vision. pp. 597-614. Springer (2022)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 32, + 528, + 379, + 551 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 528, + 379, + 551 + ], + "spans": [ + { + "bbox": [ + 32, + 528, + 379, + 551 + ], + "type": "text", + "content": "50. Ye, M., Danelljan, M., Yu, F., Ke, L.: Gaussian grouping: Segment and edit anything in 3d scenes. arXiv preprint arXiv:2312.00732 (2023)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 32, + 551, + 379, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 551, + 379, + 584 + ], + "spans": [ + { + "bbox": [ + 32, + 551, + 379, + 584 + ], + "type": "text", + "content": "51. Zhang, K., Kolkin, N., Bi, S., Luan, F., Xu, Z., Shechtman, E., Snavely, N.: Arf: Artistic radiance fields. 
In: European Conference on Computer Vision. pp. 717-733. Springer (2022)" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 221, + 9, + 347, + 21 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 9, + 347, + 21 + ], + "spans": [ + { + "bbox": [ + 221, + 9, + 347, + 21 + ], + "type": "text", + "content": "3DEgo: 3D Editing on the Go!" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 369, + 10, + 379, + 19 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 10, + 379, + 19 + ], + "spans": [ + { + "bbox": [ + 369, + 10, + 379, + 19 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 32, + 35, + 380, + 68 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 35, + 380, + 68 + ], + "spans": [ + { + "bbox": [ + 32, + 35, + 380, + 68 + ], + "type": "text", + "content": "52. Zhuang, J., Wang, C., Lin, L., Liu, L., Li, G.: Dreameditor: Text-driven 3d scene editing with neural fields. In: SIGGRAPH Asia 2023 Conference Papers. pp. 1-10 (2023)" + } + ] + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 33, + 10, + 43, + 19 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 10, + 43, + 19 + ], + "spans": [ + { + "bbox": [ + 33, + 10, + 43, + 19 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 9, + 130, + 19 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 9, + 130, + 19 + ], + "spans": [ + { + "bbox": [ + 66, + 9, + 130, + 19 + ], + "type": "text", + "content": "U. Khalid et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 17 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/7528968f-06f7-4c18-aa8f-783ee6c0a1d6_content_list.json b/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/7528968f-06f7-4c18-aa8f-783ee6c0a1d6_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..8bc81ee85d2fcf7db304d66f356f80ef47918aaf --- /dev/null +++ b/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/7528968f-06f7-4c18-aa8f-783ee6c0a1d6_content_list.json @@ -0,0 +1,1660 @@ +[ + { + "type": "text", + "text": "3DFG-PIFu: 3D Feature Grids for Human Digitization from Sparse Views", + "text_level": 1, + "bbox": [ + 259, + 140, + 741, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kennard Yanting Chan $^{1,2}$ , Fayao Liu $^{2}$ , Guosheng Lin $^{1}$ , Chuan Sheng Foo $^{2,3}$ , and Weisi Lin $^{1}$", + "bbox": [ + 225, + 210, + 776, + 242 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 Nanyang Technological University, Singapore", + "$^{2}$ Institute for Infocomm Research, A*STAR", + "3 Centre for Frontier AI Research, A*STAR" + ], + "bbox": [ + 341, + 253, + 658, + 295 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. Pixel-aligned implicit models, such as Multi-view PIFu, Deep-MultiCap, DoubleField, and SeSDF, are well-established methods for reconstructing a clothed human from sparse views. 
However, given $V$ images, these models would only combine features from these images in a point-wise and localized manner. In other words, the $V$ images are processed individually and are only combined in a very narrow fashion at the end of the pipeline. To a large extent, this defeats the purpose of having multi-view information since the multi-view task in question is predominantly treated as a single-view task. To resolve this, we introduce 3DFG-PIFu, a pixel-aligned implicit model that exploits multi-view information right from the start and all the way to the end of the pipeline. Our 3DFG-PIFu makes use of 3D Feature Grids to combine features from $V$ images in a global manner (rather than point-wise or localized) and throughout the pipeline. Other than the 3D Feature Grids, 3DFG-PIFu also proposes an iterative mechanism that refines and updates an existing output human mesh using the different views. Moreover, 3DFG-PIFu introduces SDF-based SMPL-X features, which is a new method of incorporating a SMPL-X mesh into a pixel-aligned implicit model. Our experiments show that 3DFG-PIFu significantly outperforms SOTA models. Our code is released at https://github.com/kcyt/3DFG-PIFu.", + "bbox": [ + 261, + 330, + 743, + 609 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: 3D Clothed Human Reconstruction from Sparse Views $\\cdot$ 3D Feature Grids $\\cdot$ Pixel-aligned Implicit Models", + "bbox": [ + 261, + 619, + 740, + 648 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 672, + 375, + 690 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The field of 3D reconstruction of human bodies has gained considerable interest due to its potential use in various domains such as virtual reality, game production, and 3D printing. Pixel-aligned implicit models, such as Multi-view PIFu [13] DeepMultiCap [22], DoubleField [15], and SeSDF [1] are an influential class of deep learning methods for reconstructing clothed human bodies from sparse views. These models learn an implicit function that represents the surface of a human body. During testing, the learned implicit function is sampled using a grid of uniformly-spaced sample points. For each sample point, the learned implicit function (or the model) will return a predicted occupancy label (i.e.", + "bbox": [ + 212, + 703, + 787, + 843 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/48ec879dd53d26453007c6276e8dd7145b694c8b9d4902b7a817233b8c82fbb3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 222, + 142, + 777, + 241 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/063c760593ad20299e6bd7f63b1becdb643b372ebc02208d51e045ed0b6c34dc.jpg", + "image_caption": [ + "Fig. 1: Our models (last two columns) vs SOTA models.", + "Fig. 2: (a) Existing multi-view pixel-aligned implicit models vs (b) Our 3DFG-PIFu. whether the sample point is 'inside' or 'outside' of a human body surface). Once a grid of predicted occupancy labels is obtained, a human body mesh can be extracted from this grid using the Marching Cubes algorithm [11]." + ], + "image_footnote": [], + "bbox": [ + 251, + 257, + 743, + 547 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In order to predict the occupancy labels, existing multi-view pixel-aligned implicit models [1, 13, 15, 22], when given $V$ views or images, would compute $V$ different point embeddings for each sample point. This is illustrated in Fig. 
2a for the case where $V = 2$ . For each sample point, its $V$ point embeddings would be fused together into a single point embedding via either simple averaging [13] or weighted averaging [1, 15, 22], as illustrated in the same figure. These fused point embeddings are then converted into predicted occupancy labels, from which a human body mesh can be obtained. It is important to note that the \"Point Embeddings for View 1\" grid and the \"Point Embeddings for View 2\" grid in Fig. 2a are in different 3D coordinate spaces. The former is in the 3D camera space of View 1, and the latter is in the 3D camera space of View 2. This means that a point located at the top left corner of a grid may not correspond to the top left corner of another grid. We let the \"Fused Point Embeddings\" grid follow the 3D camera space of View 1 (It is possible to choose another 3D camera space, but that is trivial).", + "bbox": [ + 212, + 613, + 787, + 840 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "K. Y. Chan et al.", + "bbox": [ + 271, + 114, + 388, + 126 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As shown in Fig. 2a, there are two problems with existing multi-view pixel-aligned implicit models. 1. Firstly, the fusion of View 1 and View 2 are carried out in a point-wise and very localized manner. This is a problem because, as shown in the bidirectional red dashed arrow in Fig. 2a, there is no interaction between fused point embeddings, even if they are located close to each other. So if there is a sample point A that is closely surrounded by ten sample points, the existing multi-view pixel-aligned implicit models may assign those ten points with the same label and yet assign point A with an opposite label, which is an obvious error that would lead to a floating artefact. 2. Secondly, the fusion of View 1 and View 2 occurs at the end of the pipeline in a very simple manner (either simple or weighted averaging (e.g. attention)). To a large extent, the existing multiview pixel-aligned implicit models are not very different from their single-view counterparts except for the simplistic point-wise fusion of point embeddings at the end of the pipeline.", + "bbox": [ + 212, + 146, + 787, + 357 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Hence, we propose 3DFG-PIFu, a pixel-aligned implicit model that rethinks how multi-view information is incorporated in its pipeline. One key feature of 3DFG-PIFu is its use of 3D Feature Grid(s). As seen in Fig. 2b, 3DFG-PIFu makes use of 3D Feature Grid(s) to extract structural information from View 2. The 3D Feature Grid, due to its inherent design, is able to easily orient the extracted information to a different camera space. Thus, we re-orient the 3D Feature Grid from the 3D camera space of View 2 to the 3D camera space of View 1. Now aligned with View 1, the transformed 3D Feature Grid can be concatenated with View 1 and processed by a deep neural network to form 'Fused Point Embeddings'. These fused point embeddings will then be further refined using the fine-grained information from View 2 (Section 3.2).", + "bbox": [ + 212, + 359, + 787, + 525 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Crucially, this means that, unlike existing models, the fusion of multi-view information in 3DFG-PIFu occurs from the start to the end of the pipeline. 
Moreover, the multi-view fusion in 3DFG-PIFu occurs in a global and broad manner (rather than point-wise and localized) as information from View 2 is allowed to influence each and every fused point embedding.", + "bbox": [ + 212, + 527, + 787, + 602 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In total, 3DFG-PIFu makes three contributions: 1. The aforementioned 3D Feature Grids that fuse multi-view information (Section 3.1). 2. An iterative mechanism that refines and updates an existing output human mesh using the fine-grained information from the different views (Section 3.2). 3. Introduction of SDF-based SMPL-X features, which is a new method of incorporating a SMPL-X mesh into a pixel-aligned implicit model (Section 3.3).", + "bbox": [ + 212, + 603, + 787, + 694 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 215, + 724, + 387, + 739 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Human Reconstruction from Sparse Views", + "text_level": 1, + "bbox": [ + 214, + 762, + 612, + 777 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Methods that reconstruct a human body mesh from a sparse number of images can be broadly classified into two classes: Parametric approaches and non-parametric approaches.", + "bbox": [ + 212, + 794, + 785, + 839 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "3DFG-PIFu", + "bbox": [ + 648, + 114, + 730, + 126 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Parametric approaches, such as [7,9,10,20], reconstruct a human body surface by predicting parameters of a human parametric model (e.g. SMPL-X [12]). However, these methods can only produce human body meshes that are clothless.", + "bbox": [ + 212, + 146, + 782, + 191 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "On the other hand, non-parametric methods do not use a human parametric model. An important subclass of non-parametric methods is pixel-aligned implicit models. There are other subclasses like NERF methods (e.g. [21]), but they have yet to outperform pixel-aligned implicit models.", + "bbox": [ + 212, + 191, + 782, + 251 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Pixel-aligned implicit models can be single-view (e.g. [2,5,6]) or multi-view (e.g. Multi-view PIFu [13], DeepMultiCap [22], DoubleField [15], and SeSDF [1]).", + "bbox": [ + 215, + 252, + 782, + 282 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As a side note, there are also pixel-aligned implicit models that use stereo images to reconstruct a clothed human mesh. However, these models, that include StereoPIFu [8] and DiffuStereo [16], require pairs of images to be taken at two similar viewpoints. This is often infeasible in many real-life applications and is thus not used in our experiments. Instead, our benchmarks are the aforementioned Multi-view PIFu, DeepMultiCap, DoubleField, SeSDF, and a few others.", + "bbox": [ + 212, + 284, + 784, + 373 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As mentioned in Section 1 ('Introduction'), these benchmark models suffer from the problems of: 1. Fusing multi-view information in a very narrow or pointwise manner, and 2. Fusing multi-view information only at the very end of the pipeline. 
To resolve this problem, we introduce 3DFG-PIFu.", + "bbox": [ + 212, + 375, + 784, + 434 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 215, + 459, + 328, + 474 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3DFG-PIFu is a two-staged model that works as long as the number of views $V > 1$ and the camera calibrations are known. One view will be randomly picked as the primary view and the other view(s) will be designated as the secondary view(s). Let us first assume $V = 2$ . This means that we have one primary view and one secondary view. As shown in Fig. 3, front and back normal maps, as well as a mask, can be predicted from a RGB image. We use the method outlined in PIFuHD [14] to predict the normal maps. Then, from the predicted normal maps, we can easily extract out the mask. Hereafter, we refer to a view as a collection of a RGB image, a front normal map, a back normal map, and a mask.", + "bbox": [ + 212, + 492, + 784, + 628 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "1st Stage In the first stage (refer to Fig. 4 and assume $V = 2$ ), we first generate two 3D feature grids ( $G_{N}$ and $G_{M}$ ) from the secondary view's front normal map and mask. We will elaborate on how $G_{N}$ and $G_{M}$ are generated later. In short, the $G_{N}$ and $G_{M}$ contain normal pixels and mask pixels, respectively, from the secondary view, but these pixels have been transformed into the 3D camera space of the primary view. Then, $G_{N}$ and $G_{M}$ are concatenated with the primary view's RGB image, front normal map, and back normal map. The concatenated output is sent to an encoder, which is a 2D CNN. The encoder will produce a", + "bbox": [ + 212, + 646, + 784, + 766 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/52226114b502a8ecad1a814df01ecd361b0a3ee0390eb9c0f1f213cabb032dae.jpg", + "image_caption": [ + "Fig.3: Predictions from a RGB image." + ], + "image_footnote": [], + "bbox": [ + 315, + 792, + 681, + 845 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "K. Y. Chan et al.", + "bbox": [ + 271, + 114, + 388, + 127 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4ec1c63e6832fccfea46d4300abbb68be53eb93b2965813d4c5d041fff1cd8b0.jpg", + "image_caption": [ + "Fig. 4: 1st Stage of 3DFG-PIFu. Each view includes the mask and the predicted front and back normal maps. The primary view and the feature grids extracted from the secondary view(s) are fed into an encoder and MLP to generate a base mesh." + ], + "image_footnote": [], + "bbox": [ + 223, + 143, + 774, + 375 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "set of feature maps that is used by a Multilayer Perceptron (MLP) to produce a human body mesh, which we refer to as a base mesh.", + "bbox": [ + 212, + 419, + 782, + 450 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2nd Stage While the base mesh from the 1st stage has good structural accuracy, it fails to capture the more fine-grained appearance details (e.g. clothes wrinkles) from all the views. Thus, a 2nd stage is needed. The 2nd stage of 3DFG-PIFu is an iterative mechanism or pipeline to combine appearance details from multiple views. 
We will briefly describe the flow of the pipeline here, but the details and rationales behind each step in the pipeline will be explained in Section 3.2.", + "bbox": [ + 212, + 462, + 784, + 551 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "At the start of the 2nd stage, a view is picked from the set of input views. Assume that $V = 2$ , and the view that we selected is the secondary view. The secondary view, as well as the base mesh from the 1st stage, will be used as inputs in the 2nd stage (shown in Fig. 5). First, we will rotate (or transform) the base mesh into the 3D camera space of the secondary view. From this rotated base mesh, we generate two additional 3D feature grids ( $G_V$ and $G_S'$ ). $G_V$ and $G_S'$ will have the visibility information and the SDF values, respectively, of the rotated base mesh. We will elaborate on how $G_V$ and $G_S'$ are obtained later. $G_V$ , $G_S'$ and the secondary view will be concatenated together and fed into an encoder, which is a 2D CNN. This encoder will produce a set of feature maps that is used by an MLP to produce a partial refined mesh. The partial refined mesh will have the fine-grained appearance details of the secondary view.", + "bbox": [ + 212, + 553, + 785, + 733 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Finally, we will obtain a 3D feature grid of SDF values $(G_S)$ from the base mesh. Then, $G_S$ is refined and updated using information from the partial refined mesh via a process that we call visibility-based fusion (to be explained later). Visibility-based fusion will return a final 3D grid of SDF values, $G_F$ . From $G_F$ , we will retrieve the final mesh via the Marching Cubes algorithm.", + "bbox": [ + 212, + 734, + 785, + 809 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For simplicity, Fig. 5 only shows the scenario where there is only 1 secondary view, and the secondary view (rather than the primary view) is picked at the", + "bbox": [ + 212, + 809, + 785, + 839 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "3DFG-PIFu", + "bbox": [ + 648, + 114, + 730, + 126 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/25f1ba29f265916ca4d21523b57789000869f973531da7d2c71ca540406048d2.jpg", + "image_caption": [ + "Fig. 5: 2nd Stage of 3DFG-PIFu. The base mesh is first aligned to the secondary view. Once aligned, it is combined with the secondary view to produce a partial refined mesh." + ], + "image_footnote": [], + "bbox": [ + 218, + 143, + 777, + 323 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "start of the 2nd stage. In reality, the primary view and every secondary view will be separately processed in the 2nd stage (See the blue arrows in Fig. 5), and each view will generate a different partial refined mesh. All these partial refined meshes will be used to refine and update the base mesh during visibility-based fusion before the final mesh is produced.", + "bbox": [ + 212, + 358, + 784, + 434 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Optionally, in the 1st stage, if an SMPL-X mesh is given as an input, we will convert the SMPL-X mesh into another 3D feature grid of SDF values $(G_X)$ . See illustration in Fig. 4. $G_{X}$ is what we refer to as SDF-based SMPL-X Features. This feature grid will be concatenated with the other inputs in the 1st stage.
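Both stages ultimately read a mesh out of a 3D grid. As a concrete aside on that last step (retrieving the final mesh from $G_F$), the sketch below uses scikit-image's Marching Cubes; the normalization of vertex coordinates to $[-1, 1]^3$ is our assumption, not necessarily the paper's exact convention.

```python
import numpy as np
import trimesh
from skimage import measure

def sdf_grid_to_mesh(g_f: np.ndarray, level: float = 0.0) -> trimesh.Trimesh:
    """Extract the zero level set of a (D, H, W) SDF grid such as G_F."""
    verts, faces, normals, _ = measure.marching_cubes(g_f, level=level)
    # marching_cubes returns vertices in voxel coordinates; map them to
    # the grid's camera-space bounding box (assumed [-1, 1]^3 here).
    verts = verts / (np.array(g_f.shape) - 1.0) * 2.0 - 1.0
    return trimesh.Trimesh(vertices=verts, faces=faces, vertex_normals=normals)

# Usage: mesh = sdf_grid_to_mesh(g_f); mesh.export('final_mesh.obj')
```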
Concurrently, we use the technique described in PaMIR [23] to obtain voxel-aligned features. The voxel-aligned features will be used by the MLP at the end of the pipeline. We will explain the rationale behind this set-up later.", + "bbox": [ + 212, + 433, + 784, + 539 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Now, we will first elaborate on the different 3D feature grids (Sect. 3.1) before moving on to our iterative mechanism described in 3DFG-PIFu's 2nd stage (Sect. 3.2). Finally, we will explain our SDF-based SMPL-X Features (Sect. 3.3).", + "bbox": [ + 212, + 539, + 785, + 585 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1 3D Feature Grids", + "text_level": 1, + "bbox": [ + 215, + 604, + 406, + 619 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In Section 1 ('Introduction'), we thoroughly explained why we need to use 3D feature grids. Indeed, the central theme of our paper revolves around the use of 3D feature grids. We define a 3D feature grid as a $D \times H \times W$ grid where each element on the grid can be either a scalar value or a vector. $D$ , $H$ , and $W$ are integers. A 3D feature grid is useful as it can contain various types of information and can represent this information in different 3D camera spaces. In total, we use four different types of 3D feature grids:", + "bbox": [ + 212, + 628, + 787, + 736 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "1. 3D Feature Grid for Visual Hull $(G_M)$ In Fig. 6, we illustrate how a 3D Feature Grid for Visual Hull is obtained if we only have 2 views: View 1 (the primary view) and View 2 (the secondary view). First, given a $256 \times 256$ mask of View 2, we replicate the mask pixels (i.e. the non-empty pixels) 256 times in the z-dimension (camera direction), giving us $M_2 \times 256$ elements in the 3D camera space of View 2, where $M_2$ represents the number of mask pixels in the mask of View 2. We do", + "bbox": [ + 212, + 748, + 787, + 840 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "K. Y. Chan et al.", + "bbox": [ + 271, + 114, + 388, + 126 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/7cb376422de7b0b1c6825755d6ad09c98507c2570973e4e2872135165c4ead55.jpg", + "image_caption": [ + "Fig. 6: 3D Feature Grid - Visual Hull. The figure shows how $G_{M}$ is extracted." + ], + "image_footnote": [], + "bbox": [ + 218, + 142, + 782, + 255 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "the same for View 1 to get $M_1 \times 256$ elements in the 3D camera space of View 1. The elements belonging to View 2 are then rotated or transformed into the 3D camera space of View 1 and placed together with the elements that belong to View 1. Lastly, we take the 3D intersection of the two groups of elements to obtain a visual hull. The visual hull is stored in a 3D grid that corresponds to the 3D camera space of View 1, and this grid is the 3D Feature Grid for Visual Hull, or $G_M$ . We will always store the visual hull in the 3D camera space of the primary view (instead of a secondary view). On a side note, $G_M$ can be generated with more than 2 views too. For example, with 3 views, the visual hull is formed by the 3D intersection of 3 groups of elements.", + "bbox": [ + 212, + 277, + 784, + 428 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "$G_{M}$ is useful because it contains the structural information of both View 1 and View 2.
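The construction just described can be sketched in a few lines. The sketch below is our own illustration: it assumes weak-perspective (orthographic-style) cameras, so extruding a mask along z reduces to a 2D lookup, and it assumes rotations mapping primary-space points into each view's camera space; the resolution, axis conventions, and names are illustrative (the paper's grids are 256^3; a smaller res keeps the sketch cheap).

```python
import numpy as np

def visual_hull_grid(masks, rots_to_view, res: int = 128) -> np.ndarray:
    """Sketch of G_M: intersect mask extrusions in the primary camera space.

    masks:        list of (res, res) binary masks, one per view
    rots_to_view: list of (3, 3) rotations taking primary-space points
                  into each view's camera space (identity for View 1)
    """
    lin = np.linspace(-1.0, 1.0, res)
    zz, yy, xx = np.meshgrid(lin, lin, lin, indexing="ij")
    pts = np.stack([xx, yy, zz], axis=-1).reshape(-1, 3)  # primary space

    hull = np.ones(len(pts), dtype=bool)
    for mask, rot in zip(masks, rots_to_view):
        local = pts @ rot.T  # express grid points in this view's space
        # Dropping z and testing (x, y) against the mask is equivalent
        # to replicating the mask pixels along the camera direction.
        u = np.clip(((local[:, 0] + 1) * 0.5 * (res - 1)).astype(int), 0, res - 1)
        v = np.clip(((local[:, 1] + 1) * 0.5 * (res - 1)).astype(int), 0, res - 1)
        hull &= mask[v, u] > 0  # keep points inside every extrusion
    return hull.reshape(res, res, res).astype(np.float32)  # 1 = occupied
```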
Concretely, each element in $G_{M}$ is a binary value ('0' or '1'). '0' means the element or grid position is unoccupied, and '1' means the element is occupied. Together, these elements represent a possibility space for occupancy. No part of the groundtruth human body mesh can be outside of this possibility space, and this is very useful information for a pixel-aligned implicit model whose task is to predict and reconstruct a human body mesh.", + "bbox": [ + 212, + 429, + 784, + 534 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "2. 3D Feature Grid for Front Normals $(G_N)$ However, $G_M$ does not fully capture all the relevant information from View 2. Specifically, only mask information from View 2 is captured. If we look at the mask of View 2 in Fig. 6, we cannot actually differentiate the outlines of the arms from those of the torso, or the outlines of the person's left thigh from those of the right thigh. This information (the outlines) is not captured by the mask but is captured by the front normal map of View 2. Thus, we introduce the 3D Feature Grid for Front Normals, or $G_N$ , as a complement to $G_M$ . $G_N$ is similar to $G_M$ except that each element on its 3D grid is a normal vector rather than a scalar occupancy value. $G_N$ is obtained in a manner similar to the first row of Fig. 6 except that the mask of View 2 is replaced by the front normal map of View 2. First, given a $3 \times 256 \times 256$ front normal map of View 2, we replicate the normal pixels (i.e. only the non-empty pixels) 256 times in the z-dimension (camera direction), giving us $N_2 \times 256$ elements (i.e. vectors) in the 3D camera space of View 2, where $N_2$ represents the number of normal pixels in the front normal map of View 2. The elements belonging to View 2 are then rotated or transformed into the 3D camera space of View 1 and then stored in a 3D feature grid that corresponds to View 1's 3D camera space. This grid is the 3D Feature Grid for Front Normals, or $G_N$ . Like $G_M$ , $G_N$ is also used as an input in the 1st stage of our 3DFG-PIFu.", + "bbox": [ + 212, + 551, + 785, + 840 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "3DFG-PIFu", + "bbox": [ + 648, + 114, + 730, + 126 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 114, + 784, + 126 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/84ec8fb46f8f5b9903170bf387b785858959d1566763f536f97b90c41ef2a690.jpg", + "image_caption": [ + "Base Mesh (viewed from 3 different angles)", + "Fig. 7: View visibility of different views" + ], + "image_footnote": [], + "bbox": [ + 313, + 142, + 421, + 205 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0934a6da62f5132d1fbd85806b7e6c9e58262571feb0cd1da0c2c2d2d2707baf.jpg", + "image_caption": [ + "Visibility of View 1 \n(View 1 = a Frontal view)" + ], + "image_footnote": [], + "bbox": [ + 436, + 142, + 553, + 205 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d8b22d2022b394e7951f21d8cd2d8efdbf6eb799e3781a265a39f1194a457a2b.jpg", + "image_caption": [ + "Visibility of View 2 (View 2 = a Left view)" + ], + "image_footnote": [], + "bbox": [ + 576, + 142, + 683, + 205 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3. 3D Feature Grid for SDF values $(G_S, G_S', G_F)$ Given an input view, a single-view pixel-aligned implicit model produces a human body mesh that is oriented in the 3D camera space of that input view.
But once we have the mesh, we can transform or rotate the mesh into the 3D camera space of any view. This means that a mesh produced using View 1 can be transformed from its initial 3D camera space of View 1 to the 3D camera space of View 2. Once oriented to the 3D camera space of View 2, the mesh becomes a useful prior for a pixel-aligned implicit model that is trying to use View 2 to predict a human body mesh.", + "bbox": [ + 212, + 243, + 787, + 364 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "But we cannot feed a mesh, which consists of vertices and faces, into a pixel-aligned implicit model. To resolve this, we propose converting the mesh into a 3D feature grid of SDF values. This 3D grid will correspond to the 3D camera space of View 2, and each element in the 3D grid is a truncated SDF value that ranges from -1 to 1, where the value of 0 represents a mesh surface.", + "bbox": [ + 212, + 364, + 787, + 439 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This grid, the 3D Feature Grid for SDF values, is a simple and effective way to condition a pixel-aligned implicit model on a mesh (as a prior). We will elaborate more on the usefulness of such a prior in Section 3.2. In our 3DFG-PIFu, the 3D Feature Grid for SDF values is used as a prior in the 2nd stage (see Fig. 5). In Fig. 5, we see three variants of the 3D Feature Grid for SDF values: $G_{S}$ , $G_{S}^{\prime}$ , and $G_{F}$ . $G_{S}$ and $G_{S}^{\prime}$ represent SDF values from an unrotated and a rotated base mesh, respectively. $G_{F}$ represents SDF values from the final mesh.", + "bbox": [ + 212, + 441, + 787, + 547 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4. 3D Feature Grid for View Visibility $(G_V)$ In the 2nd stage (see Fig. 5), we use $G_S'$ as an input to the encoder. Since $G_S'$ contains a rotated base mesh in SDF form, we are essentially using the rotated base mesh as a prior in the encoder.", + "bbox": [ + 212, + 565, + 787, + 611 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This rotated base mesh already has an accurate structure and shape of a human body. Thus, given a view (e.g. View 2) in the 2nd stage, we only want to modify the rotated base mesh in regions that we are confident about editing. The regions that we are most confident about editing are those that are visible from that given view. Examples of such regions are shown in Fig. 7. If we are given View 2, for example, then we only want to edit the green regions of the base mesh, as shown in the rightmost column of Fig. 7.", + "bbox": [ + 212, + 612, + 787, + 718 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "If the selected view in the 2nd stage is indeed View 2, then we want to have a 3D feature grid that contains all those green regions. Such a 3D feature grid would serve as a complement to $G_S'$ by telling the pixel-aligned implicit model which part of the rotated base mesh should (and should not) be edited. This 3D feature grid is our 3D Feature Grid for View Visibility, or $G_V$ . Each element in $G_V$ is a binary value (0 or 1). A value of 1 indicates that, at that grid position, there is a mesh surface and this mesh surface is visible from the view that is selected in the 2nd stage (as illustrated in Fig. 7).", + "bbox": [ + 212, + 719, + 787, + 840 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "K. Y. Chan et al.", + "bbox": [ + 271, + 114, + 388, + 126 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.2 An Iterative Mechanism to Combine Appearance Details from Multiple Views", + "text_level": 1, + "bbox": [ + 215, + 146, + 776, + 176 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Our iterative mechanism or pipeline to combine appearance details from multiple views is the 2nd stage of our 3DFG-PIFu. It refines and updates the base mesh.", + "bbox": [ + 212, + 183, + 784, + 212 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "While a base mesh has a highly accurate structure, we observed that it often lacks fine-grained appearance details from all the given input views (primary view and secondary view(s)). This is illustrated in Fig. 9a and 9b.", + "bbox": [ + 212, + 213, + 784, + 258 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To resolve this, we designed a 2nd stage that focuses on capturing the fine-grained appearance details of each view. Our 2nd stage is outlined in Fig. 5. Firstly, we select a view $v$ from the set of input views. Then, as seen in the figure, we condition our encoder on the rotated base mesh $(G_S')$ , which already captures the coarse but accurate structural information from all the input views. Given such conditioning, we allow the encoder to now focus on capturing fine-grained appearance details from the selected view $v$ .", + "bbox": [ + 212, + 258, + 784, + 362 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To further ensure that appearance details are captured, we have two additional features in the 2nd stage. The first feature, explained in Section 3.1, is the use of $G_V$ (as shown in Fig. 5). By complementing $G_S'$ with $G_V$ , the encoder is able to identify which regions on the rotated base mesh are visible from view $v$ . Relative to invisible regions, the encoder will make fewer errors when modifying the visible regions. Thus, knowing where the visible regions are encourages the encoder to make more decisive and sharper modifications to these regions.", + "bbox": [ + 212, + 364, + 784, + 469 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The second additional feature in the 2nd stage is the use of Depth Oriented Sampling (DOS) from IntegratedPIFu [4]. As shown in Fig. 5, for a given view, we will generate a partial refined mesh. However, for a partial refined mesh, we are actually only interested in the regions on the mesh that are visible from that given view. For this reason, it makes sense to use DOS to train the encoder and MLP that are used in the 2nd stage. This is because DOS works best when reconstructing mesh surfaces that are directly facing the camera direction (i.e. mesh regions that are visible from the given view). We briefly explain DOS now.", + "bbox": [ + 212, + 469, + 784, + 590 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Our 1st stage model predicts coarse-grained occupancy (in or out) of sample points in a 3D space to produce the base mesh.
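To pin down the difference in prediction targets between the two stages (the comparison continues in the next sentence), here is a minimal illustration; both heads are our own stand-ins, not the paper's released architecture, and DOS itself is defined in IntegratedPIFu [4].

```python
import torch.nn as nn

# 1st stage: per-point in/out classification (occupancy).
occupancy_head = nn.Sequential(
    nn.Linear(256, 128), nn.ReLU(),
    nn.Linear(128, 1), nn.Sigmoid())  # probability of being 'inside'

# 2nd stage, DOS-style: per-point signed displacement along the camera
# direction, i.e. how far to shift the base mesh surface at that point.
displacement_head = nn.Sequential(
    nn.Linear(256, 128), nn.ReLU(),
    nn.Linear(128, 1), nn.Tanh())     # displacement in [-1, 1]
```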
In contrast, our 2nd stage model, with the use of DOS, predicts fine-grained displacement values of the sample points in the camera direction to produce a partial refined mesh.", + "bbox": [ + 212, + 590, + 784, + 650 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Intuitively, given the base mesh as a prior and the use of DOS, our 2nd stage model is trying to shift and adjust the base mesh's surface in the camera direction such that the resulting partial refined mesh better reflects the appearance details of the given views (see Fig. 8).", + "bbox": [ + 212, + 651, + 784, + 712 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Visibility-based Fusion Each given view is used to generate a partial refined mesh. We aim to use these partial refined meshes to update the original base mesh. To do so, we transform the partial refined meshes to the primary view's 3D camera space so that they are physically aligned with the base mesh. Then, we will use these partial refined meshes to update the values in $G_{S}$ , which is a $256 \times 256 \times 256$ 3D feature grid containing the SDF values of the base mesh.", + "bbox": [ + 212, + 719, + 784, + 808 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "If a partial refined mesh is created from view $v$ , then this mesh will have the most accurate shape and geometry at regions that are visible from view $v$ . For", + "bbox": [ + 212, + 809, + 784, + 839 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "3DFG-PIFu", + "bbox": [ + 648, + 114, + 730, + 126 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 785, + 126 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/80fd0bb6438c89f3472d314eb8647666a97a17a981d8fcdf2a49a0facc6f71c7.jpg", + "image_caption": [ + "Fig. 8: Illustration of our Iterative Mechanism" + ], + "image_footnote": [], + "bbox": [ + 222, + 143, + 488, + 244 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/33e977971b6fccfeb314c35f51be05cbfba99552bbedaeab68be6b06a9532843.jpg", + "image_caption": [ + "Fig. 9: Evaluation of 3DFG-PIFu's 2nd Stage." + ], + "image_footnote": [], + "bbox": [ + 526, + 142, + 756, + 244 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "this reason, we will identify locations on a partial refined mesh that are visible from its corresponding view and then extract the SDF values at these locations. So, for each partial refined mesh, these 'visible' SDF values are extracted and used to overwrite the $G_{S}$ grid. In the end, the updated $G_{S}$ , which is also referred to as our final mesh in SDF form $(G_{F})$ , will be a mix of SDF values from the base mesh and the partial refined mesh(es). To convert $G_{F}$ to mesh form, we use the Marching Cubes algorithm.", + "bbox": [ + 212, + 281, + 784, + 386 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "3.3 SDF-based SMPL-X features", + "text_level": 1, + "bbox": [ + 215, + 414, + 501, + 428 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In multi-view settings, it is possible to use methods, such as [18] and [9], to predict an SMPL-X mesh that is fairly close to the ground truth. Thus, some multi-view pixel-aligned implicit models, like DeepMultiCap [22] and SeSDF [1], use an SMPL-X mesh as a prior before predicting a human body mesh.
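Returning briefly to visibility-based fusion: the overwrite step described above can be sketched as follows. This is our own minimal illustration; it assumes the partial refined meshes have already been converted to SDF grids in the primary view's camera space, and that per-view visibility masks (in the spirit of $G_V$) are available.

```python
import numpy as np

def visibility_based_fusion(g_s: np.ndarray,
                            partial_sdfs: list,
                            vis_masks: list) -> np.ndarray:
    """Fuse partial refined meshes into the base-mesh SDF grid.

    g_s:          (D, H, W) SDF grid of the base mesh (primary space)
    partial_sdfs: per-view (D, H, W) SDF grids of the partial refined
                  meshes, already in the primary view's camera space
    vis_masks:    per-view boolean (D, H, W) grids marking voxels whose
                  surface regions are visible from that view
    """
    g_f = g_s.copy()
    for sdf, vis in zip(partial_sdfs, vis_masks):
        # Trust each partial refined mesh only where its own view sees it.
        g_f[vis] = sdf[vis]
    return g_f  # final mesh in SDF form; run Marching Cubes on this
```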
In 3DFG-PIFu, we also offer an option to use SMPL-X meshes as a prior.", + "bbox": [ + 212, + 444, + 784, + 518 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "A well-known approach to incorporating an SMPL-X mesh as a prior in a pixel-aligned implicit model is via the use of voxel-aligned features introduced by PaMIR [23]. To obtain the voxel-aligned features, the SMPL-X mesh is first voxelized and then fed as an input to a 3D CNN, as shown at the bottom of Fig. 4. Voxel-aligned features are produced by this 3D CNN. The voxel-aligned features are then used as an input to an MLP, which will produce a human body mesh. Voxel-aligned features are used in DeepMultiCap and SeSDF (with a PointNet). We can use voxel-aligned features in 3DFG-PIFu as well, as seen in Fig. 4.", + "bbox": [ + 212, + 521, + 784, + 641 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "But, as Fig. 4 shows, the features produced by the Encoder ('Pixel-aligned Features') are only fused with voxel-aligned features at the end of the pipeline. Moreover, the fusion is point-wise and localized. This means the pixel-aligned feature that corresponds to a sample point is fused only with the specific voxel-aligned feature that corresponds to the same sample point. In other words, there is no global interaction between voxel-aligned features and pixel-aligned features.", + "bbox": [ + 212, + 642, + 784, + 733 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We aim to design a method that fuses an SMPL-X mesh earlier in the pipeline and in a global manner. A recent method that does this is S-PIFu [3]. S-PIFu extracts a set of handcrafted 2D feature maps from an SMPL-X mesh. These maps are concatenated with the input image and then used as inputs at the start of the pipeline. However, useful 3D information is lost when S-PIFu reduces an SMPL-X mesh to a set of 2D handcrafted features. Thus, we propose our SDF-based SMPL-X features to directly replace the 2D handcrafted features. SDF-based", + "bbox": [ + 212, + 734, + 785, + 839 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "K. Y. Chan et al.", + "bbox": [ + 271, + 114, + 388, + 126 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/e39d69375004551d426bd02f5483c45fb5f0916591342dadc60de1565de9d0d3.jpg", + "image_caption": [ + "Fig. 10: Qualitative evaluation with SOTA models" + ], + "image_footnote": [], + "bbox": [ + 220, + 142, + 776, + 295 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "SMPL-X features retain 3D information by directly converting an SMPL-X mesh into a 3D feature grid of SDF values.", + "bbox": [ + 212, + 320, + 782, + 349 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "SDF-based SMPL-X features $(G_{X})$ form a 3D grid of SDF values (as seen in Fig. 4). $G_{X}$ is similar to $G_{S}$ , $G_{S}^{\prime}$ , and $G_{F}$ except that $G_{X}$ involves an SMPL-X mesh. To get $G_{X}$ , we first transform the SMPL-X mesh to the 3D camera space of the primary view. From the transformed SMPL-X mesh, we sample a 3D grid of SDF values. Each SDF value ranges from -1 to 1, where the value of 0 represents a surface on the SMPL-X mesh.", + "bbox": [ + 212, + 351, + 785, + 439 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "As shown in Fig. 4, $G_{X}$ can be used together with PaMIR's voxel-aligned features, and we show later on that this combination yields the best results.", + "bbox": [ + 212, + 441, + 784, + 472 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 215, + 497, + 375, + 513 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "As this is a sparse-view setup, we set the number of views $V = 2$ and set the angle between the two views to 90 degrees. It is feasible to use other angles as well. Later, in Sect. 4.3, we experiment with $V > 2$ .", + "bbox": [ + 212, + 531, + 784, + 575 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.1 Datasets", + "text_level": 1, + "bbox": [ + 215, + 601, + 333, + 614 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In our experimental setup, we utilize the THuman2.0 dataset [17] as the training set for our models as well as the competing models. The THuman2.0 dataset comprises 526 high-quality, full-body scans (or meshes) of ethnic Chinese human subjects. An 80-20 train-test split of the dataset is used. For each training mesh, we render 36 RGB images (each spaced 10 degrees apart) using a weak-perspective camera. For each training iteration, two views that are 90 degrees apart are randomly selected.", + "bbox": [ + 212, + 628, + 784, + 733 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Furthermore, we use the BUFF dataset [19] and the MultiHuman dataset [22] for the evaluation of all models. No model is trained using these datasets. For the BUFF dataset, we followed IntegratedPIFu [4] and performed systematic sampling (based on sequence number) on the dataset. This resulted in 101 human meshes that were used for evaluating the models. Utilizing systematic sampling allowed us to avoid meshes that have both the same human subject and the same pose. For the MultiHuman dataset, all single-human scans are used.", + "bbox": [ + 212, + 734, + 785, + 839 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "3DFG-PIFu", + "bbox": [ + 648, + 114, + 730, + 126 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 116, + 782, + 126 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/c6ffd9408dd35258f06bd29069a96719f56eddc023df9cd2652c46cb0492c9b5.jpg", + "table_caption": [ + "Table 1: SOTA vs Ours. The IntegratedPIFu [4] used is its multi-view version. 'SM' indicates if a groundtruth SMPL-X mesh is used. 'HR' indicates if $1024 \times 1024$ RGB images are used. By default, $512 \times 512$ RGB images are used." + ], + "table_footnote": [], + "table_body": "
Methods | SM | HR | THuman2.0 | BUFF | MultiHuman
CD (10-5) | P2S (10-5) | Normal | CD (10-2) | P2S (10-2) | Normal | CD (10-5) | P2S (10-5) | Normal
Multi-view PIFu | × | × | 10.79 | 17.03 | 5471 | 4.357 | 7.005 | 4839 | 8.197 | 9.843 | 6046
IntegratedPIFu | × | ✓ | 10.05 | 15.75 | 5324 | 4.576 | 7.497 | 4738 | 8.481 | 9.970 | 5961
DeepMultiCap | ✓ | × | 8.208 | 7.506 | 9589 | 12.451 | 4.781 | 2081 | 32.832 | 9.161 | 1518
SeSDF | ✓ | × | 6.302 | 9.181 | 5388 | 3.848 | 5.779 | 5259 | 7.167 | 9.276 | 6157
Ours (No HR, No SM) | × | × | 5.796 | 5.811 | 5386 | 2.509 | 2.286 | 4797 | 6.320 | 5.737 | 5352
Ours (HR, No SM) | × | ✓ | 5.133 | 5.028 | 5317 | 2.508 | 2.121 | 4694 | 5.315 | 4.866 | 5116
Ours (No HR, w SM) | ✓ | × | 3.560 | 3.139 | 5285 | 3.375 | 2.694 | 4758 | 5.633 | 5.070 | 5428
Ours (HR, w SM) | ✓ | ✓ | 3.555 | 3.129 | 5212 | 3.412 | 2.700 | 4560 | 5.391 | 4.934 | 5003
", + "bbox": [ + 233, + 186, + 769, + 273 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/fefe78ed7d05bd3f46b3d1ec809f8659517cc6841cf808daf203437403f2e231.jpg", + "image_caption": [ + "Fig.11: SeSDF vs our 3DFG-PIFu." + ], + "image_footnote": [], + "bbox": [ + 223, + 276, + 491, + 425 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/ca57291c87911ebcd2a166a48bdf8070e96fb506d7d3f0b1c6cb9631b0ae89e6.jpg", + "image_caption": [ + "Fig. 12: Qualitative evaluation of $G_{M}$" + ], + "image_footnote": [], + "bbox": [ + 506, + 277, + 764, + 425 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.2 Comparison with State-of-the-art", + "text_level": 1, + "bbox": [ + 215, + 446, + 537, + 463 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We compared our models against other existing models on multi-view clothed human reconstruction. The models we compared with include Multi-view PIFu [13], IntegratedPIFu (multi-view version) [4], DeepMultiCap [22], and SeSDF [1]. We also compared with DoubleField [15] and Data-Driven 3D Reconstruction method [24] in our Supp. Mat. In our quantitative evaluation, we use metrics that include Chamfer distance (CD), Point-to-Surface (P2S), and Normal reprojection error (Normal). These metrics are also used in [1,4,13,22].", + "bbox": [ + 212, + 476, + 787, + 583 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Qualitative Evaluation We evaluate the methods qualitatively in Fig. 1 and Fig. 10. In these figures, we show the meshes produced by two of our models. Our first model (in column (e)) uses neither a SMPL-X mesh nor $1024 \\times 1024$ high-res images. Our second model (in column (f)) does not use a SMPL-X mesh but uses $1024 \\times 1024$ high-res images. Among the SOTA models, IntegratedPIFu uses high-res images, while DeepMultiCap and SeSDF use a groundtruth SMPL-X mesh. Comparison with SeSDF is shown in Fig. 11. We find that our models outperformed SOTA models in both structural accuracy and appearance details.", + "bbox": [ + 212, + 597, + 787, + 720 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Quantitative Evaluation In Tab. 1, we compared our models with existing methods quantitatively. Because different SOTA methods require different types of inputs (i.e. groundtruth SMPL-X or high-res images), and these different inputs may give additional advantage to a method, we decided to train four different versions of our model, with each version using a different combination of inputs as shown in the table. The table shows that our methods significantly outperform the existing models in all three datasets. See Supp. Mat. for more analysis.", + "bbox": [ + 212, + 734, + 787, + 840 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "K. Y. Chan et al.", + "bbox": [ + 271, + 114, + 388, + 127 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/49350e7e016413d87c469ade39b8aa7e38d050aa4dbbcbd7708f510836817a5d.jpg", + "table_caption": [ + "Table 2: Quantitative evaluation of ${G}_{M}$" + ], + "table_footnote": [], + "table_body": "
Methods | THuman2.0 | BUFF
CD (10-5) | P2S (10-5) | CD (10-2) | P2S (10-2)
PIFu | 26.97 | 25.10 | 9.651 | 9.247
PIFu + $G_M$ ($G_N$ not used) | 6.626 | 7.212 | 2.530 | 3.005
PIFu + $G_M$ ($G_N$ is used) | 6.007 | 6.634 | 2.386 | 2.999
", + "bbox": [ + 318, + 157, + 683, + 212 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/515b458ab1422b8edf66ea6dd031cee8f92562be332238d9911f838acd03d8ae.jpg", + "table_caption": [ + "Table 3: Quantitative evaluation of $G_V$ at visible regions" + ], + "table_footnote": [], + "table_body": "
Methods | THuman2.0 | BUFF
CD (10-5) | P2S (10-5) | CD (10-4) | P2S (10-4)
No $G_V$ | 4.036 | 2.964 | 1.315 | 1.096
With $G_V$ | 3.891 | 2.840 | 1.284 | 1.056
", + "bbox": [ + 362, + 229, + 640, + 272 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.3 Ablation Studies", + "text_level": 1, + "bbox": [ + 215, + 277, + 401, + 292 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Evaluation of the Different 3D Feature Grids Firstly, in order to assess the effectiveness of $G_{M}$ , we train and compare a single-view PIFu that is either not given or given $G_{M}$ as an additional input. The comparison is shown quantitatively in the first two rows of Tab. 2 and qualitatively in Fig. 12. Notably, with $G_{M}$ , the single-view PIFu can also outperform a Multi-view PIFu (1st row of Tab. 1).", + "bbox": [ + 212, + 301, + 785, + 377 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Next, as aforementioned, $G_{M}$ can be complemented with $G_{N}$ . Thus, we also show the results when $G_{M}$ is used with $G_{N}$ in a single-view PIFu (see last row of Tab. 2). The results clearly demonstrated the benefit of including $G_{N}$ .", + "bbox": [ + 212, + 378, + 785, + 422 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We also evaluated $G_V$ by training the 2nd stage of 3DFG-PIFu with or without $G_V$ . The results in Tab. 3 and Fig. 13 show that $G_V$ improves the partial refined meshes obtained in the 2nd stage. Aside: As only visible regions of partial refined meshes are used to form the final mesh, Tab. 3 must consider only visible regions.", + "bbox": [ + 212, + 424, + 785, + 500 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Evaluating our Iterative Mechanism (i.e. Our 2nd Stage) We also show that 3DFG-PIFu's 2nd stage indeed improves the base meshes from the 1st stage. See Fig. 9 and Tab. 4. The improved meshes show sharper appearance details.", + "bbox": [ + 212, + 506, + 785, + 551 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "When more views are made available (i.e. $V > 2$ ), the 3DFG-PIFu can incrementally update and improve the current mesh without the need for additional training. We simply replace the base mesh with the current mesh and re-run the 2nd stage again. Results are shown in Fig 14.", + "bbox": [ + 212, + 551, + 785, + 612 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Evaluation of SDF-based SMPL- $X$ features In order to evaluate the effectiveness of our SDF-based SMPL-X features $(G_{X})$ , we train and compare a single-view PIFu that is given either (i) S-PIFu features, (ii) PaMIR's voxel-aligned features, (iii) our $G_{X}$ , or (iv) PaMIR's voxel-aligned features + our $G_{X}$ .", + "bbox": [ + 212, + 619, + 785, + 681 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/3492c39eae036352e3f5bd688f513882c0b567a463da5412de271dfc63ac805b.jpg", + "image_caption": [ + "Fig. 13: Partial refined meshes obtained w and w/o $G_V$" + ], + "image_footnote": [], + "bbox": [ + 362, + 686, + 635, + 834 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "3DFG-PIFu", + "bbox": [ + 648, + 114, + 730, + 126 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A quantitative comparison is shown in Tab. 5. The table shows our $G_{X}$ outperformed S-PIFu features. Whether our $G_{X}$ is combined with voxel-aligned features or not, it clearly improves the performance of a model when in use. Qualitatively, Fig. 
15 shows that combining PaMIR's voxel-aligned features with our $G_{X}$ yields the most robust results.", + "bbox": [ + 212, + 146, + 787, + 220 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Limitations and Conclusion", + "text_level": 1, + "bbox": [ + 215, + 241, + 517, + 258 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In our Supp. Mat., we address concerns on 3DFG-PIFu's efficiency. In short, via a series of implementation tricks, we show 3DFG-PIFu is actually more efficient than roughly half of the existing SOTA methods.", + "bbox": [ + 212, + 268, + 785, + 313 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We have introduced 3DFG-PIFu, a multi-view pixel-aligned implicit model that uses 3D Feature Grids to fuse multi-view information. 3DFG-PIFu also proposed an iterative pipeline that combines appearance details from multiple views into a single mesh. Lastly, 3DFG-PIFu introduced SDF-based SMPL-X features, which is a new way of incorporating a SMPL-X mesh into a pixel-aligned implicit model.", + "bbox": [ + 212, + 314, + 785, + 404 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/b2d38c53df2d4a76e7118d1f7113b7fd5223b0ca4987c8204edf62b2d52c2842.jpg", + "table_caption": [ + "Table 4: Quantitative evaluation of 3DFG-PIFu's 2nd Stage" + ], + "table_footnote": [], + "table_body": "
Methods | THuman2.0 | BUFF
CD (10-5) | P2S (10-5) | CD (10-2) | P2S (10-2)
Base meshes (1st Stage) | 6.007 | 6.634 | 2.386 | 2.999
Final meshes (2nd Stage) | 5.796 | 5.811 | 2.509 | 2.286
", + "bbox": [ + 310, + 422, + 689, + 468 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/907dd685fa2ef62b9f25d129a07ea7535c1ea340fa518a1f4cdd9f00d4dd57d9.jpg", + "table_caption": [ + "Table 5: Quantitative evaluation of ${G}_{X}$" + ], + "table_footnote": [], + "table_body": "
Methods | THuman2.0 | BUFF
CD (10-5) | P2S (10-5) | CD (10-2) | P2S (10-2)
S-PIFu Features | 4.488 | 4.030 | 6.812 | 6.880
Voxel-aligned Features | 4.104 | 3.740 | 8.037 | 9.225
$G_X$ (Ours) | 4.352 | 3.734 | 6.351 | 6.641
Voxel-aligned Features + $G_X$ (Ours) | 3.970 | 3.483 | 8.012 | 8.827
", + "bbox": [ + 290, + 484, + 712, + 551 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/ca08776b13e539eb4ac6880654b3752e392632c430a1c08e284c169f2d532f46.jpg", + "image_caption": [ + "Fig. 14: Effect of using more views in 3DFG-PIFu." + ], + "image_footnote": [], + "bbox": [ + 339, + 553, + 651, + 667 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/84a01eba7cb18353ce7d0a07913ec46ee18d8436b98e92fce92e258d22e67f21.jpg", + "image_caption": [ + "Fig. 15: Qualitative evaluation of $G_{X}$" + ], + "image_footnote": [], + "bbox": [ + 285, + 686, + 712, + 814 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "K. Y. Chan et al.", + "bbox": [ + 271, + 114, + 388, + 127 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgements This research work is supported by the Agency for Science, Technology and Research (A*STAR) under its MTC Programmatic Funds (Grant No. M23L7b0021).", + "bbox": [ + 215, + 146, + 787, + 193 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 215, + 212, + 321, + 228 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Cao, Y., Han, K., Wong, K.Y.K.: Sesdf: Self-evolved signed distance field for implicit 3d clothed human reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4647-4657 (2023)", + "2. Chan, K., Lin, G., Zhao, H., Lin, W.: S-pifu: Integrating parametric human models with pifu for single-view clothed human reconstruction. Advances in Neural Information Processing Systems 35, 17373-17385 (2022)", + "3. Chan, K., Lin, G., Zhao, H., Lin, W.: S-pifu: Integrating parametric human models with pifu for single-view clothed human reconstruction. In: Advances in Neural Information Processing Systems (2022)", + "4. Chan, K.Y., Lin, G., Zhao, H., Lin, W.: Integratedpifu: Integrated pixel aligned implicit function for single-view human reconstruction. In: Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part II. pp. 328-344. Springer (2022)", + "5. Chan, K.Y., Liu, F., Lin, G., Foo, C.S., Lin, W.: Fine structure-aware sampling: A new sampling training scheme for pixel-aligned implicit models in single-view human reconstruction. In: Proceedings of the AAAI Conference on Artificial Intelligence. vol. 38, pp. 964-971 (2024)", + "6. Chan, K.Y., Liu, F., Lin, G., Foo, C.S., Lin, W.: R-cyclic diffuser: Reductive and cyclic latent diffusion for 3d clothed human digitalization. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 10304-10313 (2024)", + "7. Gong, X., Song, L., Zheng, M., Planche, B., Chen, T., Yuan, J., Doermann, D., Wu, Z.: Progressive multi-view human mesh recovery with self-supervision. In: Proceedings of the AAAI Conference on Artificial Intelligence. vol. 37, pp. 676-684 (2023)", + "8. Hong, Y., Zhang, J., Jiang, B., Guo, Y., Liu, L., Bao, H.: Stereopifu: Depth aware clothed human digitization via stereo vision. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 535-545 (2021)", + "9. Kolotouros, N., Pavlakos, G., Jayaraman, D., Daniilidis, K.: Probabilistic modeling for human mesh recovery. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11605-11614 (2021)", + "0. 
Liang, J., Lin, M.C.: Shape-aware human pose and shape reconstruction using multi-view images. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 4352-4362 (2019)", + "11. Lorensen, W.E., Cline, H.E.: Marching cubes: A high resolution 3d surface construction algorithm. ACM siggraph computer graphics 21(4), 163-169 (1987)", + "12. Pavlakos, G., Choutas, V., Ghorbani, N., Bolkart, T., Osman, A.A.A., Tzionas, D., Black, M.J.: Expressive body capture: 3D hands, face, and body from a single image. In: Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR). pp. 10975-10985 (2019)", + "13. Saito, S., Huang, Z., Natsume, R., Morishima, S., Kanazawa, A., Li, H.: Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 2304-2314 (2019)" + ], + "bbox": [ + 225, + 239, + 784, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "3DFG-PIFu", + "bbox": [ + 648, + 114, + 730, + 126 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 785, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "14. Saito, S., Simon, T., Saragih, J., Joo, H.: Pifuhd: Multi-level pixel-aligned implicit function for high-resolution 3d human digitization. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 84-93 (2020)", + "15. Shao, R., Zhang, H., Zhang, H., Chen, M., Cao, Y.P., Yu, T., Liu, Y.: Doublefield: Bridging the neural surface and radiance fields for high-fidelity human reconstruction and rendering. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15872-15882 (2022)", + "16. Shao, R., Zheng, Z., Zhang, H., Sun, J., Liu, Y.: Diffustereo: High quality human reconstruction via diffusion-based stereo using sparse cameras. In: Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXII. pp. 702-720. Springer (2022)", + "17. Yu, T., Zheng, Z., Guo, K., Liu, P., Dai, Q., Liu, Y.: Function4d: Real-time human volumetric capture from very sparse consumer rgbd sensors. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR2021) (June 2021)", + "18. Yu, Z., Zhang, L., Xu, Y., Tang, C., Tran, L., Keskin, C., Park, H.S.: Multiview human body reconstruction from uncalibrated cameras. In: Advances in Neural Information Processing Systems (2022)", + "19. Zhang, C., Pujades, S., Black, M.J., Pons-Moll, G.: Detailed, accurate, human shape estimation from clothed 3d scan sequences. In: The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (July 2017)", + "20. Zhang, H., Tian, Y., Zhou, X., Ouyang, W., Liu, Y., Wang, L., Sun, Z.: Pymaf: 3d human pose and shape regression with pyramidal mesh alignment feedback loop. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 11446-11456 (2021)", + "21. Zhao, F., Yang, W., Zhang, J., Lin, P., Zhang, Y., Yu, J., Xu, L.: Humannerf: Efficiently generated human radiance field from sparse inputs. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7743-7753 (2022)", + "22. Zheng, Y., Shao, R., Zhang, Y., Yu, T., Zheng, Z., Dai, Q., Liu, Y.: Deepmulticap: Performance capture of multiple characters using sparse multiview cameras. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp.
6239-6249 (2021)", + "23. Zheng, Z., Yu, T., Liu, Y., Dai, Q.: Pamir: Parametric model-conditioned implicit representation for image-based human reconstruction. IEEE transactions on pattern analysis and machine intelligence 44(6), 3170-3184 (2021)", + "24. Zins, P., Xu, Y., Boyer, E., Wuhrer, S., Tung, T.: Data-driven 3d reconstruction of dressed humans from sparse views. In: 2021 International Conference on 3D Vision (3DV). pp. 494-504. IEEE (2021)" + ], + "bbox": [ + 215, + 146, + 785, + 686 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "K. Y. Chan et al.", + "bbox": [ + 271, + 114, + 388, + 127 + ], + "page_idx": 15 + } +] \ No newline at end of file diff --git a/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/7528968f-06f7-4c18-aa8f-783ee6c0a1d6_model.json b/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/7528968f-06f7-4c18-aa8f-783ee6c0a1d6_model.json new file mode 100644 index 0000000000000000000000000000000000000000..93159af6d365ee1b02e15fa168bfd116ad6c98a4 --- /dev/null +++ b/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/7528968f-06f7-4c18-aa8f-783ee6c0a1d6_model.json @@ -0,0 +1,2102 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.261, + 0.141, + 0.743, + 0.187 + ], + "angle": 0, + "content": "3DFG-PIFu: 3D Feature Grids for Human Digitization from Sparse Views" + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.212, + 0.777, + 0.243 + ], + "angle": 0, + "content": "Kennard Yanting Chan\\(^{1,2}\\), Fayao Liu\\(^{2}\\), Guosheng Lin\\(^{1}\\), Chuan Sheng Foo\\(^{2,3}\\), and Weisi Lin\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.342, + 0.255, + 0.66, + 0.269 + ], + "angle": 0, + "content": "1 Nanyang Technological University, Singapore" + }, + { + "type": "text", + "bbox": [ + 0.349, + 0.27, + 0.653, + 0.282 + ], + "angle": 0, + "content": "\\(^{2}\\) Institute for Infocomm Research, A*STAR" + }, + { + "type": "text", + "bbox": [ + 0.351, + 0.283, + 0.65, + 0.296 + ], + "angle": 0, + "content": "3 Centre for Frontier AI Research, A*STAR" + }, + { + "type": "list", + "bbox": [ + 0.342, + 0.255, + 0.66, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.263, + 0.331, + 0.744, + 0.61 + ], + "angle": 0, + "content": "Abstract. Pixel-aligned implicit models, such as Multi-view PIFu, Deep-MultiCap, DoubleField, and SeSDF, are well-established methods for reconstructing a clothed human from sparse views. However, given \\(V\\) images, these models would only combine features from these images in a point-wise and localized manner. In other words, the \\(V\\) images are processed individually and are only combined in a very narrow fashion at the end of the pipeline. To a large extent, this defeats the purpose of having multi-view information since the multi-view task in question is predominantly treated as a single-view task. To resolve this, we introduce 3DFG-PIFu, a pixel-aligned implicit model that exploits multi-view information right from the start and all the way to the end of the pipeline. Our 3DFG-PIFu makes use of 3D Feature Grids to combine features from \\(V\\) images in a global manner (rather than point-wise or localized) and throughout the pipeline. Other than the 3D Feature Grids, 3DFG-PIFu also proposes an iterative mechanism that refines and updates an existing output human mesh using the different views. 
Moreover, 3DFG-PIFu introduces SDF-based SMPL-X features, which is a new method of incorporating an SMPL-X mesh into a pixel-aligned implicit model. Our experiments show that 3DFG-PIFu significantly outperforms SOTA models. Our code is released at https://github.com/kcyt/3DFG-PIFu." }, { "type": "text", "bbox": [ 0.262, 0.621, 0.741, 0.649 ], "angle": 0, "content": "Keywords: 3D Clothed Human Reconstruction from Sparse Views \\(\\cdot\\) 3D Feature Grids \\(\\cdot\\) Pixel-aligned Implicit Models" }, { "type": "title", "bbox": [ 0.217, 0.674, 0.377, 0.691 ], "angle": 0, "content": "1 Introduction" }, { "type": "text", "bbox": [ 0.214, 0.704, 0.788, 0.844 ], "angle": 0, "content": "The field of 3D reconstruction of human bodies has gained considerable interest due to its potential use in various domains such as virtual reality, game production, and 3D printing. Pixel-aligned implicit models, such as Multi-view PIFu [13], DeepMultiCap [22], DoubleField [15], and SeSDF [1], are an influential class of deep learning methods for reconstructing clothed human bodies from sparse views. These models learn an implicit function that represents the surface of a human body. During testing, the learned implicit function is sampled using a grid of uniformly-spaced sample points. For each sample point, the learned implicit function (or the model) will return a predicted occupancy label (i.e." } ], [ { "type": "page_number", "bbox": [ 0.218, 0.116, 0.23, 0.127 ], "angle": 0, "content": "2" }, { "type": "header", "bbox": [ 0.272, 0.115, 0.39, 0.127 ], "angle": 0, "content": "K. Y. Chan et al." }, { "type": "image", "bbox": [ 0.223, 0.143, 0.778, 0.242 ], "angle": 0, "content": null }, { "type": "image_caption", "bbox": [ 0.312, 0.244, 0.692, 0.258 ], "angle": 0, "content": "Fig. 1: Our models (last two columns) vs SOTA models." }, { "type": "image", "bbox": [ 0.253, 0.258, 0.744, 0.549 ], "angle": 0, "content": null }, { "type": "image_caption", "bbox": [ 0.216, 0.55, 0.788, 0.613 ], "angle": 0, "content": "Fig. 2: (a) Existing multi-view pixel-aligned implicit models vs (b) Our 3DFG-PIFu." }, { "type": "text", "bbox": [ 0.216, 0.55, 0.788, 0.613 ], "angle": 0, "content": "whether the sample point is 'inside' or 'outside' of a human body surface). Once a grid of predicted occupancy labels is obtained, a human body mesh can be extracted from this grid using the Marching Cubes algorithm [11]." }, { "type": "text", "bbox": [ 0.214, 0.614, 0.789, 0.841 ], "angle": 0, "content": "In order to predict the occupancy labels, existing multi-view pixel-aligned implicit models [1, 13, 15, 22], when given \\( V \\) views or images, would compute \\( V \\) different point embeddings for each sample point. This is illustrated in Fig. 2a for the case where \\( V = 2 \\). For each sample point, its \\( V \\) point embeddings would be fused together into a single point embedding via either simple averaging [13] or weighted averaging [1, 15, 22], as illustrated in the same figure. These fused point embeddings are then converted into predicted occupancy labels, from which a human body mesh can be obtained. It is important to note that the "Point Embeddings for View 1" grid and the "Point Embeddings for View 2" grid in Fig. 2a are in different 3D coordinate spaces. The former is in the 3D camera space of View 1, and the latter is in the 3D camera space of View 2.
This means that a point located at the top left corner of a grid may not correspond to the top left corner of another grid. We let the \"Fused Point Embeddings\" grid follow the 3D camera space of View 1 (another 3D camera space could be chosen; the choice is arbitrary)." } ], [ { "type": "header", "bbox": [ 0.65, 0.115, 0.732, 0.127 ], "angle": 0, "content": "3DFG-PIFu" }, { "type": "page_number", "bbox": [ 0.775, 0.117, 0.785, 0.127 ], "angle": 0, "content": "3" }, { "type": "text", "bbox": [ 0.214, 0.147, 0.788, 0.358 ], "angle": 0, "content": "As shown in Fig. 2a, there are two problems with existing multi-view pixel-aligned implicit models. 1. Firstly, the fusion of View 1 and View 2 is carried out in a point-wise and very localized manner. This is a problem because, as shown in the bidirectional red dashed arrow in Fig. 2a, there is no interaction between fused point embeddings, even if they are located close to each other. So if there is a sample point A that is closely surrounded by ten sample points, the existing multi-view pixel-aligned implicit models may assign those ten points the same label and yet assign point A the opposite label, which is an obvious error that would lead to a floating artefact. 2. Secondly, the fusion of View 1 and View 2 occurs at the end of the pipeline in a very simple manner (either simple or weighted averaging (e.g. attention)). To a large extent, the existing multi-view pixel-aligned implicit models are not very different from their single-view counterparts except for the simplistic point-wise fusion of point embeddings at the end of the pipeline." }, { "type": "text", "bbox": [ 0.214, 0.36, 0.788, 0.526 ], "angle": 0, "content": "Hence, we propose 3DFG-PIFu, a pixel-aligned implicit model that rethinks how multi-view information is incorporated in its pipeline. One key feature of 3DFG-PIFu is its use of 3D Feature Grid(s). As seen in Fig. 2b, 3DFG-PIFu makes use of 3D Feature Grid(s) to extract structural information from View 2. The 3D Feature Grid, due to its inherent design, is able to easily orient the extracted information to a different camera space. Thus, we re-orient the 3D Feature Grid from the 3D camera space of View 2 to the 3D camera space of View 1. Now aligned with View 1, the transformed 3D Feature Grid can be concatenated with View 1 and processed by a deep neural network to form 'Fused Point Embeddings'. These fused point embeddings will then be further refined using the fine-grained information from View 2 (Section 3.2)." }, { "type": "text", "bbox": [ 0.214, 0.528, 0.788, 0.603 ], "angle": 0, "content": "Crucially, this means that, unlike existing models, the fusion of multi-view information in 3DFG-PIFu occurs from the start to the end of the pipeline. Moreover, the multi-view fusion in 3DFG-PIFu occurs in a global and broad manner (rather than point-wise and localized) as information from View 2 is allowed to influence each and every fused point embedding." }, { "type": "text", "bbox": [ 0.214, 0.604, 0.788, 0.695 ], "angle": 0, "content": "In total, 3DFG-PIFu makes three contributions: 1. The aforementioned 3D Feature Grids that fuse multi-view information (Section 3.1). 2. An iterative mechanism that refines and updates an existing output human mesh using the fine-grained information from the different views (Section 3.2). 3.
SDF-based SMPL-X features, a new method of incorporating an SMPL-X mesh into a pixel-aligned implicit model (Section 3.3)." }, { "type": "title", "bbox": [ 0.216, 0.725, 0.388, 0.74 ], "angle": 0, "content": "2 Related Work" }, { "type": "title", "bbox": [ 0.215, 0.763, 0.614, 0.779 ], "angle": 0, "content": "2.1 Human Reconstruction from Sparse Views" }, { "type": "text", "bbox": [ 0.214, 0.795, 0.787, 0.84 ], "angle": 0, "content": "Methods that reconstruct a human body mesh from a sparse number of images can be broadly classified into two classes: parametric approaches and non-parametric approaches." } ], [ { "type": "page_number", "bbox": [ 0.218, 0.116, 0.23, 0.127 ], "angle": 0, "content": "4" }, { "type": "header", "bbox": [ 0.272, 0.115, 0.39, 0.128 ], "angle": 0, "content": "K. Y. Chan et al." }, { "type": "text", "bbox": [ 0.214, 0.147, 0.784, 0.192 ], "angle": 0, "content": "Parametric approaches, such as [7,9,10,20], reconstruct a human body surface by predicting parameters of a human parametric model (e.g. SMPL-X [12]). However, these methods can only produce unclothed human body meshes." }, { "type": "text", "bbox": [ 0.214, 0.193, 0.784, 0.252 ], "angle": 0, "content": "On the other hand, non-parametric methods do not use a human parametric model. An important subclass of non-parametric methods is pixel-aligned implicit models. There are other subclasses, such as NeRF-based methods (e.g. [21]), but they have yet to outperform pixel-aligned implicit models." }, { "type": "text", "bbox": [ 0.216, 0.253, 0.784, 0.284 ], "angle": 0, "content": "Pixel-aligned implicit models can be single-view (e.g. [2,5,6]) or multi-view (e.g. Multi-view PIFu [13], DeepMultiCap [22], DoubleField [15], and SeSDF [1])." }, { "type": "text", "bbox": [ 0.214, 0.285, 0.785, 0.374 ], "angle": 0, "content": "As a side note, there are also pixel-aligned implicit models that use stereo images to reconstruct a clothed human mesh. However, these models, which include StereoPIFu [8] and DiffuStereo [16], require pairs of images taken at two similar viewpoints. This is often infeasible in real-life applications, so such models are not used as benchmarks in our experiments. Instead, our benchmarks are the aforementioned Multi-view PIFu, DeepMultiCap, DoubleField, SeSDF, and a few others." }, { "type": "text", "bbox": [ 0.214, 0.375, 0.785, 0.435 ], "angle": 0, "content": "As mentioned in Section 1 ('Introduction'), these benchmark models suffer from two problems: 1. Fusing multi-view information in a very narrow or point-wise manner, and 2. Fusing multi-view information only at the very end of the pipeline. To resolve this problem, we introduce 3DFG-PIFu." }, { "type": "title", "bbox": [ 0.216, 0.46, 0.33, 0.475 ], "angle": 0, "content": "3 Method" }, { "type": "text", "bbox": [ 0.214, 0.493, 0.785, 0.63 ], "angle": 0, "content": "3DFG-PIFu is a two-stage model that works as long as the number of views \\( V > 1 \\) and the camera calibrations are known. One view will be randomly picked as the primary view and the other view(s) will be designated as the secondary view(s). Let us first assume \\( V = 2 \\). This means that we have one primary view and one secondary view. As shown in Fig.
3, front and back normal maps, as well as a mask, can be predicted from an RGB image. We use the method outlined in PIFuHD [14] to predict the normal maps. Then, from the predicted normal maps, we can easily extract the mask. Hereafter, we refer to a view as a collection of an RGB image, a front normal map, a back normal map, and a mask." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.647, + 0.785, + 0.767 + ], + "angle": 0, + "content": "1st Stage In the first stage (refer to Fig. 4 and assume \\( V = 2 \\)), we first generate two 3D feature grids (\\( G_{N} \\) and \\( G_{M} \\)) from the secondary view's front normal map and mask. We will elaborate on how \\( G_{N} \\) and \\( G_{M} \\) are generated later. In short, \\( G_{N} \\) and \\( G_{M} \\) contain normal pixels and mask pixels, respectively, from the secondary view, but these pixels have been transformed into the 3D camera space of the primary view. Then, \\( G_{N} \\) and \\( G_{M} \\) are concatenated with the primary view's RGB image, front normal map, and back normal map. The concatenated output is sent to an encoder, which is a 2D CNN. The encoder will produce a" + }, + { + "type": "image", + "bbox": [ + 0.316, + 0.794, + 0.683, + 0.847 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.369, + 0.848, + 0.633, + 0.86 + ], + "angle": 0, + "content": "Fig. 3: Predictions from an RGB image." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.65, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DFG-PIFu" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.144, + 0.775, + 0.376 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.377, + 0.785, + 0.418 + ], + "angle": 0, + "content": "Fig. 4: 1st Stage of 3DFG-PIFu. Each view includes the mask and the predicted front and back normal maps. The primary view and the feature grids extracted from the secondary view(s) are fed into an encoder and MLP to generate a base mesh." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.42, + 0.784, + 0.451 + ], + "angle": 0, + "content": "set of feature maps that is used by a Multilayer Perceptron (MLP) to produce a human body mesh, which we refer to as a base mesh." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.463, + 0.785, + 0.553 + ], + "angle": 0, + "content": "2nd Stage While the base mesh from the 1st stage has good structural accuracy, it fails to capture the more fine-grained appearance details (e.g. clothes wrinkles) from all the views. Thus, a 2nd stage is needed. The 2nd stage of 3DFG-PIFu is an iterative mechanism or pipeline to combine appearance details from multiple views. We will briefly describe the flow of the pipeline here, but the details and rationales behind each step in the pipeline will be explained in Section 3.2." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.554, + 0.786, + 0.734 + ], + "angle": 0, + "content": "At the start of the 2nd stage, a view is picked from the set of input views. Assume that \\( V = 2 \\), and the view that we select is the secondary view. The secondary view, as well as the base mesh from the 1st stage, will be used as inputs in the 2nd stage (shown in Fig. 5). First, we will rotate (or transform) the base mesh into the 3D camera space of the secondary view. 
From this rotated base mesh, we would generate two additional 3D feature grids (\\( G_V \\) and \\( G_S' \\)). \\( G_V \\) and \\( G_S' \\) will have the visibility information and the SDF values, respectively, of the rotated base mesh. We will elaborate on how \\( G_V \\) and \\( G_S' \\) are obtained later. \\( G_V \\), \\( G_S' \\) and the secondary view will be concatenated together and fed into an encoder, which is a 2D CNN. This encoder will produce a set of feature maps that is used by a MLP to produce a partial refined mesh. The partial refined mesh will have the fine-grained appearance details of the secondary view." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.735, + 0.786, + 0.81 + ], + "angle": 0, + "content": "Finally, we will obtain a 3D feature grid of SDF values \\((G_S)\\) from the base mesh. Then, \\(G_S\\) is refined and updated using information from the partial refined mesh via a process that we call visibility-based fusion (to be explained later). Visibility-based fusion will return a final 3D grid of SDF values, \\(G_F\\). From \\(G_F\\), we will retrieve the final mesh via the Marching Cubes algorithm." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.786, + 0.84 + ], + "angle": 0, + "content": "For simplicity, Fig. 5 only shows the scenario where there is only 1 secondary view, and the secondary view (rather than the primary view) is picked at the" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.39, + 0.127 + ], + "angle": 0, + "content": "K. Y. Chan et al." + }, + { + "type": "image", + "bbox": [ + 0.22, + 0.144, + 0.778, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.216, + 0.325, + 0.784, + 0.354 + ], + "angle": 0, + "content": "Fig. 5: 2nd Stage of 3DFG-PIFu. The base mesh is first aligned to the secondary view. Once aligned, it is combined with the secondary view to produce a partial refined mesh." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.359, + 0.785, + 0.435 + ], + "angle": 0, + "content": "start of the 2nd stage. In reality, the primary view and every secondary view will be separately processed in the 2nd stage (See the blue arrows in Fig. 5), and each view will generate a different partial refined mesh. All these partial refined meshes will be used to refine and update the base mesh during visibility-based fusion before the final mesh is produced." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.434, + 0.785, + 0.54 + ], + "angle": 0, + "content": "Optionally, in the 1st stage, if a SMPL-X mesh is given as an input, we will convert the SMPL-X mesh into another 3D feature grid of SDF values \\((G_X)\\). See illustration in Fig. 4. \\(G_{X}\\) is what we refer to as SDF-based SMPL-X Features. This feature grid will be concatenated with the other inputs in the 1st stage. Concurrently, we use the technique described in PaMIR [23] to obtain voxel-aligned features. The voxel-aligned features will be used by the MLP at the end of the pipeline. We will explain the rationale behind this set-up later." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.54, + 0.787, + 0.587 + ], + "angle": 0, + "content": "Now, we will first elaborate on the different 3D feature grids (Sect. 3.1) before moving on to our iterative mechanism described in 3DFG-PIFu's 2nd stage (Sect. 3.2). Finally, we will explain our SDF-based SMPL-X Features (Sect. 3.3)." 
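As a concrete reference for the mesh-extraction step mentioned in this overview, the sketch below shows how a mesh can be pulled out of a truncated SDF grid such as \( G_F \) with the Marching Cubes algorithm. This is hypothetical illustration code (using scikit-image), not the authors' implementation; the grid resolution and the mapping from voxel indices to coordinates are our assumptions.

```python
# Minimal sketch: extract a mesh from a dense truncated-SDF grid (0 = surface),
# following the paper's convention that SDF values lie in [-1, 1].
import numpy as np
from skimage import measure  # provides marching_cubes

def sdf_grid_to_mesh(sdf_grid: np.ndarray):
    # Marching Cubes at level 0 recovers the zero-crossing surface.
    verts, faces, normals, _ = measure.marching_cubes(sdf_grid, level=0.0)
    res = sdf_grid.shape[0]
    # Map voxel indices [0, res-1] back to the assumed [-1, 1]^3 grid volume.
    verts = verts * (2.0 / (res - 1)) - 1.0
    return verts, faces, normals

# Quick self-contained test with a synthetic sphere of radius 0.5:
c = np.linspace(-1, 1, 128)
x, y, z = np.meshgrid(c, c, c, indexing="ij")
sphere_sdf = np.clip(np.sqrt(x**2 + y**2 + z**2) - 0.5, -1.0, 1.0)
verts, faces, normals = sdf_grid_to_mesh(sphere_sdf)
```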
+ }, + { + "type": "title", + "bbox": [ + 0.216, + 0.606, + 0.407, + 0.62 + ], + "angle": 0, + "content": "3.1 3D Feature Grids" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.63, + 0.788, + 0.737 + ], + "angle": 0, + "content": "In Section 1 ('Introduction'), we thoroughly explained why we need to use 3D feature grids. Indeed, the central theme of our paper revolves around the use of 3D feature grids. We define a 3D feature grid as a D x H x W grid where each element on the grid can be either a scalar value or a vector. D, H, and W are each an integer. A 3D feature grid is useful as it can contain various types of information and can represent these information in different 3D camera spaces. In total, we use four different types of 3D feature grids:" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.749, + 0.788, + 0.841 + ], + "angle": 0, + "content": "1. 3D Feature Grid for Visual Hull \\((G_M)\\) In Fig. 6, we illustrate how a 3D Feature Grid for Visual Hull is obtained if we only have 2 views - View 1 (Primary view) and View 2 (Secondary view). First, given a \\(256 \\times 256\\) mask of View 2, we replicate the mask pixels (i.e. the non-empty pixels) 256 times in the z-dimension (camera direction), giving us \\(M_2 \\times 256\\) elements in the 3D camera space of View 2, where \\(M_2\\) represents the number of mask pixels in the mask of View 2. We do" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.65, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DFG-PIFu" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "image", + "bbox": [ + 0.219, + 0.143, + 0.784, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.251, + 0.257, + 0.747, + 0.271 + ], + "angle": 0, + "content": "Fig. 6: 3D Feature Grid - Visual Hull. Above shows how \\( G_{M} \\) is extracted." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.278, + 0.785, + 0.429 + ], + "angle": 0, + "content": "the same for View 1 to get \\( M_1 \\times 256 \\) elements in the 3D camera space of View 1. The elements belonging to View 2 are then rotated or transformed into the 3D camera space of View 1 and placed together with the elements that belong to View 1. Lastly, we take the 3D intersection of the two groups of elements to obtain a visual hull. The visual hull is stored in a 3D grid that corresponds to the 3D camera space of View 1, and this is basically a 3D Feature Grid for Visual Hull or \\( G_M \\). We will always store the visual hull in the 3D camera space of the primary view (instead of a secondary view). On a side note, \\( G_M \\) can be generated with more than 2 views too. For example, with 3 views, the visual hull is formed by the 3D intersection of 3 groups of elements." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.43, + 0.785, + 0.535 + ], + "angle": 0, + "content": "\\(G_{M}\\) is useful because it contains the structural information of both View 1 and View 2. Concretely, each element in \\(G_{M}\\) is a binary value ('0' or '1'). '0' means the element or grid position is unoccupied, and '1' means the element is occupied. Together, these elements represent a possibility space for occupancy. No part of the groundtruth human body mesh can be outside of this possibility space, and this is very useful information for a pixel-aligned implicit model whose task is to predict and reconstruct a human body mesh." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.553, + 0.787, + 0.841 + ], + "angle": 0, + "content": "2. 3D Feature Grid for Front Normals \\((G_N)\\) However, \\(G_M\\) does not fully capture all the relevant information from View 2. Specifically, only mask information from View 2 is captured. If we look at the mask of View 2 in Fig. 6, we cannot actually differentiate the outlines of the arms from that of the torso, or the outlines of the person's left thigh from the right thigh. This information (the outlines) is not captured by the mask but is captured by the front normal map of View 2. Thus, we introduce 3D Feature Grid for Front Normals or \\(G_N\\) as a complement to \\(G_M\\). \\(G_N\\) is similar to \\(G_M\\) except that each element on its 3D grid is a normal vector rather than a scalar occupancy value. \\(G_N\\) is obtained in a manner similar to the first row of Fig. 6 except that the mask of View 2 is replaced by the front normal map of View 2. First, given a \\(3 \\times 256 \\times 256\\) front normal map of View 2, we replicate the normal pixels (i.e. only the non-empty pixels) 256 times in the z-dimension (camera direction), giving us \\(N_2 \\times 256\\) elements (i.e. vectors) in the 3D camera space of View 2, where \\(N_2\\) represents the number of normal pixels in the front normal map of View 2. The elements belonging to View 2 are then rotated or transformed into the 3D camera space of View 1 and then stored in a 3D feature grid that corresponds to View 1's 3D camera space. This grid is the 3D Feature Grid for Front Normals or \\(G_N\\). Like \\(G_M\\), \\(G_N\\) is also used as an input in the 1st stage of our 3DFG-PIFu." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.39, + 0.127 + ], + "angle": 0, + "content": "K. Y. Chan et al." + }, + { + "type": "image", + "bbox": [ + 0.315, + 0.143, + 0.422, + 0.206 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.307, + 0.206, + 0.436, + 0.225 + ], + "angle": 0, + "content": "Base Mesh (viewed from 3 different angles)" + }, + { + "type": "image", + "bbox": [ + 0.437, + 0.143, + 0.555, + 0.206 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.438, + 0.207, + 0.577, + 0.225 + ], + "angle": 0, + "content": "Visibility of View 1 \n(View 1 = a Frontal view)" + }, + { + "type": "image", + "bbox": [ + 0.578, + 0.143, + 0.684, + 0.206 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.577, + 0.207, + 0.691, + 0.225 + ], + "angle": 0, + "content": "Visibility of View 2 (View \\(2 =\\) a Left view)" + }, + { + "type": "image_caption", + "bbox": [ + 0.367, + 0.226, + 0.635, + 0.238 + ], + "angle": 0, + "content": "Fig. 7: View visibility of different views" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.244, + 0.788, + 0.365 + ], + "angle": 0, + "content": "3. 3D Feature Grid for SDF values \\((G_S, G_S', G_F)\\) Given an input view, a single-view pixel-aligned implicit model produces a human body mesh that is oriented in the 3D camera space of that input view. But once we have the mesh, we can transform or rotate the mesh into the 3D camera space of any view. This means that a mesh produced using View 1 can be transformed from its initial 3D camera space of View 1 to the 3D camera space of View 2. 
Once oriented to the 3D camera space of View 2, the mesh becomes a useful prior for a pixel-aligned implicit model that is trying to use View 2 to predict a human body mesh." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.366, + 0.788, + 0.44 + ], + "angle": 0, + "content": "But we cannot feed a mesh, which consists of vertices and faces, into a pixel-aligned implicit model. To resolve this, we propose converting the mesh into a 3D feature grid of SDF values. This 3D grid will correspond to the 3D camera space of View 2, and each element in the 3D grid is a truncated SDF value that ranges from -1 to 1, where the value of 0 represents a mesh surface." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.442, + 0.788, + 0.548 + ], + "angle": 0, + "content": "This grid, which is 3D Feature Grid for SDF values, is a simple and effective way to condition a pixel-aligned implicit model on a mesh (as a prior). We will elaborate more on the usefulness of such a prior in Section 3.2. In our 3DFG-PIFu, 3D Feature Grid for SDF values is used as a prior in the 2nd stage (see Fig. 5). In Fig. 5, we see three variants of 3D Feature Grid for SDF values: \\( G_{S} \\), \\( G_{S}^{\\prime} \\), and \\( G_{F} \\). \\( G_{S} \\) and \\( G_{S}^{\\prime} \\) represent SDF values from an unrotated and rotated base mesh respectively. \\( G_{F} \\) represents SDF values from the final mesh." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.566, + 0.788, + 0.612 + ], + "angle": 0, + "content": "4. 3D Feature Grid for View Visibility \\((G_V)\\) In the 2nd stage (see Fig. 5), we use \\(G_S'\\) as an input to the encoder. Since \\(G_S'\\) contains a rotated base mesh in SDF form, we are essentially using the rotated base mesh as a prior in the encoder." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.613, + 0.788, + 0.719 + ], + "angle": 0, + "content": "This rotated base mesh already has an accurate structure and shape of a human body. Thus, given a view (e.g. View 2) in the 2nd stage, we only want to modify the rotated base mesh in regions where we are confident of editing. The regions that we are most confident of editing are the regions that are visible from that given view. Examples of such regions are shown in Fig. 7. If we are given View 2, for example, then we only want to edit the green regions of the base mesh, as shown in the rightmost column of Fig. 7." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.72, + 0.788, + 0.841 + ], + "angle": 0, + "content": "If the selected view in the 2nd stage is indeed View 2, then we want to have a 3D feature grid that contains all those green regions. Such a 3D feature grid would serve as a complement to the \\( G_S' \\) by telling the pixel-aligned implicit model which part of the rotated base mesh should (and should not) be edited. This 3D feature grid is our 3D Feature Grid for View Visibility or \\( G_V \\). Each element in \\( G_V \\) is a binary value (0 or 1). A value of 1 indicates that, at that grid position, there is a mesh surface and this mesh surface is visible from the view that is selected in the 2nd stage (as illustrated in Fig. 7)." 
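To make \( G_V \) concrete, here is one plausible way to compute it (our hypothetical sketch under stated assumptions, not the paper's implementation): the grid is assumed to already be in the selected view's camera space (like \( G_S' \)) with the camera looking along the grid's z-axis, and a surface voxel counts as visible if it is the nearest surface voxel in its (x, y) column.

```python
# Hypothetical z-buffer sketch for the view-visibility grid G_V.
import numpy as np

def visibility_grid(sdf_grid: np.ndarray, surface_eps: float = 2.0 / 256):
    """sdf_grid: truncated SDF values of the (rotated) base mesh, already
    oriented to the selected view; camera assumed to look along +z."""
    res = sdf_grid.shape[0]
    surface = np.abs(sdf_grid) < surface_eps       # voxels near the mesh surface
    ix, iy, iz = np.nonzero(surface)
    zbuf = np.full((res, res), np.iinfo(np.int64).max)
    np.minimum.at(zbuf, (ix, iy), iz)              # nearest surface depth per column
    visible = np.zeros_like(surface, dtype=bool)
    keep = iz <= zbuf[ix, iy] + 1                  # 1-voxel tolerance
    visible[ix[keep], iy[keep], iz[keep]] = True
    return visible.astype(np.float32)              # 1 = surface visible from the view
```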
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.65, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DFG-PIFu" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.147, + 0.777, + 0.178 + ], + "angle": 0, + "content": "3.2 An Iterative Mechanism to Combine Appearance Details from Multiple Views" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.184, + 0.785, + 0.213 + ], + "angle": 0, + "content": "Our iterative mechanism or pipeline to combine appearance details from multiple views is the 2nd stage of our 3DFG-PIFu. It refines and updates the base mesh." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.214, + 0.785, + 0.259 + ], + "angle": 0, + "content": "While a base mesh has a highly accurate structure, we observed that it often lacks fine-grained appearance details from all the given input views (primary view and secondary views(s)). This is illustrated in Fig. 9a and b." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.26, + 0.785, + 0.363 + ], + "angle": 0, + "content": "To resolve this, we designed a 2nd stage that focuses on capturing the fine-grained appearance details of each view. Our 2nd stage is outlined in Fig. 5. Firstly, we select a view \\( v \\) from the set of input views. Then, as seen in the figure, we condition our encoder on the rotated base mesh \\( (G_S') \\), which already captured the coarse but accurate structural information from all the input views. Given such a conditioning, we allow the encoder to now focus on capturing fine-grained appearance details from selected view \\( v \\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.365, + 0.785, + 0.47 + ], + "angle": 0, + "content": "To further ensure that appearance details are captured, we have two additional features in the 2nd stage. The first feature, which was just explained, is the use of \\( G_V \\) (as shown in Fig. 5). By complementing \\( G_S' \\) with \\( G_V \\), the encoder is able to identify which regions on the rotated base mesh are visible from view \\( v \\). Relative to invisible regions, the encoder will make less error modifying the visible regions. Thus, knowing where the visible regions are encourages the encoder to make more decisive and sharper modifications to these regions." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.47, + 0.785, + 0.591 + ], + "angle": 0, + "content": "The second additional feature in the 2nd stage is the use of Depth Oriented Sampling (DOS) from IntegratedPIFu [4]. As shown in Fig. 5, for a given view, we will generate a partial refined mesh. However, for a partial refined mesh, we are actually only interested in the regions on the mesh that are visible from that given view. For this reason, it makes sense to use DOS to train the encoder and MLP that are used in the 2nd stage. This is because DOS works best when reconstructing mesh surfaces that are directly facing the camera direction (i.e. mesh regions that are visible from the given view). We briefly explain DOS now." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.591, + 0.785, + 0.651 + ], + "angle": 0, + "content": "Our 1st stage model predicts coarse-grained occupancy (in or out) of sample points in a 3D space to produce the base mesh. In contrast, our 2nd stage model, with use of DOS, predicts fine-grained displacement values of the sample points in the camera direction to produce a partial refined mesh." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.652, + 0.785, + 0.713 + ], + "angle": 0, + "content": "Intuitively, given the base mesh as prior and the use of DOS, our 2nd stage model is trying to shift and adjust the base mesh's surface in the camera direction such that the resulting partial refined mesh better reflects the appearance details of the given views (see Fig. 8)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.72, + 0.785, + 0.809 + ], + "angle": 0, + "content": "Visibility-based Fusion Each given view is used to generate a partial refined mesh. We aim to use these partial refined meshes to update the original base mesh. To do so, we transform the partial refined meshes to the primary view's 3D camera space so that they are physically aligned with the base mesh. Then, we will use these partial refined meshes to update the values in \\( G_{S} \\), which is a \\( 256 \\times 256 \\times 256 \\) 3D feature grid containing the SDF values of the base mesh." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.785, + 0.84 + ], + "angle": 0, + "content": "If a partial refined mesh is created from view \\( v \\), then this mesh will have the most accurate shape and geometry at regions that are visible from view \\( v \\). For" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.39, + 0.127 + ], + "angle": 0, + "content": "K. Y. Chan et al." + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.144, + 0.49, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.217, + 0.247, + 0.497, + 0.273 + ], + "angle": 0, + "content": "Fig. 8: Illustration of our Iterative Mechanism" + }, + { + "type": "image", + "bbox": [ + 0.527, + 0.143, + 0.758, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.504, + 0.247, + 0.785, + 0.275 + ], + "angle": 0, + "content": "Fig.9: Evaluation of 3DFG-PIFu's 2nd Stage." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.282, + 0.785, + 0.387 + ], + "angle": 0, + "content": "this reason, we will identify locations on a partial refined mesh that are visible from its corresponding view and then extract the SDF values at these locations. So, for each partial refined mesh, these 'visible' SDF values are extracted and used to overwrite the \\( G_{S} \\) grid. In the end, the updated \\( G_{S} \\), which is also referred to as our final mesh in SDF form \\( (G_{F}) \\), will be a mix of SDF values from the base mesh and the partial refined mesh(es). To convert the \\( G_{F} \\) to mesh form, we use the Marching Cube algorithm." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.415, + 0.502, + 0.429 + ], + "angle": 0, + "content": "3.3 SDF-based SMPL-X features" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.445, + 0.785, + 0.52 + ], + "angle": 0, + "content": "In multi-view settings, it is possible to use methods, such as [18] and [9], to predict a SMPL-X mesh that is fairly close to the ground truth. Thus, some multi-view pixel-aligned implicit models, like DeepMultiCap [22] and SeSDF [1], use a SMPL-X mesh as a prior before predicting a human body mesh. In 3DFGPIFu, we also offer an option to use SMPL-X meshes as a prior." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.522, + 0.785, + 0.642 + ], + "angle": 0, + "content": "A well-known approach to incorporate a SMPL-X mesh as a prior in a pixel-aligned implicit model is via the use of voxel-aligned features introduced by PaMIR [23]. To obtain the voxel-aligned features, the SMPL-X mesh is first voxelized and then fed as an input to a 3D CNN, as shown in bottom of Fig. 4. Voxel-aligned features are produced by this 3D CNN. The voxel-aligned features are then used as an input to a MLP, which will produce a human body mesh. Voxel-aligned features are used in DeepMultiCap and SeSDF (with a PointNet). We can use voxel-aligned features in 3DFG-PIFu as well, as seen in Fig. 4." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.643, + 0.785, + 0.734 + ], + "angle": 0, + "content": "But, as Fig. 4 shows, the features produced by the Encoder ('Pixel-aligned Features') are only fused with voxel-aligned features at the end of the pipeline. Moreover, the fusion is point-wise and localized. This means the pixel-aligned feature that corresponds to a sample point is fused only with the specific voxel-aligned feature that corresponds to the same sample point. In other words, there is no global interaction between voxel-aligned features and pixel-aligned features." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.735, + 0.787, + 0.84 + ], + "angle": 0, + "content": "We aim to design a method to fuse a SMPL-X mesh earlier in the pipeline and in a global manner. A recent method that does this is S-PIFu [3]. S-PIFu extracts a set of handcrafted 2D feature maps from a SMPL-X mesh. These maps are concatenated with the input image and then used as inputs at the start of the pipeline. However, useful 3D information is lost when S-PIFu reduces a SMPL-X mesh into a set of 2D handcrafted features. Thus, we propose our SDF-based SMPL-X features to directly replace the 2D handcrafted features. SDF-based" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.65, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DFG-PIFu" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "image", + "bbox": [ + 0.222, + 0.143, + 0.777, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.33, + 0.297, + 0.673, + 0.309 + ], + "angle": 0, + "content": "Fig. 10: Qualitative evaluation with SOTA models" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.321, + 0.784, + 0.351 + ], + "angle": 0, + "content": "SMPL-X features retain 3D information by directly converting a SMPL-X mesh into a 3D feature grid of SDF values." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.352, + 0.787, + 0.44 + ], + "angle": 0, + "content": "SDF-based SMPL-X features \\((G_{X})\\) is a 3D grid of SDF values (as seen in Fig. 4). \\(G_{X}\\) is similar to \\(G_{S}\\), \\(G_{S}^{\\prime}\\), and \\(G_{F}\\) except that \\(G_{X}\\) involves a SMPL-X mesh. To get \\(G_{X}\\), we first transform the SMPL-X mesh to the 3D camera space of the primary view. From the transformed SMPL-X mesh, we sample a 3D grid of SDF values. Each SDF value ranges from -1 to 1, where the value of 0 represents a surface on the SMPL-X mesh." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.443, + 0.785, + 0.473 + ], + "angle": 0, + "content": "As shown in Fig. 4, \\( G_{X} \\) can be used together with PaMIR's voxel-aligned features, and we show later on that this combination yields the best results." 
+ }, + { + "type": "title", + "bbox": [ + 0.216, + 0.498, + 0.376, + 0.515 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.532, + 0.785, + 0.577 + ], + "angle": 0, + "content": "As this is a sparse views set-up, we set the number of views \\( V = 2 \\) and set the angle between the two views as 90 degrees. It is feasible to use other angles as well. Later in Sect. 4.3, we experiment with \\( V > 2 \\)." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.602, + 0.334, + 0.616 + ], + "angle": 0, + "content": "4.1 Datasets" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.629, + 0.785, + 0.734 + ], + "angle": 0, + "content": "In our experimental setup, we utilize the THuman2.0 dataset [17] as the training set for our models as well as other competing models. The THuman2.0 dataset comprises 526 high-quality, full-body scans (or meshes) of ethnic Chinese human subjects. A 80-20 train-test split of the dataset is used. For each training mesh, we render 36 RGB images (each spaced 10 degree apart) using a weak-perspective camera. For each training iteration, two views that are 90 degree apart are randomly selected." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.735, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Furthermore, we use BUFF dataset [19] and MultiHuman dataset [22] for the evaluation of all models. No model is trained using these datasets. For BUFF dataset, we followed IntegratedPIFu [4] and performed systematic sampling (based on sequence number) on the dataset. This resulted in 101 human meshes that were used for evaluating the models. Utilizing systematic sampling allowed us to avoid meshes that have both the same human subject and the same pose. For MultiHuman dataset, all single human scans are used." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.39, + 0.128 + ], + "angle": 0, + "content": "K. Y. Chan et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.787, + 0.186 + ], + "angle": 0, + "content": "Table 1: SOTA vs Ours. The IntegratedPIFu [4] used is its multi-view version. 'SM' indicates if a groundtruth SMPL-X mesh is used. 'HR' indicates if 1024x1024 RGB images are used. By default, 512x512 RGB images are used." + }, + { + "type": "table", + "bbox": [ + 0.235, + 0.187, + 0.77, + 0.274 + ], + "angle": 0, + "content": "
Methods | SM | HR | THuman2.0 CD (10-5) | THuman2.0 P2S (10-5) | THuman2.0 Normal | BUFF CD (10-2) | BUFF P2S (10-2) | BUFF Normal | MultiHuman CD (10-5) | MultiHuman P2S (10-5) | MultiHuman Normal
Multi-view PIFu | × | × | 10.79 | 17.03 | 5471 | 4.357 | 7.005 | 4839 | 8.197 | 9.843 | 6046
IntegratedPIFu | × | ✓ | 10.05 | 15.75 | 5324 | 4.576 | 7.497 | 4738 | 8.481 | 9.970 | 5961
DeepMultiCap | ✓ | × | 8.208 | 7.506 | 9589 | 12.451 | 4.781 | 2081 | 32.832 | 9.161 | 1518
SeSDF | ✓ | × | 6.302 | 9.181 | 5388 | 3.848 | 5.779 | 5259 | 7.167 | 9.276 | 6157
Ours (No HR, No SM) | × | × | 5.796 | 5.811 | 5386 | 2.509 | 2.286 | 4797 | 6.320 | 5.737 | 5352
Ours (HR, No SM) | × | ✓ | 5.133 | 5.028 | 5317 | 2.508 | 2.121 | 4694 | 5.315 | 4.866 | 5116
Ours (No HR, w SM) | ✓ | × | 3.560 | 3.139 | 5285 | 3.375 | 2.694 | 4758 | 5.633 | 5.070 | 5428
Ours (HR, w SM) | ✓ | ✓ | 3.555 | 3.129 | 5212 | 3.412 | 2.700 | 4560 | 5.391 | 4.934 | 5003
" + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.277, + 0.492, + 0.426 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.24, + 0.427, + 0.482, + 0.44 + ], + "angle": 0, + "content": "Fig.11: SeSDF vs our 3DFG-PIFu." + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.278, + 0.766, + 0.426 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.509, + 0.427, + 0.765, + 0.44 + ], + "angle": 0, + "content": "Fig. 12: Qualitative evaluation of \\( G_{M} \\)" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.448, + 0.538, + 0.464 + ], + "angle": 0, + "content": "4.2 Comparison with State-of-the-art" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.477, + 0.788, + 0.584 + ], + "angle": 0, + "content": "We compared our models against other existing models on multi-view clothed human reconstruction. The models we compared with include Multi-view PIFu [13], IntegratedPIFu (multi-view version) [4], DeepMultiCap [22], and SeSDF [1]. We also compared with DoubleField [15] and Data-Driven 3D Reconstruction method [24] in our Supp. Mat. In our quantitative evaluation, we use metrics that include Chamfer distance (CD), Point-to-Surface (P2S), and Normal reprojection error (Normal). These metrics are also used in [1,4,13,22]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.598, + 0.788, + 0.721 + ], + "angle": 0, + "content": "Qualitative Evaluation We evaluate the methods qualitatively in Fig. 1 and Fig. 10. In these figures, we show the meshes produced by two of our models. Our first model (in column (e)) uses neither a SMPL-X mesh nor \\(1024 \\times 1024\\) high-res images. Our second model (in column (f)) does not use a SMPL-X mesh but uses \\(1024 \\times 1024\\) high-res images. Among the SOTA models, IntegratedPIFu uses high-res images, while DeepMultiCap and SeSDF use a groundtruth SMPL-X mesh. Comparison with SeSDF is shown in Fig. 11. We find that our models outperformed SOTA models in both structural accuracy and appearance details." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.735, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Quantitative Evaluation In Tab. 1, we compared our models with existing methods quantitatively. Because different SOTA methods require different types of inputs (i.e. groundtruth SMPL-X or high-res images), and these different inputs may give additional advantage to a method, we decided to train four different versions of our model, with each version using a different combination of inputs as shown in the table. The table shows that our methods significantly outperform the existing models in all three datasets. See Supp. Mat. for more analysis." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.65, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DFG-PIFu" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "table_caption", + "bbox": [ + 0.365, + 0.145, + 0.638, + 0.158 + ], + "angle": 0, + "content": "Table 2: Quantitative evaluation of \\( {G}_{M} \\)" + }, + { + "type": "table", + "bbox": [ + 0.32, + 0.159, + 0.684, + 0.213 + ], + "angle": 0, + "content": "
Methods | THuman2.0 CD (10-5) | THuman2.0 P2S (10-5) | BUFF CD (10-2) | BUFF P2S (10-2)
PIFu | 26.97 | 25.10 | 9.651 | 9.247
PIFu + G_M (G_N not used) | 6.626 | 7.212 | 2.530 | 3.005
PIFu + G_M (G_N is used) | 6.007 | 6.634 | 2.386 | 2.999
" + }, + { + "type": "table_caption", + "bbox": [ + 0.3, + 0.216, + 0.704, + 0.23 + ], + "angle": 0, + "content": "Table 3: Quantitative evaluation of \\( G_V \\) at visible regions" + }, + { + "type": "table", + "bbox": [ + 0.363, + 0.231, + 0.642, + 0.273 + ], + "angle": 0, + "content": "
Methods | THuman2.0 CD (10-5) | THuman2.0 P2S (10-5) | BUFF CD (10-4) | BUFF P2S (10-4)
No G_V | 4.036 | 2.964 | 1.315 | 1.096
With G_V | 3.891 | 2.840 | 1.284 | 1.056
" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.279, + 0.402, + 0.293 + ], + "angle": 0, + "content": "4.3 Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.303, + 0.787, + 0.378 + ], + "angle": 0, + "content": "Evaluation of the Different 3D Feature Grids Firstly, in order to assess the effectiveness of \\( G_{M} \\), we train and compare a single-view PIFu that is either not given or given \\( G_{M} \\) as an additional input. The comparison is shown quantitatively in the first two rows of Tab. 2 and qualitatively in Fig. 12. Notably, with \\( G_{M} \\), the single-view PIFu can also outperform a Multi-view PIFu (1st row of Tab. 1)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.379, + 0.787, + 0.424 + ], + "angle": 0, + "content": "Next, as aforementioned, \\( G_{M} \\) can be complemented with \\( G_{N} \\). Thus, we also show the results when \\( G_{M} \\) is used with \\( G_{N} \\) in a single-view PIFu (see last row of Tab. 2). The results clearly demonstrated the benefit of including \\( G_{N} \\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.425, + 0.787, + 0.501 + ], + "angle": 0, + "content": "We also evaluated \\( G_V \\) by training the 2nd stage of 3DFG-PIFu with or without \\( G_V \\). The results in Tab. 3 and Fig. 13 show that \\( G_V \\) improves the partial refined meshes obtained in the 2nd stage. Aside: As only visible regions of partial refined meshes are used to form the final mesh, Tab. 3 must consider only visible regions." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.507, + 0.787, + 0.552 + ], + "angle": 0, + "content": "Evaluating our Iterative Mechanism (i.e. Our 2nd Stage) We also show that 3DFG-PIFu's 2nd stage indeed improves the base meshes from the 1st stage. See Fig. 9 and Tab. 4. The improved meshes show sharper appearance details." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.552, + 0.787, + 0.613 + ], + "angle": 0, + "content": "When more views are made available (i.e. \\( V > 2 \\)), the 3DFG-PIFu can incrementally update and improve the current mesh without the need for additional training. We simply replace the base mesh with the current mesh and re-run the 2nd stage again. Results are shown in Fig 14." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.621, + 0.787, + 0.683 + ], + "angle": 0, + "content": "Evaluation of SDF-based SMPL- \\(X\\) features In order to evaluate the effectiveness of our SDF-based SMPL-X features \\((G_{X})\\), we train and compare a single-view PIFu that is given either (i) S-PIFu features, (ii) PaMIR's voxel-aligned features, (iii) our \\(G_{X}\\), or (iv) PaMIR's voxel-aligned features + our \\(G_{X}\\)." + }, + { + "type": "image", + "bbox": [ + 0.364, + 0.687, + 0.637, + 0.835 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.313, + 0.833, + 0.688, + 0.847 + ], + "angle": 0, + "content": "Fig. 13: Partial refined meshes obtained w and w/o \\( G_V \\)" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.39, + 0.128 + ], + "angle": 0, + "content": "K. Y. Chan et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.222 + ], + "angle": 0, + "content": "A quantitative comparison is shown in Tab. 5. The table shows our \\( G_{X} \\) outperformed S-PIFu features. 
Whether or not it is combined with voxel-aligned features, our \( G_{X} \) clearly improves a model's performance whenever it is used. Qualitatively, Fig. 15 shows that combining PaMIR's voxel-aligned features with our \( G_{X} \) yields the most robust results." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.242, + 0.519, + 0.259 + ], + "angle": 0, + "content": "5 Limitations and Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.269, + 0.786, + 0.314 + ], + "angle": 0, + "content": "In our Supp. Mat., we address concerns about 3DFG-PIFu's efficiency. In short, via a series of implementation tricks, we show 3DFG-PIFu is actually more efficient than roughly half of the existing SOTA methods." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.315, + 0.787, + 0.405 + ], + "angle": 0, + "content": "We have introduced 3DFG-PIFu, a multi-view pixel-aligned implicit model that uses 3D Feature Grids to fuse multi-view information. 3DFG-PIFu also proposed an iterative pipeline that combines appearance details from multiple views into a single mesh. Lastly, 3DFG-PIFu introduced SDF-based SMPL-X features, which is a new way of incorporating a SMPL-X mesh into a pixel-aligned implicit model." + }, + { + "type": "table_caption", + "bbox": [ + 0.298, + 0.408, + 0.706, + 0.422 + ], + "angle": 0, + "content": "Table 4: Quantitative evaluation of 3DFG-PIFu's 2nd Stage" + }, + { + "type": "table", + "bbox": [ + 0.312, + 0.423, + 0.691, + 0.469 + ], + "angle": 0, + "content": "
Methods | THuman2.0 CD (10-5) | THuman2.0 P2S (10-5) | BUFF CD (10-2) | BUFF P2S (10-2)
Base meshes (1st Stage) | 6.007 | 6.634 | 2.386 | 2.999
Final meshes (2nd Stage) | 5.796 | 5.811 | 2.509 | 2.286
" + }, + { + "type": "table_caption", + "bbox": [ + 0.365, + 0.472, + 0.636, + 0.485 + ], + "angle": 0, + "content": "Table 5: Quantitative evaluation of \\( {G}_{X} \\)" + }, + { + "type": "table", + "bbox": [ + 0.292, + 0.486, + 0.713, + 0.552 + ], + "angle": 0, + "content": "
Methods | THuman2.0 CD (10-5) | THuman2.0 P2S (10-5) | BUFF CD (10-2) | BUFF P2S (10-2)
S-PIFu Features | 4.488 | 4.030 | 6.812 | 6.880
Voxel-aligned Features | 4.104 | 3.740 | 8.037 | 9.225
G_X (Ours) | 4.352 | 3.734 | 6.351 | 6.641
Voxel-aligned Features + G_X (Ours) | 3.970 | 3.483 | 8.012 | 8.827
" + }, + { + "type": "image", + "bbox": [ + 0.341, + 0.554, + 0.652, + 0.669 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.33, + 0.671, + 0.672, + 0.684 + ], + "angle": 0, + "content": "Fig. 14: Effect of using more views in 3DFG-PIFu." + }, + { + "type": "image", + "bbox": [ + 0.287, + 0.687, + 0.713, + 0.815 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.373, + 0.815, + 0.628, + 0.828 + ], + "angle": 0, + "content": "Fig. 15: Qualitative evaluation of \\( G_{X} \\)" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.65, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DFG-PIFu" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.147, + 0.788, + 0.194 + ], + "angle": 0, + "content": "Acknowledgements This research work is supported by the Agency for Science, Technology and Research (A*STAR) under its MTC Programmatic Funds (Grant No. M23L7b0021)." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.213, + 0.323, + 0.229 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.24, + 0.785, + 0.281 + ], + "angle": 0, + "content": "1. Cao, Y., Han, K., Wong, K.Y.K.: Sesdf: Self-evolved signed distance field for implicit 3d clothed human reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4647-4657 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.283, + 0.785, + 0.322 + ], + "angle": 0, + "content": "2. Chan, K., Lin, G., Zhao, H., Lin, W.: S-pifu: Integrating parametric human models with pifu for single-view clothed human reconstruction. Advances in Neural Information Processing Systems 35, 17373-17385 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.323, + 0.785, + 0.363 + ], + "angle": 0, + "content": "3. Chan, K., Lin, G., Zhao, H., Lin, W.: S-pifu: Integrating parametric human models with pifu for single-view clothed human reconstruction. In: Advances in Neural Information Processing Systems (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.364, + 0.785, + 0.417 + ], + "angle": 0, + "content": "4. Chan, K.Y., Lin, G., Zhao, H., Lin, W.: Integratedpifu: Integrated pixel aligned implicit function for single-view human reconstruction. In: Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part II. pp. 328-344. Springer (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.418, + 0.785, + 0.471 + ], + "angle": 0, + "content": "5. Chan, K.Y., Liu, F., Lin, G., Foo, C.S., Lin, W.: Fine structure-aware sampling: A new sampling training scheme for pixel-aligned implicit models in single-view human reconstruction. In: Proceedings of the AAAI Conference on Artificial Intelligence. vol. 38, pp. 964-971 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.473, + 0.785, + 0.527 + ], + "angle": 0, + "content": "6. Chan, K.Y., Liu, F., Lin, G., Foo, C.S., Lin, W.: R-cyclic diffuser: Reductive and cyclic latent diffusion for 3d clothed human digitalization. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 10304-10313 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.528, + 0.785, + 0.58 + ], + "angle": 0, + "content": "7. 
Gong, X., Song, L., Zheng, M., Planche, B., Chen, T., Yuan, J., Doermann, D., Wu, Z.: Progressive multi-view human mesh recovery with self-supervision. In: Proceedings of the AAAI Conference on Artificial Intelligence. vol. 37, pp. 676-684 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.582, + 0.785, + 0.623 + ], + "angle": 0, + "content": "8. Hong, Y., Zhang, J., Jiang, B., Guo, Y., Liu, L., Bao, H.: Stereopifu: Depth aware clothed human digitization via stereo vision. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 535-545 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.624, + 0.785, + 0.663 + ], + "angle": 0, + "content": "9. Kolotouros, N., Pavlakos, G., Jayaraman, D., Daniilidis, K.: Probabilistic modeling for human mesh recovery. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11605-11614 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.664, + 0.785, + 0.704 + ], + "angle": 0, + "content": "10. Liang, J., Lin, M.C.: Shape-aware human pose and shape reconstruction using multi-view images. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 4352-4362 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.705, + 0.785, + 0.731 + ], + "angle": 0, + "content": "11. Lorensen, W.E., Cline, H.E.: Marching cubes: A high resolution 3d surface construction algorithm. ACM siggraph computer graphics 21(4), 163-169 (1987)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.732, + 0.785, + 0.785 + ], + "angle": 0, + "content": "12. Pavlakos, G., Choutas, V., Ghorbani, N., Bolkart, T., Osman, A.A.A., Tzionas, D., Black, M.J.: Expressive body capture: 3D hands, face, and body from a single image. In: Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR). pp. 10975-10985 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.786, + 0.785, + 0.84 + ], + "angle": 0, + "content": "13. Saito, S., Huang, Z., Natsume, R., Morishima, S., Kanazawa, A., Li, H.: Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 2304-2314 (2019)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.24, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.39, + 0.128 + ], + "angle": 0, + "content": "K. Y. Chan et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.147, + 0.787, + 0.203 + ], + "angle": 0, + "content": "14. Saito, S., Simon, T., Saragih, J., Joo, H.: Pifuhd: Multi-level pixel-aligned implicit function for high-resolution 3d human digitization. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 84-93 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.204, + 0.787, + 0.259 + ], + "angle": 0, + "content": "15. Shao, R., Zhang, H., Zhang, H., Chen, M., Cao, Y.P., Yu, T., Liu, Y.: Doublefield: Bridging the neural surface and radiance fields for high-fidelity human reconstruction and rendering. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15872-15882 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.259, + 0.787, + 0.314 + ], + "angle": 0, + "content": "16. 
Shao, R., Zheng, Z., Zhang, H., Sun, J., Liu, Y.: Diffustereo: High quality human reconstruction via diffusion-based stereo using sparse cameras. In: Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXII. pp. 702-720. Springer (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.315, + 0.787, + 0.355 + ], + "angle": 0, + "content": "17. Yu, T., Zheng, Z., Guo, K., Liu, P., Dai, Q., Liu, Y.: Function4d: Real-time human volumetric capture from very sparse consumer rgbd sensors. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR2021) (June 2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.356, + 0.787, + 0.397 + ], + "angle": 0, + "content": "18. Yu, Z., Zhang, L., Xu, Y., Tang, C., Tran, L., Keskin, C., Park, H.S.: Multiview human body reconstruction from uncalibrated cameras. In: Advances in Neural Information Processing Systems (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.398, + 0.787, + 0.438 + ], + "angle": 0, + "content": "19. Zhang, C., Pujades, S., Black, M.J., Pons-Moll, G.: Detailed, accurate, human shape estimation from clothed 3d scan sequences. In: The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (July 2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.439, + 0.787, + 0.493 + ], + "angle": 0, + "content": "20. Zhang, H., Tian, Y., Zhou, X., Ouyang, W., Liu, Y., Wang, L., Sun, Z.: Pymaf: 3d human pose and shape regression with pyramidal mesh alignment feedback loop. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 11446-11456 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.494, + 0.787, + 0.549 + ], + "angle": 0, + "content": "21. Zhao, F., Yang, W., Zhang, J., Lin, P., Zhang, Y., Yu, J., Xu, L.: Humannerf: Efficiently generated human radiance field from sparse inputs. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7743-7753 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.549, + 0.787, + 0.605 + ], + "angle": 0, + "content": "22. Zheng, Y., Shao, R., Zhang, Y., Yu, T., Zheng, Z., Dai, Q., Liu, Y.: Deepmulticap: Performance capture of multiple characters using sparse multiview cameras. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 6239-6249 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.606, + 0.787, + 0.646 + ], + "angle": 0, + "content": "23. Zheng, Z., Yu, T., Liu, Y., Dai, Q.: Pamir: Parametric model-conditioned implicit representation for image-based human reconstruction. IEEE transactions on pattern analysis and machine intelligence 44(6), 3170-3184 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.647, + 0.787, + 0.688 + ], + "angle": 0, + "content": "24. Zins, P., Xu, Y., Boyer, E., Wuhrer, S., Tung, T.: Data-driven 3d reconstruction of dressed humans from sparse views. In: 2021 International Conference on 3D Vision (3DV). pp. 494-504. 
IEEE (2021)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.688 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/7528968f-06f7-4c18-aa8f-783ee6c0a1d6_origin.pdf b/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/7528968f-06f7-4c18-aa8f-783ee6c0a1d6_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..efe47ac7d1fe08807bdff87bef2a086dfd20f6bb --- /dev/null +++ b/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/7528968f-06f7-4c18-aa8f-783ee6c0a1d6_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a700e427e1cc5dc21d1a99000570cc28100be340febd5cf477d3499d852cac99 +size 9591441 diff --git a/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/full.md b/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/full.md new file mode 100644 index 0000000000000000000000000000000000000000..5c5937ce3b1d5265553361d6c44087c7d10b1278 --- /dev/null +++ b/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/full.md @@ -0,0 +1,268 @@ +# 3DFG-PIFu: 3D Feature Grids for Human Digitization from Sparse Views + +Kennard Yanting Chan $^{1,2}$ , Fayao Liu $^{2}$ , Guosheng Lin $^{1}$ , Chuan Sheng Foo $^{2,3}$ , and Weisi Lin $^{1}$ + +1 Nanyang Technological University, Singapore +$^{2}$ Institute for Infocomm Research, A*STAR +3 Centre for Frontier AI Research, A*STAR + +Abstract. Pixel-aligned implicit models, such as Multi-view PIFu, Deep-MultiCap, DoubleField, and SeSDF, are well-established methods for reconstructing a clothed human from sparse views. However, given $V$ images, these models would only combine features from these images in a point-wise and localized manner. In other words, the $V$ images are processed individually and are only combined in a very narrow fashion at the end of the pipeline. To a large extent, this defeats the purpose of having multi-view information since the multi-view task in question is predominantly treated as a single-view task. To resolve this, we introduce 3DFG-PIFu, a pixel-aligned implicit model that exploits multi-view information right from the start and all the way to the end of the pipeline. Our 3DFG-PIFu makes use of 3D Feature Grids to combine features from $V$ images in a global manner (rather than point-wise or localized) and throughout the pipeline. Other than the 3D Feature Grids, 3DFG-PIFu also proposes an iterative mechanism that refines and updates an existing output human mesh using the different views. Moreover, 3DFG-PIFu introduces SDF-based SMPL-X features, which is a new method of incorporating a SMPL-X mesh into a pixel-aligned implicit model. Our experiments show that 3DFG-PIFu significantly outperforms SOTA models. Our code is released at https://github.com/kcyt/3DFG-PIFu. + +Keywords: 3D Clothed Human Reconstruction from Sparse Views $\cdot$ 3D Feature Grids $\cdot$ Pixel-aligned Implicit Models + +# 1 Introduction + +The field of 3D reconstruction of human bodies has gained considerable interest due to its potential use in various domains such as virtual reality, game production, and 3D printing. Pixel-aligned implicit models, such as Multi-view PIFu [13] DeepMultiCap [22], DoubleField [15], and SeSDF [1] are an influential class of deep learning methods for reconstructing clothed human bodies from sparse views. 
These models learn an implicit function that represents the surface of a human body. During testing, the learned implicit function is sampled using a grid of uniformly-spaced sample points. For each sample point, the learned implicit function (or the model) will return a predicted occupancy label (i.e.
+
+![](images/48ec879dd53d26453007c6276e8dd7145b694c8b9d4902b7a817233b8c82fbb3.jpg)
+
+![](images/063c760593ad20299e6bd7f63b1becdb643b372ebc02208d51e045ed0b6c34dc.jpg)
+Fig. 1: Our models (last two columns) vs SOTA models.
+Fig. 2: (a) Existing multi-view pixel-aligned implicit models vs (b) Our 3DFG-PIFu.
+whether the sample point is 'inside' or 'outside' of a human body surface). Once a grid of predicted occupancy labels is obtained, a human body mesh can be extracted from this grid using the Marching Cubes algorithm [11].
+
+In order to predict the occupancy labels, existing multi-view pixel-aligned implicit models [1, 13, 15, 22], when given $V$ views or images, would compute $V$ different point embeddings for each sample point. This is illustrated in Fig. 2a for the case where $V = 2$ . For each sample point, its $V$ point embeddings would be fused together into a single point embedding via either simple averaging [13] or weighted averaging [1, 15, 22], as illustrated in the same figure. These fused point embeddings are then converted into predicted occupancy labels, from which a human body mesh can be obtained. It is important to note that the "Point Embeddings for View 1" grid and the "Point Embeddings for View 2" grid in Fig. 2a are in different 3D coordinate spaces. The former is in the 3D camera space of View 1, and the latter is in the 3D camera space of View 2. This means that a point located at the top left corner of a grid may not correspond to the top left corner of another grid. We let the "Fused Point Embeddings" grid follow the 3D camera space of View 1 (another 3D camera space could be chosen, but the choice is arbitrary).
+
+As shown in Fig. 2a, there are two problems with existing multi-view pixel-aligned implicit models. 1. Firstly, the fusion of View 1 and View 2 is carried out in a point-wise and very localized manner. This is a problem because, as shown by the bidirectional red dashed arrow in Fig. 2a, there is no interaction between fused point embeddings, even if they are located close to each other. So if there is a sample point A that is closely surrounded by ten sample points, the existing multi-view pixel-aligned implicit models may assign those ten points the same label and yet assign point A the opposite label, which is an obvious error that would lead to a floating artefact. 2. Secondly, the fusion of View 1 and View 2 occurs at the end of the pipeline in a very simple manner (either simple or weighted averaging (e.g. attention)). To a large extent, the existing multi-view pixel-aligned implicit models are not very different from their single-view counterparts except for the simplistic point-wise fusion of point embeddings at the end of the pipeline.
+
+Hence, we propose 3DFG-PIFu, a pixel-aligned implicit model that rethinks how multi-view information is incorporated in its pipeline. One key feature of 3DFG-PIFu is its use of 3D Feature Grid(s). As seen in Fig. 2b, 3DFG-PIFu makes use of 3D Feature Grid(s) to extract structural information from View 2. The 3D Feature Grid, due to its inherent design, is able to easily orient the extracted information to a different camera space. 
Thus, we re-orient the 3D Feature Grid from the 3D camera space of View 2 to the 3D camera space of View 1. Now aligned with View 1, the transformed 3D Feature Grid can be concatenated with View 1 and processed by a deep neural network to form 'Fused Point Embeddings'. These fused point embeddings will then be further refined using the fine-grained information from View 2 (Section 3.2). + +Crucially, this means that, unlike existing models, the fusion of multi-view information in 3DFG-PIFu occurs from the start to the end of the pipeline. Moreover, the multi-view fusion in 3DFG-PIFu occurs in a global and broad manner (rather than point-wise and localized) as information from View 2 is allowed to influence each and every fused point embedding. + +In total, 3DFG-PIFu makes three contributions: 1. The aforementioned 3D Feature Grids that fuse multi-view information (Section 3.1). 2. An iterative mechanism that refines and updates an existing output human mesh using the fine-grained information from the different views (Section 3.2). 3. Introduction of SDF-based SMPL-X features, which is a new method of incorporating a SMPL-X mesh into a pixel-aligned implicit model (Section 3.3). + +# 2 Related Work + +# 2.1 Human Reconstruction from Sparse Views + +Methods that reconstruct a human body mesh from a sparse number of images can be broadly classified into two classes: Parametric approaches and non-parametric approaches. + +Parametric approaches, such as [7,9,10,20], reconstruct a human body surface by predicting parameters of a human parametric model (e.g. SMPL-X [12]). However, these methods can only produce human body meshes that are clothless. + +On the other hand, non-parametric methods do not use a human parametric model. An important subclass of non-parametric methods is pixel-aligned implicit models. There are other subclasses like NERF methods (e.g. [21]), but they have yet to outperform pixel-aligned implicit models. + +Pixel-aligned implicit models can be single-view (e.g. [2,5,6]) or multi-view (e.g. Multi-view PIFu [13], DeepMultiCap [22], DoubleField [15], and SeSDF [1]). + +As a side note, there are also pixel-aligned implicit models that use stereo images to reconstruct a clothed human mesh. However, these models, that include StereoPIFu [8] and DiffuStereo [16], require pairs of images to be taken at two similar viewpoints. This is often infeasible in many real-life applications and is thus not used in our experiments. Instead, our benchmarks are the aforementioned Multi-view PIFu, DeepMultiCap, DoubleField, SeSDF, and a few others. + +As mentioned in Section 1 ('Introduction'), these benchmark models suffer from the problems of: 1. Fusing multi-view information in a very narrow or pointwise manner, and 2. Fusing multi-view information only at the very end of the pipeline. To resolve this problem, we introduce 3DFG-PIFu. + +# 3 Method + +3DFG-PIFu is a two-staged model that works as long as the number of views $V > 1$ and the camera calibrations are known. One view will be randomly picked as the primary view and the other view(s) will be designated as the secondary view(s). Let us first assume $V = 2$ . This means that we have one primary view and one secondary view. As shown in Fig. 3, front and back normal maps, as well as a mask, can be predicted from a RGB image. We use the method outlined in PIFuHD [14] to predict the normal maps. Then, from the predicted normal maps, we can easily extract out the mask. 
Hereafter, we refer to a view as a collection of a RGB image, a front normal map, a back normal map, and a mask. + +1st Stage In the first stage (refer to Fig. 4 and assume $V = 2$ ), we first generate two 3D feature grids ( $G_{N}$ and $G_{M}$ ) from the secondary view's front normal map and mask. We will elaborate on how $G_{N}$ and $G_{M}$ are generated later. In short, the $G_{N}$ and $G_{M}$ contain normal pixels and mask pixels, respectively, from the secondary view, but these pixels have been transformed into the 3D camera space of the primary view. Then, $G_{N}$ and $G_{M}$ are concatenated with the primary view's RGB image, front normal map, and back normal map. The concatenated output is sent to an encoder, which is a 2D CNN. The encoder will produce a + +![](images/52226114b502a8ecad1a814df01ecd361b0a3ee0390eb9c0f1f213cabb032dae.jpg) +Fig.3: Predictions from a RGB image. + +![](images/4ec1c63e6832fccfea46d4300abbb68be53eb93b2965813d4c5d041fff1cd8b0.jpg) +Fig. 4: 1st Stage of 3DFG-PIFu. Each view includes the mask and the predicted front and back normal maps. The primary view and the feature grids extracted from the secondary view(s) are fed into an encoder and MLP to generate a base mesh. + +set of feature maps that is used by a Multilayer Perceptron (MLP) to produce a human body mesh, which we refer to as a base mesh. + +2nd Stage While the base mesh from the 1st stage has good structural accuracy, it fails to capture the more fine-grained appearance details (e.g. clothes wrinkles) from all the views. Thus, a 2nd stage is needed. The 2nd stage of 3DFG-PIFu is an iterative mechanism or pipeline to combine appearance details from multiple views. We will briefly describe the flow of the pipeline here, but the details and rationales behind each step in the pipeline will be explained in Section 3.2. + +At the start of the 2nd stage, a view is picked from the set of input views. Assume that $V = 2$ , and the view that we selected is the secondary view. The secondary view, as well as the base mesh from the 1st stage, will be used as inputs in the 2nd stage (shown in Fig. 5). First, we will rotate (or transform) the base mesh into the 3D camera space of the secondary view. From this rotated base mesh, we would generate two additional 3D feature grids ( $G_V$ and $G_S'$ ). $G_V$ and $G_S'$ will have the visibility information and the SDF values, respectively, of the rotated base mesh. We will elaborate on how $G_V$ and $G_S'$ are obtained later. $G_V$ , $G_S'$ and the secondary view will be concatenated together and fed into an encoder, which is a 2D CNN. This encoder will produce a set of feature maps that is used by a MLP to produce a partial refined mesh. The partial refined mesh will have the fine-grained appearance details of the secondary view. + +Finally, we will obtain a 3D feature grid of SDF values $(G_S)$ from the base mesh. Then, $G_S$ is refined and updated using information from the partial refined mesh via a process that we call visibility-based fusion (to be explained later). Visibility-based fusion will return a final 3D grid of SDF values, $G_F$ . From $G_F$ , we will retrieve the final mesh via the Marching Cubes algorithm. + +For simplicity, Fig. 5 only shows the scenario where there is only 1 secondary view, and the secondary view (rather than the primary view) is picked at the + +![](images/25f1ba29f265916ca4d21523b57789000869f973531da7d2c71ca540406048d2.jpg) +Fig. 5: 2nd Stage of 3DFG-PIFu. The base mesh is first aligned to the secondary view. 
Once aligned, it is combined with the secondary view to produce a partial refined mesh. + +start of the 2nd stage. In reality, the primary view and every secondary view will be separately processed in the 2nd stage (See the blue arrows in Fig. 5), and each view will generate a different partial refined mesh. All these partial refined meshes will be used to refine and update the base mesh during visibility-based fusion before the final mesh is produced. + +Optionally, in the 1st stage, if a SMPL-X mesh is given as an input, we will convert the SMPL-X mesh into another 3D feature grid of SDF values $(G_X)$ . See illustration in Fig. 4. $G_{X}$ is what we refer to as SDF-based SMPL-X Features. This feature grid will be concatenated with the other inputs in the 1st stage. Concurrently, we use the technique described in PaMIR [23] to obtain voxel-aligned features. The voxel-aligned features will be used by the MLP at the end of the pipeline. We will explain the rationale behind this set-up later. + +Now, we will first elaborate on the different 3D feature grids (Sect. 3.1) before moving on to our iterative mechanism described in 3DFG-PIFu's 2nd stage (Sect. 3.2). Finally, we will explain our SDF-based SMPL-X Features (Sect. 3.3). + +# 3.1 3D Feature Grids + +In Section 1 ('Introduction'), we thoroughly explained why we need to use 3D feature grids. Indeed, the central theme of our paper revolves around the use of 3D feature grids. We define a 3D feature grid as a D x H x W grid where each element on the grid can be either a scalar value or a vector. D, H, and W are each an integer. A 3D feature grid is useful as it can contain various types of information and can represent these information in different 3D camera spaces. In total, we use four different types of 3D feature grids: + +1. 3D Feature Grid for Visual Hull $(G_M)$ In Fig. 6, we illustrate how a 3D Feature Grid for Visual Hull is obtained if we only have 2 views - View 1 (Primary view) and View 2 (Secondary view). First, given a $256 \times 256$ mask of View 2, we replicate the mask pixels (i.e. the non-empty pixels) 256 times in the z-dimension (camera direction), giving us $M_2 \times 256$ elements in the 3D camera space of View 2, where $M_2$ represents the number of mask pixels in the mask of View 2. We do + +![](images/7cb376422de7b0b1c6825755d6ad09c98507c2570973e4e2872135165c4ead55.jpg) +Fig. 6: 3D Feature Grid - Visual Hull. Above shows how $G_{M}$ is extracted. + +the same for View 1 to get $M_1 \times 256$ elements in the 3D camera space of View 1. The elements belonging to View 2 are then rotated or transformed into the 3D camera space of View 1 and placed together with the elements that belong to View 1. Lastly, we take the 3D intersection of the two groups of elements to obtain a visual hull. The visual hull is stored in a 3D grid that corresponds to the 3D camera space of View 1, and this is basically a 3D Feature Grid for Visual Hull or $G_M$ . We will always store the visual hull in the 3D camera space of the primary view (instead of a secondary view). On a side note, $G_M$ can be generated with more than 2 views too. For example, with 3 views, the visual hull is formed by the 3D intersection of 3 groups of elements. + +$G_{M}$ is useful because it contains the structural information of both View 1 and View 2. Concretely, each element in $G_{M}$ is a binary value ('0' or '1'). '0' means the element or grid position is unoccupied, and '1' means the element is occupied. 
Together, these elements represent a possibility space for occupancy. No part of the groundtruth human body mesh can be outside of this possibility space, and this is very useful information for a pixel-aligned implicit model whose task is to predict and reconstruct a human body mesh. + +2. 3D Feature Grid for Front Normals $(G_N)$ However, $G_M$ does not fully capture all the relevant information from View 2. Specifically, only mask information from View 2 is captured. If we look at the mask of View 2 in Fig. 6, we cannot actually differentiate the outlines of the arms from that of the torso, or the outlines of the person's left thigh from the right thigh. This information (the outlines) is not captured by the mask but is captured by the front normal map of View 2. Thus, we introduce 3D Feature Grid for Front Normals or $G_N$ as a complement to $G_M$ . $G_N$ is similar to $G_M$ except that each element on its 3D grid is a normal vector rather than a scalar occupancy value. $G_N$ is obtained in a manner similar to the first row of Fig. 6 except that the mask of View 2 is replaced by the front normal map of View 2. First, given a $3 \times 256 \times 256$ front normal map of View 2, we replicate the normal pixels (i.e. only the non-empty pixels) 256 times in the z-dimension (camera direction), giving us $N_2 \times 256$ elements (i.e. vectors) in the 3D camera space of View 2, where $N_2$ represents the number of normal pixels in the front normal map of View 2. The elements belonging to View 2 are then rotated or transformed into the 3D camera space of View 1 and then stored in a 3D feature grid that corresponds to View 1's 3D camera space. This grid is the 3D Feature Grid for Front Normals or $G_N$ . Like $G_M$ , $G_N$ is also used as an input in the 1st stage of our 3DFG-PIFu. + +![](images/84ec8fb46f8f5b9903170bf387b785858959d1566763f536f97b90c41ef2a690.jpg) +Base Mesh (viewed from 3 different angles) +Fig. 7: View visibility of different views + +![](images/0934a6da62f5132d1fbd85806b7e6c9e58262571feb0cd1da0c2c2d2d2707baf.jpg) +Visibility of View 1 +(View 1 = a Frontal view) + +![](images/d8b22d2022b394e7951f21d8cd2d8efdbf6eb799e3781a265a39f1194a457a2b.jpg) +Visibility of View 2 (View $2 =$ a Left view) + +3. 3D Feature Grid for SDF values $(G_S, G_S', G_F)$ Given an input view, a single-view pixel-aligned implicit model produces a human body mesh that is oriented in the 3D camera space of that input view. But once we have the mesh, we can transform or rotate the mesh into the 3D camera space of any view. This means that a mesh produced using View 1 can be transformed from its initial 3D camera space of View 1 to the 3D camera space of View 2. Once oriented to the 3D camera space of View 2, the mesh becomes a useful prior for a pixel-aligned implicit model that is trying to use View 2 to predict a human body mesh. + +But we cannot feed a mesh, which consists of vertices and faces, into a pixel-aligned implicit model. To resolve this, we propose converting the mesh into a 3D feature grid of SDF values. This 3D grid will correspond to the 3D camera space of View 2, and each element in the 3D grid is a truncated SDF value that ranges from -1 to 1, where the value of 0 represents a mesh surface. + +This grid, which is 3D Feature Grid for SDF values, is a simple and effective way to condition a pixel-aligned implicit model on a mesh (as a prior). We will elaborate more on the usefulness of such a prior in Section 3.2. 
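To make the first two grids concrete, below is a minimal NumPy sketch of how $G_M$ and $G_N$ could be built. It assumes orthographic (weak-perspective) cameras whose image planes span $[-1,1] \times [-1,1]$, a $256^3$ grid covering $[-1,1]^3$ in the primary view's camera space, and a $3 \times 3$ rotation `R_sec` that maps primary-view coordinates into the secondary view's camera space; the names are illustrative and the released implementation may differ.

```python
# Illustrative sketch (not the released code) of the visual-hull grid G_M and the
# front-normal grid G_N. Replicating mask or normal pixels along the camera
# direction is equivalent to sampling the 2D map at the projected (x, y) location
# of every voxel, which is what `occupancy_from_mask` does.
import numpy as np

RES = 256

def make_grid_points(res=RES):
    """Voxel centres of a res^3 grid covering [-1, 1]^3 (primary-view camera space)."""
    axis = np.linspace(-1.0, 1.0, res)
    zz, yy, xx = np.meshgrid(axis, axis, axis, indexing="ij")  # D, H, W ordering
    return np.stack([xx, yy, zz], axis=-1)                     # (res, res, res, 3)

def project_to_pixels(points, res=RES):
    """Orthographic projection: drop z and map x, y in [-1, 1] to pixel indices."""
    px = ((points[..., 0] + 1.0) * 0.5 * (res - 1)).round().astype(int)
    py = ((1.0 - (points[..., 1] + 1.0) * 0.5) * (res - 1)).round().astype(int)
    inside = (px >= 0) & (px < res) & (py >= 0) & (py < res)
    return px.clip(0, res - 1), py.clip(0, res - 1), inside

def occupancy_from_mask(points, mask):
    """1 where a voxel projects onto a mask pixel of the given view, else 0."""
    px, py, inside = project_to_pixels(points)
    return (mask[py, px] > 0) & inside

def build_G_M(mask_primary, mask_secondary, R_sec):
    """Visual hull: intersection of the back-projected masks, stored in primary space."""
    pts = make_grid_points()
    occ_primary = occupancy_from_mask(pts, mask_primary)
    occ_secondary = occupancy_from_mask(pts @ R_sec.T, mask_secondary)  # re-oriented
    return (occ_primary & occ_secondary).astype(np.float32)

def build_G_N(normal_secondary, mask_secondary, R_sec):
    """Normal vectors of the secondary view, stored in the primary view's grid."""
    pts_in_sec = make_grid_points() @ R_sec.T
    px, py, inside = project_to_pixels(pts_in_sec)
    valid = (mask_secondary[py, px] > 0) & inside
    return (normal_secondary[py, px] * valid[..., None]).astype(np.float32)
```

With more than two views, `build_G_M` simply intersects one back-projected mask per view, as described above.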
In our 3DFG-PIFu, 3D Feature Grid for SDF values is used as a prior in the 2nd stage (see Fig. 5). In Fig. 5, we see three variants of 3D Feature Grid for SDF values: $G_{S}$ , $G_{S}^{\prime}$ , and $G_{F}$ . $G_{S}$ and $G_{S}^{\prime}$ represent SDF values from an unrotated and rotated base mesh respectively. $G_{F}$ represents SDF values from the final mesh. + +4. 3D Feature Grid for View Visibility $(G_V)$ In the 2nd stage (see Fig. 5), we use $G_S'$ as an input to the encoder. Since $G_S'$ contains a rotated base mesh in SDF form, we are essentially using the rotated base mesh as a prior in the encoder. + +This rotated base mesh already has an accurate structure and shape of a human body. Thus, given a view (e.g. View 2) in the 2nd stage, we only want to modify the rotated base mesh in regions where we are confident of editing. The regions that we are most confident of editing are the regions that are visible from that given view. Examples of such regions are shown in Fig. 7. If we are given View 2, for example, then we only want to edit the green regions of the base mesh, as shown in the rightmost column of Fig. 7. + +If the selected view in the 2nd stage is indeed View 2, then we want to have a 3D feature grid that contains all those green regions. Such a 3D feature grid would serve as a complement to the $G_S'$ by telling the pixel-aligned implicit model which part of the rotated base mesh should (and should not) be edited. This 3D feature grid is our 3D Feature Grid for View Visibility or $G_V$ . Each element in $G_V$ is a binary value (0 or 1). A value of 1 indicates that, at that grid position, there is a mesh surface and this mesh surface is visible from the view that is selected in the 2nd stage (as illustrated in Fig. 7). + +# 3.2 An Iterative Mechanism to Combine Appearance Details from Multiple Views + +Our iterative mechanism or pipeline to combine appearance details from multiple views is the 2nd stage of our 3DFG-PIFu. It refines and updates the base mesh. + +While a base mesh has a highly accurate structure, we observed that it often lacks fine-grained appearance details from all the given input views (primary view and secondary views(s)). This is illustrated in Fig. 9a and b. + +To resolve this, we designed a 2nd stage that focuses on capturing the fine-grained appearance details of each view. Our 2nd stage is outlined in Fig. 5. Firstly, we select a view $v$ from the set of input views. Then, as seen in the figure, we condition our encoder on the rotated base mesh $(G_S')$ , which already captured the coarse but accurate structural information from all the input views. Given such a conditioning, we allow the encoder to now focus on capturing fine-grained appearance details from selected view $v$ . + +To further ensure that appearance details are captured, we have two additional features in the 2nd stage. The first feature, which was just explained, is the use of $G_V$ (as shown in Fig. 5). By complementing $G_S'$ with $G_V$ , the encoder is able to identify which regions on the rotated base mesh are visible from view $v$ . Relative to invisible regions, the encoder will make less error modifying the visible regions. Thus, knowing where the visible regions are encourages the encoder to make more decisive and sharper modifications to these regions. + +The second additional feature in the 2nd stage is the use of Depth Oriented Sampling (DOS) from IntegratedPIFu [4]. As shown in Fig. 5, for a given view, we will generate a partial refined mesh. 
However, for a partial refined mesh, we are actually only interested in the regions on the mesh that are visible from that given view. For this reason, it makes sense to use DOS to train the encoder and MLP that are used in the 2nd stage. This is because DOS works best when reconstructing mesh surfaces that are directly facing the camera direction (i.e. mesh regions that are visible from the given view). We briefly explain DOS now. + +Our 1st stage model predicts coarse-grained occupancy (in or out) of sample points in a 3D space to produce the base mesh. In contrast, our 2nd stage model, with use of DOS, predicts fine-grained displacement values of the sample points in the camera direction to produce a partial refined mesh. + +Intuitively, given the base mesh as prior and the use of DOS, our 2nd stage model is trying to shift and adjust the base mesh's surface in the camera direction such that the resulting partial refined mesh better reflects the appearance details of the given views (see Fig. 8). + +Visibility-based Fusion Each given view is used to generate a partial refined mesh. We aim to use these partial refined meshes to update the original base mesh. To do so, we transform the partial refined meshes to the primary view's 3D camera space so that they are physically aligned with the base mesh. Then, we will use these partial refined meshes to update the values in $G_{S}$ , which is a $256 \times 256 \times 256$ 3D feature grid containing the SDF values of the base mesh. + +If a partial refined mesh is created from view $v$ , then this mesh will have the most accurate shape and geometry at regions that are visible from view $v$ . For + +![](images/80fd0bb6438c89f3472d314eb8647666a97a17a981d8fcdf2a49a0facc6f71c7.jpg) +Fig. 8: Illustration of our Iterative Mechanism + +![](images/33e977971b6fccfeb314c35f51be05cbfba99552bbedaeab68be6b06a9532843.jpg) +Fig.9: Evaluation of 3DFG-PIFu's 2nd Stage. + +this reason, we will identify locations on a partial refined mesh that are visible from its corresponding view and then extract the SDF values at these locations. So, for each partial refined mesh, these 'visible' SDF values are extracted and used to overwrite the $G_{S}$ grid. In the end, the updated $G_{S}$ , which is also referred to as our final mesh in SDF form $(G_{F})$ , will be a mix of SDF values from the base mesh and the partial refined mesh(es). To convert the $G_{F}$ to mesh form, we use the Marching Cube algorithm. + +# 3.3 SDF-based SMPL-X features + +In multi-view settings, it is possible to use methods, such as [18] and [9], to predict a SMPL-X mesh that is fairly close to the ground truth. Thus, some multi-view pixel-aligned implicit models, like DeepMultiCap [22] and SeSDF [1], use a SMPL-X mesh as a prior before predicting a human body mesh. In 3DFGPIFu, we also offer an option to use SMPL-X meshes as a prior. + +A well-known approach to incorporate a SMPL-X mesh as a prior in a pixel-aligned implicit model is via the use of voxel-aligned features introduced by PaMIR [23]. To obtain the voxel-aligned features, the SMPL-X mesh is first voxelized and then fed as an input to a 3D CNN, as shown in bottom of Fig. 4. Voxel-aligned features are produced by this 3D CNN. The voxel-aligned features are then used as an input to a MLP, which will produce a human body mesh. Voxel-aligned features are used in DeepMultiCap and SeSDF (with a PointNet). We can use voxel-aligned features in 3DFG-PIFu as well, as seen in Fig. 4. + +But, as Fig. 
4 shows, the features produced by the Encoder ('Pixel-aligned Features') are only fused with voxel-aligned features at the end of the pipeline. Moreover, the fusion is point-wise and localized. This means the pixel-aligned feature that corresponds to a sample point is fused only with the specific voxel-aligned feature that corresponds to the same sample point. In other words, there is no global interaction between voxel-aligned features and pixel-aligned features. + +We aim to design a method to fuse a SMPL-X mesh earlier in the pipeline and in a global manner. A recent method that does this is S-PIFu [3]. S-PIFu extracts a set of handcrafted 2D feature maps from a SMPL-X mesh. These maps are concatenated with the input image and then used as inputs at the start of the pipeline. However, useful 3D information is lost when S-PIFu reduces a SMPL-X mesh into a set of 2D handcrafted features. Thus, we propose our SDF-based SMPL-X features to directly replace the 2D handcrafted features. SDF-based + +![](images/e39d69375004551d426bd02f5483c45fb5f0916591342dadc60de1565de9d0d3.jpg) +Fig. 10: Qualitative evaluation with SOTA models + +SMPL-X features retain 3D information by directly converting a SMPL-X mesh into a 3D feature grid of SDF values. + +SDF-based SMPL-X features $(G_{X})$ is a 3D grid of SDF values (as seen in Fig. 4). $G_{X}$ is similar to $G_{S}$ , $G_{S}^{\prime}$ , and $G_{F}$ except that $G_{X}$ involves a SMPL-X mesh. To get $G_{X}$ , we first transform the SMPL-X mesh to the 3D camera space of the primary view. From the transformed SMPL-X mesh, we sample a 3D grid of SDF values. Each SDF value ranges from -1 to 1, where the value of 0 represents a surface on the SMPL-X mesh. + +As shown in Fig. 4, $G_{X}$ can be used together with PaMIR's voxel-aligned features, and we show later on that this combination yields the best results. + +# 4 Experiments + +As this is a sparse views set-up, we set the number of views $V = 2$ and set the angle between the two views as 90 degrees. It is feasible to use other angles as well. Later in Sect. 4.3, we experiment with $V > 2$ . + +# 4.1 Datasets + +In our experimental setup, we utilize the THuman2.0 dataset [17] as the training set for our models as well as other competing models. The THuman2.0 dataset comprises 526 high-quality, full-body scans (or meshes) of ethnic Chinese human subjects. A 80-20 train-test split of the dataset is used. For each training mesh, we render 36 RGB images (each spaced 10 degree apart) using a weak-perspective camera. For each training iteration, two views that are 90 degree apart are randomly selected. + +Furthermore, we use BUFF dataset [19] and MultiHuman dataset [22] for the evaluation of all models. No model is trained using these datasets. For BUFF dataset, we followed IntegratedPIFu [4] and performed systematic sampling (based on sequence number) on the dataset. This resulted in 101 human meshes that were used for evaluating the models. Utilizing systematic sampling allowed us to avoid meshes that have both the same human subject and the same pose. For MultiHuman dataset, all single human scans are used. + +Table 1: SOTA vs Ours. The IntegratedPIFu [4] used is its multi-view version. 'SM' indicates if a groundtruth SMPL-X mesh is used. 'HR' indicates if 1024x1024 RGB images are used. By default, 512x512 RGB images are used. + +
| Methods | SM | HR | THuman2.0 CD (10-5) | THuman2.0 P2S (10-5) | THuman2.0 Normal | BUFF CD (10-2) | BUFF P2S (10-2) | BUFF Normal | MultiHuman CD (10-5) | MultiHuman P2S (10-5) | MultiHuman Normal |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Multi-view PIFu | × | × | 10.79 | 17.03 | 5471 | 4.357 | 7.005 | 4839 | 8.197 | 9.843 | 6046 |
| IntegratedPIFu | × | ✓ | 10.05 | 15.75 | 5324 | 4.576 | 7.497 | 4738 | 8.481 | 9.970 | 5961 |
| DeepMultiCap | ✓ | × | 8.208 | 7.506 | 9589 | 12.45 | 14.78 | 12081 | 32.83 | 29.16 | 11518 |
| SeSDF | ✓ | × | 6.302 | 9.181 | 5388 | 3.848 | 5.779 | 5259 | 7.167 | 9.276 | 6157 |
| Ours (No HR, No SM) | × | × | 5.796 | 5.811 | 5386 | 2.509 | 2.286 | 4797 | 6.320 | 5.737 | 5352 |
| Ours (HR, No SM) | × | ✓ | 5.133 | 5.028 | 5317 | 2.508 | 2.121 | 4694 | 5.315 | 4.866 | 5116 |
| Ours (No HR, w SM) | ✓ | × | 3.560 | 3.139 | 5285 | 3.375 | 2.694 | 4758 | 5.633 | 5.070 | 5428 |
| Ours (HR, w SM) | ✓ | ✓ | 3.555 | 3.129 | 5212 | 3.412 | 2.700 | 4560 | 5.391 | 4.934 | 5003 |
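As a point of reference for the numbers above, the Chamfer distance (CD) and point-to-surface (P2S) columns can be approximated with the trimesh-based sketch below. This is not the authors' evaluation script: the sampling density, the direction conventions, and the 10^-k scaling used for reporting are assumptions here.

```python
# Rough sketch of the CD and P2S metrics reported in Table 1, using trimesh.
import trimesh

def p2s_distance(src_mesh, dst_mesh, n_points=10_000):
    """Mean distance from points sampled on src_mesh to the surface of dst_mesh."""
    points, _ = trimesh.sample.sample_surface(src_mesh, n_points)
    _, dist, _ = trimesh.proximity.closest_point(dst_mesh, points)
    return dist.mean()

def chamfer_and_p2s(recon_mesh, gt_mesh):
    recon_to_gt = p2s_distance(recon_mesh, gt_mesh)   # P2S
    gt_to_recon = p2s_distance(gt_mesh, recon_mesh)
    chamfer = 0.5 * (recon_to_gt + gt_to_recon)       # symmetric average
    return chamfer, recon_to_gt

# Usage (hypothetical file names):
# recon, gt = trimesh.load("recon.obj"), trimesh.load("gt.obj")
# cd, p2s = chamfer_and_p2s(recon, gt)
```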
+ +![](images/fefe78ed7d05bd3f46b3d1ec809f8659517cc6841cf808daf203437403f2e231.jpg) +Fig.11: SeSDF vs our 3DFG-PIFu. + +![](images/ca57291c87911ebcd2a166a48bdf8070e96fb506d7d3f0b1c6cb9631b0ae89e6.jpg) +Fig. 12: Qualitative evaluation of $G_{M}$ + +# 4.2 Comparison with State-of-the-art + +We compared our models against other existing models on multi-view clothed human reconstruction. The models we compared with include Multi-view PIFu [13], IntegratedPIFu (multi-view version) [4], DeepMultiCap [22], and SeSDF [1]. We also compared with DoubleField [15] and Data-Driven 3D Reconstruction method [24] in our Supp. Mat. In our quantitative evaluation, we use metrics that include Chamfer distance (CD), Point-to-Surface (P2S), and Normal reprojection error (Normal). These metrics are also used in [1,4,13,22]. + +Qualitative Evaluation We evaluate the methods qualitatively in Fig. 1 and Fig. 10. In these figures, we show the meshes produced by two of our models. Our first model (in column (e)) uses neither a SMPL-X mesh nor $1024 \times 1024$ high-res images. Our second model (in column (f)) does not use a SMPL-X mesh but uses $1024 \times 1024$ high-res images. Among the SOTA models, IntegratedPIFu uses high-res images, while DeepMultiCap and SeSDF use a groundtruth SMPL-X mesh. Comparison with SeSDF is shown in Fig. 11. We find that our models outperformed SOTA models in both structural accuracy and appearance details. + +Quantitative Evaluation In Tab. 1, we compared our models with existing methods quantitatively. Because different SOTA methods require different types of inputs (i.e. groundtruth SMPL-X or high-res images), and these different inputs may give additional advantage to a method, we decided to train four different versions of our model, with each version using a different combination of inputs as shown in the table. The table shows that our methods significantly outperform the existing models in all three datasets. See Supp. Mat. for more analysis. + +Table 2: Quantitative evaluation of ${G}_{M}$ + +
| Methods | THuman2.0 CD (10-5) | THuman2.0 P2S (10-5) | BUFF CD (10-2) | BUFF P2S (10-2) |
| --- | --- | --- | --- | --- |
| PIFu | 26.97 | 25.10 | 9.651 | 9.247 |
| PIFu + $G_M$ ($G_N$ not used) | 6.626 | 7.212 | 2.530 | 3.005 |
| PIFu + $G_M$ ($G_N$ is used) | 6.007 | 6.634 | 2.386 | 2.999 |
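All of the pixel-aligned implicit models compared in Tables 1 and 2 share the mesh-extraction step summarised in Section 1: the learned implicit function is evaluated on a grid of uniformly-spaced sample points and the mesh is recovered with Marching Cubes [11]. A minimal sketch of that step follows; `predict_occupancy` stands in for any trained implicit function, and the resolution, batching, and coordinate conventions are assumptions rather than the authors' settings.

```python
# Illustrative grid-sampling + Marching Cubes extraction for an occupancy-based model.
import numpy as np
import trimesh
from skimage import measure

def extract_mesh(predict_occupancy, res=256, batch=262_144, level=0.5):
    axis = np.linspace(-1.0, 1.0, res)
    zz, yy, xx = np.meshgrid(axis, axis, axis, indexing="ij")
    pts = np.stack([xx, yy, zz], axis=-1).reshape(-1, 3)       # (res^3, 3) sample points
    occ = np.concatenate([predict_occupancy(pts[i:i + batch])  # batched queries
                          for i in range(0, len(pts), batch)])
    occ = occ.reshape(res, res, res)
    verts, faces, _, _ = measure.marching_cubes(occ, level=level)
    verts = verts[:, ::-1] / (res - 1) * 2.0 - 1.0             # (z, y, x) indices -> (x, y, z) coords
    return trimesh.Trimesh(vertices=verts, faces=faces)
```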
+ +Table 3: Quantitative evaluation of $G_V$ at visible regions + +
| Methods | THuman2.0 CD (10-5) | THuman2.0 P2S (10-5) | BUFF CD (10-4) | BUFF P2S (10-4) |
| --- | --- | --- | --- | --- |
| No $G_V$ | 4.036 | 2.964 | 1.315 | 1.096 |
| With $G_V$ | 3.891 | 2.840 | 1.284 | 1.056 |
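Table 3 above isolates the contribution of $G_V$. For concreteness, the sketch below shows one way such a view-visibility grid could be derived from the rotated base mesh and its SDF grid, using ray casting in trimesh towards an orthographic camera; the surface threshold, the ray offset, and the helper names are assumptions, not details taken from the paper.

```python
# Illustrative construction of the view-visibility grid G_V from Section 3.1.
import numpy as np
import trimesh

def build_G_V(base_mesh, sdf_grid, grid_points, view_dir, eps=2.0 / 256):
    """base_mesh:  trimesh mesh already rotated into the selected view's camera space.
    sdf_grid:    (D, H, W) truncated SDF of that mesh.
    grid_points: (D, H, W, 3) voxel centres in the same camera space.
    view_dir:    unit vector pointing from the scene towards the camera."""
    surface = np.abs(sdf_grid) < eps                   # voxels lying on the mesh surface
    origins = grid_points[surface] + 1e-3 * view_dir   # step off the surface slightly
    directions = np.tile(view_dir, (len(origins), 1))
    blocked = base_mesh.ray.intersects_any(origins, directions)  # occluded by the mesh itself
    G_V = np.zeros(sdf_grid.shape, dtype=np.float32)
    G_V[surface] = (~blocked).astype(np.float32)       # 1 = surface voxel visible from the view
    return G_V
```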
+ +# 4.3 Ablation Studies + +Evaluation of the Different 3D Feature Grids Firstly, in order to assess the effectiveness of $G_{M}$ , we train and compare a single-view PIFu that is either not given or given $G_{M}$ as an additional input. The comparison is shown quantitatively in the first two rows of Tab. 2 and qualitatively in Fig. 12. Notably, with $G_{M}$ , the single-view PIFu can also outperform a Multi-view PIFu (1st row of Tab. 1). + +Next, as aforementioned, $G_{M}$ can be complemented with $G_{N}$ . Thus, we also show the results when $G_{M}$ is used with $G_{N}$ in a single-view PIFu (see last row of Tab. 2). The results clearly demonstrated the benefit of including $G_{N}$ . + +We also evaluated $G_V$ by training the 2nd stage of 3DFG-PIFu with or without $G_V$ . The results in Tab. 3 and Fig. 13 show that $G_V$ improves the partial refined meshes obtained in the 2nd stage. Aside: As only visible regions of partial refined meshes are used to form the final mesh, Tab. 3 must consider only visible regions. + +Evaluating our Iterative Mechanism (i.e. Our 2nd Stage) We also show that 3DFG-PIFu's 2nd stage indeed improves the base meshes from the 1st stage. See Fig. 9 and Tab. 4. The improved meshes show sharper appearance details. + +When more views are made available (i.e. $V > 2$ ), the 3DFG-PIFu can incrementally update and improve the current mesh without the need for additional training. We simply replace the base mesh with the current mesh and re-run the 2nd stage again. Results are shown in Fig 14. + +Evaluation of SDF-based SMPL- $X$ features In order to evaluate the effectiveness of our SDF-based SMPL-X features $(G_{X})$ , we train and compare a single-view PIFu that is given either (i) S-PIFu features, (ii) PaMIR's voxel-aligned features, (iii) our $G_{X}$ , or (iv) PaMIR's voxel-aligned features + our $G_{X}$ . + +![](images/3492c39eae036352e3f5bd688f513882c0b567a463da5412de271dfc63ac805b.jpg) +Fig. 13: Partial refined meshes obtained w and w/o $G_V$ + +A quantitative comparison is shown in Tab. 5. The table shows our $G_{X}$ outperformed S-PIFu features. Whether our $G_{X}$ is combined with voxel-aligned features or not, it clearly improves the performance of a model when in use. Qualitatively, Fig. 15 shows that combining PaMIR's voxel-aligned features with our $G_{X}$ yields the most robust results. + +# 5 Limitations and Conclusion + +In our Supp. Mat., we address concerns on 3DFG-PIFu's efficiency. In short, via a series of implementation tricks, we show 3DFG-PIFu is actually more efficient than roughly half of the existing SOTA methods. + +We have introduced 3DFG-PIFu, a multi-view pixel-aligned implicit model that uses 3D Feature Grids to fuse multi-view information. 3DFG-PIFu also proposed an iterative pipeline that combines appearance details from multiple views into a single mesh. Lastly, 3DFG-PIFu introduced SDF-based SMPL-X features, which is a new way of incorporating a SMPL-X mesh into a pixel-aligned implicit model. + +Table 4: Quantitative evaluation of 3DFG-PIFu's 2nd Stage + +
| Methods | THuman2.0 CD (10-5) | THuman2.0 P2S (10-5) | BUFF CD (10-2) | BUFF P2S (10-2) |
| --- | --- | --- | --- | --- |
| Base meshes (1st Stage) | 6.007 | 6.634 | 2.386 | 2.999 |
| Final meshes (2nd Stage) | 5.796 | 5.811 | 2.509 | 2.286 |
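Table 4 above compares the base meshes against the final meshes produced by visibility-based fusion (Section 3.2). The sketch below spells out that fusion step as described: SDF values from each partial refined mesh overwrite $G_S$ only at voxels visible from that mesh's source view, and the final mesh is then extracted from $G_F$ with Marching Cubes. The helpers `sdf_of_mesh` and `visible_mask` are hypothetical stand-ins for the SDF-sampling and visibility computations sketched earlier.

```python
# Illustrative visibility-based fusion (Section 3.2), not the released implementation.
import numpy as np
from skimage import measure

def visibility_based_fusion(G_S, partial_meshes, views, sdf_of_mesh, visible_mask):
    """G_S: (256, 256, 256) truncated SDF grid of the base mesh (primary-view space).
    partial_meshes: partial refined meshes already transformed into primary-view space.
    views: the source view of each partial refined mesh."""
    G_F = G_S.copy()
    for mesh, view in zip(partial_meshes, views):
        sdf_v = sdf_of_mesh(mesh)         # SDF grid sampled from this partial mesh
        vis = visible_mask(mesh, view)    # boolean grid: regions visible from `view`
        G_F[vis] = sdf_v[vis]             # overwrite only the trusted (visible) regions
    return G_F

def final_mesh_from_grid(G_F):
    # The surface sits at SDF value 0; Marching Cubes recovers the final mesh.
    verts, faces, _, _ = measure.marching_cubes(G_F, level=0.0)
    return verts, faces
```

With more views, additional partial refined meshes can be folded in by the same loop, which is in the spirit of the incremental update described in Section 4.3.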
+ +Table 5: Quantitative evaluation of ${G}_{X}$ + +
| Methods | THuman2.0 CD (10-5) | THuman2.0 P2S (10-5) | BUFF CD (10-2) | BUFF P2S (10-2) |
| --- | --- | --- | --- | --- |
| S-PIFu Features | 4.488 | 4.030 | 6.812 | 6.880 |
| Voxel-aligned Features | 4.104 | 3.740 | 8.037 | 9.225 |
| $G_X$ (Ours) | 4.352 | 3.734 | 6.351 | 6.641 |
| Voxel-aligned Features + $G_X$ (Ours) | 3.970 | 3.483 | 8.012 | 8.827 |
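Table 5 above evaluates the SDF-based SMPL-X features. A rough sketch of building $G_X$ as described in Section 3.3 follows: the registered SMPL-X mesh is transformed into the primary view's camera space and a truncated SDF is sampled on a regular grid. The sign convention, the truncation scale, and the use of trimesh's signed-distance query are assumptions, and evaluating a full $256^3$ grid this way is slow; the snippet is meant only to make the data flow explicit.

```python
# Illustrative construction of the SDF-based SMPL-X feature grid G_X (Section 3.3).
import numpy as np
import trimesh

def build_G_X(smplx_mesh, world_to_primary, res=256, truncation=0.1):
    mesh = smplx_mesh.copy()
    mesh.apply_transform(world_to_primary)        # into the primary view's camera space
    axis = np.linspace(-1.0, 1.0, res)
    zz, yy, xx = np.meshgrid(axis, axis, axis, indexing="ij")
    pts = np.stack([xx, yy, zz], axis=-1).reshape(-1, 3)
    # trimesh convention: positive inside the mesh, negative outside.
    sdf = trimesh.proximity.signed_distance(mesh, pts)
    sdf = np.clip(sdf / truncation, -1.0, 1.0)    # truncate to [-1, 1]; 0 marks the surface
    return sdf.reshape(res, res, res).astype(np.float32)
```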
![](images/ca08776b13e539eb4ac6880654b3752e392632c430a1c08e284c169f2d532f46.jpg)
Fig. 14: Effect of using more views in 3DFG-PIFu.

![](images/84a01eba7cb18353ce7d0a07913ec46ee18d8436b98e92fce92e258d22e67f21.jpg)
Fig. 15: Qualitative evaluation of $G_{X}$

Acknowledgements This research work is supported by the Agency for Science, Technology and Research (A*STAR) under its MTC Programmatic Funds (Grant No. M23L7b0021).

# References

1. Cao, Y., Han, K., Wong, K.Y.K.: Sesdf: Self-evolved signed distance field for implicit 3d clothed human reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4647-4657 (2023)
2. Chan, K., Lin, G., Zhao, H., Lin, W.: S-pifu: Integrating parametric human models with pifu for single-view clothed human reconstruction. Advances in Neural Information Processing Systems 35, 17373-17385 (2022)
3. Chan, K., Lin, G., Zhao, H., Lin, W.: S-pifu: Integrating parametric human models with pifu for single-view clothed human reconstruction. In: Advances in Neural Information Processing Systems (2022)
4. Chan, K.Y., Lin, G., Zhao, H., Lin, W.: Integratedpifu: Integrated pixel aligned implicit function for single-view human reconstruction. In: Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part II. pp. 328-344. Springer (2022)
5. Chan, K.Y., Liu, F., Lin, G., Foo, C.S., Lin, W.: Fine structure-aware sampling: A new sampling training scheme for pixel-aligned implicit models in single-view human reconstruction. In: Proceedings of the AAAI Conference on Artificial Intelligence. vol. 38, pp. 964-971 (2024)
6. Chan, K.Y., Liu, F., Lin, G., Foo, C.S., Lin, W.: R-cyclic diffuser: Reductive and cyclic latent diffusion for 3d clothed human digitalization. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 10304-10313 (2024)
7. Gong, X., Song, L., Zheng, M., Planche, B., Chen, T., Yuan, J., Doermann, D., Wu, Z.: Progressive multi-view human mesh recovery with self-supervision. In: Proceedings of the AAAI Conference on Artificial Intelligence. vol. 37, pp. 676-684 (2023)
8. Hong, Y., Zhang, J., Jiang, B., Guo, Y., Liu, L., Bao, H.: Stereopifu: Depth aware clothed human digitization via stereo vision. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 535-545 (2021)
9. Kolotouros, N., Pavlakos, G., Jayaraman, D., Daniilidis, K.: Probabilistic modeling for human mesh recovery. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11605-11614 (2021)
10. Liang, J., Lin, M.C.: Shape-aware human pose and shape reconstruction using multi-view images. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 4352-4362 (2019)
11. Lorensen, W.E., Cline, H.E.: Marching cubes: A high resolution 3d surface construction algorithm. ACM siggraph computer graphics 21(4), 163-169 (1987)
12. Pavlakos, G., Choutas, V., Ghorbani, N., Bolkart, T., Osman, A.A.A., Tzionas, D., Black, M.J.: Expressive body capture: 3D hands, face, and body from a single image. In: Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR). pp. 10975-10985 (2019)
13. Saito, S., Huang, Z., Natsume, R., Morishima, S., Kanazawa, A., Li, H.: Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 2304-2314 (2019)

14.
Saito, S., Simon, T., Saragih, J., Joo, H.: Pifuhd: Multi-level pixel-aligned implicit function for high-resolution 3d human digitization. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 84-93 (2020) +15. Shao, R., Zhang, H., Zhang, H., Chen, M., Cao, Y.P., Yu, T., Liu, Y.: Doublefield: Bridging the neural surface and radiance fields for high-fidelity human reconstruction and rendering. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15872-15882 (2022) +16. Shao, R., Zheng, Z., Zhang, H., Sun, J., Liu, Y.: Diffustereo: High quality human reconstruction via diffusion-based stereo using sparse cameras. In: Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXII. pp. 702-720. Springer (2022) +17. Yu, T., Zheng, Z., Guo, K., Liu, P., Dai, Q., Liu, Y.: Function4d: Real-time human volumetric capture from very sparse consumer rgbd sensors. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR2021) (June 2021) +18. Yu, Z., Zhang, L., Xu, Y., Tang, C., Tran, L., Keskin, C., Park, H.S.: Multiview human body reconstruction from uncalibrated cameras. In: Advances in Neural Information Processing Systems (2022) +19. Zhang, C., Pujades, S., Black, M.J., Pons-Moll, G.: Detailed, accurate, human shape estimation from clothed 3d scan sequences. In: The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (July 2017) +20. Zhang, H., Tian, Y., Zhou, X., Ouyang, W., Liu, Y., Wang, L., Sun, Z.: Pymaf: 3d human pose and shape regression with pyramidal mesh alignment feedback loop. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 11446-11456 (2021) +21. Zhao, F., Yang, W., Zhang, J., Lin, P., Zhang, Y., Yu, J., Xu, L.: Humannerf: Efficiently generated human radiance field from sparse inputs. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7743-7753 (2022) +22. Zheng, Y., Shao, R., Zhang, Y., Yu, T., Zheng, Z., Dai, Q., Liu, Y.: Deepmulticap: Performance capture of multiple characters using sparse multiview cameras. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 6239-6249 (2021) +23. Zheng, Z., Yu, T., Liu, Y., Dai, Q.: Pamir: Parametric model-conditioned implicit representation for image-based human reconstruction. IEEE transactions on pattern analysis and machine intelligence 44(6), 3170-3184 (2021) +24. Zins, P., Xu, Y., Boyer, E., Wuhrer, S., Tung, T.: Data-driven 3d reconstruction of dressed humans from sparse views. In: 2021 International Conference on 3D Vision (3DV). pp. 494-504. 
IEEE (2021) \ No newline at end of file diff --git a/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/images.zip b/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..8a55841ebb96b1f59a4dc6cbd0dfaca968c3e8e0 --- /dev/null +++ b/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f29f6fead64d3349826a3de472d6c0648f375cac24036b2c0d2fc8963e592da2 +size 640665 diff --git a/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/layout.json b/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..0b69574fa3d8d5819b5be8c9d6597554d002fe10 --- /dev/null +++ b/2024/3DFG-PIFu_ 3D Feature Grids for Human Digitization from Sparse Views/layout.json @@ -0,0 +1,9117 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 159, + 111, + 454, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 111, + 454, + 148 + ], + "spans": [ + { + "bbox": [ + 159, + 111, + 454, + 148 + ], + "type": "text", + "content": "3DFG-PIFu: 3D Feature Grids for Human Digitization from Sparse Views" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 138, + 167, + 475, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 167, + 475, + 192 + ], + "spans": [ + { + "bbox": [ + 138, + 167, + 475, + 192 + ], + "type": "text", + "content": "Kennard Yanting Chan" + }, + { + "bbox": [ + 138, + 167, + 475, + 192 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 138, + 167, + 475, + 192 + ], + "type": "text", + "content": ", Fayao Liu" + }, + { + "bbox": [ + 138, + 167, + 475, + 192 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 138, + 167, + 475, + 192 + ], + "type": "text", + "content": ", Guosheng Lin" + }, + { + "bbox": [ + 138, + 167, + 475, + 192 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 138, + 167, + 475, + 192 + ], + "type": "text", + "content": ", Chuan Sheng Foo" + }, + { + "bbox": [ + 138, + 167, + 475, + 192 + ], + "type": "inline_equation", + "content": "^{2,3}" + }, + { + "bbox": [ + 138, + 167, + 475, + 192 + ], + "type": "text", + "content": ", and Weisi Lin" + }, + { + "bbox": [ + 138, + 167, + 475, + 192 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 209, + 201, + 403, + 234 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 209, + 201, + 403, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 201, + 403, + 213 + ], + "spans": [ + { + "bbox": [ + 209, + 201, + 403, + 213 + ], + "type": "text", + "content": "1 Nanyang Technological University, Singapore" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 213, + 213, + 399, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 213, + 213, + 399, + 223 + ], + "spans": [ + { + "bbox": [ + 213, + 213, + 399, + 223 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 213, + 213, + 399, + 223 + ], + "type": "text", + "content": " Institute for Infocomm Research, A*STAR" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 214, + 224, + 397, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 
214, + 224, + 397, + 234 + ], + "spans": [ + { + "bbox": [ + 214, + 224, + 397, + 234 + ], + "type": "text", + "content": "3 Centre for Frontier AI Research, A*STAR" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 160, + 262, + 455, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 262, + 455, + 483 + ], + "spans": [ + { + "bbox": [ + 160, + 262, + 455, + 483 + ], + "type": "text", + "content": "Abstract. Pixel-aligned implicit models, such as Multi-view PIFu, Deep-MultiCap, DoubleField, and SeSDF, are well-established methods for reconstructing a clothed human from sparse views. However, given " + }, + { + "bbox": [ + 160, + 262, + 455, + 483 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 160, + 262, + 455, + 483 + ], + "type": "text", + "content": " images, these models would only combine features from these images in a point-wise and localized manner. In other words, the " + }, + { + "bbox": [ + 160, + 262, + 455, + 483 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 160, + 262, + 455, + 483 + ], + "type": "text", + "content": " images are processed individually and are only combined in a very narrow fashion at the end of the pipeline. To a large extent, this defeats the purpose of having multi-view information since the multi-view task in question is predominantly treated as a single-view task. To resolve this, we introduce 3DFG-PIFu, a pixel-aligned implicit model that exploits multi-view information right from the start and all the way to the end of the pipeline. Our 3DFG-PIFu makes use of 3D Feature Grids to combine features from " + }, + { + "bbox": [ + 160, + 262, + 455, + 483 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 160, + 262, + 455, + 483 + ], + "type": "text", + "content": " images in a global manner (rather than point-wise or localized) and throughout the pipeline. Other than the 3D Feature Grids, 3DFG-PIFu also proposes an iterative mechanism that refines and updates an existing output human mesh using the different views. Moreover, 3DFG-PIFu introduces SDF-based SMPL-X features, which is a new method of incorporating a SMPL-X mesh into a pixel-aligned implicit model. Our experiments show that 3DFG-PIFu significantly outperforms SOTA models. Our code is released at https://github.com/kcyt/3DFG-PIFu." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 160, + 491, + 453, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 491, + 453, + 514 + ], + "spans": [ + { + "bbox": [ + 160, + 491, + 453, + 514 + ], + "type": "text", + "content": "Keywords: 3D Clothed Human Reconstruction from Sparse Views " + }, + { + "bbox": [ + 160, + 491, + 453, + 514 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 491, + 453, + 514 + ], + "type": "text", + "content": " 3D Feature Grids " + }, + { + "bbox": [ + 160, + 491, + 453, + 514 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 491, + 453, + 514 + ], + "type": "text", + "content": " Pixel-aligned Implicit Models" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 533, + 230, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 533, + 230, + 547 + ], + "spans": [ + { + "bbox": [ + 132, + 533, + 230, + 547 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 557, + 482, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 557, + 482, + 668 + ], + "spans": [ + { + "bbox": [ + 130, + 557, + 482, + 668 + ], + "type": "text", + "content": "The field of 3D reconstruction of human bodies has gained considerable interest due to its potential use in various domains such as virtual reality, game production, and 3D printing. Pixel-aligned implicit models, such as Multi-view PIFu [13] DeepMultiCap [22], DoubleField [15], and SeSDF [1] are an influential class of deep learning methods for reconstructing clothed human bodies from sparse views. These models learn an implicit function that represents the surface of a human body. During testing, the learned implicit function is sampled using a grid of uniformly-spaced sample points. For each sample point, the learned implicit function (or the model) will return a predicted occupancy label (i.e." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 136, + 113, + 476, + 191 + ], + "blocks": [ + { + "bbox": [ + 136, + 113, + 476, + 191 + ], + "lines": [ + { + "bbox": [ + 136, + 113, + 476, + 191 + ], + "spans": [ + { + "bbox": [ + 136, + 113, + 476, + 191 + ], + "type": "image", + "image_path": "48ec879dd53d26453007c6276e8dd7145b694c8b9d4902b7a817233b8c82fbb3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 154, + 204, + 455, + 434 + ], + "blocks": [ + { + "bbox": [ + 190, + 193, + 423, + 204 + ], + "lines": [ + { + "bbox": [ + 190, + 193, + 423, + 204 + ], + "spans": [ + { + "bbox": [ + 190, + 193, + 423, + 204 + ], + "type": "text", + "content": "Fig. 1: Our models (last two columns) vs SOTA models." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 154, + 204, + 455, + 434 + ], + "lines": [ + { + "bbox": [ + 154, + 204, + 455, + 434 + ], + "spans": [ + { + "bbox": [ + 154, + 204, + 455, + 434 + ], + "type": "image", + "image_path": "063c760593ad20299e6bd7f63b1becdb643b372ebc02208d51e045ed0b6c34dc.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 435, + 482, + 485 + ], + "lines": [ + { + "bbox": [ + 132, + 435, + 482, + 485 + ], + "spans": [ + { + "bbox": [ + 132, + 435, + 482, + 485 + ], + "type": "text", + "content": "Fig. 2: (a) Existing multi-view pixel-aligned implicit models vs (b) Our 3DFG-PIFu. whether the sample point is 'inside' or 'outside' of a human body surface). Once a grid of predicted occupancy labels is obtained, a human body mesh can be extracted from this grid using the Marching Cubes algorithm [11]." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "text", + "content": "In order to predict the occupancy labels, existing multi-view pixel-aligned implicit models [1, 13, 15, 22], when given " + }, + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "text", + "content": " views or images, would compute " + }, + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "text", + "content": " different point embeddings for each sample point. This is illustrated in Fig. 2a for the case where " + }, + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "inline_equation", + "content": "V = 2" + }, + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "text", + "content": ". For each sample point, its " + }, + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "text", + "content": " point embeddings would be fused together into a single point embedding via either simple averaging [13] or weighted averaging [1, 15, 22], as illustrated in the same figure. These fused point embeddings are then converted into predicted occupancy labels, from which a human body mesh can be obtained. It is important to note that the \"Point Embeddings for View 1\" grid and the \"Point Embeddings for View 2\" grid in Fig. 2a are in different 3D coordinate spaces. The former is in the 3D camera space of View 1, and the latter is in the 3D camera space of View 2. This means that a point located at the top left corner of a grid may not correspond to the top left corner of another grid. We let the \"Fused Point Embeddings\" grid follow the 3D camera space of View 1 (It is possible to choose another 3D camera space, but that is trivial)." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 238, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 238, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 238, + 100 + ], + "type": "text", + "content": "K. Y. Chan et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 283 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 283 + ], + "type": "text", + "content": "As shown in Fig. 2a, there are two problems with existing multi-view pixel-aligned implicit models. 1. Firstly, the fusion of View 1 and View 2 are carried out in a point-wise and very localized manner. This is a problem because, as shown in the bidirectional red dashed arrow in Fig. 2a, there is no interaction between fused point embeddings, even if they are located close to each other. So if there is a sample point A that is closely surrounded by ten sample points, the existing multi-view pixel-aligned implicit models may assign those ten points with the same label and yet assign point A with an opposite label, which is an obvious error that would lead to a floating artefact. 2. Secondly, the fusion of View 1 and View 2 occurs at the end of the pipeline in a very simple manner (either simple or weighted averaging (e.g. attention)). To a large extent, the existing multiview pixel-aligned implicit models are not very different from their single-view counterparts except for the simplistic point-wise fusion of point embeddings at the end of the pipeline." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 285, + 482, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 285, + 482, + 416 + ], + "spans": [ + { + "bbox": [ + 130, + 285, + 482, + 416 + ], + "type": "text", + "content": "Hence, we propose 3DFG-PIFu, a pixel-aligned implicit model that rethinks how multi-view information is incorporated in its pipeline. One key feature of 3DFG-PIFu is its use of 3D Feature Grid(s). As seen in Fig. 2b, 3DFG-PIFu makes use of 3D Feature Grid(s) to extract structural information from View 2. The 3D Feature Grid, due to its inherent design, is able to easily orient the extracted information to a different camera space. Thus, we re-orient the 3D Feature Grid from the 3D camera space of View 2 to the 3D camera space of View 1. Now aligned with View 1, the transformed 3D Feature Grid can be concatenated with View 1 and processed by a deep neural network to form 'Fused Point Embeddings'. These fused point embeddings will then be further refined using the fine-grained information from View 2 (Section 3.2)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 418, + 482, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 418, + 482, + 477 + ], + "spans": [ + { + "bbox": [ + 130, + 418, + 482, + 477 + ], + "type": "text", + "content": "Crucially, this means that, unlike existing models, the fusion of multi-view information in 3DFG-PIFu occurs from the start to the end of the pipeline. 
Moreover, the multi-view fusion in 3DFG-PIFu occurs in a global and broad manner (rather than point-wise and localized) as information from View 2 is allowed to influence each and every fused point embedding." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 478, + 482, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 478, + 482, + 550 + ], + "spans": [ + { + "bbox": [ + 130, + 478, + 482, + 550 + ], + "type": "text", + "content": "In total, 3DFG-PIFu makes three contributions: 1. The aforementioned 3D Feature Grids that fuse multi-view information (Section 3.1). 2. An iterative mechanism that refines and updates an existing output human mesh using the fine-grained information from the different views (Section 3.2). 3. Introduction of SDF-based SMPL-X features, which is a new method of incorporating a SMPL-X mesh into a pixel-aligned implicit model (Section 3.3)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 574, + 237, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 574, + 237, + 586 + ], + "spans": [ + { + "bbox": [ + 132, + 574, + 237, + 586 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 131, + 604, + 375, + 616 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 604, + 375, + 616 + ], + "spans": [ + { + "bbox": [ + 131, + 604, + 375, + 616 + ], + "type": "text", + "content": "2.1 Human Reconstruction from Sparse Views" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 629, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 481, + 665 + ], + "type": "text", + "content": "Methods that reconstruct a human body mesh from a sparse number of images can be broadly classified into two classes: Parametric approaches and non-parametric approaches." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DFG-PIFu" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 152 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 152 + ], + "type": "text", + "content": "Parametric approaches, such as [7,9,10,20], reconstruct a human body surface by predicting parameters of a human parametric model (e.g. SMPL-X [12]). However, these methods can only produce human body meshes that are clothless." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 152, + 479, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 152, + 479, + 199 + ], + "spans": [ + { + "bbox": [ + 130, + 152, + 479, + 199 + ], + "type": "text", + "content": "On the other hand, non-parametric methods do not use a human parametric model. An important subclass of non-parametric methods is pixel-aligned implicit models. 
There are other subclasses like NERF methods (e.g. [21]), but they have yet to outperform pixel-aligned implicit models." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 200, + 479, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 200, + 479, + 224 + ], + "spans": [ + { + "bbox": [ + 132, + 200, + 479, + 224 + ], + "type": "text", + "content": "Pixel-aligned implicit models can be single-view (e.g. [2,5,6]) or multi-view (e.g. Multi-view PIFu [13], DeepMultiCap [22], DoubleField [15], and SeSDF [1])." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 225, + 480, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 225, + 480, + 296 + ], + "spans": [ + { + "bbox": [ + 130, + 225, + 480, + 296 + ], + "type": "text", + "content": "As a side note, there are also pixel-aligned implicit models that use stereo images to reconstruct a clothed human mesh. However, these models, that include StereoPIFu [8] and DiffuStereo [16], require pairs of images to be taken at two similar viewpoints. This is often infeasible in many real-life applications and is thus not used in our experiments. Instead, our benchmarks are the aforementioned Multi-view PIFu, DeepMultiCap, DoubleField, SeSDF, and a few others." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 297, + 480, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 297, + 480, + 344 + ], + "spans": [ + { + "bbox": [ + 130, + 297, + 480, + 344 + ], + "type": "text", + "content": "As mentioned in Section 1 ('Introduction'), these benchmark models suffer from the problems of: 1. Fusing multi-view information in a very narrow or pointwise manner, and 2. Fusing multi-view information only at the very end of the pipeline. To resolve this problem, we introduce 3DFG-PIFu." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 364, + 201, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 364, + 201, + 376 + ], + "spans": [ + { + "bbox": [ + 132, + 364, + 201, + 376 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 390, + 480, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 390, + 480, + 498 + ], + "spans": [ + { + "bbox": [ + 130, + 390, + 480, + 498 + ], + "type": "text", + "content": "3DFG-PIFu is a two-staged model that works as long as the number of views " + }, + { + "bbox": [ + 130, + 390, + 480, + 498 + ], + "type": "inline_equation", + "content": "V > 1" + }, + { + "bbox": [ + 130, + 390, + 480, + 498 + ], + "type": "text", + "content": " and the camera calibrations are known. One view will be randomly picked as the primary view and the other view(s) will be designated as the secondary view(s). Let us first assume " + }, + { + "bbox": [ + 130, + 390, + 480, + 498 + ], + "type": "inline_equation", + "content": "V = 2" + }, + { + "bbox": [ + 130, + 390, + 480, + 498 + ], + "type": "text", + "content": ". This means that we have one primary view and one secondary view. As shown in Fig. 3, front and back normal maps, as well as a mask, can be predicted from a RGB image. We use the method outlined in PIFuHD [14] to predict the normal maps. Then, from the predicted normal maps, we can easily extract out the mask. Hereafter, we refer to a view as a collection of a RGB image, a front normal map, a back normal map, and a mask." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "spans": [ + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "text", + "content": "1st Stage In the first stage (refer to Fig. 4 and assume " + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "inline_equation", + "content": "V = 2" + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "text", + "content": "), we first generate two 3D feature grids (" + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "inline_equation", + "content": "G_{N}" + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "inline_equation", + "content": "G_{M}" + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "text", + "content": ") from the secondary view's front normal map and mask. We will elaborate on how " + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "inline_equation", + "content": "G_{N}" + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "inline_equation", + "content": "G_{M}" + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "text", + "content": " are generated later. In short, the " + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "inline_equation", + "content": "G_{N}" + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "inline_equation", + "content": "G_{M}" + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "text", + "content": " contain normal pixels and mask pixels, respectively, from the secondary view, but these pixels have been transformed into the 3D camera space of the primary view. Then, " + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "inline_equation", + "content": "G_{N}" + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "inline_equation", + "content": "G_{M}" + }, + { + "bbox": [ + 130, + 512, + 480, + 607 + ], + "type": "text", + "content": " are concatenated with the primary view's RGB image, front normal map, and back normal map. The concatenated output is sent to an encoder, which is a 2D CNN. The encoder will produce a" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 193, + 628, + 417, + 670 + ], + "blocks": [ + { + "bbox": [ + 193, + 628, + 417, + 670 + ], + "lines": [ + { + "bbox": [ + 193, + 628, + 417, + 670 + ], + "spans": [ + { + "bbox": [ + 193, + 628, + 417, + 670 + ], + "type": "image", + "image_path": "52226114b502a8ecad1a814df01ecd361b0a3ee0390eb9c0f1f213cabb032dae.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 225, + 671, + 387, + 681 + ], + "lines": [ + { + "bbox": [ + 225, + 671, + 387, + 681 + ], + "spans": [ + { + "bbox": [ + 225, + 671, + 387, + 681 + ], + "type": "text", + "content": "Fig.3: Predictions from a RGB image." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 238, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 238, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 238, + 101 + ], + "type": "text", + "content": "K. Y. Chan et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 137, + 114, + 474, + 297 + ], + "blocks": [ + { + "bbox": [ + 137, + 114, + 474, + 297 + ], + "lines": [ + { + "bbox": [ + 137, + 114, + 474, + 297 + ], + "spans": [ + { + "bbox": [ + 137, + 114, + 474, + 297 + ], + "type": "image", + "image_path": "4ec1c63e6832fccfea46d4300abbb68be53eb93b2965813d4c5d041fff1cd8b0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 298, + 480, + 331 + ], + "lines": [ + { + "bbox": [ + 130, + 298, + 480, + 331 + ], + "spans": [ + { + "bbox": [ + 130, + 298, + 480, + 331 + ], + "type": "text", + "content": "Fig. 4: 1st Stage of 3DFG-PIFu. Each view includes the mask and the predicted front and back normal maps. The primary view and the feature grids extracted from the secondary view(s) are fed into an encoder and MLP to generate a base mesh." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 332, + 479, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 332, + 479, + 357 + ], + "spans": [ + { + "bbox": [ + 130, + 332, + 479, + 357 + ], + "type": "text", + "content": "set of feature maps that is used by a Multilayer Perceptron (MLP) to produce a human body mesh, which we refer to as a base mesh." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 366, + 480, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 366, + 480, + 437 + ], + "spans": [ + { + "bbox": [ + 130, + 366, + 480, + 437 + ], + "type": "text", + "content": "2nd Stage While the base mesh from the 1st stage has good structural accuracy, it fails to capture the more fine-grained appearance details (e.g. clothes wrinkles) from all the views. Thus, a 2nd stage is needed. The 2nd stage of 3DFG-PIFu is an iterative mechanism or pipeline to combine appearance details from multiple views. We will briefly describe the flow of the pipeline here, but the details and rationales behind each step in the pipeline will be explained in Section 3.2." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "spans": [ + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "text", + "content": "At the start of the 2nd stage, a view is picked from the set of input views. Assume that " + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "inline_equation", + "content": "V = 2" + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "text", + "content": ", and the view that we selected is the secondary view. 
The secondary view, as well as the base mesh from the 1st stage, will be used as inputs in the 2nd stage (shown in Fig. 5). First, we will rotate (or transform) the base mesh into the 3D camera space of the secondary view. From this rotated base mesh, we would generate two additional 3D feature grids (" + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "inline_equation", + "content": "G_V" + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "inline_equation", + "content": "G_S'" + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "text", + "content": "). " + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "inline_equation", + "content": "G_V" + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "inline_equation", + "content": "G_S'" + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "text", + "content": " will have the visibility information and the SDF values, respectively, of the rotated base mesh. We will elaborate on how " + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "inline_equation", + "content": "G_V" + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "inline_equation", + "content": "G_S'" + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "text", + "content": " are obtained later. " + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "inline_equation", + "content": "G_V" + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "inline_equation", + "content": "G_S'" + }, + { + "bbox": [ + 130, + 438, + 481, + 581 + ], + "type": "text", + "content": " and the secondary view will be concatenated together and fed into an encoder, which is a 2D CNN. This encoder will produce a set of feature maps that is used by a MLP to produce a partial refined mesh. The partial refined mesh will have the fine-grained appearance details of the secondary view." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 582, + 481, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 582, + 481, + 641 + ], + "spans": [ + { + "bbox": [ + 130, + 582, + 481, + 641 + ], + "type": "text", + "content": "Finally, we will obtain a 3D feature grid of SDF values " + }, + { + "bbox": [ + 130, + 582, + 481, + 641 + ], + "type": "inline_equation", + "content": "(G_S)" + }, + { + "bbox": [ + 130, + 582, + 481, + 641 + ], + "type": "text", + "content": " from the base mesh. Then, " + }, + { + "bbox": [ + 130, + 582, + 481, + 641 + ], + "type": "inline_equation", + "content": "G_S" + }, + { + "bbox": [ + 130, + 582, + 481, + 641 + ], + "type": "text", + "content": " is refined and updated using information from the partial refined mesh via a process that we call visibility-based fusion (to be explained later). Visibility-based fusion will return a final 3D grid of SDF values, " + }, + { + "bbox": [ + 130, + 582, + 481, + 641 + ], + "type": "inline_equation", + "content": "G_F" + }, + { + "bbox": [ + 130, + 582, + 481, + 641 + ], + "type": "text", + "content": ". 
From " + }, + { + "bbox": [ + 130, + 582, + 481, + 641 + ], + "type": "inline_equation", + "content": "G_F" + }, + { + "bbox": [ + 130, + 582, + 481, + 641 + ], + "type": "text", + "content": ", we will retrieve the final mesh via the Marching Cubes algorithm." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "content": "For simplicity, Fig. 5 only shows the scenario where there is only 1 secondary view, and the secondary view (rather than the primary view) is picked at the" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DFG-PIFu" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 134, + 114, + 476, + 256 + ], + "blocks": [ + { + "bbox": [ + 134, + 114, + 476, + 256 + ], + "lines": [ + { + "bbox": [ + 134, + 114, + 476, + 256 + ], + "spans": [ + { + "bbox": [ + 134, + 114, + 476, + 256 + ], + "type": "image", + "image_path": "25f1ba29f265916ca4d21523b57789000869f973531da7d2c71ca540406048d2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 257, + 479, + 280 + ], + "lines": [ + { + "bbox": [ + 132, + 257, + 479, + 280 + ], + "spans": [ + { + "bbox": [ + 132, + 257, + 479, + 280 + ], + "type": "text", + "content": "Fig. 5: 2nd Stage of 3DFG-PIFu. The base mesh is first aligned to the secondary view. Once aligned, it is combined with the secondary view to produce a partial refined mesh." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 284, + 480, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 284, + 480, + 344 + ], + "spans": [ + { + "bbox": [ + 130, + 284, + 480, + 344 + ], + "type": "text", + "content": "start of the 2nd stage. In reality, the primary view and every secondary view will be separately processed in the 2nd stage (See the blue arrows in Fig. 5), and each view will generate a different partial refined mesh. All these partial refined meshes will be used to refine and update the base mesh during visibility-based fusion before the final mesh is produced." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 343, + 480, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 343, + 480, + 427 + ], + "spans": [ + { + "bbox": [ + 130, + 343, + 480, + 427 + ], + "type": "text", + "content": "Optionally, in the 1st stage, if a SMPL-X mesh is given as an input, we will convert the SMPL-X mesh into another 3D feature grid of SDF values " + }, + { + "bbox": [ + 130, + 343, + 480, + 427 + ], + "type": "inline_equation", + "content": "(G_X)" + }, + { + "bbox": [ + 130, + 343, + 480, + 427 + ], + "type": "text", + "content": ". See illustration in Fig. 4. 
" + }, + { + "bbox": [ + 130, + 343, + 480, + 427 + ], + "type": "inline_equation", + "content": "G_{X}" + }, + { + "bbox": [ + 130, + 343, + 480, + 427 + ], + "type": "text", + "content": " is what we refer to as SDF-based SMPL-X Features. This feature grid will be concatenated with the other inputs in the 1st stage. Concurrently, we use the technique described in PaMIR [23] to obtain voxel-aligned features. The voxel-aligned features will be used by the MLP at the end of the pipeline. We will explain the rationale behind this set-up later." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 427, + 481, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 427, + 481, + 464 + ], + "spans": [ + { + "bbox": [ + 130, + 427, + 481, + 464 + ], + "type": "text", + "content": "Now, we will first elaborate on the different 3D feature grids (Sect. 3.1) before moving on to our iterative mechanism described in 3DFG-PIFu's 2nd stage (Sect. 3.2). Finally, we will explain our SDF-based SMPL-X Features (Sect. 3.3)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 479, + 249, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 479, + 249, + 491 + ], + "spans": [ + { + "bbox": [ + 132, + 479, + 249, + 491 + ], + "type": "text", + "content": "3.1 3D Feature Grids" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 498, + 482, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 498, + 482, + 583 + ], + "spans": [ + { + "bbox": [ + 130, + 498, + 482, + 583 + ], + "type": "text", + "content": "In Section 1 ('Introduction'), we thoroughly explained why we need to use 3D feature grids. Indeed, the central theme of our paper revolves around the use of 3D feature grids. We define a 3D feature grid as a D x H x W grid where each element on the grid can be either a scalar value or a vector. D, H, and W are each an integer. A 3D feature grid is useful as it can contain various types of information and can represent these information in different 3D camera spaces. In total, we use four different types of 3D feature grids:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "text", + "content": "1. 3D Feature Grid for Visual Hull " + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "inline_equation", + "content": "(G_M)" + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "text", + "content": " In Fig. 6, we illustrate how a 3D Feature Grid for Visual Hull is obtained if we only have 2 views - View 1 (Primary view) and View 2 (Secondary view). First, given a " + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "text", + "content": " mask of View 2, we replicate the mask pixels (i.e. 
the non-empty pixels) 256 times in the z-dimension (camera direction), giving us " + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "inline_equation", + "content": "M_2 \\times 256" + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "text", + "content": " elements in the 3D camera space of View 2, where " + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "inline_equation", + "content": "M_2" + }, + { + "bbox": [ + 130, + 593, + 482, + 666 + ], + "type": "text", + "content": " represents the number of mask pixels in the mask of View 2. We do" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 238, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 238, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 238, + 100 + ], + "type": "text", + "content": "K. Y. Chan et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 134, + 113, + 479, + 202 + ], + "blocks": [ + { + "bbox": [ + 134, + 113, + 479, + 202 + ], + "lines": [ + { + "bbox": [ + 134, + 113, + 479, + 202 + ], + "spans": [ + { + "bbox": [ + 134, + 113, + 479, + 202 + ], + "type": "image", + "image_path": "7cb376422de7b0b1c6825755d6ad09c98507c2570973e4e2872135165c4ead55.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 153, + 203, + 457, + 214 + ], + "lines": [ + { + "bbox": [ + 153, + 203, + 457, + 214 + ], + "spans": [ + { + "bbox": [ + 153, + 203, + 457, + 214 + ], + "type": "text", + "content": "Fig. 6: 3D Feature Grid - Visual Hull. Above shows how " + }, + { + "bbox": [ + 153, + 203, + 457, + 214 + ], + "type": "inline_equation", + "content": "G_{M}" + }, + { + "bbox": [ + 153, + 203, + 457, + 214 + ], + "type": "text", + "content": " is extracted." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 220, + 480, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 220, + 480, + 339 + ], + "spans": [ + { + "bbox": [ + 130, + 220, + 480, + 339 + ], + "type": "text", + "content": "the same for View 1 to get " + }, + { + "bbox": [ + 130, + 220, + 480, + 339 + ], + "type": "inline_equation", + "content": "M_1 \\times 256" + }, + { + "bbox": [ + 130, + 220, + 480, + 339 + ], + "type": "text", + "content": " elements in the 3D camera space of View 1. The elements belonging to View 2 are then rotated or transformed into the 3D camera space of View 1 and placed together with the elements that belong to View 1. Lastly, we take the 3D intersection of the two groups of elements to obtain a visual hull. The visual hull is stored in a 3D grid that corresponds to the 3D camera space of View 1, and this is basically a 3D Feature Grid for Visual Hull or " + }, + { + "bbox": [ + 130, + 220, + 480, + 339 + ], + "type": "inline_equation", + "content": "G_M" + }, + { + "bbox": [ + 130, + 220, + 480, + 339 + ], + "type": "text", + "content": ". We will always store the visual hull in the 3D camera space of the primary view (instead of a secondary view). 
On a side note, " + }, + { + "bbox": [ + 130, + 220, + 480, + 339 + ], + "type": "inline_equation", + "content": "G_M" + }, + { + "bbox": [ + 130, + 220, + 480, + 339 + ], + "type": "text", + "content": " can be generated with more than 2 views too. For example, with 3 views, the visual hull is formed by the 3D intersection of 3 groups of elements." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 340, + 480, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 340, + 480, + 423 + ], + "spans": [ + { + "bbox": [ + 130, + 340, + 480, + 423 + ], + "type": "inline_equation", + "content": "G_{M}" + }, + { + "bbox": [ + 130, + 340, + 480, + 423 + ], + "type": "text", + "content": " is useful because it contains the structural information of both View 1 and View 2. Concretely, each element in " + }, + { + "bbox": [ + 130, + 340, + 480, + 423 + ], + "type": "inline_equation", + "content": "G_{M}" + }, + { + "bbox": [ + 130, + 340, + 480, + 423 + ], + "type": "text", + "content": " is a binary value ('0' or '1'). '0' means the element or grid position is unoccupied, and '1' means the element is occupied. Together, these elements represent a possibility space for occupancy. No part of the groundtruth human body mesh can be outside of this possibility space, and this is very useful information for a pixel-aligned implicit model whose task is to predict and reconstruct a human body mesh." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "text", + "content": "2. 3D Feature Grid for Front Normals " + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "inline_equation", + "content": "(G_N)" + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "text", + "content": " However, " + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "inline_equation", + "content": "G_M" + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "text", + "content": " does not fully capture all the relevant information from View 2. Specifically, only mask information from View 2 is captured. If we look at the mask of View 2 in Fig. 6, we cannot actually differentiate the outlines of the arms from that of the torso, or the outlines of the person's left thigh from the right thigh. This information (the outlines) is not captured by the mask but is captured by the front normal map of View 2. Thus, we introduce 3D Feature Grid for Front Normals or " + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "inline_equation", + "content": "G_N" + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "text", + "content": " as a complement to " + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "inline_equation", + "content": "G_M" + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "inline_equation", + "content": "G_N" + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "text", + "content": " is similar to " + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "inline_equation", + "content": "G_M" + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "text", + "content": " except that each element on its 3D grid is a normal vector rather than a scalar occupancy value. 
" + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "inline_equation", + "content": "G_N" + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "text", + "content": " is obtained in a manner similar to the first row of Fig. 6 except that the mask of View 2 is replaced by the front normal map of View 2. First, given a " + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "inline_equation", + "content": "3 \\times 256 \\times 256" + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "text", + "content": " front normal map of View 2, we replicate the normal pixels (i.e. only the non-empty pixels) 256 times in the z-dimension (camera direction), giving us " + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "inline_equation", + "content": "N_2 \\times 256" + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "text", + "content": " elements (i.e. vectors) in the 3D camera space of View 2, where " + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "inline_equation", + "content": "N_2" + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "text", + "content": " represents the number of normal pixels in the front normal map of View 2. The elements belonging to View 2 are then rotated or transformed into the 3D camera space of View 1 and then stored in a 3D feature grid that corresponds to View 1's 3D camera space. This grid is the 3D Feature Grid for Front Normals or " + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "inline_equation", + "content": "G_N" + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "text", + "content": ". Like " + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "inline_equation", + "content": "G_M" + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "inline_equation", + "content": "G_N" + }, + { + "bbox": [ + 130, + 437, + 481, + 666 + ], + "type": "text", + "content": " is also used as an input in the 1st stage of our 3DFG-PIFu." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DFG-PIFu" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 192, + 113, + 258, + 163 + ], + "blocks": [ + { + "bbox": [ + 192, + 113, + 258, + 163 + ], + "lines": [ + { + "bbox": [ + 192, + 113, + 258, + 163 + ], + "spans": [ + { + "bbox": [ + 192, + 113, + 258, + 163 + ], + "type": "image", + "image_path": "84ec8fb46f8f5b9903170bf387b785858959d1566763f536f97b90c41ef2a690.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 187, + 163, + 266, + 178 + ], + "lines": [ + { + "bbox": [ + 187, + 163, + 266, + 178 + ], + "spans": [ + { + "bbox": [ + 187, + 163, + 266, + 178 + ], + "type": "text", + "content": "Base Mesh (viewed from 3 different angles)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 224, + 178, + 388, + 188 + ], + "lines": [ + { + "bbox": [ + 224, + 178, + 388, + 188 + ], + "spans": [ + { + "bbox": [ + 224, + 178, + 388, + 188 + ], + "type": "text", + "content": "Fig. 7: View visibility of different views" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 267, + 113, + 339, + 163 + ], + "blocks": [ + { + "bbox": [ + 267, + 113, + 339, + 163 + ], + "lines": [ + { + "bbox": [ + 267, + 113, + 339, + 163 + ], + "spans": [ + { + "bbox": [ + 267, + 113, + 339, + 163 + ], + "type": "image", + "image_path": "0934a6da62f5132d1fbd85806b7e6c9e58262571feb0cd1da0c2c2d2d2707baf.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 268, + 163, + 353, + 178 + ], + "lines": [ + { + "bbox": [ + 268, + 163, + 353, + 178 + ], + "spans": [ + { + "bbox": [ + 268, + 163, + 353, + 178 + ], + "type": "text", + "content": "Visibility of View 1 \n(View 1 = a Frontal view)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 353, + 113, + 418, + 163 + ], + "blocks": [ + { + "bbox": [ + 353, + 113, + 418, + 163 + ], + "lines": [ + { + "bbox": [ + 353, + 113, + 418, + 163 + ], + "spans": [ + { + "bbox": [ + 353, + 113, + 418, + 163 + ], + "type": "image", + "image_path": "d8b22d2022b394e7951f21d8cd2d8efdbf6eb799e3781a265a39f1194a457a2b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 353, + 163, + 422, + 178 + ], + "lines": [ + { + "bbox": [ + 353, + 163, + 422, + 178 + ], + "spans": [ + { + "bbox": [ + 353, + 163, + 422, + 178 + ], + "type": "text", + "content": "Visibility of View 2 (View " + }, + { + "bbox": [ + 353, + 163, + 422, + 178 + ], + "type": "inline_equation", + "content": "2 =" + }, + { + "bbox": [ + 353, + 163, + 422, + 178 + ], + "type": "text", + "content": " a Left view)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 
130, + 193, + 482, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 193, + 482, + 289 + ], + "spans": [ + { + "bbox": [ + 130, + 193, + 482, + 289 + ], + "type": "text", + "content": "3. 3D Feature Grid for SDF values " + }, + { + "bbox": [ + 130, + 193, + 482, + 289 + ], + "type": "inline_equation", + "content": "(G_S, G_S', G_F)" + }, + { + "bbox": [ + 130, + 193, + 482, + 289 + ], + "type": "text", + "content": " Given an input view, a single-view pixel-aligned implicit model produces a human body mesh that is oriented in the 3D camera space of that input view. But once we have the mesh, we can transform or rotate the mesh into the 3D camera space of any view. This means that a mesh produced using View 1 can be transformed from its initial 3D camera space of View 1 to the 3D camera space of View 2. Once oriented to the 3D camera space of View 2, the mesh becomes a useful prior for a pixel-aligned implicit model that is trying to use View 2 to predict a human body mesh." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 289, + 482, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 289, + 482, + 348 + ], + "spans": [ + { + "bbox": [ + 130, + 289, + 482, + 348 + ], + "type": "text", + "content": "But we cannot feed a mesh, which consists of vertices and faces, into a pixel-aligned implicit model. To resolve this, we propose converting the mesh into a 3D feature grid of SDF values. This 3D grid will correspond to the 3D camera space of View 2, and each element in the 3D grid is a truncated SDF value that ranges from -1 to 1, where the value of 0 represents a mesh surface." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 350, + 482, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 350, + 482, + 434 + ], + "spans": [ + { + "bbox": [ + 130, + 350, + 482, + 434 + ], + "type": "text", + "content": "This grid, which is 3D Feature Grid for SDF values, is a simple and effective way to condition a pixel-aligned implicit model on a mesh (as a prior). We will elaborate more on the usefulness of such a prior in Section 3.2. In our 3DFG-PIFu, 3D Feature Grid for SDF values is used as a prior in the 2nd stage (see Fig. 5). In Fig. 5, we see three variants of 3D Feature Grid for SDF values: " + }, + { + "bbox": [ + 130, + 350, + 482, + 434 + ], + "type": "inline_equation", + "content": "G_{S}" + }, + { + "bbox": [ + 130, + 350, + 482, + 434 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 350, + 482, + 434 + ], + "type": "inline_equation", + "content": "G_{S}^{\\prime}" + }, + { + "bbox": [ + 130, + 350, + 482, + 434 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 350, + 482, + 434 + ], + "type": "inline_equation", + "content": "G_{F}" + }, + { + "bbox": [ + 130, + 350, + 482, + 434 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 130, + 350, + 482, + 434 + ], + "type": "inline_equation", + "content": "G_{S}" + }, + { + "bbox": [ + 130, + 350, + 482, + 434 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 350, + 482, + 434 + ], + "type": "inline_equation", + "content": "G_{S}^{\\prime}" + }, + { + "bbox": [ + 130, + 350, + 482, + 434 + ], + "type": "text", + "content": " represent SDF values from an unrotated and rotated base mesh respectively. 
" + }, + { + "bbox": [ + 130, + 350, + 482, + 434 + ], + "type": "inline_equation", + "content": "G_{F}" + }, + { + "bbox": [ + 130, + 350, + 482, + 434 + ], + "type": "text", + "content": " represents SDF values from the final mesh." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 448, + 482, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 448, + 482, + 484 + ], + "spans": [ + { + "bbox": [ + 130, + 448, + 482, + 484 + ], + "type": "text", + "content": "4. 3D Feature Grid for View Visibility " + }, + { + "bbox": [ + 130, + 448, + 482, + 484 + ], + "type": "inline_equation", + "content": "(G_V)" + }, + { + "bbox": [ + 130, + 448, + 482, + 484 + ], + "type": "text", + "content": " In the 2nd stage (see Fig. 5), we use " + }, + { + "bbox": [ + 130, + 448, + 482, + 484 + ], + "type": "inline_equation", + "content": "G_S'" + }, + { + "bbox": [ + 130, + 448, + 482, + 484 + ], + "type": "text", + "content": " as an input to the encoder. Since " + }, + { + "bbox": [ + 130, + 448, + 482, + 484 + ], + "type": "inline_equation", + "content": "G_S'" + }, + { + "bbox": [ + 130, + 448, + 482, + 484 + ], + "type": "text", + "content": " contains a rotated base mesh in SDF form, we are essentially using the rotated base mesh as a prior in the encoder." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 485, + 482, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 485, + 482, + 569 + ], + "spans": [ + { + "bbox": [ + 130, + 485, + 482, + 569 + ], + "type": "text", + "content": "This rotated base mesh already has an accurate structure and shape of a human body. Thus, given a view (e.g. View 2) in the 2nd stage, we only want to modify the rotated base mesh in regions where we are confident of editing. The regions that we are most confident of editing are the regions that are visible from that given view. Examples of such regions are shown in Fig. 7. If we are given View 2, for example, then we only want to edit the green regions of the base mesh, as shown in the rightmost column of Fig. 7." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 130, + 570, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 570, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 570, + 482, + 666 + ], + "type": "text", + "content": "If the selected view in the 2nd stage is indeed View 2, then we want to have a 3D feature grid that contains all those green regions. Such a 3D feature grid would serve as a complement to the " + }, + { + "bbox": [ + 130, + 570, + 482, + 666 + ], + "type": "inline_equation", + "content": "G_S'" + }, + { + "bbox": [ + 130, + 570, + 482, + 666 + ], + "type": "text", + "content": " by telling the pixel-aligned implicit model which part of the rotated base mesh should (and should not) be edited. This 3D feature grid is our 3D Feature Grid for View Visibility or " + }, + { + "bbox": [ + 130, + 570, + 482, + 666 + ], + "type": "inline_equation", + "content": "G_V" + }, + { + "bbox": [ + 130, + 570, + 482, + 666 + ], + "type": "text", + "content": ". Each element in " + }, + { + "bbox": [ + 130, + 570, + 482, + 666 + ], + "type": "inline_equation", + "content": "G_V" + }, + { + "bbox": [ + 130, + 570, + 482, + 666 + ], + "type": "text", + "content": " is a binary value (0 or 1). A value of 1 indicates that, at that grid position, there is a mesh surface and this mesh surface is visible from the view that is selected in the 2nd stage (as illustrated in Fig. 7)." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 238, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 238, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 238, + 100 + ], + "type": "text", + "content": "K. Y. Chan et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 475, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 475, + 140 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 475, + 140 + ], + "type": "text", + "content": "3.2 An Iterative Mechanism to Combine Appearance Details from Multiple Views" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 145, + 480, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 145, + 480, + 168 + ], + "spans": [ + { + "bbox": [ + 130, + 145, + 480, + 168 + ], + "type": "text", + "content": "Our iterative mechanism or pipeline to combine appearance details from multiple views is the 2nd stage of our 3DFG-PIFu. It refines and updates the base mesh." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 169, + 480, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 169, + 480, + 205 + ], + "spans": [ + { + "bbox": [ + 130, + 169, + 480, + 205 + ], + "type": "text", + "content": "While a base mesh has a highly accurate structure, we observed that it often lacks fine-grained appearance details from all the given input views (primary view and secondary views(s)). This is illustrated in Fig. 9a and b." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 205, + 480, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 205, + 480, + 287 + ], + "spans": [ + { + "bbox": [ + 130, + 205, + 480, + 287 + ], + "type": "text", + "content": "To resolve this, we designed a 2nd stage that focuses on capturing the fine-grained appearance details of each view. Our 2nd stage is outlined in Fig. 5. Firstly, we select a view " + }, + { + "bbox": [ + 130, + 205, + 480, + 287 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 130, + 205, + 480, + 287 + ], + "type": "text", + "content": " from the set of input views. Then, as seen in the figure, we condition our encoder on the rotated base mesh " + }, + { + "bbox": [ + 130, + 205, + 480, + 287 + ], + "type": "inline_equation", + "content": "(G_S')" + }, + { + "bbox": [ + 130, + 205, + 480, + 287 + ], + "type": "text", + "content": ", which already captured the coarse but accurate structural information from all the input views. Given such a conditioning, we allow the encoder to now focus on capturing fine-grained appearance details from selected view " + }, + { + "bbox": [ + 130, + 205, + 480, + 287 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 130, + 205, + 480, + 287 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 289, + 480, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 289, + 480, + 372 + ], + "spans": [ + { + "bbox": [ + 130, + 289, + 480, + 372 + ], + "type": "text", + "content": "To further ensure that appearance details are captured, we have two additional features in the 2nd stage. The first feature, which was just explained, is the use of " + }, + { + "bbox": [ + 130, + 289, + 480, + 372 + ], + "type": "inline_equation", + "content": "G_V" + }, + { + "bbox": [ + 130, + 289, + 480, + 372 + ], + "type": "text", + "content": " (as shown in Fig. 5). By complementing " + }, + { + "bbox": [ + 130, + 289, + 480, + 372 + ], + "type": "inline_equation", + "content": "G_S'" + }, + { + "bbox": [ + 130, + 289, + 480, + 372 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 130, + 289, + 480, + 372 + ], + "type": "inline_equation", + "content": "G_V" + }, + { + "bbox": [ + 130, + 289, + 480, + 372 + ], + "type": "text", + "content": ", the encoder is able to identify which regions on the rotated base mesh are visible from view " + }, + { + "bbox": [ + 130, + 289, + 480, + 372 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 130, + 289, + 480, + 372 + ], + "type": "text", + "content": ". Relative to invisible regions, the encoder will make less error modifying the visible regions. Thus, knowing where the visible regions are encourages the encoder to make more decisive and sharper modifications to these regions." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 372, + 480, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 372, + 480, + 468 + ], + "spans": [ + { + "bbox": [ + 130, + 372, + 480, + 468 + ], + "type": "text", + "content": "The second additional feature in the 2nd stage is the use of Depth Oriented Sampling (DOS) from IntegratedPIFu [4]. As shown in Fig. 5, for a given view, we will generate a partial refined mesh. However, for a partial refined mesh, we are actually only interested in the regions on the mesh that are visible from that given view. For this reason, it makes sense to use DOS to train the encoder and MLP that are used in the 2nd stage. This is because DOS works best when reconstructing mesh surfaces that are directly facing the camera direction (i.e. mesh regions that are visible from the given view). We briefly explain DOS now." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 468, + 480, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 468, + 480, + 515 + ], + "spans": [ + { + "bbox": [ + 130, + 468, + 480, + 515 + ], + "type": "text", + "content": "Our 1st stage model predicts coarse-grained occupancy (in or out) of sample points in a 3D space to produce the base mesh. In contrast, our 2nd stage model, with use of DOS, predicts fine-grained displacement values of the sample points in the camera direction to produce a partial refined mesh." 
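DOS itself is not reproduced here; the snippet below is only one plausible reading of the displacement target just described (a scalar shift of each base-surface sample along the camera z-axis of the selected view), with made-up names and shapes.

```python
import numpy as np

def refine_surface_depth(surface_points, predicted_displacement):
    """surface_points: (N, 3) samples on the rotated base mesh, in the selected
    view's camera space; predicted_displacement: (N,) MLP outputs. Whereas the
    1st stage classifies points as in/out, the 2nd stage shifts the surface
    along the camera (z) direction by the predicted amount."""
    refined = surface_points.copy()
    refined[:, 2] += predicted_displacement
    return refined
```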
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 516, + 480, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 516, + 480, + 564 + ], + "spans": [ + { + "bbox": [ + 130, + 516, + 480, + 564 + ], + "type": "text", + "content": "Intuitively, given the base mesh as prior and the use of DOS, our 2nd stage model is trying to shift and adjust the base mesh's surface in the camera direction such that the resulting partial refined mesh better reflects the appearance details of the given views (see Fig. 8)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 570, + 480, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 570, + 480, + 640 + ], + "spans": [ + { + "bbox": [ + 130, + 570, + 480, + 640 + ], + "type": "text", + "content": "Visibility-based Fusion Each given view is used to generate a partial refined mesh. We aim to use these partial refined meshes to update the original base mesh. To do so, we transform the partial refined meshes to the primary view's 3D camera space so that they are physically aligned with the base mesh. Then, we will use these partial refined meshes to update the values in " + }, + { + "bbox": [ + 130, + 570, + 480, + 640 + ], + "type": "inline_equation", + "content": "G_{S}" + }, + { + "bbox": [ + 130, + 570, + 480, + 640 + ], + "type": "text", + "content": ", which is a " + }, + { + "bbox": [ + 130, + 570, + 480, + 640 + ], + "type": "inline_equation", + "content": "256 \\times 256 \\times 256" + }, + { + "bbox": [ + 130, + 570, + 480, + 640 + ], + "type": "text", + "content": " 3D feature grid containing the SDF values of the base mesh." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 641, + 480, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 480, + 665 + ], + "type": "text", + "content": "If a partial refined mesh is created from view " + }, + { + "bbox": [ + 130, + 641, + 480, + 665 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 130, + 641, + 480, + 665 + ], + "type": "text", + "content": ", then this mesh will have the most accurate shape and geometry at regions that are visible from view " + }, + { + "bbox": [ + 130, + 641, + 480, + 665 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 130, + 641, + 480, + 665 + ], + "type": "text", + "content": ". 
For" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DFG-PIFu" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 136, + 114, + 299, + 194 + ], + "blocks": [ + { + "bbox": [ + 136, + 114, + 299, + 194 + ], + "lines": [ + { + "bbox": [ + 136, + 114, + 299, + 194 + ], + "spans": [ + { + "bbox": [ + 136, + 114, + 299, + 194 + ], + "type": "image", + "image_path": "80fd0bb6438c89f3472d314eb8647666a97a17a981d8fcdf2a49a0facc6f71c7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 195, + 304, + 216 + ], + "lines": [ + { + "bbox": [ + 132, + 195, + 304, + 216 + ], + "spans": [ + { + "bbox": [ + 132, + 195, + 304, + 216 + ], + "type": "text", + "content": "Fig. 8: Illustration of our Iterative Mechanism" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 322, + 113, + 463, + 194 + ], + "blocks": [ + { + "bbox": [ + 322, + 113, + 463, + 194 + ], + "lines": [ + { + "bbox": [ + 322, + 113, + 463, + 194 + ], + "spans": [ + { + "bbox": [ + 322, + 113, + 463, + 194 + ], + "type": "image", + "image_path": "33e977971b6fccfeb314c35f51be05cbfba99552bbedaeab68be6b06a9532843.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 308, + 195, + 480, + 217 + ], + "lines": [ + { + "bbox": [ + 308, + 195, + 480, + 217 + ], + "spans": [ + { + "bbox": [ + 308, + 195, + 480, + 217 + ], + "type": "text", + "content": "Fig.9: Evaluation of 3DFG-PIFu's 2nd Stage." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 223, + 480, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 223, + 480, + 306 + ], + "spans": [ + { + "bbox": [ + 130, + 223, + 480, + 306 + ], + "type": "text", + "content": "this reason, we will identify locations on a partial refined mesh that are visible from its corresponding view and then extract the SDF values at these locations. So, for each partial refined mesh, these 'visible' SDF values are extracted and used to overwrite the " + }, + { + "bbox": [ + 130, + 223, + 480, + 306 + ], + "type": "inline_equation", + "content": "G_{S}" + }, + { + "bbox": [ + 130, + 223, + 480, + 306 + ], + "type": "text", + "content": " grid. In the end, the updated " + }, + { + "bbox": [ + 130, + 223, + 480, + 306 + ], + "type": "inline_equation", + "content": "G_{S}" + }, + { + "bbox": [ + 130, + 223, + 480, + 306 + ], + "type": "text", + "content": ", which is also referred to as our final mesh in SDF form " + }, + { + "bbox": [ + 130, + 223, + 480, + 306 + ], + "type": "inline_equation", + "content": "(G_{F})" + }, + { + "bbox": [ + 130, + 223, + 480, + 306 + ], + "type": "text", + "content": ", will be a mix of SDF values from the base mesh and the partial refined mesh(es). 
To convert the " + }, + { + "bbox": [ + 130, + 223, + 480, + 306 + ], + "type": "inline_equation", + "content": "G_{F}" + }, + { + "bbox": [ + 130, + 223, + 480, + 306 + ], + "type": "text", + "content": " to mesh form, we use the Marching Cubes algorithm." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 328, + 307, + 339 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 328, + 307, + 339 + ], + "spans": [ + { + "bbox": [ + 132, + 328, + 307, + 339 + ], + "type": "text", + "content": "3.3 SDF-based SMPL-X features" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 352, + 480, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 352, + 480, + 411 + ], + "spans": [ + { + "bbox": [ + 130, + 352, + 480, + 411 + ], + "type": "text", + "content": "In multi-view settings, it is possible to use methods, such as [18] and [9], to predict a SMPL-X mesh that is fairly close to the ground truth. Thus, some multi-view pixel-aligned implicit models, like DeepMultiCap [22] and SeSDF [1], use a SMPL-X mesh as a prior before predicting a human body mesh. In 3DFG-PIFu, we also offer an option to use SMPL-X meshes as a prior." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 413, + 480, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 413, + 480, + 508 + ], + "spans": [ + { + "bbox": [ + 130, + 413, + 480, + 508 + ], + "type": "text", + "content": "A well-known approach to incorporate a SMPL-X mesh as a prior in a pixel-aligned implicit model is via the use of voxel-aligned features introduced by PaMIR [23]. To obtain the voxel-aligned features, the SMPL-X mesh is first voxelized and then fed as an input to a 3D CNN, as shown at the bottom of Fig. 4. Voxel-aligned features are produced by this 3D CNN. The voxel-aligned features are then used as an input to an MLP, which will produce a human body mesh. Voxel-aligned features are used in DeepMultiCap and SeSDF (with a PointNet). We can use voxel-aligned features in 3DFG-PIFu as well, as seen in Fig. 4." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 509, + 480, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 509, + 480, + 581 + ], + "spans": [ + { + "bbox": [ + 130, + 509, + 480, + 581 + ], + "type": "text", + "content": "But, as Fig. 4 shows, the features produced by the Encoder ('Pixel-aligned Features') are only fused with voxel-aligned features at the end of the pipeline. Moreover, the fusion is point-wise and localized. This means the pixel-aligned feature that corresponds to a sample point is fused only with the specific voxel-aligned feature that corresponds to the same sample point. In other words, there is no global interaction between voxel-aligned features and pixel-aligned features." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 582, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 582, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 582, + 481, + 665 + ], + "type": "text", + "content": "We aim to design a method to fuse a SMPL-X mesh earlier in the pipeline and in a global manner. A recent method that does this is S-PIFu [3]. S-PIFu extracts a set of handcrafted 2D feature maps from a SMPL-X mesh. These maps are concatenated with the input image and then used as inputs at the start of the pipeline. However, useful 3D information is lost when S-PIFu reduces a SMPL-X mesh into a set of 2D handcrafted features. 
Thus, we propose our SDF-based SMPL-X features to directly replace the 2D handcrafted features. SDF-based" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 238, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 238, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 238, + 100 + ], + "type": "text", + "content": "K. Y. Chan et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 135, + 113, + 475, + 234 + ], + "blocks": [ + { + "bbox": [ + 135, + 113, + 475, + 234 + ], + "lines": [ + { + "bbox": [ + 135, + 113, + 475, + 234 + ], + "spans": [ + { + "bbox": [ + 135, + 113, + 475, + 234 + ], + "type": "image", + "image_path": "e39d69375004551d426bd02f5483c45fb5f0916591342dadc60de1565de9d0d3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 201, + 235, + 411, + 244 + ], + "lines": [ + { + "bbox": [ + 201, + 235, + 411, + 244 + ], + "spans": [ + { + "bbox": [ + 201, + 235, + 411, + 244 + ], + "type": "text", + "content": "Fig. 10: Qualitative evaluation with SOTA models" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 254, + 479, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 254, + 479, + 277 + ], + "spans": [ + { + "bbox": [ + 130, + 254, + 479, + 277 + ], + "type": "text", + "content": "SMPL-X features retain 3D information by directly converting a SMPL-X mesh into a 3D feature grid of SDF values." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 278, + 481, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 278, + 481, + 348 + ], + "spans": [ + { + "bbox": [ + 130, + 278, + 481, + 348 + ], + "type": "text", + "content": "SDF-based SMPL-X features " + }, + { + "bbox": [ + 130, + 278, + 481, + 348 + ], + "type": "inline_equation", + "content": "(G_{X})" + }, + { + "bbox": [ + 130, + 278, + 481, + 348 + ], + "type": "text", + "content": " is a 3D grid of SDF values (as seen in Fig. 4). " + }, + { + "bbox": [ + 130, + 278, + 481, + 348 + ], + "type": "inline_equation", + "content": "G_{X}" + }, + { + "bbox": [ + 130, + 278, + 481, + 348 + ], + "type": "text", + "content": " is similar to " + }, + { + "bbox": [ + 130, + 278, + 481, + 348 + ], + "type": "inline_equation", + "content": "G_{S}" + }, + { + "bbox": [ + 130, + 278, + 481, + 348 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 278, + 481, + 348 + ], + "type": "inline_equation", + "content": "G_{S}^{\\prime}" + }, + { + "bbox": [ + 130, + 278, + 481, + 348 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 278, + 481, + 348 + ], + "type": "inline_equation", + "content": "G_{F}" + }, + { + "bbox": [ + 130, + 278, + 481, + 348 + ], + "type": "text", + "content": " except that " + }, + { + "bbox": [ + 130, + 278, + 481, + 348 + ], + "type": "inline_equation", + "content": "G_{X}" + }, + { + "bbox": [ + 130, + 278, + 481, + 348 + ], + "type": "text", + "content": " involves a SMPL-X mesh. 
To get " + }, + { + "bbox": [ + 130, + 278, + 481, + 348 + ], + "type": "inline_equation", + "content": "G_{X}" + }, + { + "bbox": [ + 130, + 278, + 481, + 348 + ], + "type": "text", + "content": ", we first transform the SMPL-X mesh to the 3D camera space of the primary view. From the transformed SMPL-X mesh, we sample a 3D grid of SDF values. Each SDF value ranges from -1 to 1, where the value of 0 represents a surface on the SMPL-X mesh." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 350, + 480, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 350, + 480, + 374 + ], + "spans": [ + { + "bbox": [ + 130, + 350, + 480, + 374 + ], + "type": "text", + "content": "As shown in Fig. 4, " + }, + { + "bbox": [ + 130, + 350, + 480, + 374 + ], + "type": "inline_equation", + "content": "G_{X}" + }, + { + "bbox": [ + 130, + 350, + 480, + 374 + ], + "type": "text", + "content": " can be used together with PaMIR's voxel-aligned features, and we show later on that this combination yields the best results." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 394, + 230, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 394, + 230, + 407 + ], + "spans": [ + { + "bbox": [ + 132, + 394, + 230, + 407 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 421, + 480, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 421, + 480, + 456 + ], + "spans": [ + { + "bbox": [ + 130, + 421, + 480, + 456 + ], + "type": "text", + "content": "As this is a sparse-view set-up, we set the number of views " + }, + { + "bbox": [ + 130, + 421, + 480, + 456 + ], + "type": "inline_equation", + "content": "V = 2" + }, + { + "bbox": [ + 130, + 421, + 480, + 456 + ], + "type": "text", + "content": " and set the angle between the two views as 90 degrees. It is feasible to use other angles as well. Later in Sect. 4.3, we experiment with " + }, + { + "bbox": [ + 130, + 421, + 480, + 456 + ], + "type": "inline_equation", + "content": "V > 2" + }, + { + "bbox": [ + 130, + 421, + 480, + 456 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 476, + 204, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 476, + 204, + 487 + ], + "spans": [ + { + "bbox": [ + 132, + 476, + 204, + 487 + ], + "type": "text", + "content": "4.1 Datasets" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 498, + 480, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 498, + 480, + 581 + ], + "spans": [ + { + "bbox": [ + 130, + 498, + 480, + 581 + ], + "type": "text", + "content": "In our experimental setup, we utilize the THuman2.0 dataset [17] as the training set for our models as well as other competing models. The THuman2.0 dataset comprises 526 high-quality, full-body scans (or meshes) of ethnic Chinese human subjects. An 80-20 train-test split of the dataset is used. For each training mesh, we render 36 RGB images (each spaced 10 degrees apart) using a weak-perspective camera. For each training iteration, two views that are 90 degrees apart are randomly selected."
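A small sketch of this per-iteration view sampling (36 renders spaced 10 degrees apart, with the two selected views 90 degrees apart) is shown below; the constants and function name are ours, chosen only for illustration.

```python
import random

NUM_RENDERS = 36                   # one render every 10 degrees around the subject
STEP_DEGREES = 10
PAIR_OFFSET = 90 // STEP_DEGREES   # 90 degrees between the two sampled views

def sample_training_view_pair():
    """Pick a random primary view index and the secondary view 90 degrees away."""
    primary = random.randrange(NUM_RENDERS)
    secondary = (primary + PAIR_OFFSET) % NUM_RENDERS
    return primary, secondary
```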
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 582, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 582, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 582, + 481, + 665 + ], + "type": "text", + "content": "Furthermore, we use BUFF dataset [19] and MultiHuman dataset [22] for the evaluation of all models. No model is trained using these datasets. For BUFF dataset, we followed IntegratedPIFu [4] and performed systematic sampling (based on sequence number) on the dataset. This resulted in 101 human meshes that were used for evaluating the models. Utilizing systematic sampling allowed us to avoid meshes that have both the same human subject and the same pose. For MultiHuman dataset, all single human scans are used." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DFG-PIFu" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 143, + 148, + 471, + 217 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 481, + 147 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 481, + 147 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 481, + 147 + ], + "type": "text", + "content": "Table 1: SOTA vs Ours. The IntegratedPIFu [4] used is its multi-view version. 'SM' indicates if a groundtruth SMPL-X mesh is used. 'HR' indicates if 1024x1024 RGB images are used. By default, 512x512 RGB images are used." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 143, + 148, + 471, + 217 + ], + "lines": [ + { + "bbox": [ + 143, + 148, + 471, + 217 + ], + "spans": [ + { + "bbox": [ + 143, + 148, + 471, + 217 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Methods</td><td rowspan="2">SM</td><td rowspan="2">HR</td><td colspan="3">THuman2.0</td><td colspan="3">BUFF</td><td colspan="3">MultiHuman</td></tr>
<tr><td>CD (10<sup>-5</sup>)</td><td>P2S (10<sup>-5</sup>)</td><td>Normal</td><td>CD (10<sup>-2</sup>)</td><td>P2S (10<sup>-2</sup>)</td><td>Normal</td><td>CD (10<sup>-5</sup>)</td><td>P2S (10<sup>-5</sup>)</td><td>Normal</td></tr>
<tr><td>Multi-view PIFu</td><td>×</td><td>×</td><td>10.79</td><td>17.03</td><td>5471</td><td>4.357</td><td>7.005</td><td>4839</td><td>8.197</td><td>9.843</td><td>6046</td></tr>
<tr><td>IntegratedPIFu</td><td>×</td><td>✓</td><td>10.05</td><td>15.75</td><td>5324</td><td>4.576</td><td>7.497</td><td>4738</td><td>8.481</td><td>9.970</td><td>5961</td></tr>
<tr><td>DeepMultiCap</td><td>✓</td><td>×</td><td>8.208</td><td>7.506</td><td>9589</td><td>12.45</td><td>14.78</td><td>12081</td><td>32.83</td><td>29.16</td><td>11518</td></tr>
<tr><td>SeSDF</td><td>✓</td><td>×</td><td>6.302</td><td>9.181</td><td>5388</td><td>3.848</td><td>5.779</td><td>5259</td><td>7.167</td><td>9.276</td><td>6157</td></tr>
<tr><td>Ours (No HR, No SM)</td><td>×</td><td>×</td><td>5.796</td><td>5.811</td><td>5386</td><td>2.509</td><td>2.286</td><td>4797</td><td>6.320</td><td>5.737</td><td>5352</td></tr>
<tr><td>Ours (HR, No SM)</td><td>×</td><td>✓</td><td>5.133</td><td>5.028</td><td>5317</td><td>2.508</td><td>2.121</td><td>4694</td><td>5.315</td><td>4.866</td><td>5116</td></tr>
<tr><td>Ours (No HR, w SM)</td><td>✓</td><td>×</td><td>3.560</td><td>3.139</td><td>5285</td><td>3.375</td><td>2.694</td><td>4758</td><td>5.633</td><td>5.070</td><td>5428</td></tr>
<tr><td>Ours (HR, w SM)</td><td>✓</td><td>✓</td><td>3.555</td><td>3.129</td><td>5212</td><td>3.412</td><td>2.700</td><td>4560</td><td>5.391</td><td>4.934</td><td>5003</td></tr></table>
", + "image_path": "c6ffd9408dd35258f06bd29069a96719f56eddc023df9cd2652c46cb0492c9b5.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 137, + 219, + 301, + 337 + ], + "blocks": [ + { + "bbox": [ + 137, + 219, + 301, + 337 + ], + "lines": [ + { + "bbox": [ + 137, + 219, + 301, + 337 + ], + "spans": [ + { + "bbox": [ + 137, + 219, + 301, + 337 + ], + "type": "image", + "image_path": "fefe78ed7d05bd3f46b3d1ec809f8659517cc6841cf808daf203437403f2e231.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 146, + 338, + 294, + 348 + ], + "lines": [ + { + "bbox": [ + 146, + 338, + 294, + 348 + ], + "spans": [ + { + "bbox": [ + 146, + 338, + 294, + 348 + ], + "type": "text", + "content": "Fig.11: SeSDF vs our 3DFG-PIFu." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 310, + 220, + 468, + 337 + ], + "blocks": [ + { + "bbox": [ + 310, + 220, + 468, + 337 + ], + "lines": [ + { + "bbox": [ + 310, + 220, + 468, + 337 + ], + "spans": [ + { + "bbox": [ + 310, + 220, + 468, + 337 + ], + "type": "image", + "image_path": "ca57291c87911ebcd2a166a48bdf8070e96fb506d7d3f0b1c6cb9631b0ae89e6.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 311, + 338, + 468, + 348 + ], + "lines": [ + { + "bbox": [ + 311, + 338, + 468, + 348 + ], + "spans": [ + { + "bbox": [ + 311, + 338, + 468, + 348 + ], + "type": "text", + "content": "Fig. 12: Qualitative evaluation of " + }, + { + "bbox": [ + 311, + 338, + 468, + 348 + ], + "type": "inline_equation", + "content": "G_{M}" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 354, + 329, + 367 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 354, + 329, + 367 + ], + "spans": [ + { + "bbox": [ + 132, + 354, + 329, + 367 + ], + "type": "text", + "content": "4.2 Comparison with State-of-the-art" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 377, + 482, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 377, + 482, + 462 + ], + "spans": [ + { + "bbox": [ + 130, + 377, + 482, + 462 + ], + "type": "text", + "content": "We compared our models against other existing models on multi-view clothed human reconstruction. The models we compared with include Multi-view PIFu [13], IntegratedPIFu (multi-view version) [4], DeepMultiCap [22], and SeSDF [1]. We also compared with DoubleField [15] and Data-Driven 3D Reconstruction method [24] in our Supp. Mat. In our quantitative evaluation, we use metrics that include Chamfer distance (CD), Point-to-Surface (P2S), and Normal reprojection error (Normal). These metrics are also used in [1,4,13,22]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 473, + 482, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 473, + 482, + 571 + ], + "spans": [ + { + "bbox": [ + 130, + 473, + 482, + 571 + ], + "type": "text", + "content": "Qualitative Evaluation We evaluate the methods qualitatively in Fig. 1 and Fig. 10. In these figures, we show the meshes produced by two of our models. 
Our first model (in column (e)) uses neither a SMPL-X mesh nor " + }, + { + "bbox": [ + 130, + 473, + 482, + 571 + ], + "type": "inline_equation", + "content": "1024 \\times 1024" + }, + { + "bbox": [ + 130, + 473, + 482, + 571 + ], + "type": "text", + "content": " high-res images. Our second model (in column (f)) does not use a SMPL-X mesh but uses " + }, + { + "bbox": [ + 130, + 473, + 482, + 571 + ], + "type": "inline_equation", + "content": "1024 \\times 1024" + }, + { + "bbox": [ + 130, + 473, + 482, + 571 + ], + "type": "text", + "content": " high-res images. Among the SOTA models, IntegratedPIFu uses high-res images, while DeepMultiCap and SeSDF use a groundtruth SMPL-X mesh. Comparison with SeSDF is shown in Fig. 11. We find that our models outperformed SOTA models in both structural accuracy and appearance details." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "type": "text", + "content": "Quantitative Evaluation In Tab. 1, we compared our models with existing methods quantitatively. Because different SOTA methods require different types of inputs (i.e. groundtruth SMPL-X or high-res images), and these different inputs may give additional advantage to a method, we decided to train four different versions of our model, with each version using a different combination of inputs as shown in the table. The table shows that our methods significantly outperform the existing models in all three datasets. See Supp. Mat. for more analysis." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 238, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 238, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 238, + 101 + ], + "type": "text", + "content": "K. Y. Chan et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 195, + 125, + 418, + 168 + ], + "blocks": [ + { + "bbox": [ + 223, + 114, + 390, + 125 + ], + "lines": [ + { + "bbox": [ + 223, + 114, + 390, + 125 + ], + "spans": [ + { + "bbox": [ + 223, + 114, + 390, + 125 + ], + "type": "text", + "content": "Table 2: Quantitative evaluation of " + }, + { + "bbox": [ + 223, + 114, + 390, + 125 + ], + "type": "inline_equation", + "content": "{G}_{M}" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 195, + 125, + 418, + 168 + ], + "lines": [ + { + "bbox": [ + 195, + 125, + 418, + 168 + ], + "spans": [ + { + "bbox": [ + 195, + 125, + 418, + 168 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Methods</td><td colspan="2">THuman2.0</td><td colspan="2">BUFF</td></tr>
<tr><td>CD (10<sup>-5</sup>)</td><td>P2S (10<sup>-5</sup>)</td><td>CD (10<sup>-2</sup>)</td><td>P2S (10<sup>-2</sup>)</td></tr>
<tr><td>PIFu</td><td>26.97</td><td>25.10</td><td>9.651</td><td>9.247</td></tr>
<tr><td>PIFu + G<sub>M</sub> (G<sub>N</sub> not used)</td><td>6.626</td><td>7.212</td><td>2.530</td><td>3.005</td></tr>
<tr><td>PIFu + G<sub>M</sub> (G<sub>N</sub> is used)</td><td>6.007</td><td>6.634</td><td>2.386</td><td>2.999</td></tr></table>
", + "image_path": "49350e7e016413d87c469ade39b8aa7e38d050aa4dbbcbd7708f510836817a5d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 222, + 182, + 392, + 216 + ], + "blocks": [ + { + "bbox": [ + 183, + 171, + 430, + 182 + ], + "lines": [ + { + "bbox": [ + 183, + 171, + 430, + 182 + ], + "spans": [ + { + "bbox": [ + 183, + 171, + 430, + 182 + ], + "type": "text", + "content": "Table 3: Quantitative evaluation of " + }, + { + "bbox": [ + 183, + 171, + 430, + 182 + ], + "type": "inline_equation", + "content": "G_V" + }, + { + "bbox": [ + 183, + 171, + 430, + 182 + ], + "type": "text", + "content": " at visible regions" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 222, + 182, + 392, + 216 + ], + "lines": [ + { + "bbox": [ + 222, + 182, + 392, + 216 + ], + "spans": [ + { + "bbox": [ + 222, + 182, + 392, + 216 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Methods</td><td colspan="2">THuman2.0</td><td colspan="2">BUFF</td></tr>
<tr><td>CD (10<sup>-5</sup>)</td><td>P2S (10<sup>-5</sup>)</td><td>CD (10<sup>-4</sup>)</td><td>P2S (10<sup>-4</sup>)</td></tr>
<tr><td>No G<sub>V</sub></td><td>4.036</td><td>2.964</td><td>1.315</td><td>1.096</td></tr>
<tr><td>With G<sub>V</sub></td><td>3.891</td><td>2.840</td><td>1.284</td><td>1.056</td></tr></table>
", + "image_path": "515b458ab1422b8edf66ea6dd031cee8f92562be332238d9911f838acd03d8ae.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 220, + 246, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 220, + 246, + 232 + ], + "spans": [ + { + "bbox": [ + 132, + 220, + 246, + 232 + ], + "type": "text", + "content": "4.3 Ablation Studies" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 239, + 481, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 239, + 481, + 299 + ], + "spans": [ + { + "bbox": [ + 130, + 239, + 481, + 299 + ], + "type": "text", + "content": "Evaluation of the Different 3D Feature Grids Firstly, in order to assess the effectiveness of " + }, + { + "bbox": [ + 130, + 239, + 481, + 299 + ], + "type": "inline_equation", + "content": "G_{M}" + }, + { + "bbox": [ + 130, + 239, + 481, + 299 + ], + "type": "text", + "content": ", we train and compare a single-view PIFu that is either not given or given " + }, + { + "bbox": [ + 130, + 239, + 481, + 299 + ], + "type": "inline_equation", + "content": "G_{M}" + }, + { + "bbox": [ + 130, + 239, + 481, + 299 + ], + "type": "text", + "content": " as an additional input. The comparison is shown quantitatively in the first two rows of Tab. 2 and qualitatively in Fig. 12. Notably, with " + }, + { + "bbox": [ + 130, + 239, + 481, + 299 + ], + "type": "inline_equation", + "content": "G_{M}" + }, + { + "bbox": [ + 130, + 239, + 481, + 299 + ], + "type": "text", + "content": ", the single-view PIFu can also outperform a Multi-view PIFu (1st row of Tab. 1)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 300, + 481, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 300, + 481, + 335 + ], + "spans": [ + { + "bbox": [ + 130, + 300, + 481, + 335 + ], + "type": "text", + "content": "Next, as aforementioned, " + }, + { + "bbox": [ + 130, + 300, + 481, + 335 + ], + "type": "inline_equation", + "content": "G_{M}" + }, + { + "bbox": [ + 130, + 300, + 481, + 335 + ], + "type": "text", + "content": " can be complemented with " + }, + { + "bbox": [ + 130, + 300, + 481, + 335 + ], + "type": "inline_equation", + "content": "G_{N}" + }, + { + "bbox": [ + 130, + 300, + 481, + 335 + ], + "type": "text", + "content": ". Thus, we also show the results when " + }, + { + "bbox": [ + 130, + 300, + 481, + 335 + ], + "type": "inline_equation", + "content": "G_{M}" + }, + { + "bbox": [ + 130, + 300, + 481, + 335 + ], + "type": "text", + "content": " is used with " + }, + { + "bbox": [ + 130, + 300, + 481, + 335 + ], + "type": "inline_equation", + "content": "G_{N}" + }, + { + "bbox": [ + 130, + 300, + 481, + 335 + ], + "type": "text", + "content": " in a single-view PIFu (see last row of Tab. 2). The results clearly demonstrated the benefit of including " + }, + { + "bbox": [ + 130, + 300, + 481, + 335 + ], + "type": "inline_equation", + "content": "G_{N}" + }, + { + "bbox": [ + 130, + 300, + 481, + 335 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 336, + 481, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 336, + 481, + 396 + ], + "spans": [ + { + "bbox": [ + 130, + 336, + 481, + 396 + ], + "type": "text", + "content": "We also evaluated " + }, + { + "bbox": [ + 130, + 336, + 481, + 396 + ], + "type": "inline_equation", + "content": "G_V" + }, + { + "bbox": [ + 130, + 336, + 481, + 396 + ], + "type": "text", + "content": " by training the 2nd stage of 3DFG-PIFu with or without " + }, + { + "bbox": [ + 130, + 336, + 481, + 396 + ], + "type": "inline_equation", + "content": "G_V" + }, + { + "bbox": [ + 130, + 336, + 481, + 396 + ], + "type": "text", + "content": ". The results in Tab. 3 and Fig. 13 show that " + }, + { + "bbox": [ + 130, + 336, + 481, + 396 + ], + "type": "inline_equation", + "content": "G_V" + }, + { + "bbox": [ + 130, + 336, + 481, + 396 + ], + "type": "text", + "content": " improves the partial refined meshes obtained in the 2nd stage. Aside: As only visible regions of partial refined meshes are used to form the final mesh, Tab. 3 must consider only visible regions." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 401, + 481, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 401, + 481, + 437 + ], + "spans": [ + { + "bbox": [ + 130, + 401, + 481, + 437 + ], + "type": "text", + "content": "Evaluating our Iterative Mechanism (i.e. Our 2nd Stage) We also show that 3DFG-PIFu's 2nd stage indeed improves the base meshes from the 1st stage. See Fig. 9 and Tab. 4. The improved meshes show sharper appearance details." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 437, + 481, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 437, + 481, + 485 + ], + "spans": [ + { + "bbox": [ + 130, + 437, + 481, + 485 + ], + "type": "text", + "content": "When more views are made available (i.e. " + }, + { + "bbox": [ + 130, + 437, + 481, + 485 + ], + "type": "inline_equation", + "content": "V > 2" + }, + { + "bbox": [ + 130, + 437, + 481, + 485 + ], + "type": "text", + "content": "), the 3DFG-PIFu can incrementally update and improve the current mesh without the need for additional training. We simply replace the base mesh with the current mesh and re-run the 2nd stage again. Results are shown in Fig 14." 
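The incremental update just described is essentially a loop that feeds each new view, together with the current mesh, back through the trained 2nd stage. A minimal sketch follows, with `refine_stage2` standing in as a hypothetical handle for that stage (its real interface is not given in this excerpt):

```python
def refine_with_views(base_mesh, extra_views, refine_stage2):
    """Re-run the trained 2nd stage once per additional view (V > 2)."""
    current_mesh = base_mesh
    for view in extra_views:
        # The current mesh simply replaces the base mesh; no retraining occurs.
        current_mesh = refine_stage2(current_mesh, view)
    return current_mesh
```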
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 491, + 481, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 491, + 481, + 540 + ], + "spans": [ + { + "bbox": [ + 130, + 491, + 481, + 540 + ], + "type": "text", + "content": "Evaluation of SDF-based SMPL- " + }, + { + "bbox": [ + 130, + 491, + 481, + 540 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 130, + 491, + 481, + 540 + ], + "type": "text", + "content": " features In order to evaluate the effectiveness of our SDF-based SMPL-X features " + }, + { + "bbox": [ + 130, + 491, + 481, + 540 + ], + "type": "inline_equation", + "content": "(G_{X})" + }, + { + "bbox": [ + 130, + 491, + 481, + 540 + ], + "type": "text", + "content": ", we train and compare a single-view PIFu that is given either (i) S-PIFu features, (ii) PaMIR's voxel-aligned features, (iii) our " + }, + { + "bbox": [ + 130, + 491, + 481, + 540 + ], + "type": "inline_equation", + "content": "G_{X}" + }, + { + "bbox": [ + 130, + 491, + 481, + 540 + ], + "type": "text", + "content": ", or (iv) PaMIR's voxel-aligned features + our " + }, + { + "bbox": [ + 130, + 491, + 481, + 540 + ], + "type": "inline_equation", + "content": "G_{X}" + }, + { + "bbox": [ + 130, + 491, + 481, + 540 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 222, + 544, + 389, + 661 + ], + "blocks": [ + { + "bbox": [ + 222, + 544, + 389, + 661 + ], + "lines": [ + { + "bbox": [ + 222, + 544, + 389, + 661 + ], + "spans": [ + { + "bbox": [ + 222, + 544, + 389, + 661 + ], + "type": "image", + "image_path": "3492c39eae036352e3f5bd688f513882c0b567a463da5412de271dfc63ac805b.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 191, + 659, + 421, + 670 + ], + "lines": [ + { + "bbox": [ + 191, + 659, + 421, + 670 + ], + "spans": [ + { + "bbox": [ + 191, + 659, + 421, + 670 + ], + "type": "text", + "content": "Fig. 13: Partial refined meshes obtained w and w/o " + }, + { + "bbox": [ + 191, + 659, + 421, + 670 + ], + "type": "inline_equation", + "content": "G_V" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DFG-PIFu" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "text", + "content": "A quantitative comparison is shown in Tab. 5. The table shows our " + }, + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "inline_equation", + "content": "G_{X}" + }, + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "text", + "content": " outperformed S-PIFu features. 
Whether our " + }, + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "inline_equation", + "content": "G_{X}" + }, + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "text", + "content": " is combined with voxel-aligned features or not, it clearly improves the performance of a model when in use. Qualitatively, Fig. 15 shows that combining PaMIR's voxel-aligned features with our " + }, + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "inline_equation", + "content": "G_{X}" + }, + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "text", + "content": " yields the most robust results." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 191, + 317, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 191, + 317, + 205 + ], + "spans": [ + { + "bbox": [ + 132, + 191, + 317, + 205 + ], + "type": "text", + "content": "5 Limitations and Conclusion" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 213, + 481, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 213, + 481, + 248 + ], + "spans": [ + { + "bbox": [ + 130, + 213, + 481, + 248 + ], + "type": "text", + "content": "In our Supp. Mat., we address concerns on 3DFG-PIFu's efficiency. In short, via a series of implementation tricks, we show 3DFG-PIFu is actually more efficient than roughly half of the existing SOTA methods." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 249, + 481, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 249, + 481, + 320 + ], + "spans": [ + { + "bbox": [ + 130, + 249, + 481, + 320 + ], + "type": "text", + "content": "We have introduced 3DFG-PIFu, a multi-view pixel-aligned implicit model that uses 3D Feature Grids to fuse multi-view information. 3DFG-PIFu also proposed an iterative pipeline that combines appearance details from multiple views into a single mesh. Lastly, 3DFG-PIFu introduced SDF-based SMPL-X features, which is a new way of incorporating a SMPL-X mesh into a pixel-aligned implicit model." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 190, + 335, + 422, + 371 + ], + "blocks": [ + { + "bbox": [ + 182, + 323, + 432, + 334 + ], + "lines": [ + { + "bbox": [ + 182, + 323, + 432, + 334 + ], + "spans": [ + { + "bbox": [ + 182, + 323, + 432, + 334 + ], + "type": "text", + "content": "Table 4: Quantitative evaluation of 3DFG-PIFu's 2nd Stage" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 190, + 335, + 422, + 371 + ], + "lines": [ + { + "bbox": [ + 190, + 335, + 422, + 371 + ], + "spans": [ + { + "bbox": [ + 190, + 335, + 422, + 371 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Methods</td><td colspan="2">THuman2.0</td><td colspan="2">BUFF</td></tr>
<tr><td>CD (10<sup>-5</sup>)</td><td>P2S (10<sup>-5</sup>)</td><td>CD (10<sup>-2</sup>)</td><td>P2S (10<sup>-2</sup>)</td></tr>
<tr><td>Base meshes (1st Stage)</td><td>6.007</td><td>6.634</td><td>2.386</td><td>2.999</td></tr>
<tr><td>Final meshes (2nd Stage)</td><td>5.796</td><td>5.811</td><td>2.509</td><td>2.286</td></tr></table>
", + "image_path": "b2d38c53df2d4a76e7118d1f7113b7fd5223b0ca4987c8204edf62b2d52c2842.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 178, + 384, + 436, + 437 + ], + "blocks": [ + { + "bbox": [ + 223, + 373, + 389, + 384 + ], + "lines": [ + { + "bbox": [ + 223, + 373, + 389, + 384 + ], + "spans": [ + { + "bbox": [ + 223, + 373, + 389, + 384 + ], + "type": "text", + "content": "Table 5: Quantitative evaluation of " + }, + { + "bbox": [ + 223, + 373, + 389, + 384 + ], + "type": "inline_equation", + "content": "{G}_{X}" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 178, + 384, + 436, + 437 + ], + "lines": [ + { + "bbox": [ + 178, + 384, + 436, + 437 + ], + "spans": [ + { + "bbox": [ + 178, + 384, + 436, + 437 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Methods</td><td colspan="2">THuman2.0</td><td colspan="2">BUFF</td></tr>
<tr><td>CD (10<sup>-5</sup>)</td><td>P2S (10<sup>-5</sup>)</td><td>CD (10<sup>-2</sup>)</td><td>P2S (10<sup>-2</sup>)</td></tr>
<tr><td>S-PIFu Features</td><td>4.488</td><td>4.030</td><td>6.812</td><td>6.880</td></tr>
<tr><td>Voxel-aligned Features</td><td>4.104</td><td>3.740</td><td>8.037</td><td>9.225</td></tr>
<tr><td>G<sub>X</sub> (Ours)</td><td>4.352</td><td>3.734</td><td>6.351</td><td>6.641</td></tr>
<tr><td>Voxel-aligned Features + G<sub>X</sub> (Ours)</td><td>3.970</td><td>3.483</td><td>8.012</td><td>8.827</td></tr></table>
", + "image_path": "907dd685fa2ef62b9f25d129a07ea7535c1ea340fa518a1f4cdd9f00d4dd57d9.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 208, + 438, + 399, + 529 + ], + "blocks": [ + { + "bbox": [ + 208, + 438, + 399, + 529 + ], + "lines": [ + { + "bbox": [ + 208, + 438, + 399, + 529 + ], + "spans": [ + { + "bbox": [ + 208, + 438, + 399, + 529 + ], + "type": "image", + "image_path": "ca08776b13e539eb4ac6880654b3752e392632c430a1c08e284c169f2d532f46.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 201, + 531, + 411, + 541 + ], + "lines": [ + { + "bbox": [ + 201, + 531, + 411, + 541 + ], + "spans": [ + { + "bbox": [ + 201, + 531, + 411, + 541 + ], + "type": "text", + "content": "Fig. 14: Effect of using more views in 3DFG-PIFu." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 175, + 544, + 436, + 645 + ], + "blocks": [ + { + "bbox": [ + 175, + 544, + 436, + 645 + ], + "lines": [ + { + "bbox": [ + 175, + 544, + 436, + 645 + ], + "spans": [ + { + "bbox": [ + 175, + 544, + 436, + 645 + ], + "type": "image", + "image_path": "84a01eba7cb18353ce7d0a07913ec46ee18d8436b98e92fce92e258d22e67f21.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 228, + 645, + 384, + 655 + ], + "lines": [ + { + "bbox": [ + 228, + 645, + 384, + 655 + ], + "spans": [ + { + "bbox": [ + 228, + 645, + 384, + 655 + ], + "type": "text", + "content": "Fig. 15: Qualitative evaluation of " + }, + { + "bbox": [ + 228, + 645, + 384, + 655 + ], + "type": "inline_equation", + "content": "G_{X}" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 238, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 238, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 238, + 101 + ], + "type": "text", + "content": "K. Y. Chan et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 482, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 482, + 153 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 482, + 153 + ], + "type": "text", + "content": "Acknowledgements This research work is supported by the Agency for Science, Technology and Research (A*STAR) under its MTC Programmatic Funds (Grant No. M23L7b0021)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 168, + 197, + 181 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 168, + 197, + 181 + ], + "spans": [ + { + "bbox": [ + 132, + 168, + 197, + 181 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 190, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 138, + 190, + 480, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 190, + 480, + 222 + ], + "spans": [ + { + "bbox": [ + 138, + 190, + 480, + 222 + ], + "type": "text", + "content": "1. Cao, Y., Han, K., Wong, K.Y.K.: Sesdf: Self-evolved signed distance field for implicit 3d clothed human reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4647-4657 (2023)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 224, + 480, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 224, + 480, + 255 + ], + "spans": [ + { + "bbox": [ + 138, + 224, + 480, + 255 + ], + "type": "text", + "content": "2. Chan, K., Lin, G., Zhao, H., Lin, W.: S-pifu: Integrating parametric human models with pifu for single-view clothed human reconstruction. Advances in Neural Information Processing Systems 35, 17373-17385 (2022)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 255, + 480, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 255, + 480, + 287 + ], + "spans": [ + { + "bbox": [ + 138, + 255, + 480, + 287 + ], + "type": "text", + "content": "3. Chan, K., Lin, G., Zhao, H., Lin, W.: S-pifu: Integrating parametric human models with pifu for single-view clothed human reconstruction. In: Advances in Neural Information Processing Systems (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 288, + 480, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 288, + 480, + 330 + ], + "spans": [ + { + "bbox": [ + 138, + 288, + 480, + 330 + ], + "type": "text", + "content": "4. Chan, K.Y., Lin, G., Zhao, H., Lin, W.: Integratedpifu: Integrated pixel aligned implicit function for single-view human reconstruction. In: Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part II. pp. 328-344. Springer (2022)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 331, + 480, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 331, + 480, + 373 + ], + "spans": [ + { + "bbox": [ + 138, + 331, + 480, + 373 + ], + "type": "text", + "content": "5. Chan, K.Y., Liu, F., Lin, G., Foo, C.S., Lin, W.: Fine structure-aware sampling: A new sampling training scheme for pixel-aligned implicit models in single-view human reconstruction. In: Proceedings of the AAAI Conference on Artificial Intelligence. vol. 38, pp. 964-971 (2024)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 374, + 480, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 374, + 480, + 417 + ], + "spans": [ + { + "bbox": [ + 138, + 374, + 480, + 417 + ], + "type": "text", + "content": "6. Chan, K.Y., Liu, F., Lin, G., Foo, C.S., Lin, W.: R-cyclic diffuser: Reductive and cyclic latent diffusion for 3d clothed human digitalization. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
10304-10313 (2024)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 418, + 480, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 418, + 480, + 459 + ], + "spans": [ + { + "bbox": [ + 138, + 418, + 480, + 459 + ], + "type": "text", + "content": "7. Gong, X., Song, L., Zheng, M., Planche, B., Chen, T., Yuan, J., Doermann, D., Wu, Z.: Progressive multi-view human mesh recovery with self-supervision. In: Proceedings of the AAAI Conference on Artificial Intelligence. vol. 37, pp. 676-684 (2023)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 460, + 480, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 460, + 480, + 493 + ], + "spans": [ + { + "bbox": [ + 138, + 460, + 480, + 493 + ], + "type": "text", + "content": "8. Hong, Y., Zhang, J., Jiang, B., Guo, Y., Liu, L., Bao, H.: Stereopifu: Depth aware clothed human digitization via stereo vision. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 535-545 (2021)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 494, + 480, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 494, + 480, + 525 + ], + "spans": [ + { + "bbox": [ + 138, + 494, + 480, + 525 + ], + "type": "text", + "content": "9. Kolotouros, N., Pavlakos, G., Jayaraman, D., Daniilidis, K.: Probabilistic modeling for human mesh recovery. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11605-11614 (2021)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 525, + 480, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 525, + 480, + 557 + ], + "spans": [ + { + "bbox": [ + 138, + 525, + 480, + 557 + ], + "type": "text", + "content": "0. Liang, J., Lin, M.C.: Shape-aware human pose and shape reconstruction using multi-view images. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 4352-4362 (2019)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 558, + 480, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 558, + 480, + 578 + ], + "spans": [ + { + "bbox": [ + 138, + 558, + 480, + 578 + ], + "type": "text", + "content": "1. Lorensen, W.E., Cline, H.E.: Marching cubes: A high resolution 3d surface construction algorithm. ACM siggraph computer graphics 21(4), 163-169 (1987)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 579, + 480, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 579, + 480, + 621 + ], + "spans": [ + { + "bbox": [ + 138, + 579, + 480, + 621 + ], + "type": "text", + "content": "2. Pavlakos, G., Choutas, V., Ghorbani, N., Bolkart, T., Osman, A.A.A., Tzionas, D., Black, M.J.: Expressive body capture: 3D hands, face, and body from a single image. In: Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR). pp. 10975-10985 (2019)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 622, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 622, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 622, + 480, + 665 + ], + "type": "text", + "content": "3. Saito, S., Huang, Z., Natsume, R., Morishima, S., Kanazawa, A., Li, H.: Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 
2304-2314 (2019)" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 397, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DFG-PIFu" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 544 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 133, + 116, + 481, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 116, + 481, + 160 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 481, + 160 + ], + "type": "text", + "content": "14. Saito, S., Simon, T., Saragih, J., Joo, H.: Pifuhd: Multi-level pixel-aligned implicit function for high-resolution 3d human digitization. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 84-93 (2020)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 161, + 481, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 161, + 481, + 205 + ], + "spans": [ + { + "bbox": [ + 133, + 161, + 481, + 205 + ], + "type": "text", + "content": "15. Shao, R., Zhang, H., Zhang, H., Chen, M., Cao, Y.P., Yu, T., Liu, Y.: Doublefield: Bridging the neural surface and radiance fields for high-fidelity human reconstruction and rendering. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15872-15882 (2022)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 205, + 481, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 205, + 481, + 248 + ], + "spans": [ + { + "bbox": [ + 133, + 205, + 481, + 248 + ], + "type": "text", + "content": "16. Shao, R., Zheng, Z., Zhang, H., Sun, J., Liu, Y.: Diffustereo: High quality human reconstruction via diffusion-based stereo using sparse cameras. In: Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXII. pp. 702-720. Springer (2022)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 249, + 481, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 249, + 481, + 281 + ], + "spans": [ + { + "bbox": [ + 132, + 249, + 481, + 281 + ], + "type": "text", + "content": "17. Yu, T., Zheng, Z., Guo, K., Liu, P., Dai, Q., Liu, Y.: Function4d: Real-time human volumetric capture from very sparse consumer rgbd sensors. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR2021) (June 2021)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 281, + 481, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 281, + 481, + 314 + ], + "spans": [ + { + "bbox": [ + 132, + 281, + 481, + 314 + ], + "type": "text", + "content": "18. Yu, Z., Zhang, L., Xu, Y., Tang, C., Tran, L., Keskin, C., Park, H.S.: Multiview human body reconstruction from uncalibrated cameras. 
In: Advances in Neural Information Processing Systems (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 315, + 481, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 315, + 481, + 346 + ], + "spans": [ + { + "bbox": [ + 132, + 315, + 481, + 346 + ], + "type": "text", + "content": "19. Zhang, C., Pujades, S., Black, M.J., Pons-Moll, G.: Detailed, accurate, human shape estimation from clothed 3d scan sequences. In: The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (July 2017)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 347, + 481, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 347, + 481, + 390 + ], + "spans": [ + { + "bbox": [ + 132, + 347, + 481, + 390 + ], + "type": "text", + "content": "20. Zhang, H., Tian, Y., Zhou, X., Ouyang, W., Liu, Y., Wang, L., Sun, Z.: Pymaf: 3d human pose and shape regression with pyramidal mesh alignment feedback loop. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 11446-11456 (2021)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 391, + 481, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 391, + 481, + 434 + ], + "spans": [ + { + "bbox": [ + 132, + 391, + 481, + 434 + ], + "type": "text", + "content": "21. Zhao, F., Yang, W., Zhang, J., Lin, P., Zhang, Y., Yu, J., Xu, L.: Humannerf: Efficiently generated human radiance field from sparse inputs. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7743-7753 (2022)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 434, + 481, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 434, + 481, + 479 + ], + "spans": [ + { + "bbox": [ + 132, + 434, + 481, + 479 + ], + "type": "text", + "content": "22. Zheng, Y., Shao, R., Zhang, Y., Yu, T., Zheng, Z., Dai, Q., Liu, Y.: Deepmulticap: Performance capture of multiple characters using sparse multiview cameras. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 6239-6249 (2021)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 479, + 481, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 479, + 481, + 511 + ], + "spans": [ + { + "bbox": [ + 132, + 479, + 481, + 511 + ], + "type": "text", + "content": "23. Zheng, Z., Yu, T., Liu, Y., Dai, Q.: Pamir: Parametric model-conditioned implicit representation for image-based human reconstruction. IEEE transactions on pattern analysis and machine intelligence 44(6), 3170-3184 (2021)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 512, + 481, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 512, + 481, + 544 + ], + "spans": [ + { + "bbox": [ + 132, + 512, + 481, + 544 + ], + "type": "text", + "content": "24. Zins, P., Xu, Y., Boyer, E., Wuhrer, S., Tung, T.: Data-driven 3d reconstruction of dressed humans from sparse views. In: 2021 International Conference on 3D Vision (3DV). pp. 494-504. 
IEEE (2021)" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 238, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 238, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 238, + 101 + ], + "type": "text", + "content": "K. Y. Chan et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/7d049317-38f5-44ac-a691-e07c992f4970_content_list.json b/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/7d049317-38f5-44ac-a691-e07c992f4970_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..c1e96b8a28803b741dcd3e3730aadfcd605ee6d8 --- /dev/null +++ b/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/7d049317-38f5-44ac-a691-e07c992f4970_content_list.json @@ -0,0 +1,1634 @@ +[ + { + "type": "text", + "text": "3DGazeNet: Generalizing 3D Gaze Estimation with Weak-Supervision from Synthetic Views", + "text_level": 1, + "bbox": [ + 236, + 140, + 766, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Evangelos Ververas1,2, Polydefkis Gkagkos2, Jiankang Deng1,2, Michail Christos Doukas1, Jia Guo3, and Stefanos Zafeiriou1", + "bbox": [ + 246, + 210, + 754, + 243 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Imperial College London, UK", + "bbox": [ + 393, + 253, + 607, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ Huawei Noah's Ark Lab, UK", + "bbox": [ + 393, + 268, + 604, + 281 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3 InsightFace", + "bbox": [ + 454, + 281, + 547, + 296 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://eververas.github.io/3DGazeNet/", + "bbox": [ + 351, + 297, + 648, + 310 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. Developing gaze estimation models that generalize well to unseen domains and in-the-wild conditions remains a challenge with no known best solution. This is mostly due to the difficulty of acquiring ground truth data that cover the distribution of faces, head poses, and environments that exist in the real world. Most recent methods attempt to close the gap between specific source and target domains using domain adaptation. In this work, we propose to train general gaze estimation models which can be directly employed in novel environments without adaptation. To do so, we leverage the observation that head, body, and hand pose estimation benefit from revising them as dense 3D coordinate prediction, and similarly express gaze estimation as regression of dense 3D eye meshes. To close the gap between image domains, we create a large-scale dataset of diverse faces with gaze pseudo-annotations, which we extract based on the 3D geometry of the face, and design a multi-view supervision framework to balance their effect during training. 
We test our method in the task of gaze generalization, in which we demonstrate improvement of up to $23\\%$ compared to state-of-the-art when no ground truth data are available, and up to $10\\%$ when they are.", + "bbox": [ + 261, + 342, + 740, + 593 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: 3D Gaze Estimation $\\cdot$ 3D Eye Mesh $\\cdot$ Gaze Generalization", + "bbox": [ + 261, + 604, + 738, + 619 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 217, + 643, + 375, + 659 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Eye gaze serves as a cue for understanding human behavior and intents, including attention, communication, and mental state. As a result, gaze information has been exploited by a lot of applications of various fields of interest, ranging from medical and psychological analysis [9,37,64] to human-computer interaction [4], efficient rendering in VR/AR headset systems [6,10,39], virtual character animation [57,61,62,77] and driver state monitoring [34,50]. When high accuracy is important, data collection under the particular capturing set up is crucial, e.g. specific VR headsets, static screen-camera setups. However, in numerous real-world applications robustness is equally important to high accuracy, e.g. face-unlocking in mobile devices, best frame capturing/selection in group photos and automatic gaze annotation of large datasets for face reenactment.", + "bbox": [ + 212, + 672, + 787, + 840 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8461aabaf554595059207969dbc8a35fa58eb3982c46f8faf331904eb2e1199c.jpg", + "image_caption": [ + "Fig. 1: Overview of our method 3DGazeNet. a) We approach 3D gaze estimation as dense 3D eye mesh regression, which is robust against sparse prediction errors. b) Domain generalization is one of the hardest challenges in gaze estimation. Training with common gaze datasets often results in poor cross-dataset performance. c) Our multi-view supervision method employs pseudo-labels from in-the-wild face images to close the gap between controlled and in-the-wild datasets." + ], + "image_footnote": [], + "bbox": [ + 222, + 143, + 781, + 304 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Typically, 3D gaze estimation is expressed as a direct mapping between input images and a few pose parameters [12, 42, 52, 70, 82], or sparse representations of the eyes [54, 55, 66]. Nevertheless, it has been shown that unconstrained face and body pose estimation from single images benefits from replacing predicting few pose or shape parameters by directly predicting dense 3D geometry [3, 16, 26, 43, 58]. In this work, we leverage this observation and revise the formulation of gaze estimation as end-to-end dense 3D eye mesh regression, which combined with standard vector regression induces multiple benefits. Existing datasets with ground truth 3D eyes include only images in the IR domain [21], however, IR images cannot be directly employed for RGB-based methods. As 3D eye meshes are not available for most gaze datasets, we define a unified eye representation, i.e. a rigid 3D eyeball template (Fig. 
3(a)), which we fit on images based on sparse landmarks and the available gaze labels.", + "bbox": [ + 212, + 429, + 787, + 626 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Several gaze datasets have become available in the last decade [20, 22, 35, 42, 52, 59, 60, 79, 81], which have contributed to the recent progress in automatic 3D gaze estimation from monocular RGB images. However, collecting gaze datasets is a costly and challenging process which often restricts them being captured in controlled environments and consisting of limited unique identities, thus lacking variation compared to data from the real world. This causes the most common challenge in gaze estimation, which is cross-domain and in-the-wild generalization. In this work, we propose a method to exploit arbitrary, unlabeled face images to largely increase the diversity of our training data as well as our model's generalization capabilities. To that end, we design a simple pipeline to extract robust 3D gaze pseudo-labels based on the 3D shape of the face and eyes, without having any prior gaze information. Based on recent advancements on weakly-supervised head, body and hand pose estimation [8, 17, 31, 44, 65], we regularize inconsistencies of pseudo-labels, by a geometric constraint which encourages our", + "bbox": [ + 212, + 628, + 787, + 840 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "E. Ververas et al.", + "bbox": [ + 271, + 114, + 387, + 126 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We model to maintain prediction consistency between multiple synthetic views of the same subject.", + "bbox": [ + 212, + 146, + 782, + 176 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Most recent methods attempt to close the gap between diverse image domains using domain adaptation. Commonly, they employ a few samples of the target domain, with [29, 53, 73] or without [5, 7, 11, 24, 27, 47, 68, 70] their gaze labels, to fine-tune an initial model. Although successful, approaches following this scheme require knowledge of the target domain and model re-training, which prohibit their use as plug-n-play methods in real user applications. In contrast, we propose a method to train gaze estimation models that generalize well to unseen and inthe-wild environments without the constraints of domain adaption. Our method can effortlessly be employed by user applications in a plug-n-play fashion.", + "bbox": [ + 212, + 176, + 785, + 311 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "An overview of our approach, which we name 3DGazeNet, is presented in Fig. 1. We evaluate our method in cross-dataset gaze generalization, showcasing improvements over the state-of-the-art, even by a large margin, and perform ablations over the model components. To summarize, the key contributions of our work are:", + "bbox": [ + 212, + 313, + 785, + 387 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- A simple automatic method to extract robust 3D eye meshes from arbitrary face images and a multi-view consistency regularization which allows to exploit them for improved gaze generalization.", + "- A revised formulation for gaze estimation, based on dense 3D eye mesh regression from images. 
To the best of our knowledge, we are the first to utilize an end-to-end 3D eye mesh regression approach for gaze estimation.", + "- Improved performance over the state-of-the-art in gaze generalization with $(10\\%)$ and without $(23\\%)$ using source domain ground truth, with a simple model architecture. Based on that, we believe that 3DGazeNet is an important step towards reliable plug-n-play gaze tracking." + ], + "bbox": [ + 223, + 396, + 782, + 544 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 215, + 569, + 387, + 585 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Numerous model designs for supervised 3D gaze estimation have been tested recently, investigating which face region to use as input [12,42,82], the model architecture [1,14,46,67] and what external stimuli to utilize to improve performance [52]. Motivated by the difficulties in collecting diverse and large scale data for gaze estimation, recent works have shown that valuable gaze representations can be extracted in fully unsupervised settings, by applying gaze redirection [74] or disentanglement constraints [63].", + "bbox": [ + 212, + 599, + 782, + 704 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Gaze Adaptation and Generalization Much effort has been made to design methods that adapt well to known target subjects and environments, by employing either few labeled samples [29, 53, 73] or completely unlabeled data of the target domain [5, 7, 11, 24, 27, 47, 68, 70]. Differently from the above, gaze generalization models aim to improve cross-domain performance without any knowledge of the target domains. The models in [5, 11, 70], even though targeted for gaze adaptation, are based on learning general features for gaze estimation and thus, they perform well in target domain-agnostic settings. Moreover, [40] has shown", + "bbox": [ + 212, + 719, + 785, + 839 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "3DGazeNet", + "bbox": [ + 651, + 114, + 730, + 126 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 114, + 785, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "that it is possible to train general gaze estimation models by employing geometric constraints in scenes depicting social interaction between people. We believe that [40] is the closest work to ours, as it is the only method which uses 3D geometric cues of the scene to learn gaze from arbitrary face data. Lastly, [78] proposes to improve generalization by employing synthetic images which are, however, limited by the gaze distribution of existing gaze datasets. Both the implementation and custom dataset are not public, which hinders reproducibility and reliable comparisons.", + "bbox": [ + 212, + 146, + 787, + 268 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Model-Based Gaze Estimation Differently from the above, sparse or semantic representations of the eye geometry have also been employed by some methods to infer gaze from images [54, 55, 66, 67, 71, 72]. However, such representations do not convey information about the 3D substance of eyes and are prone to noisy predictions. In contrast, by predicting 3D eye meshes we learn a much more robust representation, from which we can retrieve any other sparse or semantic one just by indexing. Recovering dense 3D geometry of the eye region from images by fitting parametric models of the shape and texture has been previously proposed [71]. 
However, restrictions posed by building large-scale parametric models and fitting in-the-wild images have resulted in low gaze accuracy compared to learning-based methods.", + "bbox": [ + 212, + 287, + 789, + 454 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Face Reenactment and Learning from Synthetic Data Synthetic image data have been previously used in training deep networks, mainly to augment the training datasets and provide pseudo-ground truth annotations. For instance, [84] used CycleGAN [83] to create a new training corpus in order to balance emotion classes in the task of emotion classification. More recently, GANcraft [28] employed SPADE [56] to generate pseudo-ground truth images that were used to supervise their neural rendering framework. In this work, we obtain access to image pairs of the same subject in different views, by taking advantage of HeadGAN [19], a face reenactment system. In contrast to person-specific reenactment methods [18, 36, 41] or person-generic landmark-driven approaches [69, 75, 76], HeadGAN is able to perform free-view synthesis using a single source image.", + "bbox": [ + 212, + 473, + 789, + 654 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/54d377a55c6adaa4a090f6627c49621dd66d5167505f0a92fdf56187d7a886b2.jpg", + "image_caption": [ + "Fig. 2: We use HeadGAN [19] to generate novel views by manipulating the 3D pose of the face. During synthesis, angle $\\theta_z$ is transferred to all facial parts including the eyes, thus the relative angle between the head and eyes (i.e. the gaze direction in the head coordinate system) is maintained." + ], + "image_footnote": [], + "bbox": [ + 232, + 684, + 772, + 770 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "E. Ververas et al.", + "bbox": [ + 271, + 114, + 387, + 126 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 215, + 143, + 330, + 160 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 Problem Definition and Motivation", + "text_level": 1, + "bbox": [ + 215, + 176, + 552, + 191 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The aim of this work is to design a method that given a face image $\\mathbf{I}$ , it estimates $2\\times N_{v}$ 3D coordinates $\\mathbf{V} = [\\mathbf{V}_l^T,\\mathbf{V}_r^T ]^T$ , where $\\mathbf{V}_l\\in \\mathbb{R}^{N_v\\times 3}$ are coordinates corresponding to the left eyeball while $\\mathbf{V}_r\\in \\mathbb{R}^{N_v\\times 3}$ to the right, as well as a 3D gaze vector $g = (g_{x},g_{y},g_{z})$ . Then, the final gaze result is calculated by the mean direction of the two output components. Inspired by recent work in self-supervised 3D body pose estimation [31,44,65], we adopt multi-view constraints to train our model based on in-the-wild faces and automatically generated gaze pseudo-labels.", + "bbox": [ + 212, + 202, + 787, + 321 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To employ multi-view losses, we assume that images of the same subject with different head poses and the same gaze direction relatively to the head are available. For example, this condition is satisfied when a face picture is taken from different angles at the same time. As such images are not commonly available for in-the-wild datasets, we employ HeadGAN [19], a recent face reenactment method, to generate novel face poses from existing images. 
HeadGAN is able to synthesize face animations using dense face geometry, which covers the eyes, as a driving signal, together with a single source image. Using dense geometry guarantees that the relative angle between the head and eyes is maintained when synthesizing novel poses, as shown in Fig. 2.", + "bbox": [ + 212, + 323, + 787, + 474 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 Unified 3D Eye Representation", + "text_level": 1, + "bbox": [ + 215, + 496, + 519, + 512 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Learning consistent eye meshes across different images and datasets requires establishing a unified 3D eye representation. To that end, we define a 3D eyeball template as a rigid 3D triangular mesh with spherical shape, consisting of $N_{v} = 481$ vertices and $N_{t} = 928$ triangles. We create two mirrored versions, $\\mathbf{M}_l$ and $\\mathbf{M}_r$ , of the above mesh to represent a left and a right reference eyeball respectively. This representation allows us to allocate semantic labels to specific vertices of the eyeball, such as the iris border (Fig. 3 (a)), and calculate 3D gaze direction as the orientation of the central axis of our 3D eyeball template. In practice, an offset angle (the kappa coefficient) exists between the optical (central) and visual axes of eyes, which is subject-dependent and varies between $-2^{o}$ and $2^{o}$ across the population [73]. Accounting for this offset is essential for person-specific gaze estimation [29,45,53,73]. However, in our case of cross-dataset and in-the-wild gaze generalization, in which errors are much larger than the possible offset, data diversity is more important than anatomical precision; thus, our spherical eyeball is a reasonable approximation.", + "bbox": [ + 212, + 522, + 787, + 750 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3D Eyes Ground-Truth from Gaze Datasets For gaze estimation datasets, exact supervision can be acquired by automatically fitting the eyeball template on face images based on sparse iris landmarks and the available gaze labels, as shown in Fig. 3(b). Specifically, we first rotate the eyeball template around its center according to the gaze label. Then, we align (scale and translation) $x$ ,", + "bbox": [ + 212, + 763, + 787, + 840 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "3DGazeNet", + "bbox": [ + 651, + 114, + 730, + 126 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/9d7d58ea4fb7905809fe43185be6c3fca2fb763900fcf6120f1a4066415450e3.jpg", + "image_caption": [ + "M: $N_{v} = 481$ vertices, $N_{t} = 928$ triangles" + ], + "image_footnote": [], + "bbox": [ + 251, + 186, + 459, + 228 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/eb84fdff110a6cbad3c3dc547b0e17a22aa1a34d605f90f195d7f764d3dcb1a6.jpg", + "image_caption": [ + "(a) Eyeball template", + "(b) Ground truth generation", + "Fig. 3: (a) The employed rigid 3D eyeball mesh template. (b) Ground truth data generation, applied on gaze estimation datasets with available ground truth. (c) Pseudo-ground truth data generation, applied on arbitrary face images without any gaze label." 
+ ], + "image_footnote": [], + "bbox": [ + 223, + 273, + 491, + 364 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/d9aecc08e389f66d569df7b440a01df77fca7a3cbacefe69e706b885f19c9513.jpg", + "image_caption": [ + "(c) Pseudo-ground truth generation" + ], + "image_footnote": [], + "bbox": [ + 511, + 146, + 785, + 364 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "$y$ coordinates of the rotated eye mesh to the iris landmarks of the image and multiply $z$ coordinates with the same scale. To extract sparse iris landmarks, we employed the method of [55] as a basis for building an iris localization model which is robust against occlusions and low resolution. More details about the iris localization model are provided in the supplemental material.", + "bbox": [ + 212, + 463, + 787, + 540 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3D Eyes Pseudo-Ground Truth from In-The-Wild Images To extract 3D eyes from images without gaze labels, we have developed an automatic pipeline based on 3D face alignment and 2D iris localization. First, we recover the 3D face with $x$ , $y$ in image space using an off-the-shelf method. Then, we align our eyeball templates in the eye sockets based on the face's eyelid landmarks and predefined eyelid landmarks around the eyeball templates. In practice, we use the two corner landmarks of each eye, which do not move between open and closed eyes. Next, we lift 2D iris predictions to 3D by finding the nearest vertices of the aligned 3D eye templates. Finally, we compute the rotation between the initially aligned eyes and the 3D-lifted iris center and rotate the eyeballs accordingly. For 3D face alignment we employ RetinaFace [16], and for 2D iris localization we use [55], as above. The process is presented in Fig. 3(c).", + "bbox": [ + 212, + 551, + 787, + 736 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.3 Joint 3D Eye Mesh and Vector Regression", + "text_level": 1, + "bbox": [ + 214, + 755, + 612, + 771 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Given an input face image $\\mathbf{I}$ , we utilize 5 face detection landmarks to crop patches around each of the two eyes. We resize the patches to shape $128 \\times 128 \\times 3$ and stack them channel-wise along with a cropped image of the face. We employ a simple model architecture consisting of a ResNet-18 [30] to extract", + "bbox": [ + 212, + 779, + 787, + 840 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "E. Ververas et al.", + "bbox": [ + 271, + 114, + 387, + 126 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "features, followed by two fully connected layers to map them to two separate eye modalities, which are a) dense 3D eye coordinates and b) a 3D gaze vector. 
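A minimal PyTorch sketch of this two-headed design follows; the 9-channel input (two $128 \\times 128$ eye patches plus an equally sized face crop) and all module names are our assumptions for illustration, not details taken from the released implementation.

```python
# Hedged sketch of the described backbone with mesh and gaze heads.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet18

N_V = 481  # vertices per eyeball template (Sec. 3.2)

class EyeNet(nn.Module):
    def __init__(self):
        super().__init__()
        net = resnet18(weights=None)
        # accept the channel-wise stack of two eye patches and a face crop
        net.conv1 = nn.Conv2d(9, 64, kernel_size=7, stride=2, padding=3, bias=False)
        net.fc = nn.Identity()  # expose the 512-d features
        self.backbone = net
        self.mesh_head = nn.Linear(512, 2 * N_V * 3)  # left + right eye vertices
        self.gaze_head = nn.Linear(512, 3)            # 3D gaze vector

    def forward(self, x):  # x: (B, 9, 128, 128)
        f = self.backbone(x)
        verts = self.mesh_head(f).view(-1, 2, N_V, 3)
        gaze = F.normalize(self.gaze_head(f), dim=-1)
        return verts, gaze
```
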
As the final gaze output, we consider the mean direction calculated from the two modalities.", + "bbox": [ + 212, + 146, + 782, + 205 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To train the above network for mesh regression, similarly to [16], we enforce a vertex loss and an edge length loss between the model outputs and the respective ground truth or pseudo-ground truth, which can be expressed as:", + "bbox": [ + 212, + 205, + 784, + 252 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{vert} = \\frac{1}{N_{v}} \\sum_{j = \\{l, r\\}} \\sum_{i = 1}^{N_{v}} \\| \\mathbf{V}_{j, i} - \\mathbf{V}_{j, i}^{*} \\|_{1}, \\tag{1}\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 255, + 785, + 297 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\mathbf{V}_j\\in \\mathbb{R}^{N_v\\times 3}$ and $\\mathbf{V}_j^*\\in \\mathbb{R}^{N_v\\times 3}$ for $j = \\{l,r\\}$ are the output and the (pseudo-)ground truth coordinates, while the edge length loss (based on the fixed mesh triangulation of our template meshes) can be written as:", + "bbox": [ + 214, + 303, + 782, + 349 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{edge} = \\frac{1}{3 N_{t}} \\sum_{j = \\{l, r\\}} \\sum_{i = 1}^{3 N_{t}} \\| \\mathbf{E}_{j, i} - \\mathbf{E}_{j, i}^{*} \\|_{2}, \\tag{2}\n$$\n", + "text_format": "latex", + "bbox": [ + 367, + 354, + 784, + 397 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\mathbf{E}_j\\in \\mathbb{R}^{3N_t}$ and $\\mathbf{E}_j^*\\in \\mathbb{R}^{3N_t}$ for $j = \\{l,r\\}$ are the edge lengths of the predicted and the (pseudo-)ground truth eyes. As edge length we define the Euclidean distance between two vertices of the same triangle. In addition to the mesh regression losses, we enforce a gaze loss on the gaze output of our model, expressed as:", + "bbox": [ + 212, + 402, + 782, + 478 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{gaze} = (180 / \\pi) \\arccos\\left(\\mathbf{g}^{T} \\mathbf{g}^{*}\\right), \\tag{3}\n$$\n", + "text_format": "latex", + "bbox": [ + 392, + 484, + 784, + 501 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\mathbf{g}$ and $\\mathbf{g}^*$ are the normalized model output and the gaze (pseudo-)ground truth respectively. We combine the losses of Eqs. (1) to (3) in a single loss function to train our models with supervision from (pseudo-)ground truth 3D eye meshes and gaze vectors. The combined loss is written as:", + "bbox": [ + 212, + 508, + 782, + 568 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{(P)GT} = \\lambda_{v} \\mathcal{L}_{vert} + \\lambda_{e} \\mathcal{L}_{edge} + \\lambda_{g} \\mathcal{L}_{gaze}, \\tag{4}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 575, + 784, + 590 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\lambda_v, \\lambda_e$ , and $\\lambda_g$ are parameters which regularize the contribution of the loss terms in the overall loss. 
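A possible implementation of Eqs. (1) to (3) is sketched below in PyTorch; the tensor shapes and reduction conventions are our assumptions and are not prescribed by the text.

```python
# Hedged sketch of the supervised losses of Eqs. (1)-(4).
# V, V_gt: (B, 2, N_v, 3) predicted / (pseudo-)ground truth vertices,
# tris: (N_t, 3) fixed template triangulation, g, g_gt: (B, 3) unit vectors.
import torch

def vertex_loss(V, V_gt):  # Eq. (1)
    return (V - V_gt).abs().sum(-1).sum(dim=(-1, -2)).mean() / V.shape[-2]

def edge_lengths(V, tris):
    e = torch.cat([tris[:, [0, 1]], tris[:, [1, 2]], tris[:, [2, 0]]])
    return (V[..., e[:, 0], :] - V[..., e[:, 1], :]).norm(dim=-1)

def edge_loss(V, V_gt, tris):  # Eq. (2)
    d = (edge_lengths(V, tris) - edge_lengths(V_gt, tris)).abs()
    return d.sum(dim=(-1, -2)).mean() / (3 * tris.shape[0])

def gaze_loss(g, g_gt):  # Eq. (3), angular error in degrees
    cos = (g * g_gt).sum(-1).clamp(-1 + 1e-7, 1 - 1e-7)
    return torch.rad2deg(torch.acos(cos)).mean()

def supervised_loss(V, V_gt, tris, g, g_gt, lam_v, lam_e, lam_g):  # Eq. (4)
    return (lam_v * vertex_loss(V, V_gt)
            + lam_e * edge_loss(V, V_gt, tris)
            + lam_g * gaze_loss(g, g_gt))
```
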
From our experiments we have selected their values to be $\\lambda_v = 0.1$ , $\\lambda_e = 0.01$ and $\\lambda_g = 1$ .", + "bbox": [ + 212, + 595, + 782, + 640 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.4 Multi-View Consistency Supervision", + "text_level": 1, + "bbox": [ + 214, + 662, + 563, + 678 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Extending our training dataset with in-the-wild images and training using pseudo-ground truth usually improves the ability of our models to generalize to unseen domains, as can be seen from our experiments in Sec. 4.3. However, automatically generated 3D eyes and gaze include inconsistencies which are hard to identify and filter out. To balance the feedback of direct supervision from pseudo-ground truth, we design a multi-view supervision framework based on pairs of real and synthetic images with different head poses, generated by HeadGAN as described in Sec. 3.1.", + "bbox": [ + 212, + 688, + 785, + 808 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Recovering dense 3D face coordinates and pose from images has recently become quite reliable [2,16,23]. Having a pair of images $\\mathbf{I}_1$ and $\\mathbf{I}_2$ of the same", + "bbox": [ + 212, + 809, + 785, + 839 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "3DGazeNet", + "bbox": [ + 651, + 114, + 730, + 126 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 116, + 785, + 126 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b9fc82b93804cf19c076b1926b7e8f68517b8e3dcd15a8d209a06a59321fc04.jpg", + "image_caption": [ + "Fig. 4: Overview of the proposed method 3DGazeNet. a) During training we employ single images with ground-truth supervision or pairs of synthetic views of the same subject with pseudo-annotations and different head poses. Different sets of losses are employed depending on the type of supervision. b) Detailed demonstration of $\\mathcal{L}_{MV}$ . 3D transformation $\\mathbf{P}$ , which maps view 1 to view 2, is employed to transform points $\\mathbf{V}_{l,1}$ and $\\mathbf{V}_{r,1}$ , before calculating an L1 distance loss against $\\mathbf{V}_{l,2}$ and $\\mathbf{V}_{r,2}$ . c) The base network (3DEyeNet) of our model consists of a ResNet-18 backbone and two fully connected layers leading to the 3D eye mesh and gaze vector outputs." + ], + "image_footnote": [], + "bbox": [ + 217, + 143, + 785, + 309 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "subject and their reconstructed 3D faces, we can compute a transformation matrix $\\mathbf{P} \\in \\mathbb{R}^{3 \\times 4}$ which aligns the two faces in image space. Assuming that the gaze direction in both images remains fixed relative to the face, as is the case with images created by HeadGAN, we are able to supervise 3D regression of eyes by restricting our model's predictions to be consistent over an image pair, as output vertices should coincide when transformation $\\mathbf{P}$ is applied to one of the pair's outputs. A similar approach has been employed successfully for weakly-supervised body pose estimation [31,44,65]. 
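As an illustration, $\\mathbf{P}$ can be obtained with a standard least-squares similarity (Procrustes) fit between corresponding 3D face vertices of the two views; the paper does not specify its exact solver, so the following sketch is an assumption.

```python
# Hedged sketch: similarity transform between the reconstructed faces
# X (view 1) and Y (view 2), both (N, 3), such that Y ~ s * R @ X + t.
# Homogeneous row vectors [x, 1] are then mapped to view 2 by x_h @ P.T.
import torch

def fit_similarity(X, Y):
    mx, my = X.mean(0), Y.mean(0)
    Xc, Yc = X - mx, Y - my
    U, S, Vt = torch.linalg.svd(Xc.T @ Yc)            # 3x3 cross-covariance
    d = torch.ones(3)
    d[2] = torch.sign(torch.linalg.det(Vt.T @ U.T))   # guard against reflections
    R = Vt.T @ torch.diag(d) @ U.T                    # rotation mapping X to Y
    s = (S * d).sum() / (Xc ** 2).sum()               # isotropic scale
    t = my - s * (R @ mx)
    P = torch.cat([s * R, t[:, None]], dim=1)         # the (3, 4) matrix P
    return s, R, t, P
```
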
Particularly, we form the vertex loss of a pair as:", + "bbox": [ + 212, + 489, + 787, + 626 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{MV, vertex} = \\frac{1}{N_{v}} \\sum_{j = \\{l, r\\}} \\sum_{i = 1}^{N_{v}} \\| \\mathbf{V}_{1, j, i} \\mathbf{P}^{T} - \\mathbf{V}_{2, j, i} \\|_{1}, \\tag{5}\n$$\n", + "text_format": "latex", + "bbox": [ + 323, + 628, + 785, + 672 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\mathbf{V}_{1,j},\\mathbf{V}_{2,j}\\in \\mathbb{R}^{N_v\\times 4}$ for $j = \\{l,r\\}$ are the output matrices for the left and right eyes, which correspond to input images $\\mathbf{I}_1$ and $\\mathbf{I}_2$ . $\\mathbf{V}_{1,j,i},\\mathbf{V}_{2,j,i}\\in \\mathbb{R}^4$ are the specific homogeneous 3D coordinates indexed by $i$ in the above matrices. To enforce consistency constraints on the gaze head of our model, we decompose matrix $\\mathbf{P}$ into scale $s$ , rotation $\\mathbf{R}$ and translation $\\mathbf{t}$ components and employ $\\mathbf{R}$ in a gaze consistency loss within a pair:", + "bbox": [ + 212, + 676, + 787, + 768 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{MV, gaze} = (180 / \\pi) \\arccos\\left(\\left(\\mathbf{g}_{1}^{T} \\mathbf{R}^{T}\\right) \\mathbf{g}_{2}\\right), \\tag{6}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 771, + 785, + 789 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\mathbf{g}_1$ and $\\mathbf{g}_2$ are the normalized model outputs for input images $\\mathbf{I}_1$ and $\\mathbf{I}_2$ respectively. We combine the losses of Eqs. (5) and (6) in a single loss function to enforce multi-view consistency in mesh and gaze vector regression between", + "bbox": [ + 212, + 794, + 787, + 840 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "E. Ververas et al.", + "bbox": [ + 271, + 114, + 387, + 126 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "model outputs coming from pairs of input images. This loss is written as:", + "bbox": [ + 215, + 146, + 743, + 161 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{MV} = \\lambda_{MV, v} \\mathcal{L}_{MV, vertex} + \\lambda_{MV, g} \\mathcal{L}_{MV, gaze}, \\tag{7}\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 167, + 784, + 184 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "where $\\lambda_{MV,v}$ and $\\lambda_{MV,g}$ are parameters which regularize the contribution of the loss terms in the overall loss. In our experiments, we have selected their values to be $\\lambda_{MV,v} = 0.1$ and $\\lambda_{MV,g} = 1$ . To train models with all supervision signals, i.e. ground truth $(\\mathcal{L}_{GT})$ , pseudo-ground truth $(\\mathcal{L}_{PGT})$ and multi-view supervision $(\\mathcal{L}_{MV})$ , we utilize the following overall loss function:", + "bbox": [ + 214, + 188, + 785, + 263 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L} = \\lambda_{GT} \\mathcal{L}_{GT} + \\lambda_{PGT} \\mathcal{L}_{PGT} + \\lambda_{MV} \\mathcal{L}_{MV}, \\tag{8}\n$$\n", + "text_format": "latex", + "bbox": [ + 356, + 270, + 784, + 287 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "with parameters $\\lambda_{GT} = \\lambda_{PGT} = \\lambda_{MV} = 1$ . 
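The multi-view terms of Eqs. (5) to (7) could then be implemented as sketched below, reusing the conventions above; shapes and reductions are again our assumptions.

```python
# Hedged sketch of Eqs. (5)-(8). V1, V2: (2, N_v, 3) per-view eye vertices,
# g1, g2: (3,) unit gaze vectors, P: (3, 4) view-1-to-view-2 transform,
# R: (3, 3) its rotation component.
import torch

def mv_vertex_loss(V1, V2, P):  # Eq. (5)
    ones = torch.ones(*V1.shape[:-1], 1)
    V1h = torch.cat([V1, ones], dim=-1)        # homogeneous coordinates
    return (V1h @ P.T - V2).abs().sum(-1).sum() / V1.shape[-2]

def mv_gaze_loss(g1, g2, R):  # Eq. (6)
    cos = ((R @ g1) * g2).sum().clamp(-1 + 1e-7, 1 - 1e-7)
    return torch.rad2deg(torch.acos(cos))

def mv_loss(V1, V2, g1, g2, P, R, lam_v=0.1, lam_g=1.0):  # Eq. (7)
    return lam_v * mv_vertex_loss(V1, V2, P) + lam_g * mv_gaze_loss(g1, g2, R)

# Overall objective of Eq. (8), with all weights equal to 1:
# loss = loss_GT + loss_PGT + mv_loss(...)
```
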
Implementation details are included in the supplemental material. An overview of 3DGazeNet is presented in Fig. 4.", + "bbox": [ + 214, + 290, + 785, + 321 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 215, + 343, + 375, + 359 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.1 Datasets", + "text_level": 1, + "bbox": [ + 215, + 375, + 334, + 387 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Gaze Datasets Captured in a lab environment, ETH-XGaze (EXG) [79] consists of 756K frames of 80 subjects and includes large head pose and gaze variation. Collected in uncontrolled indoor environments with mobile devices, MPIIFaceGaze (MPII) [81] includes smaller head pose and gaze variation and consists of 45K images of 15 subjects, while GazeCapture (GC) [42] contains almost 2M frontal face images of 1474 subjects. In contrast to the above datasets, Gaze360 (G360) [35] is the only gaze dataset captured both indoors and outdoors and consists of 127K training sequences from 365 subjects. The large variation in head pose, gaze, and environmental conditions of Gaze360 makes it the most challenging yet appropriate benchmark for in-the-wild gaze estimation available in the literature. For our experiments, we normalized the above datasets based on [80], except for Gaze360, which we process to get normalized face crops. Additionally, we employ the predefined training-test splits, while for Gaze360 we only use the frontal-facing images with head pose yaw angle up to $90^{\\circ}$ . The head pose and gaze distributions of the above datasets are presented in Fig. 5.", + "bbox": [ + 214, + 398, + 787, + 625 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In-The-Wild Face Datasets In-the-wild face datasets consist of significantly more unique subjects and capturing environments. For our experiments, we employed four publicly-available datasets: FFHQ [33] (70K images), AFLW [38] (25K images), AVA [25,48,49] and CMU-Panoptic [32]. FFHQ and AFLW are in-the-wild face datasets commonly used for face analysis, AVA is a large-scale in-the-wild human activity dataset annotated under the Looking-At-Each-Other condition, and CMU-Panoptic is collected in lab conditions and captures interactions of multiple people in the same scene. FFHQ and AFLW include one face per image and thus are only processed to get normalized face crops. AVA and CMU-Panoptic include frames with multiple faces, from which we randomly select 80K faces from each dataset with a maximum head pose of $90^o$ . Similarly to [40], for CMU we employed only frames captured with cameras at eye height. We name this collection of 255K images the \"In-The-Wild Gaze\" dataset (ITWG). Lastly, to enforce multi-view supervision as described in Sec. 3.4, we synthesized", + "bbox": [ + 214, + 628, + 787, + 840 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "3DGazeNet", + "bbox": [ + 651, + 114, + 730, + 126 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 785, + 126 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/c6c73a25e4b5a9ae40439184f52c5dd9b3d75210945144df0a4045cf7e0139df.jpg", + "image_caption": [ + "Fig. 5: Distributions of the head pose (top row) and gaze (bottom row) of the gaze datasets (red) and the face datasets (blue). Wide-distribution datasets CMU, AVA, FFHQ, and AFLW are exploited to close the gap between diverse image domains." 
+ ], + "image_footnote": [], + "bbox": [ + 217, + 143, + 500, + 255 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/897e2f78b2617c6a38b20bff068c56f4ef5fce8c4e766aeff5be652f83433fcc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 142, + 787, + 256 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "novel views from images of ITWG using HeadGAN, sampling the pitch and yaw angles from Gaussians $\\mathcal{N}(0,20)$ , relative to the original head pose. We name this collection of images the \"Multi-View In-The-Wild Gaze\" dataset (ITWG-MV) and employ it to improve the generalization of gaze estimation. The head pose and gaze distributions of the above datasets are presented in Fig. 5.", + "bbox": [ + 212, + 338, + 782, + 414 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.2 Gaze Generalization", + "text_level": 1, + "bbox": [ + 215, + 438, + 429, + 452 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this section, we evaluate 3DGazeNet in within-dataset and cross-dataset experiments. We believe that [40] is the most closely related method to ours, as it is the only method using 3D geometric cues of the scene to generalize gaze from arbitrary face data.", + "bbox": [ + 212, + 464, + 784, + 525 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Cross-dataset Evaluation We design two cross-dataset experiments to test the generalization of our method on G360 and report the results in Tab. 1(a) and (b). Particularly, the experiments are: a) we train our method on the CMU, AVA, and ITWG-MV datasets utilizing only our pseudo-labels and multi-view supervision and b) we additionally employ ground truth supervision from GC and EXG. From the results of the above experiments, it becomes evident that our geometry-aware pseudo-labels employed within our multi-view supervision training effectively generalize gaze estimation to unseen domains, even without any available ground truth. In particular, in experiment a) our method outperforms [40] by $23\\%$ with AVA, $22\\%$ with CMU, $12.5\\%$ with $\\mathrm{AVA + CMU}$ and $20\\%$ with our large-scale ITWG-MV. Similarly, in experiment b) 3DGazeNet outperforms [40] by $10\\%$ and $9\\%$ with GC and EXG respectively.", + "bbox": [ + 212, + 529, + 787, + 710 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Within-dataset Evaluation Here we compare our method against state-of-the-art within-dataset gaze estimation on G360. Similarly to [40], we employ AVA for additional supervision, while we also examine the effect of the larger-scale ITWG-MV. The results, presented in Tab. 1 (c), show that multi-view supervision from AVA does not improve performance (which is in line with the compared method), but the large-scale ITWG-MV does.", + "bbox": [ + 212, + 714, + 787, + 805 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Comparison with state-of-the-art We further compare 3DGazeNet against recent methods for gaze generalization. The works in [5,70] are developed with a", + "bbox": [ + 212, + 809, + 787, + 840 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "E. 
Ververas et al.", + "bbox": [ + 271, + 114, + 387, + 126 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/5410cbb40f8a8fc0ae6edc4b06fa38a4999a12a44239087209db83e03e098cf0.jpg", + "table_caption": [ + "Table 1: Weakly-supervised method evaluation in cross- and within-dataset experiments. In all cases, we calculate gaze error in degrees (lower is better), on the test set of Gaze360. CMU and AVA correspond to subsets of ITWG-MV (i.e. augmented for multi-view supervision), providing a clearer comparison with [40]. Our method trained with ITWG-MV outperforms the baselines in all cases. 3DGN refers to 3DGazeNet" + ], + "table_footnote": [], + "table_body": "
(a) Cross-dataset\nSynthetic Views(b) Cross-dataset\nGround Truth + Synthetic Views(c) Within-dataset\nGround Truth + Synthetic Views
Dataset[40]3DGNDataset[79][40]3DGNDataset[79][40]3DGN
AVA29.022.4GC30.229.227.5EXG27.320.522.1
CMU26.020.3GC+AVA-19.518.9EXG+AVA-16.917.1
CMU+AVA22.519.7GC+AVA+CMU--18.4EXG+AVA+CMU--16.7
ITWG-MV-18.1GC+ITWG-MV--17.6EXG+ITWG-MV--15.4
", + "bbox": [ + 220, + 226, + 784, + 299 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/d49bfc6c1a1b2e0785d69bf565e147c80fc2a40614fbcc8e51cb74098ea8b83a.jpg", + "table_caption": [ + "Table 2: Comparison with state-of-the-art in domain generalization for gaze estimation. In all experiments our model outperforms the compared methods. Gaze error is in degrees (lower is better)." + ], + "table_footnote": [], + "table_body": "
Stage 1 (Gaze Generalization Models)+ Stage 2 (Adaptation/Fine Tuning)
EXGEXG+ITWG-MVG360G360+ITWG-MVEXG+ITWG-MVG360+ITWG-MV
MethodMPII GCMPII GCMPII GCMPII GCMPII GCMPII GCMPII GCMPII GCMPII GCMPII GC
RAT/RUDA [5]7.18.47.08.29.39.09.18.56.88.17.98.3
CDG/CRGA [70]6.79.26.99.57.08.38.18.97.49.07.68.7
PureGaze [11]7.98.77.79.37.68.37.48.66.68.07.28.3
3DGazeNet7.710.76.07.89.112.16.38.0----
", + "bbox": [ + 218, + 391, + 782, + 484 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "focus on domain adaptation for gaze estimation and encompass two-stage training schemes, both training feature invariant models at the first stage. That is, in the first training stage RUDA [5] trains gaze estimation model invariant to image rotations, while CRGA [70] uses a contrastive loss to separate image features according to gaze. The second stage of the above methods is focused on adapting the initially trained models to specific target domains. As our method aims to train general gaze estimation models without knowledge of specific target domains, we implement the first-stage models of the above methods, namely RAT [5], CDG [70] and compare them with 3DGazeNet in cross-dataset experiments. Additionally, we compare against PureGaze [11] which is a gaze generalization method that purifies face features to achieve higher gaze estimation performance. To follow the evaluation protocol in the above works, we train all methods on EXG and G360 (+ITWG-MV) and test on MPII and GC. For completeness, we include results of the full models RUDA and CRGA after using ITWG-MV according to their domain adaptation schemes. For PureGaze, ITWG-MV was used for fine-tuning. Tab. 2 shows that the proposed method outperforms the baselines for gaze generalization when ITWG-MV is employed. The compared methods do not include regularization for the noisy labels of ITWG-MV, resulting in similar or worse performance, while our method exploits them through $\\mathcal{L}_{MV}$ , benefiting from the extended variation.", + "bbox": [ + 212, + 537, + 787, + 840 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "3DGazeNet", + "bbox": [ + 651, + 114, + 730, + 126 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 114, + 784, + 126 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/070547e17f6a4bbe2a377b9eedc95f39d6d7ffe6ebc6171b32c0519c58ced832.jpg", + "table_caption": [ + "Table 3: Comparison between training targets Vector(V), Mesh(M) and Mesh+Vector(M+V) in within-dataset experiments (using only $\\mathcal{L}_{GT}$ ). Target M+V leads to lower errors than state-of-the-art. Error is in degrees (lower is better)." + ], + "table_footnote": [], + "table_body": "
DatasetCompared Methods3DGazeNet
[51][13][1][15,82][53][15,35][40][79]VMM+V
MPII4.044.003.924.95.34.06-4.84.14.24.0
G36010.710.610.414.9-11.110.1-9.89.89.6
GC----3.5--3.33.23.33.1
EXG---7.3---4.54.24.44.2
", + "bbox": [ + 305, + 198, + 697, + 282 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.3 Ablation studies", + "text_level": 1, + "bbox": [ + 215, + 311, + 397, + 325 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Gaze Estimation via 3D Eye Mesh Regression Here we experimentally evaluate our suggestion that gaze estimation benefits from replacing the training target from gaze vectors or angles to dense 3D eye coordinates. To this end, we employ the fully supervised version of our model, utilizing data with exact ground truth and $\\mathcal{L}_{GT}$ for training. We conduct within-dataset experiments on MPII, GC, G360 and EXG for which specific training-testing subsets are provided. We compare against state-of-the-art methods [1,13,15,35,40,51,53,79,82] and report the results in Tab. 3. In almost all cases, our model outperforms the baselines, while combining the two modalities, i.e. dense 3D meshes and gaze vectors $(\\mathrm{M} + \\mathrm{V})$ , improves performance compared to training with vector targets (V) or 3D mesh targets (M) alone. This is possibly due to the distinct nature of the two modalities, i.e. the vectors provide exact label supervision, while meshes provide a robust representation which limits sparse prediction errors.", + "bbox": [ + 212, + 337, + 785, + 532 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The main benefit of dense coordinate regression over pose parameters or sparse points prediction is that individual parameter errors have limited effect on the total outcome making them more robust to prediction inaccuracies [16]. This effect is particularly useful for our multi-view training scheme in which introducing consistency of dense correspondences between images rather than only vector consistency, offers stronger regularization. We validate this argument in gaze generalization experiments in G360, GC, EXG, and MPII, presented in Tab. 4. For this experiment, we consider three versions of 3DGazeNet: one which predicts only gaze vectors and no coordinates (Vector), one which predicts 8 3D iris landmarks instead of dense eye meshes (Iris+Vector), to highlight the effect of dense coordinate prediction, and the full 3DGazeNet (Mesh+Vector). The results show that employing combined training targets always benefits performance, while replacing dense 3D eye meshes with iris landmarks highly limits this effect.", + "bbox": [ + 212, + 534, + 785, + 731 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The Effect of Gaze Pseudo-Labels and Multi-View Supervision Here we examine the contribution of our automatic geometry-aware pseudo-labels and the multi-view supervision loss of our approach. To this end, we consider three training scenarios which are the following: a) training with ITWG and its pseudo-labels as ground truth $(\\mathcal{L}_{PGT})$ , b) training with ITWG-MV utilizing only the multi-view consistency constraints and no pseudo-labels $(\\mathcal{L}_{MV})$ and c) training with ITWG-MV while employing both pseudo-labels and the multi-view", + "bbox": [ + 212, + 733, + 785, + 840 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "E. 
Ververas et al.", + "bbox": [ + 271, + 114, + 387, + 126 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/6173df637b2093e13e088c15f3ddc4f41a7b5e5492dd1f1863400b988435524e.jpg", + "table_caption": [ + "Table 4: Comparison between training targets Vector, Iris+Vector and Mesh+Vector for domain generalization when employing our full model (Eq. (8)). For the target Vector, we remove all mesh terms from the employed losses. In all experiments, the target Mesh+Vector results in a lower error. Gaze error is in degrees (lower is better)." + ], + "table_footnote": [], + "table_body": "
Training DatasetVectorIris+VectorMesh+Vector
G360GCEXGMPIIG360GCEXGMPIIG360GCEXGMPII
ITWG-MV19.110.116.78.518.89.916.78.218.19.016.77.6
G360+ITWG-MV10.110.215.17.09.79.415.06.89.38.014.66.3
GC+ITWG-MV18.23.116.06.118.03.015.96.217.63.015.56.1
EXG+ITWG-MV16.510.24.56.616.39.64.56.415.47.84.36.0
MPII+ITWG-MV17.88.215.24.817.97.615.04.617.66.814.94.2
", + "bbox": [ + 220, + 212, + 781, + 325 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/affe50515f33289a3b98a13bd04fcad04260e448f78a40b0fbe803f1a081403e.jpg", + "table_caption": [ + "Table 5: The effect of incorporating pseudo-ground truth and multi-view supervision during training. Both components contribute towards improving results in cross-dataset gaze estimation experiments. Gaze error is in degrees (lower is better)." + ], + "table_footnote": [], + "table_body": "
Dataset\\( {\\mathcal{L}}_{GT} \\)\\( {\\mathcal{L}}_{PGT} \\)\\( {\\mathcal{L}}_{MV} \\)G360GCEXGMPII
ITWG--23.114.824.313.6
ITWG-MV--47.433.241.132.8
ITWG-MV-18.19.016.77.6
GC--27.53.128.410.4
GC+ITWG-21.43.223.79.1
GC+ITWG-MV-24.73.526.210.1
GC+ITWG-MV17.63.015.56.1
", + "bbox": [ + 305, + 391, + 697, + 520 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "consistency loss $(\\mathcal{L}_{PGT} + \\mathcal{L}_{MV})$ . To further evaluate the effect of the pseudolabels and multi-view loss, we repeat the above experiments by adding ground truth supervision from GC $(+ \\mathcal{L}_{GT})$ . We test our models on the test set of G360, GC, EXG, and MPII, and report the results in Tab. 5. In all cases, combining our pseudo-labels and multi-view loss yields the lowest error in degrees. Lastly, utilizing only $\\mathcal{L}_{MV}$ on ITWG-MV leads to very high errors which is reasonable as no supervision for the eyeball topology exists, thus, the model outputs cannot follow the spherical shape of the eyeball template.", + "bbox": [ + 212, + 547, + 787, + 669 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The Effect of Head Pose Distribution of ITWG Head pose distribution difference between the train and test set is one of the main reasons that gaze-estimation models fail in cross-dataset situations. To close the gap between different training and testing scenarios, we have designed ITWG, a large-scale dataset with widespread variation in head pose and gaze angles. To study the effect of the head pose variation of ITWG in our experiments, we employ different subsets of ITWG with various levels of head pose variation and conduct cross-dataset experiments with them. In particular, we consider four subsets of ITWG, with maximum yaw angles of $5^o$ , $20^o$ , $40^o$ and $90^o$ (all) respectively.", + "bbox": [ + 212, + 672, + 787, + 809 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We train 3DGazeNet with ground truth supervision from MPII as well as pseudo-labels and multi-view supervision from the four versions of ITWG-MV.", + "bbox": [ + 212, + 809, + 785, + 839 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "3DGazeNet", + "bbox": [ + 651, + 114, + 730, + 126 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/8e5c305ff8fd9108a272c436f6fc78dfc297cff71cffa33dc0809c624ae7ac3e.jpg", + "image_caption": [ + "Fig. 6: Gaze error of G360 across head poses when training with MPII and subsets of ITWG-MV. Wider range of head poses in the ITWG-MV data, lead to significantly lower errors in large poses." + ], + "image_footnote": [], + "bbox": [ + 303, + 145, + 697, + 316 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The results of testing on G360 are presented in Fig. 6. The resulting curves clearly demonstrate the effect of the available head pose variation in the training data. Specifically, utilizing the entirety of ITWG-MV leads to the lowest errors which are relatively consistent across the head pose range. As expected, decreasing the available head pose variation, increasingly affects model performance with the worst case being training with MPII alone. 
Based on the above finding, we argue that the gap between small and wide distribution gaze datasets (regarding head pose) can be effectively closed by employing unlabeled face datasets with similarly wide distributions, which is crucial for training plug-n-play gaze estimation models that can be directly employed in applications.", + "bbox": [ + 212, + 402, + 787, + 554 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Limitations and Conclusion", + "text_level": 1, + "bbox": [ + 215, + 579, + 517, + 595 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In Sec. 4, we showed that pseudo-ground truth can be effectively utilized in gaze estimation. Nevertheless, a limitation of our method is that pseudo-annotation accuracy is related to the accuracy of 3D face and 2D iris alignment. In addition, our current method cannot operate on images without a visible face (when the face is looking away from the camera).", + "bbox": [ + 212, + 612, + 785, + 686 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Overall, in this work, we present a novel weakly-supervised method for gaze generalization, based on dense 3D eye mesh regression. We demonstrate that by utilizing both 3D eye coordinates and gaze labels during training, instead of just gaze labels, we can achieve lower prediction errors. Moreover, we explore the possibility of exploiting the abundantly available in-the-wild face data for improving gaze estimation generalization. To that end, we propose a novel methodology to generate robust, 3D geometry-aware pseudo ground truth labels, as well as a multi-view weak-supervision framework for effective training. By enforcing these constraints, we are able to successfully utilize in-the-wild face data and achieve improvements in cross-dataset and within-dataset experiments.", + "bbox": [ + 212, + 688, + 787, + 840 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "E. Ververas et al.", + "bbox": [ + 271, + 114, + 388, + 126 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgments. S. Zafeiriou was supported by EPSRC Project DEFORM (EP/S010203/1) and GNOMON (EP/X011364).", + "bbox": [ + 215, + 146, + 784, + 176 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 215, + 202, + 321, + 217 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Abdelrahman, A.A., Hempel, T., Khalifa, A., Al-Hamadi, A., Dinges, L.: L2cs-net: Fine-grained gaze estimation in unconstrained environments. In: ICFSP. pp. 98-102. IEEE (2023)", + "2. Albiero, V., Chen, X., Yin, X., Pang, G., Hassner, T.: img2pose: Face alignment and detection via 6dof, face pose estimation. In: CVPR (2021)", + "3. Alp Guler, R., Trigeorgis, G., Antonakos, E., Snape, P., Zafeiriou, S., Kokkinos, I.: Densereg: Fully convolutional dense shape regression in-the-wild. In: CVPR (2017)", + "4. Andrist, S., Tan, X.Z., Gleicher, M., Mutlu, B.: Conversational gaze aversion for humanlike robots. In: HRI (2014)", + "5. Bao, Y., Liu, Y., Wang, H., Lu, F.: Generalizing gaze estimation with rotation consistency. In: CVPR (2022)", + "6. Burova, A., Mäkelä, J., Hakulinen, J., Keskinen, T., Heinonen, H., Siltanen, S., Turunen, M.: Utilizing VR and gaze tracking to develop AR solutions for industrial maintenance. In: CHI (2020)", + "7. 
Cai, X., Zeng, J., Shan, S., Chen, X.: Source-free adaptive gaze estimation by uncertainty reduction. In: CVPR. pp. 22035-22045 (2023)", + "8. Cai, Y., Ge, L., Cai, J., Yuan, J.: Weakly-supervised 3d hand pose estimation from monocular RGB images. In: ECCV (2018)", + "9. Castner, N., Kuebler, T.C., Scheiter, K., Richter, J., Eder, T., Hützig, F., Keutel, C., Kasneci, E.: Deep semantic gaze embedding and scanpath comparison for expertise classification during OPT viewing. In: ACM ETRA (2020)", + "10. Chen, M., Jin, Y., Goodall, T., Yu, X., Bovik, A.C.: Study of 3d virtual reality picture quality. IEEE Journal of Selected Topics in Signal Processing (2020)", + "11. Cheng, Y., Bao, Y., Lu, F.: Puregaze: Purifying gaze feature for generalizable gaze estimation. In: AAAI (2022)", + "12. Cheng, Y., Huang, S., Wang, F., Qian, C., Lu, F.: A coarse-to-fine adaptive network for appearance-based gaze estimation. In: AAAI (2020)", + "13. Cheng, Y., Lu, F.: Gaze estimation using transformer. In: ICPR (2022)", + "14. Cheng, Y., Lu, F., Zhang, X.: Appearance-based gaze estimation via evaluation-guided asymmetric regression. In: ECCV (2018)", + "15. Cheng, Y., Wang, H., Bao, Y., Lu, F.: Appearance-based gaze estimation with deep learning: A review and benchmark. arXiv preprint arXiv:2104.12668 (2021)", + "16. Deng, J., Guo, J., Ververas, E., Kotsia, I., Zafeiriou, S.: Retinaface: Single-shot multi-level face localisation in the wild. In: CVPR (2020)", + "17. Deng, Y., Yang, J., Xu, S., Chen, D., Jia, Y., Tong, X.: Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In: CVPR Workshops (2019)", + "18. Doukas, M.C., Koujan, M.R., Sharmanska, V., Roussos, A., Zafeiriou, S.: Head2head++: Deep facial attributes re-targeting. T-BIOM (2021)", + "19. Doukas, M.C., Zafeiriou, S., Sharmanska, V.: Headgan: One-shot neural head synthesis and editing. In: ICCV (2021)", + "20. Fischer, T., Chang, H.J., Demiris, Y.: Rt-gene: Real-time eye gaze estimation in natural environments. In: ECCV (2018)" + ], + "bbox": [ + 225, + 234, + 784, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "3DGazeNet", + "bbox": [ + 651, + 114, + 730, + 126 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 785, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "21. Fuhl, W., Kasneci, G., Kasneci, E.: Teyed: Over 20 million real-world eye images with pupil, eyelid, and iris 2d and 3d segmentations, 2d and 3d landmarks, 3d eyeball, gaze vector, and eye movement types. ISMAR (2021)", + "22. Funes Mora, K.A., Monay, F., Odobez, J.M.: Eyediap: A database for the development and evaluation of gaze estimation algorithms from RGB and RGB-D cameras. In: ACM ETRA (2014)", + "23. Gecer, B., Ploumpis, S., Kotsia, I., Zafeiriou, S.: Ganfit: Generative adversarial network fitting for high fidelity 3d face reconstruction. In: CVPR (2019)", + "24. Ghosh, S., Hayat, M., Dhall, A., Knibbe, J.: Mtgls: Multi-task gaze estimation with limited supervision. In: WACV (2022)", + "25. Gu, C., Sun, C., Ross, D.A., Vondrick, C., Pantofaru, C., Li, Y., Vijayanarasimhan, S., Toderici, G., Ricco, S., Sukthankar, R., Schmid, C., Malik, J.: Ava: A video dataset of spatio-temporally localized atomic visual actions. In: CVPR (2018)", + "26. Guler, R.A., Kokkinos, I.: Holopose: Holistic 3d human reconstruction in-the-wild. In: CVPR (2019)", + "27. 
Guo, Z., Yuan, Z., Zhang, C., Chi, W., Ling, Y., Zhang, S.: Domain adaptation gaze estimation by embedding with prediction consistency. In: ACCV (2020)", + "28. Hao, Z., Mallya, A., Belongie, S., Liu, M.Y.: GANcraft: Unsupervised 3D Neural Rendering of Minecraft Worlds. In: ICCV (2021)", + "29. He, J., Pham, K., Valliappan, N., Xu, P., Roberts, C., Lagun, D., Navalpakkam, V.: On-device few-shot personalization for real-time gaze estimation. In: ICCV Workshops (2019)", + "30. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)", + "31. Iqbal, U., Molchanov, P., Kautz, J.: Weakly-supervised 3d human pose learning via multi-view images in the wild. In: CVPR (2020)", + "32. Joo, H., Liu, H., Tan, L., Gui, L., Nabbe, B., Matthews, I., Kanade, T., Nobuhara, S., Sheikh, Y.: Panoptic studio: A massively multiview system for social motion capture. In: ICCV (2015)", + "33. Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: CVPR (2019)", + "34. Kasahara, I., Stent, S., Park, H.S.: Look both ways: Self-supervising driver gaze estimation and road scene saliency. In: ECCV (2022)", + "35. Kellnhofer, P., Recasens, A., Stent, S., Matusik, W., Torralba, A.: Gaze360: Physically unconstrained gaze estimation in the wild. In: ICCV (2019)", + "36. Kim, H., Garrido, P., Tewari, A., Xu, W., Thies, J., Nießner, M., Pérez, P., Richardt, C., Zollhöfer, M., Theobalt, C.: Deep video portraits. TOG (2018)", + "37. Kleinke, C.L.: Gaze and eye contact: a research review. Psychological Bulletin (1986)", + "38. Koestinger, M., Wohlhart, P., Roth, P.M., Bischof, H.: Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization. In: ICCVW (2011)", + "39. Konrad, R., Angelopoulos, A., Wetzstein, G.: Gaze-contingent ocular parallax rendering for virtual reality. TOG (2019)", + "40. Kothari, R., De Mello, S., Iqbal, U., Byeon, W., Park, S., Kautz, J.: Weakly-supervised physically unconstrained gaze estimation. In: CVPR (2021)", + "41. Koujan, M.R., Doukas, M.C., Roussos, A., Zafeiriou, S.: Head2head: Video-based neural head synthesis. In: FG (2020)", + "42. Krafka, K., Khosla, A., Kellnhofer, P., Kannan, H., Bhandarkar, S., Matusik, W., Torralba, A.: Eye tracking for everyone. In: CVPR (2016)" + ], + "bbox": [ + 215, + 146, + 784, + 839 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "E. Ververas et al.", + "bbox": [ + 271, + 114, + 387, + 126 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "43. Kulon, D., Guler, R.A., Kokkinos, I., Bronstein, M.M., Zafeiriou, S.: Weakly-supervised mesh-convolutional hand reconstruction in the wild. In: CVPR (2020)", + "44. Li, Y., Li, K., Jiang, S., Zhang, Z., Huang, C., Xu, R.Y.D.: Geometry-driven self-supervised method for 3d human pose estimation. In: AAAI (2020)", + "45. Liu, G., Yu, Y., Mora, K., Odobez, J.: A differential approach for gaze estimation with calibration. In: BMVC (2018)", + "46. Liu, G., Yu, Y., Mora, K.A.F., Odobez, J.M.: A differential approach for gaze estimation with calibration. In: BMVC (2018)", + "47. Liu, Y., Liu, R., Wang, H., Lu, F.: Generalizing gaze estimation with outlier-guided collaborative adaptation. In: ICCV (2021)", + "48. 
Marín-Jiménez, M.J., Kalogeiton, V., Medina-Suárez, P., Zisserman, A.: LAEO-Net++: revisiting people Looking At Each Other in videos. TPAMI (2021)", + "49. Marin-Jimenez, M.J., Kalogeiton, V., Medina-Suarez, P., Zisserman, A.: Laeo-net: Revisiting people looking at each other in videos. In: CVPR (2019)", + "50. Mavely, A.G., Judith, J.E., Sahal, P.A., Kuruvilla, S.A.: Eye gaze tracking based driver monitoring system. In: ICCS (2017)", + "51. Oh, J., Chang, H.J., Choi, S.I.: Self-attention with convolution and deconvolution for efficient eye gaze estimation from a full face image. In: CVPRW (2022)", + "52. Park, S., Aksan, E., Zhang, X., Hilliges, O.: Towards end-to-end video-based eye-tracking. In: ECCV (2020)", + "53. Park, S., Mello, S.D., Molchanov, P., Iqbal, U., Hilliges, O., Kautz, J.: Few-shot adaptive gaze estimation. In: ICCV (2019)", + "54. Park, S., Spurr, A., Hilliges, O.: Deep pictorial gaze estimation. In: ECCV (2018)", + "55. Park, S., Zhang, X., Bulling, A., Hilliges, O.: Learning to find eye region landmarks for remote gaze estimation in unconstrained settings. In: ACM ETRA (2018)", + "56. Park, T., Liu, M.Y., Wang, T.C., Zhu, J.Y.: Semantic image synthesis with spatially-adaptive normalization. In: CVPR (2019)", + "57. Richard, A., Lea, C., Ma, S., Gall, J., de la Torre, F., Sheikh, Y.: Audio- and gaze-driven facial animation of codec avatars. In: WACV (2021)", + "58. Guler, R.A., Neverova, N., Kokkinos, I.: Densepose: Dense human pose estimation in the wild. In: CVPR (2018)", + "59. Smith, B., Yin, Q., Feiner, S., Nayar, S.: Gaze Locking: Passive Eye Contact Detection for Human-Object Interaction. In: ACM UIST (2013)", + "60. Sugano, Y., Matsushita, Y., Sato, Y.: Learning-by-synthesis for appearance-based 3d gaze estimation. In: CVPR (2014)", + "61. Sun, J., Wang, X., Shi, Y., Wang, L., Wang, J., Liu, Y.: Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. ACM TOG 41(6), 1-10 (2022)", + "62. Sun, J., Wang, X., Wang, L., Li, X., Zhang, Y., Zhang, H., Liu, Y.: Next3d: Generative neural texture rasterization for 3d-aware head avatars. In: CVPR (2023)", + "63. Sun, Y., Zeng, J., Shan, S., Chen, X.: Cross-encoder for unsupervised gaze representation learning. In: ICCV (2021)", + "64. Vidal, M., Turner, J., Bulling, A., Gellersen, H.: Wearable eye tracking for mental health monitoring. Computer Communications (2012)", + "65. Wandt, B., Rudolph, M., Zell, P., Rhodin, H., Rosenhahn, B.: Canonpose: Self-supervised monocular 3d human pose estimation in the wild. In: CVPR (2021)", + "66. Wang, K., Ji, Q.: Real time eye gaze tracking with 3d deformable eye-face model. In: ICCV (2017)", + "67. Wang, K., Zhao, R., Ji, Q.: A hierarchical generative model for eye image synthesis and eye gaze estimation. In: CVPR (2018)" + ], + "bbox": [ + 212, + 146, + 787, + 839 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "3DGazeNet", + "bbox": [ + 651, + 114, + 730, + 126 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 116, + 785, + 126 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "68. Wang, K., Zhao, R., Su, H., Ji, Q.: Generalizing eye tracking with bayesian adversarial learning. In: CVPR (2019)", + "69. Wang, T.C., Liu, M.Y., Tao, A., Liu, G., Kautz, J., Catanzaro, B.: Few-shot video-to-video synthesis. In: NeurIPS (2019)", + "70. 
Wang, Y., Jiang, Y., Li, J., Ni, B., Dai, W., Li, C., Xiong, H., Li, T.: Contrastive regression for domain adaptation on gaze estimation. In: CVPR (2022)", + "71. Wood, E., Baltrusaitis, T., Morency, L.P., Robinson, P., Bulling, A.: A 3d morphable eye region model for gaze estimation. In: ECCV (2016)", + "72. Yu, Y., Liu, G., Odobez, J.M.: Deep multitask gaze estimation with a constrained landmark-gaze model. In: ECCV Workshops (2018)", + "73. Yu, Y., Liu, G., Odobez, J.M.: Improving few-shot user-specific gaze adaptation via gaze redirection synthesis. In: CVPR (2019)", + "74. Yu, Y., Odobez, J.M.: Unsupervised representation learning for gaze estimation. In: CVPR (2020)", + "75. Zakharov, E., Shysheya, A., Burkov, E., Lempitsky, V.: Few-shot adversarial learning of realistic neural talking head models. In: ICCV (2019)", + "76. Zakharov, E., Ivakhnenko, A., Shysheya, A., Lempitsky, V.: Fast bi-layer neural synthesis of one-shot realistic head avatars. In: ECCV (2020)", + "77. Zhang, J., Chen, J., Tang, H., Wang, W., Yan, Y., Sangineto, E., Sebe, N.: Dual in-painting model for unsupervised gaze correction and animation in the wild. In: ACM MM (2020)", + "78. Zhang, M., Liu, Y., Lu, F.: Gazeonce: Real-time multi-person gaze estimation. In: CVPR (2022)", + "79. Zhang, X., Park, S., Beeler, T., Bradley, D., Tang, S., Hilliges, O.: Eth-xgaze: A large scale dataset for gaze estimation under extreme head pose and gaze variation. In: ECCV (2020)", + "80. Zhang, X., Sugano, Y., Bulling, A.: Revisiting data normalization for appearance-based gaze estimation. In: ACM ETRA (2018)", + "81. Zhang, X., Sugano, Y., Fritz, M., Bulling, A.: Appearance-based gaze estimation in the wild. In: CVPR (2015)", + "82. Zhang, X., Sugano, Y., Fritz, M., Bulling, A.: It's written all over your face: Full-face appearance-based gaze estimation. In: CVPRW (2017)", + "83. Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: ICCV (2017)", + "84. Zhu, X., Liu, Y., Li, J., Wan, T., Qin, Z.: Emotion classification with data augmentation using generative adversarial networks. In: PAKDD (2018)" + ], + "bbox": [ + 215, + 147, + 784, + 646 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "E. 
Ververas et al.", + "bbox": [ + 271, + 114, + 387, + 126 + ], + "page_idx": 17 + } +] \ No newline at end of file diff --git a/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/7d049317-38f5-44ac-a691-e07c992f4970_model.json b/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/7d049317-38f5-44ac-a691-e07c992f4970_model.json new file mode 100644 index 0000000000000000000000000000000000000000..1ab525c651c865474ed35a6911cdeafd939c4c87 --- /dev/null +++ b/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/7d049317-38f5-44ac-a691-e07c992f4970_model.json @@ -0,0 +1,2612 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.237, + 0.141, + 0.767, + 0.187 + ], + "angle": 0, + "content": "3DGazeNet: Generalizing 3D Gaze Estimation with Weak-Supervision from Synthetic Views" + }, + { + "type": "text", + "bbox": [ + 0.247, + 0.212, + 0.756, + 0.244 + ], + "angle": 0, + "content": "Evangelos Ververas1,2, Polydefkis Gkagkos2, Jiankang Deng1,2, Michail Christos Doukas1, Jia Guo3, and Stefanos Zafeiriou1" + }, + { + "type": "text", + "bbox": [ + 0.394, + 0.255, + 0.608, + 0.269 + ], + "angle": 0, + "content": "1 Imperial College London, UK" + }, + { + "type": "text", + "bbox": [ + 0.394, + 0.269, + 0.605, + 0.282 + ], + "angle": 0, + "content": "\\(^{2}\\) Huawei Noah's Ark Lab, UK" + }, + { + "type": "text", + "bbox": [ + 0.455, + 0.282, + 0.548, + 0.297 + ], + "angle": 0, + "content": "3 InsightFace" + }, + { + "type": "text", + "bbox": [ + 0.352, + 0.298, + 0.65, + 0.311 + ], + "angle": 0, + "content": "https://eververas.github.io/3DGazeNet/" + }, + { + "type": "text", + "bbox": [ + 0.263, + 0.343, + 0.741, + 0.594 + ], + "angle": 0, + "content": "Abstract. Developing gaze estimation models that generalize well to unseen domains and in-the-wild conditions remains a challenge with no known best solution. This is mostly due to the difficulty of acquiring ground truth data that cover the distribution of faces, head poses, and environments that exist in the real world. Most recent methods attempt to close the gap between specific source and target domains using domain adaptation. In this work, we propose to train general gaze estimation models which can be directly employed in novel environments without adaptation. To do so, we leverage the observation that head, body, and hand pose estimation benefit from revising them as dense 3D coordinate prediction, and similarly express gaze estimation as regression of dense 3D eye meshes. To close the gap between image domains, we create a large-scale dataset of diverse faces with gaze pseudo-annotations, which we extract based on the 3D geometry of the face, and design a multi-view supervision framework to balance their effect during training. We test our method in the task of gaze generalization, in which we demonstrate improvement of up to \\(23\\%\\) compared to state-of-the-art when no ground truth data are available, and up to \\(10\\%\\) when they are." 
+ }, + { + "type": "text", + "bbox": [ + 0.263, + 0.606, + 0.739, + 0.621 + ], + "angle": 0, + "content": "Keywords: 3D Gaze Estimation \\(\\cdot\\) 3D Eye Mesh \\(\\cdot\\) Gaze Generalization" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.644, + 0.377, + 0.66 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.674, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Eye gaze serves as a cue for understanding human behavior and intents, including attention, communication, and mental state. As a result, gaze information has been exploited by a wide range of applications across various fields of interest, ranging from medical and psychological analysis [9,37,64] to human-computer interaction [4], efficient rendering in VR/AR headset systems [6,10,39], virtual character animation [57,61,62,77] and driver state monitoring [34,50]. When high accuracy is important, data collection under the particular capturing setup is crucial, e.g. specific VR headsets, static screen-camera setups. However, in numerous real-world applications robustness is as important as high accuracy, e.g. face-unlocking in mobile devices, best frame capturing/selection in group photos and automatic gaze annotation of large datasets for face reenactment." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.127 + ], + "angle": 0, + "content": "E. Ververas et al." + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.144, + 0.782, + 0.305 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.307, + 0.788, + 0.39 + ], + "angle": 0, + "content": "Fig. 1: Overview of our method 3DGazeNet. a) We approach 3D gaze estimation as dense 3D eye mesh regression, which is robust against sparse prediction errors. b) Domain generalization is one of the hardest challenges in gaze estimation. Training with common gaze datasets often results in poor cross-dataset performance. c) Our multi-view supervision method employs pseudo-labels from in-the-wild face images to close the gap between controlled and in-the-wild datasets." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.43, + 0.788, + 0.627 + ], + "angle": 0, + "content": "Typically, 3D gaze estimation is expressed as a direct mapping between input images and a few pose parameters [12, 42, 52, 70, 82], or sparse representations of the eyes [54, 55, 66]. Nevertheless, it has been shown that unconstrained face and body pose estimation from single images benefits from replacing the prediction of a few pose or shape parameters with the direct prediction of dense 3D geometry [3, 16, 26, 43, 58]. In this work, we leverage this observation and revise the formulation of gaze estimation as end-to-end dense 3D eye mesh regression, which combined with standard vector regression induces multiple benefits. Existing datasets with ground truth 3D eyes include only images in the IR domain [21]; however, IR images cannot be directly employed for RGB-based methods. As 3D eye meshes are not available for most gaze datasets, we define a unified eye representation, i.e. a rigid 3D eyeball template (Fig. 3(a)), which we fit on images based on sparse landmarks and the available gaze labels."
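To make this fitting concrete, a minimal sketch of the rotate-then-align procedure (detailed later in Sec. 3.2) is given below; the helper names and landmark conventions are illustrative assumptions rather than the released implementation.

```python
# Hedged sketch: fit the rigid eyeball template to an image given a gaze
# label and 2D iris landmarks: rotate about the centre, align x, y to the
# landmarks via scale and translation, and scale z by the same factor.
import torch

def fit_eyeball(template, R_gaze, iris_2d, iris_idx):
    """template: (N_v, 3) canonical eyeball centred at the origin,
    R_gaze: (3, 3) rotation built from the gaze label,
    iris_2d: (K, 2) image-space iris landmarks,
    iris_idx: (K,) indices of the template's iris-border vertices."""
    V = template @ R_gaze.T                            # rotate around the centre
    src, dst = V[iris_idx, :2], iris_2d
    s = ((dst - dst.mean(0)).norm(dim=1).mean()
         / (src - src.mean(0)).norm(dim=1).mean())     # scale from iris extent
    t = dst.mean(0) - s * src.mean(0)
    xy = s * V[:, :2] + t                              # align x, y to the image
    z = s * V[:, 2:]                                   # same scale applied to z
    return torch.cat([xy, z], dim=1)                   # fitted (N_v, 3) eyeball
```
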
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.629, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Several gaze datasets have become available in the last decade [20, 22, 35, 42, 52, 59, 60, 79, 81], which have contributed to the recent progress in automatic 3D gaze estimation from monocular RGB images. However, collecting gaze datasets is a costly and challenging process which often restricts them to being captured in controlled environments with a limited number of unique identities, thus lacking variation compared to data from the real world. This causes the most common challenge in gaze estimation, which is cross-domain and in-the-wild generalization. In this work, we propose a method to exploit arbitrary, unlabeled face images to largely increase the diversity of our training data as well as our model's generalization capabilities. To that end, we design a simple pipeline to extract robust 3D gaze pseudo-labels based on the 3D shape of the face and eyes, without having any prior gaze information. Based on recent advancements on weakly-supervised head, body and hand pose estimation [8, 17, 31, 44, 65], we regularize inconsistencies of pseudo-labels, by a geometric constraint which encourages our" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.652, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DGazeNet" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.787, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.177 + ], + "angle": 0, + "content": "model to maintain prediction consistency between multiple synthetic views of the same subject." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.178, + 0.787, + 0.313 + ], + "angle": 0, + "content": "Most recent methods attempt to close the gap between diverse image domains using domain adaptation. Commonly, they employ a few samples of the target domain, with [29, 53, 73] or without [5, 7, 11, 24, 27, 47, 68, 70] their gaze labels, to fine-tune an initial model. Although successful, approaches following this scheme require knowledge of the target domain and model re-training, which prohibit their use as plug-n-play methods in real user applications. In contrast, we propose a method to train gaze estimation models that generalize well to unseen and in-the-wild environments without the constraints of domain adaptation. Our method can effortlessly be employed by user applications in a plug-n-play fashion." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.314, + 0.787, + 0.388 + ], + "angle": 0, + "content": "An overview of our approach, which we name 3DGazeNet, is presented in Fig. 1. We evaluate our method in cross-dataset gaze generalization, showcasing improvements over the state-of-the-art, even by a large margin, and perform ablations over the model components. To summarize, the key contributions of our work are:" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.397, + 0.784, + 0.44 + ], + "angle": 0, + "content": "- A simple automatic method to extract robust 3D eye meshes from arbitrary face images and a multi-view consistency regularization which allows us to exploit them for improved gaze generalization." + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.442, + 0.784, + 0.485 + ], + "angle": 0, + "content": "- A revised formulation for gaze estimation, based on dense 3D eye mesh regression from images. To the best of our knowledge, we are the first to utilize an end-to-end 3D eye mesh regression approach for gaze estimation."
+ }, + { + "type": "text", + "bbox": [ + 0.225, + 0.487, + 0.784, + 0.545 + ], + "angle": 0, + "content": "- Improved performance over the state-of-the-art in gaze generalization with \\((10\\%)\\) and without \\((23\\%)\\) using source domain ground truth, with a simple model architecture. Based on that, we believe that 3DGazeNet is an important step towards reliable plug-n-play gaze tracking." + }, + { + "type": "list", + "bbox": [ + 0.225, + 0.397, + 0.784, + 0.545 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.57, + 0.388, + 0.586 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.6, + 0.784, + 0.705 + ], + "angle": 0, + "content": "Numerous model designs for supervised 3D gaze estimation have been tested recently, investigating which face region to use as input [12,42,82], the model architecture [1,14,46,67] and what external stimuli to utilize to improve performance [52]. Motivated by the difficulties in collecting diverse and large scale data for gaze estimation, recent works have shown that valuable gaze representations can be extracted in fully unsupervised settings, by applying gaze redirection [74] or disentanglement constraints [63]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.72, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Gaze Adaptation and Generalization Much effort has been made to design methods that adapt well to known target subjects and environments, by employing either few labeled samples [29, 53, 73] or completely unlabeled data of the target domain [5, 7, 11, 24, 27, 47, 68, 70]. Differently from the above, gaze generalization models aim to improve cross-domain performance without any knowledge of the target domains. The models in [5, 11, 70], even though targeted for gaze adaptation, are based on learning general features for gaze estimation and thus, they perform well in target domain-agnostic settings. Moreover, [40] has shown" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.127 + ], + "angle": 0, + "content": "E. Ververas et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.269 + ], + "angle": 0, + "content": "that it is possible to train general gaze estimation models by employing geometric constraints in scenes depicting social interaction between people. We believe that [40] is the closest work to ours, as it is the only method which uses 3D geometric cues of the scene to learn gaze from arbitrary face data. Lastly, [78] proposes to improve generalization by employing synthetic images which are, however, limited by the gaze distribution of existing gaze datasets. Both the implementation and custom dataset are not public, which hinders reproducibility and reliable comparisons." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.288, + 0.79, + 0.455 + ], + "angle": 0, + "content": "Model-Based Gaze Estimation Differently from the above, sparse or semantic representations of the eye geometry have also been employed by some methods to infer gaze from images [54, 55, 66, 67, 71, 72]. However, such representations do not convey information about the 3D substance of eyes and are prone to noisy predictions. In contrast, by predicting 3D eye meshes we learn a much more robust representation, from which we can retrieve any other sparse or semantic one just by indexing. 
Recovering dense 3D geometry of the eye region from images by fitting parametric models of the shape and texture has been previously proposed [71]. However, restrictions posed by building large-scale parametric models and fitting in-the-wild images have resulted in low gaze accuracy compared to learning-based methods." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.474, + 0.79, + 0.655 + ], + "angle": 0, + "content": "Face Reenactment and Learning from Synthetic Data Synthetic image data have been previously used in training deep networks, mainly to augment the training datasets and provide pseudo-ground truth annotations. For instance, [84] used CycleGAN [83] to create a new training corpus in order to balance emotion classes in the task of emotion classification. More recently, GANcraft [28] employed SPADE [56] to generate pseudo-ground truth images that were used to supervise their neural rendering framework. In this work, we obtain access to image pairs of the same subject in different views, by taking advantage of HeadGAN [19], a face reenactment system. In contrast to person-specific reenactment methods [18, 36, 41] or person-generic landmark-driven approaches [69, 75, 76], HeadGAN is able to perform free-view synthesis using a single source image." + }, + { + "type": "image", + "bbox": [ + 0.233, + 0.685, + 0.773, + 0.771 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.78, + 0.789, + 0.837 + ], + "angle": 0, + "content": "Fig. 2: We use HeadGAN [19] to generate novel views by manipulating the 3D pose of the face. During synthesis, angle \\(\\theta_z\\) is transferred to all facial parts including the eyes, thus the relative angle between the head and eyes (i.e. the gaze direction in the head coordinate system) is maintained." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.652, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DGazeNet" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.145, + 0.331, + 0.161 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.177, + 0.553, + 0.192 + ], + "angle": 0, + "content": "3.1 Problem Definition and Motivation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.203, + 0.788, + 0.323 + ], + "angle": 0, + "content": "The aim of this work is to design a method that given a face image \\(\\mathbf{I}\\), it estimates \\(2\\times N_{v}\\) 3D coordinates \\(\\mathbf{V} = [\\mathbf{V}_l^T,\\mathbf{V}_r^T ]^T\\), where \\(\\mathbf{V}_l\\in \\mathbb{R}^{N_v\\times 3}\\) are coordinates corresponding to the left eyeball while \\(\\mathbf{V}_r\\in \\mathbb{R}^{N_v\\times 3}\\) to the right, as well as a 3D gaze vector \\(g = (g_{x},g_{y},g_{z})\\). Then, the final gaze result is calculated by the mean direction of the two output components. Inspired by recent work in self-supervised 3D body pose estimation [31,44,65], we adopt multi-view constraints to train our model based on in-the-wild faces and automatically generated gaze pseudo-labels." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.324, + 0.788, + 0.476 + ], + "angle": 0, + "content": "To employ multi-view losses, we assume that images of the same subject with different head poses and the same gaze direction relatively to the head are available. For example, this condition is satisfied when a face picture is taken from different angles at the same time. 
As such images are not commonly available for in-the-wild datasets, we employ HeadGAN [19], a recent face reenactment method, to generate novel face poses from existing images. HeadGAN is able to synthesize face animations using dense face geometry, which covers the eyes, as a driving signal and single source images. Using dense geometry guarantees that the relative angle between the head and eyes is maintained when synthesizing novel poses, as it is shown in Fig. 2." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.497, + 0.52, + 0.513 + ], + "angle": 0, + "content": "3.2 Unified 3D Eye Representation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.523, + 0.788, + 0.751 + ], + "angle": 0, + "content": "Learning consistent eye meshes across different images and datasets, requires establishing a unified 3D eye representation. To that end, we define a 3D eyeball template as a rigid 3D triangular mesh with spherical shape, consisting of \\( N_{v} = 481 \\) vertices and \\( N_{t} = 928 \\) triangles. We create two mirrored versions, \\( \\mathbf{M}_l \\) and \\( \\mathbf{M}_r \\), of the above mesh to represent a left and a right reference eyeball respectively. This representation allows us to allocate semantic labels to specific vertices of the eyeball, such as the iris border (Fig. 3 (a)), and calculate 3D gaze direction as the orientation of the central axis of our 3D eyeball template. In practice, an offset angle (the kappa coefficient) exists between the optical (central) and visual axes of eyes, which is subject-dependent and varies between \\( -2^{o} \\) to \\( 2^{o} \\) across the population [73]. Accounting for this offset is essential for person-specific gaze estimation [29,45,53,73]. However, in our case of cross-dataset and in-the-wild gaze generalization, in which errors are much larger than the possible offset, data diversity is more important than anatomical precision and thus, our spherical eyeball is a reasonable approximation." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.788, + 0.842 + ], + "angle": 0, + "content": "3D Eyes Ground-Truth from Gaze Datasets For gaze estimation datasets, exact supervision can be acquired by automatically fitting the eyeball template on face images based on sparse iris landmarks and the available gaze labels, as shown in Fig. 3(b). Specifically, we first rotate the eyeball template around its center according to the gaze label. Then, we align (scale and translation) \\( x \\)," + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.127 + ], + "angle": 0, + "content": "E. Ververas et al." 
+ }, + { + "type": "image", + "bbox": [ + 0.252, + 0.188, + 0.46, + 0.229 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.218, + 0.232, + 0.493, + 0.247 + ], + "angle": 0, + "content": "M: \\(N_{v} = 481\\) vertices, \\(N_{t} = 928\\) triangles" + }, + { + "type": "image_caption", + "bbox": [ + 0.285, + 0.248, + 0.422, + 0.261 + ], + "angle": 0, + "content": "(a) Eyeball template" + }, + { + "type": "image", + "bbox": [ + 0.224, + 0.274, + 0.493, + 0.365 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.259, + 0.369, + 0.451, + 0.383 + ], + "angle": 0, + "content": "(b) Ground truth generation" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.147, + 0.787, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.369, + 0.748, + 0.383 + ], + "angle": 0, + "content": "(c) Pseudo-ground truth generation" + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.393, + 0.785, + 0.437 + ], + "angle": 0, + "content": "Fig. 3: (a) The employed rigid 3D eyeball mesh template. (b) Ground truth data generation, applied on gaze estimation datasets with available ground truth. (c) Pseudoground truth data generation, applied on arbitrary face images without any gaze label." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.464, + 0.788, + 0.541 + ], + "angle": 0, + "content": "\\(y\\) coordinates of the rotated eye mesh to the iris landmarks of the image and multiply \\(z\\) coordinates with the same scale. To extract sparse iris landmarks we employed the method of [55] as a basis for building an iris localization model which is robust against occlusions and low resolution. More details about the iris localization model are provided in the supplemental material." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.553, + 0.788, + 0.737 + ], + "angle": 0, + "content": "3D Eyes Pseudo-Ground Truth from In-The-Wild Images To extract 3D eyes from images without gaze labels, we have developed an automatic pipeline based on 3D face alignment and 2D iris localization. First, we recover the 3D face with \\( x \\), \\( y \\) in image space using an off-the-shelf method. Then, we align our eyeball templates in the eye sockets based on the face's eyelid landmarks and predefined eyelid landmarks around the eyeball templates. In fact, we use the two corner landmarks of each eye which do not move between open and closed eyes. Next, we lift 2D iris predictions to 3D by finding the nearest vertexes from the aligned 3D eye templates. Finally, we compute the rotation between the initially aligned eyes and the 3D-lifted iris center and rotate the eyeballs accordingly. For 3D face alignment, we employ RetinaFace [16] and for 2D iris localization [55] as above. The process is presented in Fig. 3(c)." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.756, + 0.613, + 0.772 + ], + "angle": 0, + "content": "3.3 Joint 3D Eye Mesh and Vector Regression" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.78, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Given an input face image \\(\\mathbf{I}\\), we utilize 5 face detection landmarks to crop patches around each one of the two eyes. We resize the patches to shape \\(128 \\times 128 \\times 3\\) and stack them channel-wise along with a cropped image of the face. 
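The pseudo-ground-truth pipeline described above (place the template at the two stable eye-corner landmarks of the fitted 3D face, lift the detected 2D iris centre onto the sphere, then rotate the eyeball towards it) can be sketched as below. This is an illustrative reconstruction rather than the authors' code: the eye-corner and iris inputs are assumed to come from off-the-shelf 3D face alignment and iris localization, and radius_scale is a made-up heuristic.

```python
import numpy as np

def rotation_between(a, b):
    """Rotation matrix sending unit vector a onto unit vector b (Rodrigues form)."""
    a, b = a / np.linalg.norm(a), b / np.linalg.norm(b)
    v, c = np.cross(a, b), float(np.dot(a, b))
    if np.isclose(c, -1.0):                  # opposite vectors: 180 deg about x
        return np.diag([1.0, -1.0, -1.0])
    K = np.array([[0.0, -v[2], v[1]],
                  [v[2], 0.0, -v[0]],
                  [-v[1], v[0], 0.0]])
    return np.eye(3) + K + K @ K / (1.0 + c)

def pseudo_label_eye(eye_corners_3d, iris_center_2d, template, radius_scale=0.55):
    """Illustrative pseudo-label step for one eye (hypothetical helper).

    eye_corners_3d : (2, 3) inner/outer eye-corner vertices of the fitted 3D face
    iris_center_2d : (2,)   detected 2D iris centre in image coordinates
    template       : (N, 3) rigid eyeball template with its optical axis along +z
    """
    # 1. place the eyeball in the socket using the two stable corner landmarks
    center = eye_corners_3d.mean(axis=0)
    radius = radius_scale * np.linalg.norm(eye_corners_3d[1] - eye_corners_3d[0])
    placed = template * radius + center

    # 2. lift the 2D iris centre to 3D: nearest placed vertex in the x-y plane
    iris_3d = placed[np.argmin(np.linalg.norm(placed[:, :2] - iris_center_2d, axis=1))]

    # 3. rotate the eyeball so its optical axis passes through the lifted iris centre
    gaze = (iris_3d - center) / np.linalg.norm(iris_3d - center)
    R = rotation_between(np.array([0.0, 0.0, 1.0]), gaze)
    return (template @ R.T) * radius + center, gaze
```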
We employ a simple model architecture consisting of a ResNet-18 [30] to extract" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.652, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DGazeNet" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.206 + ], + "angle": 0, + "content": "features, followed by two fully connected layers to map them to two separate eye modalities, which are a) dense 3D eye coordinates and b) a 3D gaze vector. As the final gaze output, we consider the mean direction calculated from the two modalities." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.207, + 0.785, + 0.253 + ], + "angle": 0, + "content": "To train the above network for mesh regression, similarly to [16], we enforce a vertex loss and an edge length loss between the model outputs and the respective ground truth or pseudo-ground truth, which can be expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.369, + 0.256, + 0.786, + 0.299 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {v e r t} = \\frac {1}{N _ {v}} \\sum_ {j = \\{l, r \\}} \\sum_ {i = 1} ^ {N _ {v}} \\| \\mathbf {V} _ {j, i} - \\mathbf {V} _ {j, i} ^ {*} \\| _ {1}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.304, + 0.784, + 0.351 + ], + "angle": 0, + "content": "where \\(\\mathbf{V}_j\\in \\mathbb{R}^{N_v\\times 3}\\) and \\(\\mathbf{V}_j^*\\in \\mathbb{R}^{N_v\\times 3}\\) for \\(j = \\{l,r\\}\\) are the output and the (pseudo-)ground truth coordinates, while the edge length loss (based on the fixed mesh triangulation of our template meshes) can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.368, + 0.355, + 0.785, + 0.398 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {e d g e}} = \\frac {1}{3 N _ {t}} \\sum_ {j = \\{l, r \\}} \\sum_ {i = 1} ^ {3 N _ {t}} \\| \\mathbf {E} _ {j, i} - \\mathbf {E} _ {j, i} ^ {*} \\| _ {2}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.403, + 0.784, + 0.479 + ], + "angle": 0, + "content": "where \\(\\mathbf{E}_j\\in \\mathbb{R}^{3N_t}\\) and \\(\\mathbf{E}_j^*\\in \\mathbb{R}^{3N_t}\\) for \\(j = \\{l,r\\}\\) are the edge lengths of the predicted and the (pseudo-)ground truth eyes. As edge length we define the Euclidean distance between two vertices of the same triangle. In addition to the mesh regression losses, we enforce a gaze loss to the gaze output of our model, expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.393, + 0.485, + 0.785, + 0.502 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {g a z e} = (1 8 0 / \\pi) \\operatorname {a r c c o s} \\left(\\mathbf {g} ^ {T} \\mathbf {g} ^ {*}\\right), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.509, + 0.784, + 0.569 + ], + "angle": 0, + "content": "where \\(\\mathbf{g}\\) and \\(\\mathbf{g}^*\\) are the normalized model output and the gaze (pseudo-)ground truth respectively. We combine losses of Eqs. (1) to (3) in a single loss function to train our models with supervision from (pseudo-)ground truth 3D eye meshes and gaze vectors. 
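For concreteness, a hedged PyTorch sketch of the base network just described (a ResNet-18 over the channel-wise stack of the two 128x128 eye patches and the face crop, followed by two fully connected heads). The 9-channel input and layer sizes are assumptions for illustration, not the released architecture:

```python
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet18

class EyeNetSketch(nn.Module):
    """Rough sketch of the described base network, not the released model."""

    def __init__(self, n_verts=481):
        super().__init__()
        self.n_verts = n_verts
        self.backbone = resnet18(weights=None)          # torchvision >= 0.13 API
        # accept the 9-channel stacked input (2 eye patches + face crop) instead of RGB
        self.backbone.conv1 = nn.Conv2d(9, 64, kernel_size=7, stride=2,
                                        padding=3, bias=False)
        feat_dim = self.backbone.fc.in_features
        self.backbone.fc = nn.Identity()
        self.mesh_head = nn.Linear(feat_dim, 2 * n_verts * 3)   # left + right eye
        self.gaze_head = nn.Linear(feat_dim, 3)

    def forward(self, x):                                # x: (B, 9, 128, 128)
        feats = self.backbone(x)
        verts = self.mesh_head(feats).view(-1, 2 * self.n_verts, 3)
        gaze = F.normalize(self.gaze_head(feats), dim=-1)
        return verts, gaze
```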
The combined loss is written as:" + }, + { + "type": "equation", + "bbox": [ + 0.361, + 0.576, + 0.785, + 0.591 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {(P) G T} = \\lambda_ {v} \\mathcal {L} _ {v e r t} + \\lambda_ {e} \\mathcal {L} _ {e d g e} + \\lambda_ {g} \\mathcal {L} _ {g a z e}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.596, + 0.784, + 0.641 + ], + "angle": 0, + "content": "where \\(\\lambda_v, \\lambda_e\\), and \\(\\lambda_g\\) are parameters which regularize the contribution of the loss terms in the overall loss. From our experiments we have selected their values to be \\(\\lambda_v = 0.1\\), \\(\\lambda_e = 0.01\\) and \\(\\lambda_g = 1\\)." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.664, + 0.564, + 0.679 + ], + "angle": 0, + "content": "3.4 Multi-View Consistency Supervision" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.689, + 0.787, + 0.809 + ], + "angle": 0, + "content": "Extending our training dataset with in-the-wild images and training using pseudoground truth, usually improves the ability of our models to generalize to unseen domains, as can be seen by our experiments in Sec. 4.3. However, automatically generated 3D eyes and gaze include inconsistencies which are hard to identify and filter out. To balance the feedback of direct supervision from pseudo-ground truth, we design a multi-view supervision framework, based on pairs of real and synthetic images with different head poses, generated by HeadGAN as described in Sec. 3.1." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.786, + 0.84 + ], + "angle": 0, + "content": "Recovering dense 3D face coordinates and pose from images has recently been quite reliable [2,16,16,23]. Having a pair of images \\(\\mathbf{I}_1\\) and \\(\\mathbf{I}_2\\) of the same" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.127 + ], + "angle": 0, + "content": "E. Ververas et al." + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.144, + 0.787, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.321, + 0.788, + 0.433 + ], + "angle": 0, + "content": "Fig. 4: Overview of the proposed method 3DGazeNet. a) During training we employ single images with ground-truth supervision or pairs of synthetic views of the same subject with pseudo-annotations and different head poses. Different sets of losses are employed depending on the type of supervision. b) Detailed demonstration of \\(\\mathcal{L}_{MV}\\). 3D transformation \\(\\mathbf{P}\\) which maps view 1 to view 2, is employed to transform points \\(\\mathbf{V}_{l,1}\\) and \\(\\mathbf{V}_{r,1}\\), before calculating an L1 distance loss against \\(\\mathbf{V}_{l,2}\\) and \\(\\mathbf{V}_{r,2}\\). c) The base network (3DEyeNet) of our model consists of a ResNet-18 backbone and two fully connected layers leading to the 3D eye mesh and gaze vector outputs." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.491, + 0.788, + 0.627 + ], + "angle": 0, + "content": "subject and their reconstructed 3D faces, we can compute a transformation matrix \\(\\mathbf{P} \\in \\mathbb{R}^{3 \\times 4}\\) which aligns the two faces in image space. 
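Referring back to Eqs. (1) to (4), a minimal PyTorch sketch of the supervised objective with the loss weights quoted above; reductions and normalisation constants are simplified, and tri is assumed to be a LongTensor of triangle indices into the stacked left/right vertex array:

```python
import torch

def edge_lengths(verts, tri):
    """Edge lengths (B, 3*Nt) from vertices (B, 2*Nv, 3) and the fixed template
    triangulation tri (Nt, 3)."""
    e = torch.cat([verts[:, tri[:, 0]] - verts[:, tri[:, 1]],
                   verts[:, tri[:, 1]] - verts[:, tri[:, 2]],
                   verts[:, tri[:, 2]] - verts[:, tri[:, 0]]], dim=1)
    return e.norm(dim=-1)

def supervised_loss(V_pred, V_gt, g_pred, g_gt, tri,
                    lam_v=0.1, lam_e=0.01, lam_g=1.0):
    """Sketch of Eqs. (1)-(4): L1 vertex loss, edge-length loss and an angular
    gaze loss in degrees, combined with the weights reported in the text.
    V_* : (B, 2*Nv, 3) stacked eye vertices, g_* : (B, 3) unit gaze vectors."""
    l_vert = (V_pred - V_gt).abs().mean()                                        # Eq. (1)
    l_edge = (edge_lengths(V_pred, tri) - edge_lengths(V_gt, tri)).abs().mean()  # Eq. (2)
    cos = (g_pred * g_gt).sum(dim=-1).clamp(-1.0 + 1e-7, 1.0 - 1e-7)
    l_gaze = torch.rad2deg(torch.acos(cos)).mean()                               # Eq. (3)
    return lam_v * l_vert + lam_e * l_edge + lam_g * l_gaze                      # Eq. (4)
```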
Assuming that gaze direction in both images remains still relative to the face, as is the case with images created by HeadGAN, we are able to supervise 3D regression of eyes by restricting our model's predictions to be consistent over an image pair, as output vertices should coincide when transformation \\(\\mathbf{P}\\) is applied to one of the pair's outputs. A similar approach has been employed successfully for weakly-supervised body pose estimation [31,44,65]. Particularly, we form the vertex loss of a pair as:" + }, + { + "type": "equation", + "bbox": [ + 0.324, + 0.629, + 0.787, + 0.673 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {M V, v e r t e x} = \\frac {1}{N _ {v}} \\sum_ {j = \\{l, r \\}} \\sum_ {i = 1} ^ {N _ {v}} \\| \\mathbf {V} _ {1, j, i} \\mathbf {P} ^ {T} - \\mathbf {V} _ {2, j, i} \\| _ {1}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.678, + 0.788, + 0.77 + ], + "angle": 0, + "content": "where \\(\\mathbf{V}_{1,j},\\mathbf{V}_{2,j}\\in \\mathbb{R}^{N_v\\times 4}\\) for \\(j = \\{l,r\\}\\) are the output matrices for left and right eyes, which correspond to input images \\(\\mathbf{I}_1\\) and \\(\\mathbf{I}_2\\). \\(\\mathbf{V}_{1,j,i},\\mathbf{V}_{2,j,i}\\in \\mathbb{R}^4\\) are the specific homogeneous 3D coordinates indexed by \\(i\\) in the above matrices. To enforce consistency constraints to the gaze head of our model, we analyse matrix \\(\\mathbf{P}\\) to scale \\(s\\), rotation \\(\\mathbf{R}\\) and translation \\(\\mathbf{t}\\) components and employ \\(\\mathbf{R}\\) in a gaze consistency loss within a pair:" + }, + { + "type": "equation", + "bbox": [ + 0.358, + 0.772, + 0.787, + 0.79 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {M V, g a z e} = (1 8 0 / \\pi) \\arccos \\left(\\left(\\mathbf {g} _ {1} ^ {T} \\mathbf {R} ^ {T}\\right) \\mathbf {g} _ {2}\\right), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.788, + 0.842 + ], + "angle": 0, + "content": "where \\(\\mathbf{g}_1\\) and \\(\\mathbf{g}_2\\) are the normalized model outputs for input images \\(\\mathbf{I}_1\\) and \\(\\mathbf{I}_2\\) respectively. We combine losses of Eqs. (5) and (6) in a single loss function to enforce multi-view consistency in mesh and gaze vector regression, between" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.652, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DGazeNet" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.147, + 0.744, + 0.162 + ], + "angle": 0, + "content": "model outputs coming from pairs of input images. This loss is written as:" + }, + { + "type": "equation", + "bbox": [ + 0.348, + 0.169, + 0.785, + 0.185 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {M V} = \\lambda_ {M V, v} \\mathcal {L} _ {M V, v e r t e x} + \\lambda_ {M V, g} \\mathcal {L} _ {M V, g a z e}, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.189, + 0.787, + 0.264 + ], + "angle": 0, + "content": "where \\(\\lambda_{MV,v}\\) and \\(\\lambda_{MV,g}\\) are parameters which regularize the contribution of the loss terms in the overall loss. In our experiments, we have selected their values to be \\(\\lambda_{MV,v} = 0.1\\) and \\(\\lambda_{MV,g} = 1\\). To train models with all supervision signals, i.e. 
ground truth \\((\\mathcal{L}_{GT})\\), pseudo-ground truth \\((\\mathcal{L}_{PGT})\\) and multi-view supervision \\((\\mathcal{L}_{MV})\\), we utilize the following overall loss function:" + }, + { + "type": "equation", + "bbox": [ + 0.357, + 0.271, + 0.785, + 0.288 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\lambda_ {G T} \\mathcal {L} _ {G T} + \\lambda_ {P G T} \\mathcal {L} _ {P G T} + \\lambda_ {M V} \\mathcal {L} _ {M V}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.291, + 0.787, + 0.322 + ], + "angle": 0, + "content": "with parameters \\(\\lambda_{GT} = \\lambda_{PGT} = \\lambda_{MV} = 1\\). Implementation details are included in the supplemental material. An overview of 3DGazeNet is presented in Fig. 4." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.344, + 0.376, + 0.361 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.375, + 0.335, + 0.388 + ], + "angle": 0, + "content": "4.1 Datasets" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.399, + 0.788, + 0.626 + ], + "angle": 0, + "content": "Gaze Datasets Captured in a lab environment, ETH-XGaze (EXG) [79] consists of 756K frames of 80 subjects and includes large head pose and gaze variation. Collected in uncontrolled indoor environments with mobile devices, MPI-IFaceGaze (MPII) [81] includes smaller head pose and gaze variation and consists of 45K images of 15 subjects, while GazeCapture (GC) [42] contains almost 2M frontal face images of 1474 subjects. In contrast to the above datasets, Gaze360 (G360) [35] is the only gaze dataset captured both indoors and outdoors and consists of 127K training sequences from 365 subjects. The large variation in head pose, gaze, and environmental conditions of Gaze360 makes it the most challenging yet appropriate benchmark for in-the-wild gaze estimation, available in literature. For our experiments, we normalized the above datasets based on [80], except for Gaze360 which we process to get normalized face crops. Additionally, we employ the predefined training-test splits, while for Gaze360 we only use the frontal facing images with head pose yaw angle up to \\(90^{\\circ}\\). The head pose and gaze distributions of the above datasets are presented in Fig. 5." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.629, + 0.788, + 0.841 + ], + "angle": 0, + "content": "In-The-Wild Face Datasets In-the-wild face datasets consist of significantly more unique subjects and capturing environments. For our experiments, we employed four publicly-available datasets FFHQ [33] (70K images), AFLW [38] (25K images), AVA [25,48,49] and CMU-Panoptic [32]. FFHQ and AFLW are in-the-wild face datasets commonly used for face analysis, AVA is a large-scale in-the-wild human activity dataset annotated under the Looking-At-Each-Other condition and CMU-Panoptic is collected in lab conditions and captures interactions of multiple people in the same scene. FFHQ and AFLW include one face per image and thus are only processed to get normalized face crops. AVA and CMU-Panoptic include frames with multiple faces, from which we randomly select 80K faces from each dataset with a maximum head pose of \\(90^o\\). Similarly to [40], for CMU we employed only frames captured with cameras in eye height. We name this collection of 255K images as the \"In-The-Wild Gaze\" dataset (ITWG). Lastly, to enforce multi-view supervision as described in Sec. 
3.4, we synthesized" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.127 + ], + "angle": 0, + "content": "E. Ververas et al." + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.144, + 0.501, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.143, + 0.788, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.266, + 0.788, + 0.309 + ], + "angle": 0, + "content": "Fig. 5: Distributions of the head pose (top row) and gaze (bottom row) of the gaze datasets (red) and the face datasets (blue). Wide distribution datasets CMU, AVA, FFHQ, and AFLW are exploited to close the gap between diverse image domains." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.339, + 0.784, + 0.415 + ], + "angle": 0, + "content": "novel views from images of ITWG using HeadGAN, sampling the pitch and yaw angles from Gaussians \\(\\mathcal{N}(0,20)\\), relatively to the original head pose. We name this collection of images as \"Multi-View In-The-Wild Gaze\" dataset (ITWG-MV) and employ it to improve the generalization of gaze estimation. The head pose and gaze distributions of the above datasets are presented in Fig. 5." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.439, + 0.431, + 0.453 + ], + "angle": 0, + "content": "4.2 Gaze Generalization" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.465, + 0.785, + 0.526 + ], + "angle": 0, + "content": "In this section, we evaluate 3DGazeNet in within-dataset and cross-dataset experiments. We believe that [40] is the most closely related method to ours, as it is the only method using 3D geometric cues of the scene to generalize gaze from arbitrary face data." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.53, + 0.788, + 0.712 + ], + "angle": 0, + "content": "Cross-dataset Evaluation We design two cross-dataset experiments to test the generalization of our method on G360 and report the results on Tab. 1(a) and (b). Particularly, the experiments are: a) we train our method on the CMU, AVA, and ITWG-MV datasets utilizing only our pseudo-labels and multi-view supervision and b) we additionally employ ground truth supervision from GC and EXG. From the results of the above experiments, it becomes obvious that our geometry-aware pseudo-labels employed within our multi-view supervision training effectively generalize gaze estimation to unseen domains, even without any available ground truth. In particular, in experiment a) our method outperforms [40] by \\(23\\%\\) with AVA, \\(22\\%\\) with CMU, \\(12.5\\%\\) with \\(\\mathrm{AVA + CMU}\\) and \\(20\\%\\) with our large-scale ITWG-MV. Similarly, in experiment b) 3DGazeNet outperforms [40] by \\(10\\%\\) and \\(9\\%\\) with GC and EXG respectively." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.715, + 0.788, + 0.806 + ], + "angle": 0, + "content": "Within-dataset Evaluation Here we compare our method against state-of-the-art within-dataset gaze estimation on G360. Similarly to [40], we employ AVA for additional supervision, while we also examine the effect of the larger-scale ITWG-MV. The results, presented in Tab. 1 (c), show that multi-view supervision from AVA does not improve performance (which is in line with the compared method), but the large-scale ITWG-MV does." 
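Relating the view pairs above back to Sec. 3.4, a minimal sketch of the multi-view consistency terms of Eqs. (5) to (7), assuming the 3x4 transform P between the two reconstructed faces of a pair has been precomputed and that its leading 3x3 block is a scaled rotation:

```python
import torch

def multiview_loss(V1, V2, g1, g2, P, lam_mv_v=0.1, lam_mv_g=1.0):
    """Sketch of Eqs. (5)-(7).
    V1, V2 : (B, N, 3) predicted eye vertices for the two views of a pair
    g1, g2 : (B, 3)    predicted unit gaze vectors for the two views
    P      : (B, 3, 4) similarity transform mapping view 1 into view 2."""
    ones = torch.ones_like(V1[..., :1])
    V1_h = torch.cat([V1, ones], dim=-1)                 # homogeneous coordinates
    V1_in_2 = torch.einsum("bij,bnj->bni", P, V1_h)      # Eq. (5): map view 1 into view 2
    l_vert = (V1_in_2 - V2).abs().mean()

    R = P[:, :, :3]
    R = R / R.norm(dim=1, keepdim=True)                  # columns of s*R have norm s
    g1_in_2 = torch.einsum("bij,bj->bi", R, g1)
    cos = (g1_in_2 * g2).sum(dim=-1).clamp(-1.0 + 1e-7, 1.0 - 1e-7)
    l_gaze = torch.rad2deg(torch.acos(cos)).mean()       # Eq. (6)
    return lam_mv_v * l_vert + lam_mv_g * l_gaze         # Eq. (7)
```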
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Comparison with state-of-the-art We further compare 3DGazeNet against recent methods for gaze generalization. The works in [5,70] are developed with a" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.652, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DGazeNet" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.145, + 0.788, + 0.214 + ], + "angle": 0, + "content": "Table 1: Weakly-supervised method evaluation in cross- and within-dataset experiments. In all cases, we calculate gaze error in degrees (lower is better), on the test set of Gaze360. CMU and AVA correspond to subsets of ITWG-MV (i.e. augmented for multi-view supervision), providing a clearer comparison with [40]. Our method trained with ITWG-MV outperforms the baselines in all cases. 3DGN refers to 3DGazeNet" + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.227, + 0.785, + 0.3 + ], + "angle": 0, + "content": "
(a) Cross-dataset\nSynthetic Views | (b) Cross-dataset\nGround Truth + Synthetic Views | (c) Within-dataset\nGround Truth + Synthetic Views
Dataset | [40] | 3DGN | Dataset | [79] | [40] | 3DGN | Dataset | [79] | [40] | 3DGN
AVA | 29.0 | 22.4 | GC | 30.2 | 29.2 | 27.5 | EXG | 27.3 | 20.5 | 22.1
CMU | 26.0 | 20.3 | GC+AVA | - | 19.5 | 18.9 | EXG+AVA | - | 16.9 | 17.1
CMU+AVA | 22.5 | 19.7 | GC+AVA+CMU | - | - | 18.4 | EXG+AVA+CMU | - | - | 16.7
ITWG-MV | - | 18.1 | GC+ITWG-MV | - | - | 17.6 | EXG+ITWG-MV | - | - | 15.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.337, + 0.784, + 0.379 + ], + "angle": 0, + "content": "Table 2: Comparison with state-of-the-art in domain generalization for gaze estimation. In all experiments our model outperforms the compared methods. Gaze error is in degrees (lower is better)." + }, + { + "type": "table", + "bbox": [ + 0.22, + 0.392, + 0.784, + 0.485 + ], + "angle": 0, + "content": "
Stage 1 (Gaze Generalization Models) | + Stage 2 (Adaptation/Fine Tuning)
EXG | EXG+ITWG-MV | G360 | G360+ITWG-MV | EXG+ITWG-MV | G360+ITWG-MV
Method | MPII | GC | MPII | GC | MPII | GC | MPII | GC | MPII | GC | MPII | GC
RAT/RUDA [5] | 7.1 | 8.4 | 7.0 | 8.2 | 9.3 | 9.0 | 9.1 | 8.5 | 6.8 | 8.1 | 7.9 | 8.3
CDG/CRGA [70] | 6.7 | 9.2 | 6.9 | 9.5 | 7.0 | 8.3 | 8.1 | 8.9 | 7.4 | 9.0 | 7.6 | 8.7
PureGaze [11] | 7.9 | 8.7 | 7.7 | 9.3 | 7.6 | 8.3 | 7.4 | 8.6 | 6.6 | 8.0 | 7.2 | 8.3
3DGazeNet | 7.7 | 10.7 | 6.0 | 7.8 | 9.1 | 12.1 | 6.3 | 8.0 | - | - | - | -
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.538, + 0.788, + 0.841 + ], + "angle": 0, + "content": "focus on domain adaptation for gaze estimation and encompass two-stage training schemes, both training feature invariant models at the first stage. That is, in the first training stage RUDA [5] trains gaze estimation model invariant to image rotations, while CRGA [70] uses a contrastive loss to separate image features according to gaze. The second stage of the above methods is focused on adapting the initially trained models to specific target domains. As our method aims to train general gaze estimation models without knowledge of specific target domains, we implement the first-stage models of the above methods, namely RAT [5], CDG [70] and compare them with 3DGazeNet in cross-dataset experiments. Additionally, we compare against PureGaze [11] which is a gaze generalization method that purifies face features to achieve higher gaze estimation performance. To follow the evaluation protocol in the above works, we train all methods on EXG and G360 (+ITWG-MV) and test on MPII and GC. For completeness, we include results of the full models RUDA and CRGA after using ITWG-MV according to their domain adaptation schemes. For PureGaze, ITWG-MV was used for fine-tuning. Tab. 2 shows that the proposed method outperforms the baselines for gaze generalization when ITWG-MV is employed. The compared methods do not include regularization for the noisy labels of ITWG-MV, resulting in similar or worse performance, while our method exploits them through \\(\\mathcal{L}_{MV}\\), benefiting from the extended variation." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.127 + ], + "angle": 0, + "content": "E. Ververas et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.788, + 0.187 + ], + "angle": 0, + "content": "Table 3: Comparison between training targets Vector(V), Mesh(M) and Mesh+Vector(M+V) in within-dataset experiments (using only \\(\\mathcal{L}_{GT}\\)). Target M+V leads to lower errors than state-of-the-art. Error is in degrees (lower is better)." + }, + { + "type": "table", + "bbox": [ + 0.306, + 0.199, + 0.699, + 0.284 + ], + "angle": 0, + "content": "
Dataset | Compared Methods | 3DGazeNet
[51] | [13] | [1] | [15,82] | [53] | [15,35] | [40] | [79] | V | M | M+V
MPII | 4.04 | 4.00 | 3.92 | 4.9 | 5.3 | 4.06 | - | 4.8 | 4.1 | 4.2 | 4.0
G360 | 10.7 | 10.6 | 10.4 | 14.9 | - | 11.1 | 10.1 | - | 9.8 | 9.8 | 9.6
GC | - | - | - | - | 3.5 | - | - | 3.3 | 3.2 | 3.3 | 3.1
EXG | - | - | - | 7.3 | - | - | - | 4.5 | 4.2 | 4.4 | 4.2
" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.312, + 0.398, + 0.326 + ], + "angle": 0, + "content": "4.3 Ablation studies" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.338, + 0.787, + 0.534 + ], + "angle": 0, + "content": "Gaze Estimation via 3D Eye Mesh Regression Here we experimentally evaluate our suggestion that gaze estimation benefits from replacing the training target from gaze vectors or angles to dense 3D eye coordinates. To this end, we employ the fully supervised version of our model, utilizing data with exact ground truth and \\(\\mathcal{L}_{GT}\\) for training. We conduct within-dataset experiments on MPII, GC, G360 and EXG for which specific training-testing subsets are provided. We compare against state-of-the-art methods [1,13,15,35,40,51,53,79,82] and report the results in Tab. 3. In almost all cases, our model outperforms the baselines, while combining the two modalities, i.e. dense 3D meshes and gaze vectors \\((\\mathrm{M} + \\mathrm{V})\\), improves performance compared to training with vector targets (V) or 3D mesh targets (M) alone. This is possibly due to the distinct nature of the two modalities, i.e. the vectors provide exact label supervision, while meshes provide a robust representation which limits sparse prediction errors." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.535, + 0.787, + 0.732 + ], + "angle": 0, + "content": "The main benefit of dense coordinate regression over pose parameters or sparse points prediction is that individual parameter errors have limited effect on the total outcome making them more robust to prediction inaccuracies [16]. This effect is particularly useful for our multi-view training scheme in which introducing consistency of dense correspondences between images rather than only vector consistency, offers stronger regularization. We validate this argument in gaze generalization experiments in G360, GC, EXG, and MPII, presented in Tab. 4. For this experiment, we consider three versions of 3DGazeNet: one which predicts only gaze vectors and no coordinates (Vector), one which predicts 8 3D iris landmarks instead of dense eye meshes (Iris+Vector), to highlight the effect of dense coordinate prediction, and the full 3DGazeNet (Mesh+Vector). The results show that employing combined training targets always benefits performance, while replacing dense 3D eye meshes with iris landmarks highly limits this effect." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.734, + 0.787, + 0.841 + ], + "angle": 0, + "content": "The Effect of Gaze Pseudo-Labels and Multi-View Supervision Here we examine the contribution of our automatic geometry-aware pseudo-labels and the multi-view supervision loss of our approach. 
To this end, we consider three training scenarios which are the following: a) training with ITWG and its pseudo-labels as ground truth \\((\\mathcal{L}_{PGT})\\), b) training with ITWG-MV utilizing only the multi-view consistency constraints and no pseudo-labels \\((\\mathcal{L}_{MV})\\) and c) training with ITWG-MV while employing both pseudo-labels and the multi-view" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.652, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DGazeNet" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.788, + 0.201 + ], + "angle": 0, + "content": "Table 4: Comparison between training targets Vector, Iris+Vector and Mesh+Vector for domain generalization when employing our full model (Eq. (8)). For the target Vector, we remove all mesh terms from the employed losses. In all experiments, the target Mesh+Vector results in a lower error. Gaze error is in degrees (lower is better)." + }, + { + "type": "table", + "bbox": [ + 0.222, + 0.213, + 0.782, + 0.326 + ], + "angle": 0, + "content": "
Training Dataset | Vector | Iris+Vector | Mesh+Vector
G360 | GC | EXG | MPII | G360 | GC | EXG | MPII | G360 | GC | EXG | MPII
ITWG-MV | 19.1 | 10.1 | 16.7 | 8.5 | 18.8 | 9.9 | 16.7 | 8.2 | 18.1 | 9.0 | 16.7 | 7.6
G360+ITWG-MV | 10.1 | 10.2 | 15.1 | 7.0 | 9.7 | 9.4 | 15.0 | 6.8 | 9.3 | 8.0 | 14.6 | 6.3
GC+ITWG-MV | 18.2 | 3.1 | 16.0 | 6.1 | 18.0 | 3.0 | 15.9 | 6.2 | 17.6 | 3.0 | 15.5 | 6.1
EXG+ITWG-MV | 16.5 | 10.2 | 4.5 | 6.6 | 16.3 | 9.6 | 4.5 | 6.4 | 15.4 | 7.8 | 4.3 | 6.0
MPII+ITWG-MV | 17.8 | 8.2 | 15.2 | 4.8 | 17.9 | 7.6 | 15.0 | 4.6 | 17.6 | 6.8 | 14.9 | 4.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.338, + 0.785, + 0.381 + ], + "angle": 0, + "content": "Table 5: The effect of incorporating pseudo-ground truth and multi-view supervision during training. Both components contribute towards improving results in cross-dataset gaze estimation experiments. Gaze error is in degrees (lower is better)." + }, + { + "type": "table", + "bbox": [ + 0.307, + 0.392, + 0.698, + 0.521 + ], + "angle": 0, + "content": "
Dataset\\( {\\mathcal{L}}_{GT} \\)\\( {\\mathcal{L}}_{PGT} \\)\\( {\\mathcal{L}}_{MV} \\)G360GCEXGMPII
ITWG--23.114.824.313.6
ITWG-MV--47.433.241.132.8
ITWG-MV-18.19.016.77.6
GC--27.53.128.410.4
GC+ITWG-21.43.223.79.1
GC+ITWG-MV-24.73.526.210.1
GC+ITWG-MV17.63.015.56.1
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.549, + 0.788, + 0.67 + ], + "angle": 0, + "content": "consistency loss \\((\\mathcal{L}_{PGT} + \\mathcal{L}_{MV})\\). To further evaluate the effect of the pseudolabels and multi-view loss, we repeat the above experiments by adding ground truth supervision from GC \\((+ \\mathcal{L}_{GT})\\). We test our models on the test set of G360, GC, EXG, and MPII, and report the results in Tab. 5. In all cases, combining our pseudo-labels and multi-view loss yields the lowest error in degrees. Lastly, utilizing only \\(\\mathcal{L}_{MV}\\) on ITWG-MV leads to very high errors which is reasonable as no supervision for the eyeball topology exists, thus, the model outputs cannot follow the spherical shape of the eyeball template." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.673, + 0.788, + 0.81 + ], + "angle": 0, + "content": "The Effect of Head Pose Distribution of ITWG Head pose distribution difference between the train and test set is one of the main reasons that gaze-estimation models fail in cross-dataset situations. To close the gap between different training and testing scenarios, we have designed ITWG, a large-scale dataset with widespread variation in head pose and gaze angles. To study the effect of the head pose variation of ITWG in our experiments, we employ different subsets of ITWG with various levels of head pose variation and conduct cross-dataset experiments with them. In particular, we consider four subsets of ITWG, with maximum yaw angles of \\(5^o\\), \\(20^o\\), \\(40^o\\) and \\(90^o\\) (all) respectively." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.787, + 0.84 + ], + "angle": 0, + "content": "We train 3DGazeNet with ground truth supervision from MPII as well as pseudo-labels and multi-view supervision from the four versions of ITWG-MV." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.389, + 0.127 + ], + "angle": 0, + "content": "E. Ververas et al." + }, + { + "type": "image", + "bbox": [ + 0.304, + 0.146, + 0.699, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.33, + 0.788, + 0.375 + ], + "angle": 0, + "content": "Fig. 6: Gaze error of G360 across head poses when training with MPII and subsets of ITWG-MV. Wider range of head poses in the ITWG-MV data, lead to significantly lower errors in large poses." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.403, + 0.788, + 0.555 + ], + "angle": 0, + "content": "The results of testing on G360 are presented in Fig. 6. The resulting curves clearly demonstrate the effect of the available head pose variation in the training data. Specifically, utilizing the entirety of ITWG-MV leads to the lowest errors which are relatively consistent across the head pose range. As expected, decreasing the available head pose variation, increasingly affects model performance with the worst case being training with MPII alone. Based on the above finding we argue that the gap between small and wide distribution gaze datasets (regarding head pose) can effectively close by employing similarly large distribution unlabeled face datasets, which is crucial for training plug-n-play gaze estimation models that can be directly employed in applications." 
+ } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.389, + 0.127 + ], + "angle": 0, + "content": "E. Ververas et al." + }, + { + "type": "image", + "bbox": [ + 0.304, + 0.146, + 0.699, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.33, + 0.788, + 0.375 + ], + "angle": 0, + "content": "Fig. 6: Gaze error of G360 across head poses when training with MPII and subsets of ITWG-MV. A wider range of head poses in the ITWG-MV data leads to significantly lower errors at large poses." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.403, + 0.788, + 0.555 + ], + "angle": 0, + "content": "The results of testing on G360 are presented in Fig. 6. The resulting curves clearly demonstrate the effect of the available head pose variation in the training data. Specifically, utilizing the entirety of ITWG-MV leads to the lowest errors, which are relatively consistent across the head pose range. As expected, decreasing the available head pose variation increasingly affects model performance, with the worst case being training with MPII alone. Based on the above finding, we argue that the gap between small and wide distribution gaze datasets (regarding head pose) can be effectively closed by employing unlabeled face datasets with similarly wide distributions, which is crucial for training plug-n-play gaze estimation models that can be directly employed in applications." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.58, + 0.519, + 0.596 + ], + "angle": 0, + "content": "5 Limitations and Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.613, + 0.787, + 0.688 + ], + "angle": 0, + "content": "In Sec. 4, we have shown that pseudo-ground truth can be effectively utilized in gaze estimation. Nevertheless, a limitation of our method is that pseudo-annotation accuracy is related to the accuracy of 3D face and 2D iris alignment. In addition, our current method cannot operate on images without a visible face (when the face is looking away from the camera)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.689, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Overall, in this work, we present a novel weakly-supervised method for gaze generalization, based on dense 3D eye mesh regression. We demonstrate that by utilizing both 3D eye coordinates and gaze labels during training, instead of just gaze labels, we can achieve lower prediction errors. Moreover, we explore the possibility of exploiting the abundantly available in-the-wild face data for improving gaze estimation generalization. To that end, we propose a novel methodology to generate robust, 3D geometry-aware pseudo ground truth labels, as well as a multi-view weak-supervision framework for effective training. By enforcing these constraints, we are able to successfully utilize in-the-wild face data and achieve improvements in cross-dataset and within-dataset experiments." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.652, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DGazeNet" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.147, + 0.785, + 0.178 + ], + "angle": 0, + "content": "Acknowledgments. S. Zafeiriou was supported by EPSRC Project DEFORM (EP/S010203/1) and GNOMON (EP/X011364)." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.203, + 0.323, + 0.218 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.236, + 0.784, + 0.278 + ], + "angle": 0, + "content": "1. Abdelrahman, A.A., Hempel, T., Khalifa, A., Al-Hamadi, A., Dinges, L.: L2cs-net: Fine-grained gaze estimation in unconstrained environments. In: ICFSP. pp. 98-102. IEEE (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.279, + 0.785, + 0.307 + ], + "angle": 0, + "content": "2. Albiero, V., Chen, X., Yin, X., Pang, G., Hassner, T.: img2pose: Face alignment and detection via 6dof, face pose estimation. In: CVPR (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.308, + 0.785, + 0.335 + ], + "angle": 0, + "content": "3. Alp Guler, R., Trigeorgis, G., Antonakos, E., Snape, P., Zafeiriou, S., Kokkinos, I.: Densereg: Fully convolutional dense shape regression in-the-wild. In: CVPR (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.336, + 0.785, + 0.363 + ], + "angle": 0, + "content": "4. Andrist, S., Tan, X.Z., Gleicher, M., Mutlu, B.: Conversational gaze aversion for humanlike robots. In: HRI (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.364, + 0.785, + 0.391 + ], + "angle": 0, + "content": "5. Bao, Y., Liu, Y., Wang, H., Lu, F.: Generalizing gaze estimation with rotation consistency. In: CVPR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.392, + 0.785, + 0.433 + ], + "angle": 0, + "content": "6.
Burova, A., Mäkelä, J., Hakulinen, J., Keskinen, T., Heinonen, H., Siltanen, S., Turunen, M.: Utilizing vr and gaze tracking to develop ar solutions for industrial maintenance. In: CHI (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.434, + 0.785, + 0.462 + ], + "angle": 0, + "content": "7. Cai, X., Zeng, J., Shan, S., Chen, X.: Source-free adaptive gaze estimation by uncertainty reduction. In: CVPR. pp. 22035-22045 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.463, + 0.785, + 0.49 + ], + "angle": 0, + "content": "8. Cai, Y., Ge, L., Cai, J., Yuan, J.: Weakly-supervised 3d hand pose estimation from monocular rgb images. In: ECCV (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.491, + 0.785, + 0.532 + ], + "angle": 0, + "content": "9. Castner, N., Kuebler, T.C., Scheiter, K., Richter, J., Eder, T., Hützig, F., Keutel, C., Kasneci, E.: Deep semantic gaze embedding and scanpath comparison for expertise classification during opt viewing. In: ACM ETRA (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.533, + 0.785, + 0.56 + ], + "angle": 0, + "content": "10. Chen, M., Jin, Y., Goodall, T., Yu, X., Bovik, A.C.: Study of 3d virtual reality picture quality. IEEE Journal of Selected Topics in Signal Processing (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.561, + 0.785, + 0.587 + ], + "angle": 0, + "content": "11. Cheng, Y., Bao, Y., Lu, F.: Puregaze: Purifying gaze feature for generalizable gaze estimation. In: AAAI (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.588, + 0.785, + 0.616 + ], + "angle": 0, + "content": "12. Cheng, Y., Huang, S., Wang, F., Qian, C., Lu, F.: A coarse-to-fine adaptive network for appearance-based gaze estimation. In: AAAI (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.617, + 0.717, + 0.63 + ], + "angle": 0, + "content": "13. Cheng, Y., Lu, F.: Gaze estimation using transformer. In: ICPR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.631, + 0.785, + 0.658 + ], + "angle": 0, + "content": "14. Cheng, Y., Lu, F., Zhang, X.: Appearance-based gaze estimation via evaluation-guided asymmetric regression. In: ECCV (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.659, + 0.785, + 0.687 + ], + "angle": 0, + "content": "15. Cheng, Y., Wang, H., Bao, Y., Lu, F.: Appearance-based gaze estimation with deep learning: A review and benchmark. arXiv preprint arXiv:2104.12668 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.688, + 0.785, + 0.714 + ], + "angle": 0, + "content": "16. Deng, J., Guo, J., Ververas, E., Kotsia, I., Zafeiriou, S.: Retinaface: Single-shot multi-level face localisation in the wild. In: CVPR (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.715, + 0.785, + 0.757 + ], + "angle": 0, + "content": "17. Deng, Y., Yang, J., Xu, S., Chen, D., Jia, Y., Tong, X.: Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In: CVPR Workshops (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.758, + 0.785, + 0.785 + ], + "angle": 0, + "content": "18. Doukas, M.C., Koujan, M.R., Sharmanska, V., Roussos, A., Zafeiriou, S.: Head2head++: Deep facial attributes re-targeting. T-BIOM (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.786, + 0.785, + 0.813 + ], + "angle": 0, + "content": "19. Doukas, M.C., Zafeiriou, S., Sharmanska, V.: Headgan: One-shot neural head synthesis and editing.
In: ICCV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.814, + 0.785, + 0.84 + ], + "angle": 0, + "content": "20. Fischer, T., Chang, H.J., Demiris, Y.: Rt-gene: Real-time eye gaze estimation in natural environments. In: ECCV (2018)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.236, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.127 + ], + "angle": 0, + "content": "E. Ververas et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.147, + 0.785, + 0.189 + ], + "angle": 0, + "content": "21. Fuhl, W., Kasneci, G., Kasneci, E.: Teyed: Over 20 million real-world eye images with pupil, eyelid, and iris 2d and 3d segmentations, 2d and 3d landmarks, 3d eyeball, gaze vector, and eye movement types. ISMAR (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.19, + 0.785, + 0.231 + ], + "angle": 0, + "content": "22. Funes Mora, K.A., Monay, F., Odobez, J.M.: Eyediap: A database for the development and evaluation of gaze estimation algorithms from rgb and rgb-d cameras. In: ACM ETRA (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.232, + 0.785, + 0.259 + ], + "angle": 0, + "content": "23. Gecer, B., Ploumpis, S., Kotsia, I., Zafeiriou, S.: Ganfit: Generative adversarial network fitting for high fidelity 3d face reconstruction. In: CVPR (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.26, + 0.785, + 0.286 + ], + "angle": 0, + "content": "24. Ghosh, S., Hayat, M., Dhall, A., Knibbe, J.: Mtgls: Multi-task gaze estimation with limited supervision. In: WACV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.288, + 0.785, + 0.327 + ], + "angle": 0, + "content": "25. Gu, C., Sun, C., Ross, D.A., Vondrick, C., Pantofaru, C., Li, Y., Vijayanarasimhan, S., Toderici, G., Ricco, S., Sukthankar, R., Schmid, C., Malik, J.: Ava: A video dataset of spatio-temporally localized atomic visual actions. In: CVPR (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.329, + 0.785, + 0.355 + ], + "angle": 0, + "content": "26. Guler, R.A., Kokkinos, I.: Holopose: Holistic 3d human reconstruction in-the-wild. In: CVPR (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.357, + 0.785, + 0.384 + ], + "angle": 0, + "content": "27. Guo, Z., Yuan, Z., Zhang, C., Chi, W., Ling, Y., Zhang, S.: Domain adaptation gaze estimation by embedding with prediction consistency. In: ACCV (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.385, + 0.785, + 0.411 + ], + "angle": 0, + "content": "28. Hao, Z., Mallya, A., Belongie, S., Liu, M.Y.: GANcraft: Unsupervised 3D Neural Rendering of Minecraft Worlds. In: ICCV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.412, + 0.785, + 0.452 + ], + "angle": 0, + "content": "29. He, J., Pham, K., Valliappan, N., Xu, P., Roberts, C., Lagun, D., Navalpakkam, V.: On-device few-shot personalization for real-time gaze estimation. In: ICCV Workshops (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.453, + 0.785, + 0.479 + ], + "angle": 0, + "content": "30. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.481, + 0.785, + 0.508 + ], + "angle": 0, + "content": "31. 
Iqbal, U., Molchanov, P., Kautz, J.: Weakly-supervised 3d human pose learning via multi-view images in the wild. In: CVPR (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.509, + 0.785, + 0.549 + ], + "angle": 0, + "content": "32. Joo, H., Liu, H., Tan, L., Gui, L., Nabbe, B., Matthews, I., Kanade, T., Nobuhara, S., Sheikh, Y.: Panoptic studio: A massively multiview system for social motion capture. In: ICCV (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.55, + 0.785, + 0.577 + ], + "angle": 0, + "content": "33. Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: CVPR (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.578, + 0.785, + 0.604 + ], + "angle": 0, + "content": "34. Kasahara, I., Stent, S., Park, H.S.: Look both ways: Self-supervising driver gaze estimation and road scene saliency. In: ECCV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.606, + 0.785, + 0.633 + ], + "angle": 0, + "content": "35. Kellnhofer, P., Recasens, A., Stent, S., Matusik, W.,, Torralba, A.: Gaze360: Physically unconstrained gaze estimation in the wild. In: ICCV (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.634, + 0.785, + 0.66 + ], + "angle": 0, + "content": "36. Kim, H., Garrido, P., Tewari, A., Xu, W., Thies, J., Nießner, M., Pérez, P., Richardt, C., Zolloffer, M., Theobalt, C.: Deep video portraits. TOG (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.661, + 0.785, + 0.688 + ], + "angle": 0, + "content": "37. Kleinke, C.L.: Gaze and eye contact: a research review. Psychological bulletin (1986)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.689, + 0.785, + 0.729 + ], + "angle": 0, + "content": "38. Koestinger, M., Wohlhart, P., Roth, P.M., Bischof, H.: Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization. In: ICCVW (2011)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.73, + 0.785, + 0.757 + ], + "angle": 0, + "content": "39. Konrad, R., Angelopoulos, A., Wetzstein, G.: Gaze-contingent ocular parallax rendering for virtual reality. In: TOG (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.758, + 0.785, + 0.785 + ], + "angle": 0, + "content": "40. Kothari, R., De Mello, S., Iqbal, U., Byeon, W., Park, S., Kautz, J.: Weakly-supervised physically unconstrained gaze estimation. In: CVPR (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.786, + 0.785, + 0.812 + ], + "angle": 0, + "content": "41. Koujan, M.R., Doukas, M.C., Roussos, A., Zafeiriou, S.: Head2head: Video-based neural head synthesis. In: FG (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.813, + 0.785, + 0.84 + ], + "angle": 0, + "content": "42. Krafka, K., Khosla, A., Kellnhofer, P., Kannan, H., Bhandarkar, S., Matusik, W., Torralba, A.: Eye tracking for everyone. In: CVPR (2016)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.653, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DGazeNet" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.175 + ], + "angle": 0, + "content": "43. Kulon, D., Guler, R.A., Kokkinos, I., Bronstein, M.M., Zafeiriou, S.: Weakly-supervised mesh-convolutional hand reconstruction in the wild. 
In: CVPR (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.175, + 0.788, + 0.203 + ], + "angle": 0, + "content": "44. Li, Y., Li, K., Jiang, S., Zhang, Z., Huang, C., Xu, R.Y.D.: Geometry-driven self-supervised method for 3d human pose estimation. In: AAAI (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.203, + 0.788, + 0.231 + ], + "angle": 0, + "content": "45. Liu, G., Yu, Y., Mora, K., Odobez, J.: A differential approach for gaze estimation with calibration. In: BMVC (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.231, + 0.788, + 0.259 + ], + "angle": 0, + "content": "46. Liu, G., Yu, Y., Mora, K.A.F., Odobez, J.M.: A differential approach for gaze estimation with calibration. In: BMVC (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.259, + 0.788, + 0.286 + ], + "angle": 0, + "content": "47. Liu, Y., Liu, R., Wang, H., Lu, F.: Generalizing gaze estimation with outlier-guided collaborative adaptation. In: ICCV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.286, + 0.788, + 0.314 + ], + "angle": 0, + "content": "48. Marín-Jiménez, M.J., Kalogeiton, V., Medina-Suárez, P., , Zisserman, A.: LAEO-Net++: revisiting people Looking At Each Other in videos. TPAMI (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.314, + 0.788, + 0.342 + ], + "angle": 0, + "content": "49. Marin-Jimenez, M.J., Kalogeiton, V., Medina-Suarez, P., Zisserman, A.: Laeo-net: Revisiting people looking at each other in videos. In: CVPR (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.342, + 0.788, + 0.37 + ], + "angle": 0, + "content": "50. Mavely, A.G., Judith, J.E., Sahal, P.A., Kuruvilla, S.A.: Eye gaze tracking based driver monitoring system. In: ICCS (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.37, + 0.788, + 0.398 + ], + "angle": 0, + "content": "51. O Oh, J., Chang, H.J., Choi, S.I.: Self-attention with convolution and deconvolution for efficient eye gaze estimation from a full face image. In: CVPRW (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.398, + 0.788, + 0.424 + ], + "angle": 0, + "content": "52. Park, S., Aksan, E., Zhang, X., Hilliges, O.: Towards end-to-end video-based eyetracking. In: ECCV (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.424, + 0.788, + 0.452 + ], + "angle": 0, + "content": "53. Park, S., Mello, S.D., Molchanov, P., Iqbal, U., Hilliges, O., Kautz, J.: Few-shot adaptive gaze estimation. In: ICCV (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.452, + 0.788, + 0.467 + ], + "angle": 0, + "content": "54. Park, S., Spurr, A., Hilliges, O.: Deep pictorial gaze estimation. In: ECCV (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.467, + 0.788, + 0.494 + ], + "angle": 0, + "content": "55. Park, S., Zhang, X., Bulling, A., Hilliges, O.: Learning to find eye region landmarks for remote gaze estimation in unconstrained settings. In: ACM ETRA (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.494, + 0.788, + 0.522 + ], + "angle": 0, + "content": "56. Park, T., Liu, M.Y., Wang, T.C., Zhu, J.Y.: Semantic image synthesis with spatially-adaptive normalization. In: CVPR (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.522, + 0.788, + 0.55 + ], + "angle": 0, + "content": "57. Richard, A., Lea, C., Ma, S., Gall, J., de la Torre, F., Sheikh, Y.: Audio- and gaze-driven facial animation of codec avatars. 
In: WACV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.55, + 0.788, + 0.577 + ], + "angle": 0, + "content": "58. Riza Alp Guler, Natalia Neverova, I.K.: Densesepose: Dense human pose estimation in the wild. In: CVPR (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.577, + 0.788, + 0.605 + ], + "angle": 0, + "content": "59. Smith, B., Yin, Q., Feiner, S., Nayar, S.: Gaze Locking: Passive Eye Contact Detection for Human? Object Interaction. In: ACM UIST (2013)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.605, + 0.788, + 0.633 + ], + "angle": 0, + "content": "60. Sugano, Y., Matsushita, Y., Sato, Y.: Learning-by-synthesis for appearance-based 3d gaze estimation. In: CVPR (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.633, + 0.788, + 0.673 + ], + "angle": 0, + "content": "61. Sun, J., Wang, X., Shi, Y., Wang, L., Wang, J., Liu, Y.: Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. ACM TOG 41(6), 1-10 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.673, + 0.788, + 0.702 + ], + "angle": 0, + "content": "62. Sun, J., Wang, X., Wang, L., Li, X., Zhang, Y., Zhang, H., Liu, Y.: Next3d: Generative neural texture rasterization for 3d-aware head avatars. In: CVPR (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.702, + 0.788, + 0.73 + ], + "angle": 0, + "content": "63. Sun, Y., Zeng, J., Shan, S., Chen, X.: Cross-encoder for unsupervised gaze representation learning. In: ICCV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.73, + 0.788, + 0.757 + ], + "angle": 0, + "content": "64. Vidal, M., Turner, J., Bulling, A., Gellersen, H.: Wearable eye tracking for mental health monitoring. Computer Communications (2012)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.757, + 0.788, + 0.785 + ], + "angle": 0, + "content": "65. Wandt, B., Rudolph, M., Zell, P., Rhodin, H., Rosenhahn, B.: Canonpose: Self-supervised monocular 3d human pose estimation in the wild. In: CVPR (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.785, + 0.788, + 0.813 + ], + "angle": 0, + "content": "66. Wang, K., Ji, Q.: Real time eye gaze tracking with 3d deformable eye-face model. In: ICCV (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.813, + 0.788, + 0.84 + ], + "angle": 0, + "content": "67. Wang, K., Zhao, R., Ji, Q.: A hierarchical generative model for eye image synthesis and eye gaze estimation. In: CVPR (2018)" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "18" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.127 + ], + "angle": 0, + "content": "E. Ververas et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.148, + 0.785, + 0.175 + ], + "angle": 0, + "content": "68. Wang, K., Zhao, R., Su, H., Ji, Q.: Generalizing eye tracking with bayesian adversarial learning. In: CVPR (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.176, + 0.785, + 0.203 + ], + "angle": 0, + "content": "69. Wang, T.C., Liu, M.Y., Tao, A., Liu, G., Kautz, J., Catanzaro, B.: Few-shot video-to-video synthesis. In: NeurIPS (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.204, + 0.785, + 0.231 + ], + "angle": 0, + "content": "70. 
Wang, Y., Jiang, Y., Li, J., Ni, B., Dai, W., Li, C., Xiong, H., Li, T.: Contrastive regression for domain adaptation on gaze estimation. In: CVPR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.232, + 0.785, + 0.259 + ], + "angle": 0, + "content": "71. Wood, E., Baltrusaitis, T., Morency, L.P., Robinson, P., Bulling, A.: A 3d morphable eye region model for gaze estimation. In: ECCV (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.26, + 0.785, + 0.286 + ], + "angle": 0, + "content": "72. Yu, Y., Liu, G., Odobez, J.M.: Deep multitask gaze estimation with a constrained landmark-gaze model. In: ECCV Workshops (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.287, + 0.785, + 0.314 + ], + "angle": 0, + "content": "73. Yu, Y., Liu, G., Odobez, J.M.: Improving few-shot user-specific gaze adaptation via gaze redirection synthesis. In: CVPR (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.315, + 0.785, + 0.342 + ], + "angle": 0, + "content": "74. Yu, Y., Odobez, J.M.: Unsupervised representation learning for gaze estimation. In: CVPR (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.343, + 0.785, + 0.37 + ], + "angle": 0, + "content": "75. Zakharov, E., Shysheya, A., Burkov, E., Lempitsky, V.: Few-shot adversarial learning of realistic neural talking head models. ICCV (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.371, + 0.785, + 0.397 + ], + "angle": 0, + "content": "76. Zakharov, E., Ivakhnenko, A., Shysheya, A., Lempitsky, V.: Fast bi-layer neural synthesis of one-shot realistic head avatars. In: ECCV (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.398, + 0.785, + 0.438 + ], + "angle": 0, + "content": "77. Zhang, J., Chen, J., Tang, H., Wang, W., Yan, Y., Sangineto, E., Sebe, N.: Dual in-painting model for unsupervised gaze correction and animation in the wild. In: ACM MM (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.439, + 0.785, + 0.466 + ], + "angle": 0, + "content": "78. Zhang, M., Liu, Y., Lu, F.: Gazeonce: Real-time multi-person gaze estimation. In: CVPR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.467, + 0.785, + 0.508 + ], + "angle": 0, + "content": "79. Zhang, X., Park, S., Beeler, T., Bradley, D., Tang, S., Hilliges, O.: Eth-xgaze: A large scale dataset for gaze estimation under extreme head pose and gaze variation. In: ECCV (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.509, + 0.785, + 0.536 + ], + "angle": 0, + "content": "80. Zhang, X., Sugano, Y., Bulling, A.: Revisiting data normalization for appearance-based gaze estimation. In: ACM ETRA (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.537, + 0.785, + 0.563 + ], + "angle": 0, + "content": "81. Zhang, X., Sugano, Y., Fritz, M., Bulling, A.: Appearance-based gaze estimation in the wild. In: CVPR (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.565, + 0.785, + 0.591 + ], + "angle": 0, + "content": "82. Zhang, X., Sugano, Y., Fritz, M., Bulling, A.: It's written all over your face: Fullface appearance-based gaze estimation. In: CVPRW (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.592, + 0.785, + 0.619 + ], + "angle": 0, + "content": "83. Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: ICCV (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.62, + 0.785, + 0.647 + ], + "angle": 0, + "content": "84. 
Zhu, X., Liu, Y., Li, J., Wan, T., Qin, Z.: Emotion classification with data augmentation using generative adversarial networks. In: PAKDD (2018)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.148, + 0.785, + 0.647 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/7d049317-38f5-44ac-a691-e07c992f4970_origin.pdf b/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/7d049317-38f5-44ac-a691-e07c992f4970_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7b3c70dd108172b94501112444217a9755153778 --- /dev/null +++ b/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/7d049317-38f5-44ac-a691-e07c992f4970_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:125df9fbd5ba717180743c6adc719e0e10e07aba8c19f9edc06c3c3d301eddaf +size 6548885 diff --git a/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/full.md b/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/full.md new file mode 100644 index 0000000000000000000000000000000000000000..3eaf6b55efd744ebd7202338c6e82c7453f13e57 --- /dev/null +++ b/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/full.md @@ -0,0 +1,316 @@ +# 3DGazeNet: Generalizing 3D Gaze Estimation with Weak-Supervision from Synthetic Views + +Evangelos Ververas1,2, Polydefkis Gkagkos2, Jiankang Deng1,2, Michail Christos Doukas1, Jia Guo3, and Stefanos Zafeiriou1 + +1 Imperial College London, UK + +$^{2}$ Huawei Noah's Ark Lab, UK + +3 InsightFace + +https://eververas.github.io/3DGazeNet/ + +Abstract. Developing gaze estimation models that generalize well to unseen domains and in-the-wild conditions remains a challenge with no known best solution. This is mostly due to the difficulty of acquiring ground truth data that cover the distribution of faces, head poses, and environments that exist in the real world. Most recent methods attempt to close the gap between specific source and target domains using domain adaptation. In this work, we propose to train general gaze estimation models which can be directly employed in novel environments without adaptation. To do so, we leverage the observation that head, body, and hand pose estimation benefit from revising them as dense 3D coordinate prediction, and similarly express gaze estimation as regression of dense 3D eye meshes. To close the gap between image domains, we create a large-scale dataset of diverse faces with gaze pseudo-annotations, which we extract based on the 3D geometry of the face, and design a multi-view supervision framework to balance their effect during training. We test our method in the task of gaze generalization, in which we demonstrate improvement of up to $23\%$ compared to state-of-the-art when no ground truth data are available, and up to $10\%$ when they are. + +Keywords: 3D Gaze Estimation $\cdot$ 3D Eye Mesh $\cdot$ Gaze Generalization + +# 1 Introduction + +Eye gaze serves as a cue for understanding human behavior and intents, including attention, communication, and mental state. 
As a result, gaze information has been exploited by many applications across various fields of interest, ranging from medical and psychological analysis [9,37,64] to human-computer interaction [4], efficient rendering in VR/AR headset systems [6,10,39], virtual character animation [57,61,62,77] and driver state monitoring [34,50]. When high accuracy is essential, collecting data under the particular capturing setup is crucial, e.g. specific VR headsets or static screen-camera setups. However, in numerous real-world applications robustness is just as important as high accuracy, e.g. face unlocking on mobile devices, best-frame capturing/selection in group photos and automatic gaze annotation of large datasets for face reenactment. + +![](images/8461aabaf554595059207969dbc8a35fa58eb3982c46f8faf331904eb2e1199c.jpg) +Fig. 1: Overview of our method 3DGazeNet. a) We approach 3D gaze estimation as dense 3D eye mesh regression, which is robust against sparse prediction errors. b) Domain generalization is one of the hardest challenges in gaze estimation. Training with common gaze datasets often results in poor cross-dataset performance. c) Our multi-view supervision method employs pseudo-labels from in-the-wild face images to close the gap between controlled and in-the-wild datasets. + +Typically, 3D gaze estimation is expressed as a direct mapping between input images and a few pose parameters [12, 42, 52, 70, 82], or sparse representations of the eyes [54, 55, 66]. Nevertheless, it has been shown that unconstrained face and body pose estimation from single images benefits from replacing the prediction of a few pose or shape parameters with the direct prediction of dense 3D geometry [3, 16, 26, 43, 58]. In this work, we leverage this observation and reformulate gaze estimation as end-to-end dense 3D eye mesh regression, which, combined with standard vector regression, brings multiple benefits. Existing datasets with ground-truth 3D eyes include only images in the IR domain [21]; however, IR images cannot be directly employed for RGB-based methods. As 3D eye meshes are not available for most gaze datasets, we define a unified eye representation, i.e. a rigid 3D eyeball template (Fig. 3(a)), which we fit to images based on sparse landmarks and the available gaze labels. + +Several gaze datasets have become available in the last decade [20, 22, 35, 42, 52, 59, 60, 79, 81], which have contributed to the recent progress in automatic 3D gaze estimation from monocular RGB images. However, collecting gaze datasets is a costly and challenging process, which often restricts them to controlled capture environments and a limited number of unique identities, and thus to less variation than data from the real world. This causes the most common challenge in gaze estimation, namely cross-domain and in-the-wild generalization. In this work, we propose a method to exploit arbitrary, unlabeled face images to largely increase the diversity of our training data as well as our model's generalization capabilities. To that end, we design a simple pipeline to extract robust 3D gaze pseudo-labels based on the 3D shape of the face and eyes, without any prior gaze information. Based on recent advancements in weakly-supervised head, body and hand pose estimation [8, 17, 31, 44, 65], we regularize inconsistencies of the pseudo-labels with a geometric constraint which encourages our model to maintain prediction consistency between multiple synthetic views of the same subject.
+ +Most recent methods attempt to close the gap between diverse image domains using domain adaptation. Commonly, they employ a few samples of the target domain, with [29, 53, 73] or without [5, 7, 11, 24, 27, 47, 68, 70] their gaze labels, to fine-tune an initial model. Although successful, approaches following this scheme require knowledge of the target domain and model re-training, which prohibit their use as plug-n-play methods in real user applications. In contrast, we propose a method to train gaze estimation models that generalize well to unseen and inthe-wild environments without the constraints of domain adaption. Our method can effortlessly be employed by user applications in a plug-n-play fashion. + +An overview of our approach, which we name 3DGazeNet, is presented in Fig. 1. We evaluate our method in cross-dataset gaze generalization, showcasing improvements over the state-of-the-art, even by a large margin, and perform ablations over the model components. To summarize, the key contributions of our work are: + +- A simple automatic method to extract robust 3D eye meshes from arbitrary face images and a multi-view consistency regularization which allows to exploit them for improved gaze generalization. +- A revised formulation for gaze estimation, based on dense 3D eye mesh regression from images. To the best of our knowledge, we are the first to utilize an end-to-end 3D eye mesh regression approach for gaze estimation. +- Improved performance over the state-of-the-art in gaze generalization with $(10\%)$ and without $(23\%)$ using source domain ground truth, with a simple model architecture. Based on that, we believe that 3DGazeNet is an important step towards reliable plug-n-play gaze tracking. + +# 2 Related Work + +Numerous model designs for supervised 3D gaze estimation have been tested recently, investigating which face region to use as input [12,42,82], the model architecture [1,14,46,67] and what external stimuli to utilize to improve performance [52]. Motivated by the difficulties in collecting diverse and large scale data for gaze estimation, recent works have shown that valuable gaze representations can be extracted in fully unsupervised settings, by applying gaze redirection [74] or disentanglement constraints [63]. + +Gaze Adaptation and Generalization Much effort has been made to design methods that adapt well to known target subjects and environments, by employing either few labeled samples [29, 53, 73] or completely unlabeled data of the target domain [5, 7, 11, 24, 27, 47, 68, 70]. Differently from the above, gaze generalization models aim to improve cross-domain performance without any knowledge of the target domains. The models in [5, 11, 70], even though targeted for gaze adaptation, are based on learning general features for gaze estimation and thus, they perform well in target domain-agnostic settings. Moreover, [40] has shown + +that it is possible to train general gaze estimation models by employing geometric constraints in scenes depicting social interaction between people. We believe that [40] is the closest work to ours, as it is the only method which uses 3D geometric cues of the scene to learn gaze from arbitrary face data. Lastly, [78] proposes to improve generalization by employing synthetic images which are, however, limited by the gaze distribution of existing gaze datasets. Both the implementation and custom dataset are not public, which hinders reproducibility and reliable comparisons. 
+ +Model-Based Gaze Estimation Differently from the above, sparse or semantic representations of the eye geometry have also been employed by some methods to infer gaze from images [54, 55, 66, 67, 71, 72]. However, such representations do not convey information about the 3D substance of eyes and are prone to noisy predictions. In contrast, by predicting 3D eye meshes we learn a much more robust representation, from which we can retrieve any other sparse or semantic one just by indexing. Recovering dense 3D geometry of the eye region from images by fitting parametric models of the shape and texture has been previously proposed [71]. However, restrictions posed by building large-scale parametric models and fitting in-the-wild images have resulted in low gaze accuracy compared to learning-based methods. + +Face Reenactment and Learning from Synthetic Data Synthetic image data have been previously used in training deep networks, mainly to augment the training datasets and provide pseudo-ground truth annotations. For instance, [84] used CycleGAN [83] to create a new training corpus in order to balance emotion classes in the task of emotion classification. More recently, GANcraft [28] employed SPADE [56] to generate pseudo-ground truth images that were used to supervise their neural rendering framework. In this work, we obtain access to image pairs of the same subject in different views, by taking advantage of HeadGAN [19], a face reenactment system. In contrast to person-specific reenactment methods [18, 36, 41] or person-generic landmark-driven approaches [69, 75, 76], HeadGAN is able to perform free-view synthesis using a single source image. + +![](images/54d377a55c6adaa4a090f6627c49621dd66d5167505f0a92fdf56187d7a886b2.jpg) +Fig. 2: We use HeadGAN [19] to generate novel views by manipulating the 3D pose of the face. During synthesis, angle $\theta_z$ is transferred to all facial parts including the eyes, thus the relative angle between the head and eyes (i.e. the gaze direction in the head coordinate system) is maintained. + +# 3 Method + +# 3.1 Problem Definition and Motivation + +The aim of this work is to design a method that given a face image $\mathbf{I}$ , it estimates $2\times N_{v}$ 3D coordinates $\mathbf{V} = [\mathbf{V}_l^T,\mathbf{V}_r^T ]^T$ , where $\mathbf{V}_l\in \mathbb{R}^{N_v\times 3}$ are coordinates corresponding to the left eyeball while $\mathbf{V}_r\in \mathbb{R}^{N_v\times 3}$ to the right, as well as a 3D gaze vector $g = (g_{x},g_{y},g_{z})$ . Then, the final gaze result is calculated by the mean direction of the two output components. Inspired by recent work in self-supervised 3D body pose estimation [31,44,65], we adopt multi-view constraints to train our model based on in-the-wild faces and automatically generated gaze pseudo-labels. + +To employ multi-view losses, we assume that images of the same subject with different head poses and the same gaze direction relatively to the head are available. For example, this condition is satisfied when a face picture is taken from different angles at the same time. As such images are not commonly available for in-the-wild datasets, we employ HeadGAN [19], a recent face reenactment method, to generate novel face poses from existing images. HeadGAN is able to synthesize face animations using dense face geometry, which covers the eyes, as a driving signal and single source images. 
Using dense geometry guarantees that the relative angle between the head and eyes is maintained when synthesizing novel poses, as it is shown in Fig. 2. + +# 3.2 Unified 3D Eye Representation + +Learning consistent eye meshes across different images and datasets, requires establishing a unified 3D eye representation. To that end, we define a 3D eyeball template as a rigid 3D triangular mesh with spherical shape, consisting of $N_{v} = 481$ vertices and $N_{t} = 928$ triangles. We create two mirrored versions, $\mathbf{M}_l$ and $\mathbf{M}_r$ , of the above mesh to represent a left and a right reference eyeball respectively. This representation allows us to allocate semantic labels to specific vertices of the eyeball, such as the iris border (Fig. 3 (a)), and calculate 3D gaze direction as the orientation of the central axis of our 3D eyeball template. In practice, an offset angle (the kappa coefficient) exists between the optical (central) and visual axes of eyes, which is subject-dependent and varies between $-2^{o}$ to $2^{o}$ across the population [73]. Accounting for this offset is essential for person-specific gaze estimation [29,45,53,73]. However, in our case of cross-dataset and in-the-wild gaze generalization, in which errors are much larger than the possible offset, data diversity is more important than anatomical precision and thus, our spherical eyeball is a reasonable approximation. + +3D Eyes Ground-Truth from Gaze Datasets For gaze estimation datasets, exact supervision can be acquired by automatically fitting the eyeball template on face images based on sparse iris landmarks and the available gaze labels, as shown in Fig. 3(b). Specifically, we first rotate the eyeball template around its center according to the gaze label. Then, we align (scale and translation) $x$ , + +![](images/9d7d58ea4fb7905809fe43185be6c3fca2fb763900fcf6120f1a4066415450e3.jpg) +M: $N_{v} = 481$ vertices, $N_{t} = 928$ triangles + +![](images/eb84fdff110a6cbad3c3dc547b0e17a22aa1a34d605f90f195d7f764d3dcb1a6.jpg) +(a) Eyeball template +(b) Ground truth generation +Fig. 3: (a) The employed rigid 3D eyeball mesh template. (b) Ground truth data generation, applied on gaze estimation datasets with available ground truth. (c) Pseudoground truth data generation, applied on arbitrary face images without any gaze label. + +![](images/d9aecc08e389f66d569df7b440a01df77fca7a3cbacefe69e706b885f19c9513.jpg) +(c) Pseudo-ground truth generation + +$y$ coordinates of the rotated eye mesh to the iris landmarks of the image and multiply $z$ coordinates with the same scale. To extract sparse iris landmarks we employed the method of [55] as a basis for building an iris localization model which is robust against occlusions and low resolution. More details about the iris localization model are provided in the supplemental material. + +3D Eyes Pseudo-Ground Truth from In-The-Wild Images To extract 3D eyes from images without gaze labels, we have developed an automatic pipeline based on 3D face alignment and 2D iris localization. First, we recover the 3D face with $x$ , $y$ in image space using an off-the-shelf method. Then, we align our eyeball templates in the eye sockets based on the face's eyelid landmarks and predefined eyelid landmarks around the eyeball templates. In fact, we use the two corner landmarks of each eye which do not move between open and closed eyes. Next, we lift 2D iris predictions to 3D by finding the nearest vertexes from the aligned 3D eye templates. 
Finally, we compute the rotation between the initially aligned eyes and the 3D-lifted iris center and rotate the eyeballs accordingly. For 3D face alignment we employ RetinaFace [16], and for 2D iris localization we use [55], as above. The process is presented in Fig. 3(c). + +# 3.3 Joint 3D Eye Mesh and Vector Regression + +Given an input face image $\mathbf{I}$ , we utilize 5 face detection landmarks to crop patches around each of the two eyes. We resize the patches to shape $128 \times 128 \times 3$ and stack them channel-wise along with a cropped image of the face. We employ a simple model architecture consisting of a ResNet-18 [30] to extract features, followed by two fully connected layers to map them to two separate eye modalities, which are a) dense 3D eye coordinates and b) a 3D gaze vector. As the final gaze output, we consider the mean direction calculated from the two modalities. + +To train the above network for mesh regression, similarly to [16], we enforce a vertex loss and an edge length loss between the model outputs and the respective ground truth or pseudo-ground truth, which can be expressed as: + +$$ +\mathcal{L}_{vert} = \frac{1}{N_v} \sum_{j=\{l,r\}} \sum_{i=1}^{N_v} \| \mathbf{V}_{j,i} - \mathbf{V}_{j,i}^{*} \|_{1}, \tag{1} +$$ + +where $\mathbf{V}_j\in \mathbb{R}^{N_v\times 3}$ and $\mathbf{V}_j^*\in \mathbb{R}^{N_v\times 3}$ for $j = \{l,r\}$ are the output and the (pseudo-)ground truth coordinates, while the edge length loss (based on the fixed mesh triangulation of our template meshes) can be written as: + +$$ +\mathcal{L}_{edge} = \frac{1}{3N_t} \sum_{j=\{l,r\}} \sum_{i=1}^{3N_t} \| \mathbf{E}_{j,i} - \mathbf{E}_{j,i}^{*} \|_{2}, \tag{2} +$$ + +where $\mathbf{E}_j\in \mathbb{R}^{3N_t}$ and $\mathbf{E}_j^*\in \mathbb{R}^{3N_t}$ for $j = \{l,r\}$ are the edge lengths of the predicted and the (pseudo-)ground truth eyes. As edge length we define the Euclidean distance between two vertices of the same triangle. In addition to the mesh regression losses, we enforce a gaze loss on the gaze output of our model, expressed as: + +$$ +\mathcal{L}_{gaze} = (180 / \pi) \arccos\left(\mathbf{g}^{T} \mathbf{g}^{*}\right), \tag{3} +$$ + +where $\mathbf{g}$ and $\mathbf{g}^*$ are the normalized model output and the gaze (pseudo-)ground truth respectively. We combine the losses of Eqs. (1) to (3) in a single loss function to train our models with supervision from (pseudo-)ground truth 3D eye meshes and gaze vectors. The combined loss is written as: + +$$ +\mathcal{L}_{(P)GT} = \lambda_{v} \mathcal{L}_{vert} + \lambda_{e} \mathcal{L}_{edge} + \lambda_{g} \mathcal{L}_{gaze}, \tag{4} +$$ + +where $\lambda_v$ , $\lambda_e$ and $\lambda_g$ are parameters which regulate the contribution of the loss terms in the overall loss. From our experiments we have selected their values to be $\lambda_v = 0.1$ , $\lambda_e = 0.01$ and $\lambda_g = 1$ .
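For concreteness, the single-image supervision of Eqs. (1) to (4) can be sketched in a few lines of PyTorch. This is a minimal illustration only, assuming vertex tensors of shape (B, 2, N_v, 3) for the left and right eyes and the fixed template triangulation; the function and tensor names are ours and do not correspond to a released implementation.

```python
# Minimal sketch of the single-image supervision terms, Eqs. (1)-(4).
# verts: (B, 2, N_v, 3) for left/right eye meshes, gaze: (B, 3),
# tris: (N_t, 3) long tensor with the fixed template triangulation.
import math
import torch

def vertex_loss(v_pred, v_true):
    # Eq. (1): mean per-vertex L1 distance over both eyes.
    return (v_pred - v_true).abs().sum(dim=-1).mean()

def edge_loss(v_pred, v_true, tris):
    # Eq. (2): mean absolute difference of the 3*N_t triangle edge lengths.
    i, j, k = tris[:, 0], tris[:, 1], tris[:, 2]
    def edges(v):
        return torch.cat([(v[..., i, :] - v[..., j, :]).norm(dim=-1),
                          (v[..., j, :] - v[..., k, :]).norm(dim=-1),
                          (v[..., k, :] - v[..., i, :]).norm(dim=-1)], dim=-1)
    return (edges(v_pred) - edges(v_true)).abs().mean()

def gaze_loss(g_pred, g_true, eps=1e-7):
    # Eq. (3): angular error in degrees between unit gaze vectors.
    g_pred = torch.nn.functional.normalize(g_pred, dim=-1)
    g_true = torch.nn.functional.normalize(g_true, dim=-1)
    cos = (g_pred * g_true).sum(dim=-1).clamp(-1 + eps, 1 - eps)
    return (180.0 / math.pi) * torch.acos(cos).mean()

def supervised_loss(v_pred, g_pred, v_true, g_true, tris,
                    lam_v=0.1, lam_e=0.01, lam_g=1.0):
    # Eq. (4) with the weights quoted in the text.
    return (lam_v * vertex_loss(v_pred, v_true)
            + lam_e * edge_loss(v_pred, v_true, tris)
            + lam_g * gaze_loss(g_pred, g_true))
```

Note that the edge term only compares lengths, so it constrains the local shape of the predicted meshes without directly penalizing their global placement, which is handled by the vertex term.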
# 3.4 Multi-View Consistency Supervision + +Extending our training dataset with in-the-wild images and training with pseudo-ground truth usually improves the ability of our models to generalize to unseen domains, as can be seen from our experiments in Sec. 4.3. However, automatically generated 3D eyes and gaze include inconsistencies which are hard to identify and filter out. To balance the feedback of direct supervision from pseudo-ground truth, we design a multi-view supervision framework based on pairs of real and synthetic images with different head poses, generated by HeadGAN as described in Sec. 3.1. + +![](images/b9fc82b93804cf19ce076b1926b7e8f68517b8e3dcd15a8d209a06a59321fc04.jpg) +Fig. 4: Overview of the proposed method 3DGazeNet. a) During training we employ single images with ground-truth supervision or pairs of synthetic views of the same subject with pseudo-annotations and different head poses. Different sets of losses are employed depending on the type of supervision. b) Detailed demonstration of $\mathcal{L}_{MV}$ . The 3D transformation $\mathbf{P}$ , which maps view 1 to view 2, is employed to transform points $\mathbf{V}_{l,1}$ and $\mathbf{V}_{r,1}$ before calculating an L1 distance loss against $\mathbf{V}_{l,2}$ and $\mathbf{V}_{r,2}$ . c) The base network (3DEyeNet) of our model consists of a ResNet-18 backbone and two fully connected layers leading to the 3D eye mesh and gaze vector outputs. + +Recovering dense 3D face coordinates and pose from images has recently become quite reliable [2,16,23]. Having a pair of images $\mathbf{I}_1$ and $\mathbf{I}_2$ of the same subject and their reconstructed 3D faces, we can compute a transformation matrix $\mathbf{P} \in \mathbb{R}^{3 \times 4}$ which aligns the two faces in image space. Assuming that the gaze direction in both images remains fixed relative to the face, as is the case with images created by HeadGAN, we are able to supervise the 3D regression of eyes by restricting our model's predictions to be consistent over an image pair, as the output vertices should coincide when transformation $\mathbf{P}$ is applied to one of the pair's outputs. A similar approach has been employed successfully for weakly-supervised body pose estimation [31,44,65]. Particularly, we form the vertex loss of a pair as: + +$$ +\mathcal{L}_{MV,vertex} = \frac{1}{N_v} \sum_{j=\{l,r\}} \sum_{i=1}^{N_v} \| \mathbf{V}_{1,j,i} \mathbf{P}^{T} - \mathbf{V}_{2,j,i} \|_{1}, \tag{5} +$$ + +where $\mathbf{V}_{1,j},\mathbf{V}_{2,j}\in \mathbb{R}^{N_v\times 4}$ for $j = \{l,r\}$ are the output matrices for the left and right eyes, corresponding to input images $\mathbf{I}_1$ and $\mathbf{I}_2$ , and $\mathbf{V}_{1,j,i},\mathbf{V}_{2,j,i}\in \mathbb{R}^4$ are the homogeneous 3D coordinates indexed by $i$ in the above matrices. To enforce consistency constraints on the gaze head of our model, we decompose matrix $\mathbf{P}$ into scale $s$ , rotation $\mathbf{R}$ and translation $\mathbf{t}$ components and employ $\mathbf{R}$ in a gaze consistency loss within a pair: + +$$ +\mathcal{L}_{MV,gaze} = (180 / \pi) \arccos\left(\left(\mathbf{g}_{1}^{T} \mathbf{R}^{T}\right) \mathbf{g}_{2}\right), \tag{6} +$$ + +where $\mathbf{g}_1$ and $\mathbf{g}_2$ are the normalized model outputs for input images $\mathbf{I}_1$ and $\mathbf{I}_2$ respectively. We combine the losses of Eqs. (5) and (6) in a single loss function to enforce multi-view consistency in mesh and gaze vector regression between model outputs coming from pairs of input images. This loss is written as: + +$$ +\mathcal{L}_{MV} = \lambda_{MV,v} \mathcal{L}_{MV,vertex} + \lambda_{MV,g} \mathcal{L}_{MV,gaze}, \tag{7} +$$ + +where $\lambda_{MV,v}$ and $\lambda_{MV,g}$ are parameters which regulate the contribution of the loss terms in the overall loss.
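The multi-view terms of Eqs. (5) to (7) can be sketched in the same style. Here $\mathbf{P}$ is assumed to be given as a per-pair (3, 4) matrix estimated from the two reconstructed 3D faces, and $\mathbf{R}$ as its (3, 3) rotation component; all names are illustrative assumptions rather than released code.

```python
# Minimal sketch of the multi-view consistency terms, Eqs. (5)-(7).
import math
import torch

def transform_points(v, P):
    # Apply the (3, 4) transform to (..., N_v, 3) points in homogeneous form.
    v_h = torch.cat([v, torch.ones_like(v[..., :1])], dim=-1)   # (..., N_v, 4)
    return v_h @ P.transpose(-1, -2)                            # (..., N_v, 3)

def mv_vertex_loss(v1, v2, P):
    # Eq. (5): L1 distance between transformed view-1 and view-2 vertices.
    return (transform_points(v1, P) - v2).abs().mean()

def mv_gaze_loss(g1, g2, R, eps=1e-7):
    # Eq. (6): angle (degrees) between rotated view-1 gaze and view-2 gaze.
    g1r = torch.nn.functional.normalize(g1 @ R.transpose(-1, -2), dim=-1)
    g2 = torch.nn.functional.normalize(g2, dim=-1)
    cos = (g1r * g2).sum(dim=-1).clamp(-1 + eps, 1 - eps)
    return (180.0 / math.pi) * torch.acos(cos).mean()

def mv_loss(v1, g1, v2, g2, P, R, lam_v=0.1, lam_g=1.0):
    # Eq. (7) with the weights given in the text.
    return lam_v * mv_vertex_loss(v1, v2, P) + lam_g * mv_gaze_loss(g1, g2, R)
```

Because the transformed view-1 vertices are compared densely against view-2, any single badly predicted vertex has limited influence on the total, which is the robustness argument for dense regression revisited in Sec. 4.3.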
In our experiments, we have selected their values to be $\lambda_{MV,v} = 0.1$ and $\lambda_{MV,g} = 1$ . To train models with all supervision signals, i.e. ground truth $(\mathcal{L}_{GT})$ , pseudo-ground truth $(\mathcal{L}_{PGT})$ and multi-view supervision $(\mathcal{L}_{MV})$ , we utilize the following overall loss function: + +$$ +\mathcal {L} = \lambda_ {G T} \mathcal {L} _ {G T} + \lambda_ {P G T} \mathcal {L} _ {P G T} + \lambda_ {M V} \mathcal {L} _ {M V}, \tag {8} +$$ + +with parameters $\lambda_{GT} = \lambda_{PGT} = \lambda_{MV} = 1$ . Implementation details are included in the supplemental material. An overview of 3DGazeNet is presented in Fig. 4. + +# 4 Experiments + +# 4.1 Datasets + +Gaze Datasets Captured in a lab environment, ETH-XGaze (EXG) [79] consists of 756K frames of 80 subjects and includes large head pose and gaze variation. Collected in uncontrolled indoor environments with mobile devices, MPI-IFaceGaze (MPII) [81] includes smaller head pose and gaze variation and consists of 45K images of 15 subjects, while GazeCapture (GC) [42] contains almost 2M frontal face images of 1474 subjects. In contrast to the above datasets, Gaze360 (G360) [35] is the only gaze dataset captured both indoors and outdoors and consists of 127K training sequences from 365 subjects. The large variation in head pose, gaze, and environmental conditions of Gaze360 makes it the most challenging yet appropriate benchmark for in-the-wild gaze estimation, available in literature. For our experiments, we normalized the above datasets based on [80], except for Gaze360 which we process to get normalized face crops. Additionally, we employ the predefined training-test splits, while for Gaze360 we only use the frontal facing images with head pose yaw angle up to $90^{\circ}$ . The head pose and gaze distributions of the above datasets are presented in Fig. 5. + +In-The-Wild Face Datasets In-the-wild face datasets consist of significantly more unique subjects and capturing environments. For our experiments, we employed four publicly-available datasets FFHQ [33] (70K images), AFLW [38] (25K images), AVA [25,48,49] and CMU-Panoptic [32]. FFHQ and AFLW are in-the-wild face datasets commonly used for face analysis, AVA is a large-scale in-the-wild human activity dataset annotated under the Looking-At-Each-Other condition and CMU-Panoptic is collected in lab conditions and captures interactions of multiple people in the same scene. FFHQ and AFLW include one face per image and thus are only processed to get normalized face crops. AVA and CMU-Panoptic include frames with multiple faces, from which we randomly select 80K faces from each dataset with a maximum head pose of $90^o$ . Similarly to [40], for CMU we employed only frames captured with cameras in eye height. We name this collection of 255K images as the "In-The-Wild Gaze" dataset (ITWG). Lastly, to enforce multi-view supervision as described in Sec. 3.4, we synthesized + +![](images/c6c73a25e4b5a9ae40439184f52c5dd9b3d75210945144df0a4045cf7e0139df.jpg) +Fig. 5: Distributions of the head pose (top row) and gaze (bottom row) of the gaze datasets (red) and the face datasets (blue). Wide distribution datasets CMU, AVA, FFHQ, and AFLW are exploited to close the gap between diverse image domains. + +![](images/897e2f78b2617c6a38b20bff068c56f4ef5fce8c4e766aeff5be652f83433fcc.jpg) + +novel views from images of ITWG using HeadGAN, sampling the pitch and yaw angles from Gaussians $\mathcal{N}(0,20)$ , relatively to the original head pose. 
We name this collection of images as "Multi-View In-The-Wild Gaze" dataset (ITWG-MV) and employ it to improve the generalization of gaze estimation. The head pose and gaze distributions of the above datasets are presented in Fig. 5. + +# 4.2 Gaze Generalization + +In this section, we evaluate 3DGazeNet in within-dataset and cross-dataset experiments. We believe that [40] is the most closely related method to ours, as it is the only method using 3D geometric cues of the scene to generalize gaze from arbitrary face data. + +Cross-dataset Evaluation We design two cross-dataset experiments to test the generalization of our method on G360 and report the results on Tab. 1(a) and (b). Particularly, the experiments are: a) we train our method on the CMU, AVA, and ITWG-MV datasets utilizing only our pseudo-labels and multi-view supervision and b) we additionally employ ground truth supervision from GC and EXG. From the results of the above experiments, it becomes obvious that our geometry-aware pseudo-labels employed within our multi-view supervision training effectively generalize gaze estimation to unseen domains, even without any available ground truth. In particular, in experiment a) our method outperforms [40] by $23\%$ with AVA, $22\%$ with CMU, $12.5\%$ with $\mathrm{AVA + CMU}$ and $20\%$ with our large-scale ITWG-MV. Similarly, in experiment b) 3DGazeNet outperforms [40] by $10\%$ and $9\%$ with GC and EXG respectively. + +Within-dataset Evaluation Here we compare our method against state-of-the-art within-dataset gaze estimation on G360. Similarly to [40], we employ AVA for additional supervision, while we also examine the effect of the larger-scale ITWG-MV. The results, presented in Tab. 1 (c), show that multi-view supervision from AVA does not improve performance (which is in line with the compared method), but the large-scale ITWG-MV does. + +Comparison with state-of-the-art We further compare 3DGazeNet against recent methods for gaze generalization. The works in [5,70] are developed with a + +Table 1: Weakly-supervised method evaluation in cross- and within-dataset experiments. In all cases, we calculate gaze error in degrees (lower is better), on the test set of Gaze360. CMU and AVA correspond to subsets of ITWG-MV (i.e. augmented for multi-view supervision), providing a clearer comparison with [40]. Our method trained with ITWG-MV outperforms the baselines in all cases. 3DGN refers to 3DGazeNet + +
(a) Cross-dataset, Synthetic Views

| Dataset | [40] | 3DGN |
|---|---|---|
| AVA | 29.0 | 22.4 |
| CMU | 26.0 | 20.3 |
| CMU+AVA | 22.5 | 19.7 |
| ITWG-MV | - | 18.1 |

(b) Cross-dataset, Ground Truth + Synthetic Views

| Dataset | [79] | [40] | 3DGN |
|---|---|---|---|
| GC | 30.2 | 29.2 | 27.5 |
| GC+AVA | - | 19.5 | 18.9 |
| GC+AVA+CMU | - | - | 18.4 |
| GC+ITWG-MV | - | - | 17.6 |

(c) Within-dataset, Ground Truth + Synthetic Views

| Dataset | [79] | [40] | 3DGN |
|---|---|---|---|
| EXG | 27.3 | 20.5 | 22.1 |
| EXG+AVA | - | 16.9 | 17.1 |
| EXG+AVA+CMU | - | - | 16.7 |
| EXG+ITWG-MV | - | - | 15.4 |
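All entries in Tab. 1, and in the tables that follow, are mean angular errors in degrees between predicted and ground-truth 3D gaze directions. A minimal reference implementation of this metric, assuming (N, 3) arrays of gaze vectors, is sketched below; the function name is ours.

```python
# Mean angular error in degrees between predicted and ground-truth gaze vectors.
import numpy as np

def mean_angular_error_deg(pred, gt):
    # pred, gt: (N, 3) arrays of 3D gaze vectors (any non-zero length).
    pred = pred / np.linalg.norm(pred, axis=1, keepdims=True)
    gt = gt / np.linalg.norm(gt, axis=1, keepdims=True)
    cos = np.clip(np.sum(pred * gt, axis=1), -1.0, 1.0)
    return float(np.degrees(np.arccos(cos)).mean())
```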
+ +Table 2: Comparison with state-of-the-art in domain generalization for gaze estimation. In all experiments our model outperforms the compared methods. Gaze error is in degrees (lower is better). + +
Stage 1 (Gaze Generalization Models), columns are train set → test set:

| Method | EXG → MPII | EXG → GC | EXG+ITWG-MV → MPII | EXG+ITWG-MV → GC | G360 → MPII | G360 → GC | G360+ITWG-MV → MPII | G360+ITWG-MV → GC |
|---|---|---|---|---|---|---|---|---|
| RAT/RUDA [5] | 7.1 | 8.4 | 7.0 | 8.2 | 9.3 | 9.0 | 9.1 | 8.5 |
| CDG/CRGA [70] | 6.7 | 9.2 | 6.9 | 9.5 | 7.0 | 8.3 | 8.1 | 8.9 |
| PureGaze [11] | 7.9 | 8.7 | 7.7 | 9.3 | 7.6 | 8.3 | 7.4 | 8.6 |
| 3DGazeNet | 7.7 | 10.7 | 6.0 | 7.8 | 9.1 | 12.1 | 6.3 | 8.0 |

+ Stage 2 (Adaptation/Fine Tuning):

| Method | EXG+ITWG-MV → MPII | EXG+ITWG-MV → GC | G360+ITWG-MV → MPII | G360+ITWG-MV → GC |
|---|---|---|---|---|
| RAT/RUDA [5] | 6.8 | 8.1 | 7.9 | 8.3 |
| CDG/CRGA [70] | 7.4 | 9.0 | 7.6 | 8.7 |
| PureGaze [11] | 6.6 | 8.0 | 7.2 | 8.3 |
| 3DGazeNet | - | - | - | - |
+ +focus on domain adaptation for gaze estimation and encompass two-stage training schemes, both training feature invariant models at the first stage. That is, in the first training stage RUDA [5] trains gaze estimation model invariant to image rotations, while CRGA [70] uses a contrastive loss to separate image features according to gaze. The second stage of the above methods is focused on adapting the initially trained models to specific target domains. As our method aims to train general gaze estimation models without knowledge of specific target domains, we implement the first-stage models of the above methods, namely RAT [5], CDG [70] and compare them with 3DGazeNet in cross-dataset experiments. Additionally, we compare against PureGaze [11] which is a gaze generalization method that purifies face features to achieve higher gaze estimation performance. To follow the evaluation protocol in the above works, we train all methods on EXG and G360 (+ITWG-MV) and test on MPII and GC. For completeness, we include results of the full models RUDA and CRGA after using ITWG-MV according to their domain adaptation schemes. For PureGaze, ITWG-MV was used for fine-tuning. Tab. 2 shows that the proposed method outperforms the baselines for gaze generalization when ITWG-MV is employed. The compared methods do not include regularization for the noisy labels of ITWG-MV, resulting in similar or worse performance, while our method exploits them through $\mathcal{L}_{MV}$ , benefiting from the extended variation. + +Table 3: Comparison between training targets Vector(V), Mesh(M) and Mesh+Vector(M+V) in within-dataset experiments (using only $\mathcal{L}_{GT}$ ). Target M+V leads to lower errors than state-of-the-art. Error is in degrees (lower is better). + +
| Dataset | [51] | [13] | [1] | [15,82] | [53] | [15,35] | [40] | [79] | 3DGazeNet V | 3DGazeNet M | 3DGazeNet M+V |
|---|---|---|---|---|---|---|---|---|---|---|---|
| MPII | 4.04 | 4.00 | 3.92 | 4.9 | 5.3 | 4.06 | - | 4.8 | 4.1 | 4.2 | 4.0 |
| G360 | 10.7 | 10.6 | 10.4 | 14.9 | - | 11.1 | 10.1 | - | 9.8 | 9.8 | 9.6 |
| GC | - | - | - | - | 3.5 | - | - | 3.3 | 3.2 | 3.3 | 3.1 |
| EXG | - | - | - | 7.3 | - | - | - | 4.5 | 4.2 | 4.4 | 4.2 |
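The V, M and M+V targets compared in Tab. 3 correspond to training the vector head, the mesh head, or both. The following schematic shows a two-headed model in the spirit of Fig. 4(c) and one simple way to fuse its outputs into a single direction. The layer sizes, the 9-channel input (two eye crops plus the face crop stacked channel-wise) and, in particular, the rule for reading a direction off the predicted meshes (eyeball centre to iris centre, assuming known iris vertex indices of the template) are our assumptions, not the released architecture.

```python
# Schematic two-headed network and output fusion; illustrative, not the released model.
import torch
import torch.nn as nn
import torchvision

N_V = 481  # vertices per eyeball template

class EyeNet(nn.Module):
    def __init__(self, in_ch=9):
        super().__init__()
        backbone = torchvision.models.resnet18(weights=None)
        backbone.conv1 = nn.Conv2d(in_ch, 64, 7, stride=2, padding=3, bias=False)
        backbone.fc = nn.Identity()
        self.backbone = backbone
        self.mesh_head = nn.Linear(512, 2 * N_V * 3)   # left + right eye vertices
        self.gaze_head = nn.Linear(512, 3)             # 3D gaze vector

    def forward(self, x):
        f = self.backbone(x)
        verts = self.mesh_head(f).view(-1, 2, N_V, 3)
        gaze = nn.functional.normalize(self.gaze_head(f), dim=-1)
        return verts, gaze

def fuse_gaze(verts, gaze, iris_idx):
    # Mean direction of (a) the vector head and (b) a direction read off the
    # predicted meshes (eyeball centre -> iris centre), averaged over both eyes.
    centre = verts.mean(dim=2)                      # (B, 2, 3)
    iris = verts[:, :, iris_idx, :].mean(dim=2)     # (B, 2, 3)
    g_mesh = nn.functional.normalize((iris - centre).mean(dim=1), dim=-1)
    return nn.functional.normalize(g_mesh + gaze, dim=-1)
```

Sec. 3.1 only specifies that the final output is the mean direction of the two components, so any consistent mesh-to-direction rule could be substituted for the one sketched here.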
+ +# 4.3 Ablation studies + +Gaze Estimation via 3D Eye Mesh Regression Here we experimentally evaluate our suggestion that gaze estimation benefits from replacing the training target from gaze vectors or angles to dense 3D eye coordinates. To this end, we employ the fully supervised version of our model, utilizing data with exact ground truth and $\mathcal{L}_{GT}$ for training. We conduct within-dataset experiments on MPII, GC, G360 and EXG for which specific training-testing subsets are provided. We compare against state-of-the-art methods [1,13,15,35,40,51,53,79,82] and report the results in Tab. 3. In almost all cases, our model outperforms the baselines, while combining the two modalities, i.e. dense 3D meshes and gaze vectors $(\mathrm{M} + \mathrm{V})$ , improves performance compared to training with vector targets (V) or 3D mesh targets (M) alone. This is possibly due to the distinct nature of the two modalities, i.e. the vectors provide exact label supervision, while meshes provide a robust representation which limits sparse prediction errors. + +The main benefit of dense coordinate regression over pose parameters or sparse points prediction is that individual parameter errors have limited effect on the total outcome making them more robust to prediction inaccuracies [16]. This effect is particularly useful for our multi-view training scheme in which introducing consistency of dense correspondences between images rather than only vector consistency, offers stronger regularization. We validate this argument in gaze generalization experiments in G360, GC, EXG, and MPII, presented in Tab. 4. For this experiment, we consider three versions of 3DGazeNet: one which predicts only gaze vectors and no coordinates (Vector), one which predicts 8 3D iris landmarks instead of dense eye meshes (Iris+Vector), to highlight the effect of dense coordinate prediction, and the full 3DGazeNet (Mesh+Vector). The results show that employing combined training targets always benefits performance, while replacing dense 3D eye meshes with iris landmarks highly limits this effect. + +The Effect of Gaze Pseudo-Labels and Multi-View Supervision Here we examine the contribution of our automatic geometry-aware pseudo-labels and the multi-view supervision loss of our approach. To this end, we consider three training scenarios which are the following: a) training with ITWG and its pseudo-labels as ground truth $(\mathcal{L}_{PGT})$ , b) training with ITWG-MV utilizing only the multi-view consistency constraints and no pseudo-labels $(\mathcal{L}_{MV})$ and c) training with ITWG-MV while employing both pseudo-labels and the multi-view + +Table 4: Comparison between training targets Vector, Iris+Vector and Mesh+Vector for domain generalization when employing our full model (Eq. (8)). For the target Vector, we remove all mesh terms from the employed losses. In all experiments, the target Mesh+Vector results in a lower error. Gaze error is in degrees (lower is better). + +
Targets: V = Vector, I+V = Iris+Vector, M+V = Mesh+Vector; test sets: G360, GC, EXG, MPII.

| Training Dataset | V: G360 | V: GC | V: EXG | V: MPII | I+V: G360 | I+V: GC | I+V: EXG | I+V: MPII | M+V: G360 | M+V: GC | M+V: EXG | M+V: MPII |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| ITWG-MV | 19.1 | 10.1 | 16.7 | 8.5 | 18.8 | 9.9 | 16.7 | 8.2 | 18.1 | 9.0 | 16.7 | 7.6 |
| G360+ITWG-MV | 10.1 | 10.2 | 15.1 | 7.0 | 9.7 | 9.4 | 15.0 | 6.8 | 9.3 | 8.0 | 14.6 | 6.3 |
| GC+ITWG-MV | 18.2 | 3.1 | 16.0 | 6.1 | 18.0 | 3.0 | 15.9 | 6.2 | 17.6 | 3.0 | 15.5 | 6.1 |
| EXG+ITWG-MV | 16.5 | 10.2 | 4.5 | 6.6 | 16.3 | 9.6 | 4.5 | 6.4 | 15.4 | 7.8 | 4.3 | 6.0 |
| MPII+ITWG-MV | 17.8 | 8.2 | 15.2 | 4.8 | 17.9 | 7.6 | 15.0 | 4.6 | 17.6 | 6.8 | 14.9 | 4.2 |
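The rows of Tab. 5 below differ only in which terms of Eq. (8) are active during training. A small sketch of how a training step might assemble the total loss from whatever supervision a batch provides; the helper names refer to the loss sketches above and are our own.

```python
# Toggling the terms of Eq. (8) per supervision type, as in the scenarios of Tab. 5.
# gt_term, pgt_term and mv_term are the values of Eqs. (4) and (7) computed on the
# corresponding sub-batches; a term is None when that kind of supervision is absent.
def total_loss(gt_term=None, pgt_term=None, mv_term=None,
               lam_gt=1.0, lam_pgt=1.0, lam_mv=1.0):
    terms = [(lam_gt, gt_term), (lam_pgt, pgt_term), (lam_mv, mv_term)]
    active = [lam * t for lam, t in terms if t is not None]
    if not active:
        raise ValueError("at least one supervision signal is required")
    return sum(active)

# e.g. the pseudo-labels plus multi-view row of Tab. 5:
# loss = total_loss(pgt_term=supervised_loss(...), mv_term=mv_loss(...))
```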
+ +Table 5: The effect of incorporating pseudo-ground truth and multi-view supervision during training. Both components contribute towards improving results in cross-dataset gaze estimation experiments. Gaze error is in degrees (lower is better). + +
| Dataset | $\mathcal{L}_{GT}$ | $\mathcal{L}_{PGT}$ | $\mathcal{L}_{MV}$ | G360 | GC | EXG | MPII |
|---|---|---|---|---|---|---|---|
| ITWG | - | ✓ | - | 23.1 | 14.8 | 24.3 | 13.6 |
| ITWG-MV | - | - | ✓ | 47.4 | 33.2 | 41.1 | 32.8 |
| ITWG-MV | - | ✓ | ✓ | 18.1 | 9.0 | 16.7 | 7.6 |
| GC | ✓ | - | - | 27.5 | 3.1 | 28.4 | 10.4 |
| GC+ITWG | ✓ | ✓ | - | 21.4 | 3.2 | 23.7 | 9.1 |
| GC+ITWG-MV | ✓ | - | ✓ | 24.7 | 3.5 | 26.2 | 10.1 |
| GC+ITWG-MV | ✓ | ✓ | ✓ | 17.6 | 3.0 | 15.5 | 6.1 |

consistency loss $(\mathcal{L}_{PGT} + \mathcal{L}_{MV})$ . To further evaluate the effect of the pseudo-labels and the multi-view loss, we repeat the above experiments by adding ground truth supervision from GC $(+ \mathcal{L}_{GT})$ . We test our models on the test sets of G360, GC, EXG, and MPII, and report the results in Tab. 5. In all cases, combining our pseudo-labels and multi-view loss yields the lowest error in degrees. Lastly, utilizing only $\mathcal{L}_{MV}$ on ITWG-MV leads to very high errors, which is reasonable as no supervision for the eyeball topology exists and thus the model outputs cannot follow the spherical shape of the eyeball template. + +The Effect of Head Pose Distribution of ITWG Differences in head pose distribution between the training and test sets are one of the main reasons that gaze estimation models fail in cross-dataset situations. To close the gap between different training and testing scenarios, we have designed ITWG, a large-scale dataset with widespread variation in head pose and gaze angles. To study the effect of the head pose variation of ITWG in our experiments, we employ different subsets of ITWG with various levels of head pose variation and conduct cross-dataset experiments with them. In particular, we consider four subsets of ITWG, with maximum yaw angles of $5^\circ$ , $20^\circ$ , $40^\circ$ and $90^\circ$ (all) respectively. + +We train 3DGazeNet with ground truth supervision from MPII as well as pseudo-labels and multi-view supervision from the four versions of ITWG-MV. + +![](images/8e5c305ff8fd9108a272c436f6fc78dfc297cff71cffa33dc0809c624ae7ac3e.jpg) +Fig. 6: Gaze error on G360 across head poses when training with MPII and subsets of ITWG-MV. A wider range of head poses in the ITWG-MV data leads to significantly lower errors at large poses. + +The results of testing on G360 are presented in Fig. 6. The resulting curves clearly demonstrate the effect of the available head pose variation in the training data. Specifically, utilizing the entirety of ITWG-MV leads to the lowest errors, which are relatively consistent across the head pose range. As expected, decreasing the available head pose variation increasingly degrades model performance, with the worst case being training with MPII alone. Based on the above finding we argue that the gap between small- and wide-distribution gaze datasets (regarding head pose) can be effectively closed by employing unlabeled face datasets with similarly wide distributions, which is crucial for training plug-n-play gaze estimation models that can be directly employed in applications. + +# 5 Limitations and Conclusion + +In Sec. 4, we have shown that pseudo-ground truth can be effectively utilized in gaze estimation. Nevertheless, a limitation of our method is that pseudo-annotation accuracy is related to the accuracy of 3D face and 2D iris alignment. In addition, our current method cannot operate on images without a visible face (when the face is looking away from the camera). + +Overall, in this work, we present a novel weakly-supervised method for gaze generalization, based on dense 3D eye mesh regression. We demonstrate that by utilizing both 3D eye coordinates and gaze labels during training, instead of just gaze labels, we can achieve lower prediction errors. Moreover, we explore the possibility of exploiting the abundantly available in-the-wild face data for improving gaze estimation generalization.
To that end, we propose a novel methodology to generate robust, 3D geometry-aware pseudo-ground truth labels, as well as a multi-view weak-supervision framework for effective training. By enforcing these constraints, we are able to successfully utilize in-the-wild face data and achieve improvements in cross-dataset and within-dataset experiments. + +Acknowledgments. S. Zafeiriou was supported by EPSRC Project DEFORM (EP/S010203/1) and GNOMON (EP/X011364). + +# References + +1. Abdelrahman, A.A., Hempel, T., Khalifa, A., Al-Hamadi, A., Dinges, L.: L2cs-net: Fine-grained gaze estimation in unconstrained environments. In: ICFSP. pp. 98-102. IEEE (2023) +2. Albiero, V., Chen, X., Yin, X., Pang, G., Hassner, T.: img2pose: Face alignment and detection via 6dof, face pose estimation. In: CVPR (2021) +3. Alp Guler, R., Trigeorgis, G., Antonakos, E., Snape, P., Zafeiriou, S., Kokkinos, I.: Densereg: Fully convolutional dense shape regression in-the-wild. In: CVPR (2017) +4. Andrist, S., Tan, X.Z., Gleicher, M., Mutlu, B.: Conversational gaze aversion for humanlike robots. In: HRI (2014) +5. Bao, Y., Liu, Y., Wang, H., Lu, F.: Generalizing gaze estimation with rotation consistency. In: CVPR (2022) +6. Burova, A., Mäkelä, J., Hakulinen, J., Keskinen, T., Heinonen, H., Siltanen, S., Turunen, M.: Utilizing vr and gaze tracking to develop ar solutions for industrial maintenance. In: CHI (2020) +7. Cai, X., Zeng, J., Shan, S., Chen, X.: Source-free adaptive gaze estimation by uncertainty reduction. In: CVPR. pp. 22035-22045 (2023) +8. Cai, Y., Ge, L., Cai, J., Yuan, J.: Weakly-supervised 3d hand pose estimation from monocular rgb images. In: ECCV (2018) +9. Castner, N., Kuebler, T.C., Scheiter, K., Richter, J., Eder, T., Hüttig, F., Keutel, C., Kasneci, E.: Deep semantic gaze embedding and scanpath comparison for expertise classification during OPT viewing. In: ACM ETRA (2020) +10. Chen, M., Jin, Y., Goodall, T., Yu, X., Bovik, A.C.: Study of 3d virtual reality picture quality. IEEE Journal of Selected Topics in Signal Processing (2020) +11. Cheng, Y., Bao, Y., Lu, F.: Puregaze: Purifying gaze feature for generalizable gaze estimation. In: AAAI (2022) +12. Cheng, Y., Huang, S., Wang, F., Qian, C., Lu, F.: A coarse-to-fine adaptive network for appearance-based gaze estimation. In: AAAI (2020) +13. Cheng, Y., Lu, F.: Gaze estimation using transformer. In: ICPR (2022) +14. Cheng, Y., Lu, F., Zhang, X.: Appearance-based gaze estimation via evaluation-guided asymmetric regression. In: ECCV (2018) +15. Cheng, Y., Wang, H., Bao, Y., Lu, F.: Appearance-based gaze estimation with deep learning: A review and benchmark. arXiv preprint arXiv:2104.12668 (2021) +16. Deng, J., Guo, J., Ververas, E., Kotsia, I., Zafeiriou, S.: Retinaface: Single-shot multi-level face localisation in the wild. In: CVPR (2020) +17. Deng, Y., Yang, J., Xu, S., Chen, D., Jia, Y., Tong, X.: Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In: CVPR Workshops (2019) +18. Doukas, M.C., Koujan, M.R., Sharmanska, V., Roussos, A., Zafeiriou, S.: Head2head++: Deep facial attributes re-targeting. T-BIOM (2021) +19. Doukas, M.C., Zafeiriou, S., Sharmanska, V.: Headgan: One-shot neural head synthesis and editing. In: ICCV (2021) +20. Fischer, T., Chang, H.J., Demiris, Y.: Rt-gene: Real-time eye gaze estimation in natural environments. In: ECCV (2018) + +21.
Fuhl, W., Kasneci, G., Kasneci, E.: Teyed: Over 20 million real-world eye images with pupil, eyelid, and iris 2d and 3d segmentations, 2d and 3d landmarks, 3d eyeball, gaze vector, and eye movement types. In: ISMAR (2021) +22. Funes Mora, K.A., Monay, F., Odobez, J.M.: Eyediap: A database for the development and evaluation of gaze estimation algorithms from rgb and rgb-d cameras. In: ACM ETRA (2014) +23. Gecer, B., Ploumpis, S., Kotsia, I., Zafeiriou, S.: Ganfit: Generative adversarial network fitting for high fidelity 3d face reconstruction. In: CVPR (2019) +24. Ghosh, S., Hayat, M., Dhall, A., Knibbe, J.: Mtgls: Multi-task gaze estimation with limited supervision. In: WACV (2022) +25. Gu, C., Sun, C., Ross, D.A., Vondrick, C., Pantofaru, C., Li, Y., Vijayanarasimhan, S., Toderici, G., Ricco, S., Sukthankar, R., Schmid, C., Malik, J.: Ava: A video dataset of spatio-temporally localized atomic visual actions. In: CVPR (2018) +26. Guler, R.A., Kokkinos, I.: Holopose: Holistic 3d human reconstruction in-the-wild. In: CVPR (2019) +27. Guo, Z., Yuan, Z., Zhang, C., Chi, W., Ling, Y., Zhang, S.: Domain adaptation gaze estimation by embedding with prediction consistency. In: ACCV (2020) +28. Hao, Z., Mallya, A., Belongie, S., Liu, M.Y.: GANcraft: Unsupervised 3D Neural Rendering of Minecraft Worlds. In: ICCV (2021) +29. He, J., Pham, K., Valliappan, N., Xu, P., Roberts, C., Lagun, D., Navalpakkam, V.: On-device few-shot personalization for real-time gaze estimation. In: ICCV Workshops (2019) +30. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016) +31. Iqbal, U., Molchanov, P., Kautz, J.: Weakly-supervised 3d human pose learning via multi-view images in the wild. In: CVPR (2020) +32. Joo, H., Liu, H., Tan, L., Gui, L., Nabbe, B., Matthews, I., Kanade, T., Nobuhara, S., Sheikh, Y.: Panoptic studio: A massively multiview system for social motion capture. In: ICCV (2015) +33. Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: CVPR (2019) +34. Kasahara, I., Stent, S., Park, H.S.: Look both ways: Self-supervising driver gaze estimation and road scene saliency. In: ECCV (2022) +35. Kellnhofer, P., Recasens, A., Stent, S., Matusik, W., Torralba, A.: Gaze360: Physically unconstrained gaze estimation in the wild. In: ICCV (2019) +36. Kim, H., Garrido, P., Tewari, A., Xu, W., Thies, J., Nießner, M., Pérez, P., Richardt, C., Zollhöfer, M., Theobalt, C.: Deep video portraits. TOG (2018) +37. Kleinke, C.L.: Gaze and eye contact: a research review. Psychological bulletin (1986) +38. Koestinger, M., Wohlhart, P., Roth, P.M., Bischof, H.: Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization. In: ICCVW (2011) +39. Konrad, R., Angelopoulos, A., Wetzstein, G.: Gaze-contingent ocular parallax rendering for virtual reality. TOG (2019) +40. Kothari, R., De Mello, S., Iqbal, U., Byeon, W., Park, S., Kautz, J.: Weakly-supervised physically unconstrained gaze estimation. In: CVPR (2021) +41. Koujan, M.R., Doukas, M.C., Roussos, A., Zafeiriou, S.: Head2head: Video-based neural head synthesis. In: FG (2020) +42. Krafka, K., Khosla, A., Kellnhofer, P., Kannan, H., Bhandarkar, S., Matusik, W., Torralba, A.: Eye tracking for everyone. In: CVPR (2016) + +43. Kulon, D., Guler, R.A., Kokkinos, I., Bronstein, M.M., Zafeiriou, S.: Weakly-supervised mesh-convolutional hand reconstruction in the wild. In: CVPR (2020) +44. 
Li, Y., Li, K., Jiang, S., Zhang, Z., Huang, C., Xu, R.Y.D.: Geometry-driven self-supervised method for 3d human pose estimation. In: AAAI (2020) +45. Liu, G., Yu, Y., Mora, K., Odobez, J.: A differential approach for gaze estimation with calibration. In: BMVC (2018) +46. Liu, G., Yu, Y., Mora, K.A.F., Odobez, J.M.: A differential approach for gaze estimation with calibration. In: BMVC (2018) +47. Liu, Y., Liu, R., Wang, H., Lu, F.: Generalizing gaze estimation with outlier-guided collaborative adaptation. In: ICCV (2021) +48. Marín-Jiménez, M.J., Kalogeiton, V., Medina-Suárez, P., Zisserman, A.: LAEO-Net++: revisiting people Looking At Each Other in videos. TPAMI (2021) +49. Marin-Jimenez, M.J., Kalogeiton, V., Medina-Suarez, P., Zisserman, A.: Laeo-net: Revisiting people looking at each other in videos. In: CVPR (2019) +50. Mavely, A.G., Judith, J.E., Sahal, P.A., Kuruvilla, S.A.: Eye gaze tracking based driver monitoring system. In: ICCS (2017) +51. Oh, J., Chang, H.J., Choi, S.I.: Self-attention with convolution and deconvolution for efficient eye gaze estimation from a full face image. In: CVPRW (2022) +52. Park, S., Aksan, E., Zhang, X., Hilliges, O.: Towards end-to-end video-based eye-tracking. In: ECCV (2020) +53. Park, S., Mello, S.D., Molchanov, P., Iqbal, U., Hilliges, O., Kautz, J.: Few-shot adaptive gaze estimation. In: ICCV (2019) +54. Park, S., Spurr, A., Hilliges, O.: Deep pictorial gaze estimation. In: ECCV (2018) +55. Park, S., Zhang, X., Bulling, A., Hilliges, O.: Learning to find eye region landmarks for remote gaze estimation in unconstrained settings. In: ACM ETRA (2018) +56. Park, T., Liu, M.Y., Wang, T.C., Zhu, J.Y.: Semantic image synthesis with spatially-adaptive normalization. In: CVPR (2019) +57. Richard, A., Lea, C., Ma, S., Gall, J., de la Torre, F., Sheikh, Y.: Audio- and gaze-driven facial animation of codec avatars. In: WACV (2021) +58. Guler, R.A., Neverova, N., Kokkinos, I.: Densepose: Dense human pose estimation in the wild. In: CVPR (2018) +59. Smith, B., Yin, Q., Feiner, S., Nayar, S.: Gaze Locking: Passive Eye Contact Detection for Human-Object Interaction. In: ACM UIST (2013) +60. Sugano, Y., Matsushita, Y., Sato, Y.: Learning-by-synthesis for appearance-based 3d gaze estimation. In: CVPR (2014) +61. Sun, J., Wang, X., Shi, Y., Wang, L., Wang, J., Liu, Y.: Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. ACM TOG 41(6), 1-10 (2022) +62. Sun, J., Wang, X., Wang, L., Li, X., Zhang, Y., Zhang, H., Liu, Y.: Next3d: Generative neural texture rasterization for 3d-aware head avatars. In: CVPR (2023) +63. Sun, Y., Zeng, J., Shan, S., Chen, X.: Cross-encoder for unsupervised gaze representation learning. In: ICCV (2021) +64. Vidal, M., Turner, J., Bulling, A., Gellersen, H.: Wearable eye tracking for mental health monitoring. Computer Communications (2012) +65. Wandt, B., Rudolph, M., Zell, P., Rhodin, H., Rosenhahn, B.: Canonpose: Self-supervised monocular 3d human pose estimation in the wild. In: CVPR (2021) +66. Wang, K., Ji, Q.: Real time eye gaze tracking with 3d deformable eye-face model. In: ICCV (2017) +67. Wang, K., Zhao, R., Ji, Q.: A hierarchical generative model for eye image synthesis and eye gaze estimation. In: CVPR (2018) + +68. Wang, K., Zhao, R., Su, H., Ji, Q.: Generalizing eye tracking with bayesian adversarial learning. In: CVPR (2019) +69. Wang, T.C., Liu, M.Y., Tao, A., Liu, G., Kautz, J., Catanzaro, B.: Few-shot video-to-video synthesis. In: NeurIPS (2019) +70. 
Wang, Y., Jiang, Y., Li, J., Ni, B., Dai, W., Li, C., Xiong, H., Li, T.: Contrastive regression for domain adaptation on gaze estimation. In: CVPR (2022) +71. Wood, E., Baltrusaitis, T., Morency, L.P., Robinson, P., Bulling, A.: A 3d morphable eye region model for gaze estimation. In: ECCV (2016) +72. Yu, Y., Liu, G., Odobez, J.M.: Deep multitask gaze estimation with a constrained landmark-gaze model. In: ECCV Workshops (2018) +73. Yu, Y., Liu, G., Odobez, J.M.: Improving few-shot user-specific gaze adaptation via gaze redirection synthesis. In: CVPR (2019) +74. Yu, Y., Odobez, J.M.: Unsupervised representation learning for gaze estimation. In: CVPR (2020) +75. Zakharov, E., Shysheya, A., Burkov, E., Lempitsky, V.: Few-shot adversarial learning of realistic neural talking head models. In: ICCV (2019) +76. Zakharov, E., Ivakhnenko, A., Shysheya, A., Lempitsky, V.: Fast bi-layer neural synthesis of one-shot realistic head avatars. In: ECCV (2020) +77. Zhang, J., Chen, J., Tang, H., Wang, W., Yan, Y., Sangineto, E., Sebe, N.: Dual in-painting model for unsupervised gaze correction and animation in the wild. In: ACM MM (2020) +78. Zhang, M., Liu, Y., Lu, F.: Gazeonce: Real-time multi-person gaze estimation. In: CVPR (2022) +79. Zhang, X., Park, S., Beeler, T., Bradley, D., Tang, S., Hilliges, O.: Eth-xgaze: A large scale dataset for gaze estimation under extreme head pose and gaze variation. In: ECCV (2020) +80. Zhang, X., Sugano, Y., Bulling, A.: Revisiting data normalization for appearance-based gaze estimation. In: ACM ETRA (2018) +81. Zhang, X., Sugano, Y., Fritz, M., Bulling, A.: Appearance-based gaze estimation in the wild. In: CVPR (2015) +82. Zhang, X., Sugano, Y., Fritz, M., Bulling, A.: It's written all over your face: Full-face appearance-based gaze estimation. In: CVPRW (2017) +83. Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: ICCV (2017) +84. Zhu, X., Liu, Y., Li, J., Wan, T., Qin, Z.: Emotion classification with data augmentation using generative adversarial networks. 
In: PAKDD (2018) \ No newline at end of file diff --git a/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/images.zip b/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..bba6c2c29d7ef9f09236fcc5e8f6ac610245bc12 --- /dev/null +++ b/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:976c682c59db98c052447924fef03b11c512282f1963ad0a9270307d3d8ea340 +size 533502 diff --git a/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/layout.json b/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e82f3c923c9238c0420f12777197b0c4aeca5065 --- /dev/null +++ b/2024/3DGazeNet_ Generalizing Gaze Estimation with Weak Supervision from Synthetic Views/layout.json @@ -0,0 +1,9951 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 145, + 111, + 469, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 111, + 469, + 148 + ], + "spans": [ + { + "bbox": [ + 145, + 111, + 469, + 148 + ], + "type": "text", + "content": "3DGazeNet: Generalizing 3D Gaze Estimation with Weak-Supervision from Synthetic Views" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 151, + 167, + 462, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 167, + 462, + 193 + ], + "spans": [ + { + "bbox": [ + 151, + 167, + 462, + 193 + ], + "type": "text", + "content": "Evangelos Ververas1,2, Polydefkis Gkagkos2, Jiankang Deng1,2, Michail Christos Doukas1, Jia Guo3, and Stefanos Zafeiriou1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 241, + 201, + 372, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 201, + 372, + 213 + ], + "spans": [ + { + "bbox": [ + 241, + 201, + 372, + 213 + ], + "type": "text", + "content": "1 Imperial College London, UK" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 241, + 213, + 370, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 213, + 370, + 223 + ], + "spans": [ + { + "bbox": [ + 241, + 213, + 370, + 223 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 241, + 213, + 370, + 223 + ], + "type": "text", + "content": " Huawei Noah's Ark Lab, UK" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 278, + 223, + 335, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 278, + 223, + 335, + 235 + ], + "spans": [ + { + "bbox": [ + 278, + 223, + 335, + 235 + ], + "type": "text", + "content": "3 InsightFace" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 215, + 236, + 397, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 215, + 236, + 397, + 246 + ], + "spans": [ + { + "bbox": [ + 215, + 236, + 397, + 246 + ], + "type": "text", + "content": "https://eververas.github.io/3DGazeNet/" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 160, + 271, + 453, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 271, + 453, + 470 + ], + "spans": [ + { + "bbox": [ + 160, + 271, + 453, + 470 + ], + "type": "text", + "content": "Abstract. 
Developing gaze estimation models that generalize well to unseen domains and in-the-wild conditions remains a challenge with no known best solution. This is mostly due to the difficulty of acquiring ground truth data that cover the distribution of faces, head poses, and environments that exist in the real world. Most recent methods attempt to close the gap between specific source and target domains using domain adaptation. In this work, we propose to train general gaze estimation models which can be directly employed in novel environments without adaptation. To do so, we leverage the observation that head, body, and hand pose estimation benefit from revising them as dense 3D coordinate prediction, and similarly express gaze estimation as regression of dense 3D eye meshes. To close the gap between image domains, we create a large-scale dataset of diverse faces with gaze pseudo-annotations, which we extract based on the 3D geometry of the face, and design a multi-view supervision framework to balance their effect during training. We test our method in the task of gaze generalization, in which we demonstrate improvement of up to " + }, + { + "bbox": [ + 160, + 271, + 453, + 470 + ], + "type": "inline_equation", + "content": "23\\%" + }, + { + "bbox": [ + 160, + 271, + 453, + 470 + ], + "type": "text", + "content": " compared to state-of-the-art when no ground truth data are available, and up to " + }, + { + "bbox": [ + 160, + 271, + 453, + 470 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 160, + 271, + 453, + 470 + ], + "type": "text", + "content": " when they are." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 160, + 479, + 452, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 479, + 452, + 491 + ], + "spans": [ + { + "bbox": [ + 160, + 479, + 452, + 491 + ], + "type": "text", + "content": "Keywords: 3D Gaze Estimation " + }, + { + "bbox": [ + 160, + 479, + 452, + 491 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 479, + 452, + 491 + ], + "type": "text", + "content": " 3D Eye Mesh " + }, + { + "bbox": [ + 160, + 479, + 452, + 491 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 479, + 452, + 491 + ], + "type": "text", + "content": " Gaze Generalization" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 133, + 510, + 230, + 522 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 510, + 230, + 522 + ], + "spans": [ + { + "bbox": [ + 133, + 510, + 230, + 522 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "type": "text", + "content": "Eye gaze serves as a cue for understanding human behavior and intents, including attention, communication, and mental state. As a result, gaze information has been exploited by a lot of applications of various fields of interest, ranging from medical and psychological analysis [9,37,64] to human-computer interaction [4], efficient rendering in VR/AR headset systems [6,10,39], virtual character animation [57,61,62,77] and driver state monitoring [34,50]. When high accuracy is important, data collection under the particular capturing set up is crucial, e.g. specific VR headsets, static screen-camera setups. 
However, in numerous real-world applications robustness is equally important to high accuracy, e.g. face-unlocking in mobile devices, best frame capturing/selection in group photos and automatic gaze annotation of large datasets for face reenactment." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 136, + 114, + 478, + 241 + ], + "blocks": [ + { + "bbox": [ + 136, + 114, + 478, + 241 + ], + "lines": [ + { + "bbox": [ + 136, + 114, + 478, + 241 + ], + "spans": [ + { + "bbox": [ + 136, + 114, + 478, + 241 + ], + "type": "image", + "image_path": "8461aabaf554595059207969dbc8a35fa58eb3982c46f8faf331904eb2e1199c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 243, + 482, + 308 + ], + "lines": [ + { + "bbox": [ + 130, + 243, + 482, + 308 + ], + "spans": [ + { + "bbox": [ + 130, + 243, + 482, + 308 + ], + "type": "text", + "content": "Fig. 1: Overview of our method 3DGazeNet. a) We approach 3D gaze estimation as dense 3D eye mesh regression, which is robust against sparse prediction errors. b) Domain generalization is one of the hardest challenges in gaze estimation. Training with common gaze datasets often results in poor cross-dataset performance. c) Our multi-view supervision method employs pseudo-labels from in-the-wild face images to close the gap between controlled and in-the-wild datasets." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 340, + 482, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 340, + 482, + 496 + ], + "spans": [ + { + "bbox": [ + 130, + 340, + 482, + 496 + ], + "type": "text", + "content": "Typically, 3D gaze estimation is expressed as a direct mapping between input images and a few pose parameters [12, 42, 52, 70, 82], or sparse representations of the eyes [54, 55, 66]. Nevertheless, it has been shown that unconstrained face and body pose estimation from single images benefits from replacing predicting few pose or shape parameters by directly predicting dense 3D geometry [3, 16, 26, 43, 58]. In this work, we leverage this observation and revise the formulation of gaze estimation as end-to-end dense 3D eye mesh regression, which combined with standard vector regression induces multiple benefits. Existing datasets with ground truth 3D eyes include only images in the IR domain [21], however, IR images cannot be directly employed for RGB-based methods. As 3D eye meshes are not available for most gaze datasets, we define a unified eye representation, i.e. a rigid 3D eyeball template (Fig. 3(a)), which we fit on images based on sparse landmarks and the available gaze labels." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 498, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 498, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 498, + 482, + 666 + ], + "type": "text", + "content": "Several gaze datasets have become available in the last decade [20, 22, 35, 42, 52, 59, 60, 79, 81], which have contributed to the recent progress in automatic 3D gaze estimation from monocular RGB images. However, collecting gaze datasets is a costly and challenging process which often restricts them being captured in controlled environments and consisting of limited unique identities, thus lacking variation compared to data from the real world. 
This causes the most common challenge in gaze estimation, which is cross-domain and in-the-wild generalization. In this work, we propose a method to exploit arbitrary, unlabeled face images to largely increase the diversity of our training data as well as our model's generalization capabilities. To that end, we design a simple pipeline to extract robust 3D gaze pseudo-labels based on the 3D shape of the face and eyes, without having any prior gaze information. Based on recent advancements on weakly-supervised head, body and hand pose estimation [8, 17, 31, 44, 65], we regularize inconsistencies of pseudo-labels, by a geometric constraint which encourages our" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "type": "text", + "content": "E. Ververas et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": "We model to maintain prediction consistency between multiple synthetic views of the same subject." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 140, + 481, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 140, + 481, + 247 + ], + "spans": [ + { + "bbox": [ + 130, + 140, + 481, + 247 + ], + "type": "text", + "content": "Most recent methods attempt to close the gap between diverse image domains using domain adaptation. Commonly, they employ a few samples of the target domain, with [29, 53, 73] or without [5, 7, 11, 24, 27, 47, 68, 70] their gaze labels, to fine-tune an initial model. Although successful, approaches following this scheme require knowledge of the target domain and model re-training, which prohibit their use as plug-n-play methods in real user applications. In contrast, we propose a method to train gaze estimation models that generalize well to unseen and inthe-wild environments without the constraints of domain adaption. Our method can effortlessly be employed by user applications in a plug-n-play fashion." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 248, + 481, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 248, + 481, + 307 + ], + "spans": [ + { + "bbox": [ + 130, + 248, + 481, + 307 + ], + "type": "text", + "content": "An overview of our approach, which we name 3DGazeNet, is presented in Fig. 1. We evaluate our method in cross-dataset gaze generalization, showcasing improvements over the state-of-the-art, even by a large margin, and perform ablations over the model components. 
To summarize, the key contributions of our work are:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 137, + 314, + 479, + 431 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 138, + 314, + 479, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 314, + 479, + 348 + ], + "spans": [ + { + "bbox": [ + 138, + 314, + 479, + 348 + ], + "type": "text", + "content": "- A simple automatic method to extract robust 3D eye meshes from arbitrary face images and a multi-view consistency regularization which allows to exploit them for improved gaze generalization." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 137, + 350, + 479, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 350, + 479, + 384 + ], + "spans": [ + { + "bbox": [ + 137, + 350, + 479, + 384 + ], + "type": "text", + "content": "- A revised formulation for gaze estimation, based on dense 3D eye mesh regression from images. To the best of our knowledge, we are the first to utilize an end-to-end 3D eye mesh regression approach for gaze estimation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 137, + 385, + 479, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 385, + 479, + 431 + ], + "spans": [ + { + "bbox": [ + 137, + 385, + 479, + 431 + ], + "type": "text", + "content": "- Improved performance over the state-of-the-art in gaze generalization with " + }, + { + "bbox": [ + 137, + 385, + 479, + 431 + ], + "type": "inline_equation", + "content": "(10\\%)" + }, + { + "bbox": [ + 137, + 385, + 479, + 431 + ], + "type": "text", + "content": " and without " + }, + { + "bbox": [ + 137, + 385, + 479, + 431 + ], + "type": "inline_equation", + "content": "(23\\%)" + }, + { + "bbox": [ + 137, + 385, + 479, + 431 + ], + "type": "text", + "content": " using source domain ground truth, with a simple model architecture. Based on that, we believe that 3DGazeNet is an important step towards reliable plug-n-play gaze tracking." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 451, + 237, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 451, + 237, + 464 + ], + "spans": [ + { + "bbox": [ + 132, + 451, + 237, + 464 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 475, + 479, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 475, + 479, + 558 + ], + "spans": [ + { + "bbox": [ + 130, + 475, + 479, + 558 + ], + "type": "text", + "content": "Numerous model designs for supervised 3D gaze estimation have been tested recently, investigating which face region to use as input [12,42,82], the model architecture [1,14,46,67] and what external stimuli to utilize to improve performance [52]. Motivated by the difficulties in collecting diverse and large scale data for gaze estimation, recent works have shown that valuable gaze representations can be extracted in fully unsupervised settings, by applying gaze redirection [74] or disentanglement constraints [63]." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 570, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 570, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 570, + 481, + 665 + ], + "type": "text", + "content": "Gaze Adaptation and Generalization Much effort has been made to design methods that adapt well to known target subjects and environments, by employing either few labeled samples [29, 53, 73] or completely unlabeled data of the target domain [5, 7, 11, 24, 27, 47, 68, 70]. Differently from the above, gaze generalization models aim to improve cross-domain performance without any knowledge of the target domains. The models in [5, 11, 70], even though targeted for gaze adaptation, are based on learning general features for gaze estimation and thus, they perform well in target domain-agnostic settings. Moreover, [40] has shown" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DGazeNet" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "text", + "content": "that it is possible to train general gaze estimation models by employing geometric constraints in scenes depicting social interaction between people. We believe that [40] is the closest work to ours, as it is the only method which uses 3D geometric cues of the scene to learn gaze from arbitrary face data. Lastly, [78] proposes to improve generalization by employing synthetic images which are, however, limited by the gaze distribution of existing gaze datasets. Both the implementation and custom dataset are not public, which hinders reproducibility and reliable comparisons." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 228, + 483, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 228, + 483, + 360 + ], + "spans": [ + { + "bbox": [ + 130, + 228, + 483, + 360 + ], + "type": "text", + "content": "Model-Based Gaze Estimation Differently from the above, sparse or semantic representations of the eye geometry have also been employed by some methods to infer gaze from images [54, 55, 66, 67, 71, 72]. However, such representations do not convey information about the 3D substance of eyes and are prone to noisy predictions. In contrast, by predicting 3D eye meshes we learn a much more robust representation, from which we can retrieve any other sparse or semantic one just by indexing. Recovering dense 3D geometry of the eye region from images by fitting parametric models of the shape and texture has been previously proposed [71]. However, restrictions posed by building large-scale parametric models and fitting in-the-wild images have resulted in low gaze accuracy compared to learning-based methods." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 375, + 483, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 375, + 483, + 518 + ], + "spans": [ + { + "bbox": [ + 130, + 375, + 483, + 518 + ], + "type": "text", + "content": "Face Reenactment and Learning from Synthetic Data Synthetic image data have been previously used in training deep networks, mainly to augment the training datasets and provide pseudo-ground truth annotations. For instance, [84] used CycleGAN [83] to create a new training corpus in order to balance emotion classes in the task of emotion classification. More recently, GANcraft [28] employed SPADE [56] to generate pseudo-ground truth images that were used to supervise their neural rendering framework. In this work, we obtain access to image pairs of the same subject in different views, by taking advantage of HeadGAN [19], a face reenactment system. In contrast to person-specific reenactment methods [18, 36, 41] or person-generic landmark-driven approaches [69, 75, 76], HeadGAN is able to perform free-view synthesis using a single source image." + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 142, + 542, + 473, + 610 + ], + "blocks": [ + { + "bbox": [ + 142, + 542, + 473, + 610 + ], + "lines": [ + { + "bbox": [ + 142, + 542, + 473, + 610 + ], + "spans": [ + { + "bbox": [ + 142, + 542, + 473, + 610 + ], + "type": "image", + "image_path": "54d377a55c6adaa4a090f6627c49621dd66d5167505f0a92fdf56187d7a886b2.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 617, + 482, + 662 + ], + "lines": [ + { + "bbox": [ + 130, + 617, + 482, + 662 + ], + "spans": [ + { + "bbox": [ + 130, + 617, + 482, + 662 + ], + "type": "text", + "content": "Fig. 2: We use HeadGAN [19] to generate novel views by manipulating the 3D pose of the face. During synthesis, angle " + }, + { + "bbox": [ + 130, + 617, + 482, + 662 + ], + "type": "inline_equation", + "content": "\\theta_z" + }, + { + "bbox": [ + 130, + 617, + 482, + 662 + ], + "type": "text", + "content": " is transferred to all facial parts including the eyes, thus the relative angle between the head and eyes (i.e. the gaze direction in the head coordinate system) is maintained." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "type": "text", + "content": "E. Ververas et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 114, + 202, + 127 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 114, + 202, + 127 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 202, + 127 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 140, + 338, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 140, + 338, + 152 + ], + "spans": [ + { + "bbox": [ + 132, + 140, + 338, + 152 + ], + "type": "text", + "content": "3.1 Problem Definition and Motivation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 160, + 482, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 160, + 482, + 255 + ], + "spans": [ + { + "bbox": [ + 130, + 160, + 482, + 255 + ], + "type": "text", + "content": "The aim of this work is to design a method that given a face image " + }, + { + "bbox": [ + 130, + 160, + 482, + 255 + ], + "type": "inline_equation", + "content": "\\mathbf{I}" + }, + { + "bbox": [ + 130, + 160, + 482, + 255 + ], + "type": "text", + "content": ", it estimates " + }, + { + "bbox": [ + 130, + 160, + 482, + 255 + ], + "type": "inline_equation", + "content": "2\\times N_{v}" + }, + { + "bbox": [ + 130, + 160, + 482, + 255 + ], + "type": "text", + "content": " 3D coordinates " + }, + { + "bbox": [ + 130, + 160, + 482, + 255 + ], + "type": "inline_equation", + "content": "\\mathbf{V} = [\\mathbf{V}_l^T,\\mathbf{V}_r^T ]^T" + }, + { + "bbox": [ + 130, + 160, + 482, + 255 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 160, + 482, + 255 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_l\\in \\mathbb{R}^{N_v\\times 3}" + }, + { + "bbox": [ + 130, + 160, + 482, + 255 + ], + "type": "text", + "content": " are coordinates corresponding to the left eyeball while " + }, + { + "bbox": [ + 130, + 160, + 482, + 255 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_r\\in \\mathbb{R}^{N_v\\times 3}" + }, + { + "bbox": [ + 130, + 160, + 482, + 255 + ], + "type": "text", + "content": " to the right, as well as a 3D gaze vector " + }, + { + "bbox": [ + 130, + 160, + 482, + 255 + ], + "type": "inline_equation", + "content": "g = (g_{x},g_{y},g_{z})" + }, + { + "bbox": [ + 130, + 160, + 482, + 255 + ], + "type": "text", + "content": ". Then, the final gaze result is calculated by the mean direction of the two output components. Inspired by recent work in self-supervised 3D body pose estimation [31,44,65], we adopt multi-view constraints to train our model based on in-the-wild faces and automatically generated gaze pseudo-labels." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 256, + 482, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 256, + 482, + 376 + ], + "spans": [ + { + "bbox": [ + 130, + 256, + 482, + 376 + ], + "type": "text", + "content": "To employ multi-view losses, we assume that images of the same subject with different head poses and the same gaze direction relatively to the head are available. For example, this condition is satisfied when a face picture is taken from different angles at the same time. As such images are not commonly available for in-the-wild datasets, we employ HeadGAN [19], a recent face reenactment method, to generate novel face poses from existing images. 
HeadGAN is able to synthesize face animations using dense face geometry, which covers the eyes, as a driving signal and single source images. Using dense geometry guarantees that the relative angle between the head and eyes is maintained when synthesizing novel poses, as it is shown in Fig. 2." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 393, + 318, + 406 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 393, + 318, + 406 + ], + "spans": [ + { + "bbox": [ + 132, + 393, + 318, + 406 + ], + "type": "text", + "content": "3.2 Unified 3D Eye Representation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 414, + 482, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 414, + 482, + 594 + ], + "spans": [ + { + "bbox": [ + 130, + 414, + 482, + 594 + ], + "type": "text", + "content": "Learning consistent eye meshes across different images and datasets, requires establishing a unified 3D eye representation. To that end, we define a 3D eyeball template as a rigid 3D triangular mesh with spherical shape, consisting of " + }, + { + "bbox": [ + 130, + 414, + 482, + 594 + ], + "type": "inline_equation", + "content": "N_{v} = 481" + }, + { + "bbox": [ + 130, + 414, + 482, + 594 + ], + "type": "text", + "content": " vertices and " + }, + { + "bbox": [ + 130, + 414, + 482, + 594 + ], + "type": "inline_equation", + "content": "N_{t} = 928" + }, + { + "bbox": [ + 130, + 414, + 482, + 594 + ], + "type": "text", + "content": " triangles. We create two mirrored versions, " + }, + { + "bbox": [ + 130, + 414, + 482, + 594 + ], + "type": "inline_equation", + "content": "\\mathbf{M}_l" + }, + { + "bbox": [ + 130, + 414, + 482, + 594 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 414, + 482, + 594 + ], + "type": "inline_equation", + "content": "\\mathbf{M}_r" + }, + { + "bbox": [ + 130, + 414, + 482, + 594 + ], + "type": "text", + "content": ", of the above mesh to represent a left and a right reference eyeball respectively. This representation allows us to allocate semantic labels to specific vertices of the eyeball, such as the iris border (Fig. 3 (a)), and calculate 3D gaze direction as the orientation of the central axis of our 3D eyeball template. In practice, an offset angle (the kappa coefficient) exists between the optical (central) and visual axes of eyes, which is subject-dependent and varies between " + }, + { + "bbox": [ + 130, + 414, + 482, + 594 + ], + "type": "inline_equation", + "content": "-2^{o}" + }, + { + "bbox": [ + 130, + 414, + 482, + 594 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 130, + 414, + 482, + 594 + ], + "type": "inline_equation", + "content": "2^{o}" + }, + { + "bbox": [ + 130, + 414, + 482, + 594 + ], + "type": "text", + "content": " across the population [73]. Accounting for this offset is essential for person-specific gaze estimation [29,45,53,73]. However, in our case of cross-dataset and in-the-wild gaze generalization, in which errors are much larger than the possible offset, data diversity is more important than anatomical precision and thus, our spherical eyeball is a reasonable approximation." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "content": "3D Eyes Ground-Truth from Gaze Datasets For gaze estimation datasets, exact supervision can be acquired by automatically fitting the eyeball template on face images based on sparse iris landmarks and the available gaze labels, as shown in Fig. 3(b). Specifically, we first rotate the eyeball template around its center according to the gaze label. Then, we align (scale and translation) " + }, + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DGazeNet" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 154, + 148, + 281, + 181 + ], + "blocks": [ + { + "bbox": [ + 154, + 148, + 281, + 181 + ], + "lines": [ + { + "bbox": [ + 154, + 148, + 281, + 181 + ], + "spans": [ + { + "bbox": [ + 154, + 148, + 281, + 181 + ], + "type": "image", + "image_path": "9d7d58ea4fb7905809fe43185be6c3fca2fb763900fcf6120f1a4066415450e3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 133, + 183, + 301, + 195 + ], + "lines": [ + { + "bbox": [ + 133, + 183, + 301, + 195 + ], + "spans": [ + { + "bbox": [ + 133, + 183, + 301, + 195 + ], + "type": "text", + "content": "M: " + }, + { + "bbox": [ + 133, + 183, + 301, + 195 + ], + "type": "inline_equation", + "content": "N_{v} = 481" + }, + { + "bbox": [ + 133, + 183, + 301, + 195 + ], + "type": "text", + "content": " vertices, " + }, + { + "bbox": [ + 133, + 183, + 301, + 195 + ], + "type": "inline_equation", + "content": "N_{t} = 928" + }, + { + "bbox": [ + 133, + 183, + 301, + 195 + ], + "type": "text", + "content": " triangles" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 137, + 217, + 301, + 289 + ], + "blocks": [ + { + "bbox": [ + 174, + 196, + 258, + 206 + ], + "lines": [ + { + "bbox": [ + 174, + 196, + 258, + 206 + ], + "spans": [ + { + "bbox": [ + 174, + 196, + 258, + 206 + ], + "type": "text", + "content": "(a) Eyeball template" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 137, + 217, + 301, + 289 + ], + "lines": [ + { + "bbox": [ + 137, + 217, + 301, + 289 + ], + "spans": [ + { + "bbox": [ + 137, + 217, + 301, + 289 + ], + "type": "image", + "image_path": "eb84fdff110a6cbad3c3dc547b0e17a22aa1a34d605f90f195d7f764d3dcb1a6.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 158, + 292, + 276, + 303 + ], + "lines": [ + { + "bbox": [ + 158, + 292, + 276, + 303 + ], + "spans": [ + { + 
"bbox": [ + 158, + 292, + 276, + 303 + ], + "type": "text", + "content": "(b) Ground truth generation" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 130, + 311, + 480, + 346 + ], + "lines": [ + { + "bbox": [ + 130, + 311, + 480, + 346 + ], + "spans": [ + { + "bbox": [ + 130, + 311, + 480, + 346 + ], + "type": "text", + "content": "Fig. 3: (a) The employed rigid 3D eyeball mesh template. (b) Ground truth data generation, applied on gaze estimation datasets with available ground truth. (c) Pseudoground truth data generation, applied on arbitrary face images without any gaze label." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 313, + 116, + 481, + 289 + ], + "blocks": [ + { + "bbox": [ + 313, + 116, + 481, + 289 + ], + "lines": [ + { + "bbox": [ + 313, + 116, + 481, + 289 + ], + "spans": [ + { + "bbox": [ + 313, + 116, + 481, + 289 + ], + "type": "image", + "image_path": "d9aecc08e389f66d569df7b440a01df77fca7a3cbacefe69e706b885f19c9513.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 292, + 457, + 303 + ], + "lines": [ + { + "bbox": [ + 313, + 292, + 457, + 303 + ], + "spans": [ + { + "bbox": [ + 313, + 292, + 457, + 303 + ], + "type": "text", + "content": "(c) Pseudo-ground truth generation" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 367, + 482, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 367, + 482, + 428 + ], + "spans": [ + { + "bbox": [ + 130, + 367, + 482, + 428 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 130, + 367, + 482, + 428 + ], + "type": "text", + "content": " coordinates of the rotated eye mesh to the iris landmarks of the image and multiply " + }, + { + "bbox": [ + 130, + 367, + 482, + 428 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 130, + 367, + 482, + 428 + ], + "type": "text", + "content": " coordinates with the same scale. To extract sparse iris landmarks we employed the method of [55] as a basis for building an iris localization model which is robust against occlusions and low resolution. More details about the iris localization model are provided in the supplemental material." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 437, + 482, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 437, + 482, + 583 + ], + "spans": [ + { + "bbox": [ + 130, + 437, + 482, + 583 + ], + "type": "text", + "content": "3D Eyes Pseudo-Ground Truth from In-The-Wild Images To extract 3D eyes from images without gaze labels, we have developed an automatic pipeline based on 3D face alignment and 2D iris localization. First, we recover the 3D face with " + }, + { + "bbox": [ + 130, + 437, + 482, + 583 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 130, + 437, + 482, + 583 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 437, + 482, + 583 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 130, + 437, + 482, + 583 + ], + "type": "text", + "content": " in image space using an off-the-shelf method. Then, we align our eyeball templates in the eye sockets based on the face's eyelid landmarks and predefined eyelid landmarks around the eyeball templates. 
In fact, we use the two corner landmarks of each eye which do not move between open and closed eyes. Next, we lift 2D iris predictions to 3D by finding the nearest vertexes from the aligned 3D eye templates. Finally, we compute the rotation between the initially aligned eyes and the 3D-lifted iris center and rotate the eyeballs accordingly. For 3D face alignment, we employ RetinaFace [16] and for 2D iris localization [55] as above. The process is presented in Fig. 3(c)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 131, + 598, + 375, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 598, + 375, + 611 + ], + "spans": [ + { + "bbox": [ + 131, + 598, + 375, + 611 + ], + "type": "text", + "content": "3.3 Joint 3D Eye Mesh and Vector Regression" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "type": "text", + "content": "Given an input face image " + }, + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{I}" + }, + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "type": "text", + "content": ", we utilize 5 face detection landmarks to crop patches around each one of the two eyes. We resize the patches to shape " + }, + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "type": "inline_equation", + "content": "128 \\times 128 \\times 3" + }, + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "type": "text", + "content": " and stack them channel-wise along with a cropped image of the face. We employ a simple model architecture consisting of a ResNet-18 [30] to extract" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "type": "text", + "content": "E. Ververas et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 163 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 163 + ], + "type": "text", + "content": "features, followed by two fully connected layers to map them to two separate eye modalities, which are a) dense 3D eye coordinates and b) a 3D gaze vector. As the final gaze output, we consider the mean direction calculated from the two modalities." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 163, + 480, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 163, + 480, + 200 + ], + "spans": [ + { + "bbox": [ + 130, + 163, + 480, + 200 + ], + "type": "text", + "content": "To train the above network for mesh regression, similarly to [16], we enforce a vertex loss and an edge length loss between the model outputs and the respective ground truth or pseudo-ground truth, which can be expressed as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 225, + 202, + 481, + 236 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 202, + 481, + 236 + ], + "spans": [ + { + "bbox": [ + 225, + 202, + 481, + 236 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {v e r t} = \\frac {1}{N _ {v}} \\sum_ {j = \\{l, r \\}} \\sum_ {i = 1} ^ {N _ {v}} \\| \\mathbf {V} _ {j, i} - \\mathbf {V} _ {j, i} ^ {*} \\| _ {1}, \\tag {1}", + "image_path": "b2e11c9b14741aceb3152c6aca60b5e86886248a459d9c614f8db4cb14581264.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 240, + 479, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 240, + 479, + 277 + ], + "spans": [ + { + "bbox": [ + 131, + 240, + 479, + 277 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 240, + 479, + 277 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_j\\in \\mathbb{R}^{N_v\\times 3}" + }, + { + "bbox": [ + 131, + 240, + 479, + 277 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 240, + 479, + 277 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_j^*\\in \\mathbb{R}^{N_v\\times 3}" + }, + { + "bbox": [ + 131, + 240, + 479, + 277 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 131, + 240, + 479, + 277 + ], + "type": "inline_equation", + "content": "j = \\{l,r\\}" + }, + { + "bbox": [ + 131, + 240, + 479, + 277 + ], + "type": "text", + "content": " are the output and the (pseudo-)ground truth coordinates, while the edge length loss (based on the fixed mesh triangulation of our template meshes) can be written as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 225, + 281, + 480, + 315 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 281, + 480, + 315 + ], + "spans": [ + { + "bbox": [ + 225, + 281, + 480, + 315 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {e d g e}} = \\frac {1}{3 N _ {t}} \\sum_ {j = \\{l, r \\}} \\sum_ {i = 1} ^ {3 N _ {t}} \\| \\mathbf {E} _ {j, i} - \\mathbf {E} _ {j, i} ^ {*} \\| _ {2}, \\tag {2}", + "image_path": "fdb72bd1ef26a5613d9c20bf4752387eff5034c5a508d32053226296859cd861.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 319, + 479, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 319, + 479, + 379 + ], + "spans": [ + { + "bbox": [ + 130, + 319, + 479, + 379 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 319, + 479, + 379 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_j\\in \\mathbb{R}^{3N_t}" + }, + { + "bbox": [ + 130, + 319, + 479, + 379 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 319, + 479, + 379 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_j^*\\in \\mathbb{R}^{3N_t}" + }, + { + "bbox": [ + 130, + 319, + 479, + 379 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 130, + 319, + 479, + 379 + ], + "type": 
"inline_equation", + "content": "j = \\{l,r\\}" + }, + { + "bbox": [ + 130, + 319, + 479, + 379 + ], + "type": "text", + "content": " are the edge lengths of the predicted and the (pseudo-)ground truth eyes. As edge length we define the Euclidean distance between two vertices of the same triangle. In addition to the mesh regression losses, we enforce a gaze loss to the gaze output of our model, expressed as:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 240, + 384, + 480, + 397 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 240, + 384, + 480, + 397 + ], + "spans": [ + { + "bbox": [ + 240, + 384, + 480, + 397 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {g a z e} = (1 8 0 / \\pi) \\operatorname {a r c c o s} \\left(\\mathbf {g} ^ {T} \\mathbf {g} ^ {*}\\right), \\tag {3}", + "image_path": "b49733bcfd4fbf87b29db039c68ec669dc7f247cef1ce339332d47ac9342dd11.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 403, + 479, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 403, + 479, + 450 + ], + "spans": [ + { + "bbox": [ + 130, + 403, + 479, + 450 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 403, + 479, + 450 + ], + "type": "inline_equation", + "content": "\\mathbf{g}" + }, + { + "bbox": [ + 130, + 403, + 479, + 450 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 403, + 479, + 450 + ], + "type": "inline_equation", + "content": "\\mathbf{g}^*" + }, + { + "bbox": [ + 130, + 403, + 479, + 450 + ], + "type": "text", + "content": " are the normalized model output and the gaze (pseudo-)ground truth respectively. We combine losses of Eqs. (1) to (3) in a single loss function to train our models with supervision from (pseudo-)ground truth 3D eye meshes and gaze vectors. The combined loss is written as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 220, + 456, + 480, + 468 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 456, + 480, + 468 + ], + "spans": [ + { + "bbox": [ + 220, + 456, + 480, + 468 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {(P) G T} = \\lambda_ {v} \\mathcal {L} _ {v e r t} + \\lambda_ {e} \\mathcal {L} _ {e d g e} + \\lambda_ {g} \\mathcal {L} _ {g a z e}, \\tag {4}", + "image_path": "e6445e1830cb863b9b0cbd7a8b7ee010c33e99086dfd22fd431a56ec160830b3.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 472, + 479, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 472, + 479, + 507 + ], + "spans": [ + { + "bbox": [ + 130, + 472, + 479, + 507 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 472, + 479, + 507 + ], + "type": "inline_equation", + "content": "\\lambda_v, \\lambda_e" + }, + { + "bbox": [ + 130, + 472, + 479, + 507 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 472, + 479, + 507 + ], + "type": "inline_equation", + "content": "\\lambda_g" + }, + { + "bbox": [ + 130, + 472, + 479, + 507 + ], + "type": "text", + "content": " are parameters which regularize the contribution of the loss terms in the overall loss. 
From our experiments we have selected their values to be " + }, + { + "bbox": [ + 130, + 472, + 479, + 507 + ], + "type": "inline_equation", + "content": "\\lambda_v = 0.1" + }, + { + "bbox": [ + 130, + 472, + 479, + 507 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 472, + 479, + 507 + ], + "type": "inline_equation", + "content": "\\lambda_e = 0.01" + }, + { + "bbox": [ + 130, + 472, + 479, + 507 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 472, + 479, + 507 + ], + "type": "inline_equation", + "content": "\\lambda_g = 1" + }, + { + "bbox": [ + 130, + 472, + 479, + 507 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 131, + 525, + 345, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 525, + 345, + 537 + ], + "spans": [ + { + "bbox": [ + 131, + 525, + 345, + 537 + ], + "type": "text", + "content": "3.4 Multi-View Consistency Supervision" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 545, + 481, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 545, + 481, + 640 + ], + "spans": [ + { + "bbox": [ + 130, + 545, + 481, + 640 + ], + "type": "text", + "content": "Extending our training dataset with in-the-wild images and training using pseudoground truth, usually improves the ability of our models to generalize to unseen domains, as can be seen by our experiments in Sec. 4.3. However, automatically generated 3D eyes and gaze include inconsistencies which are hard to identify and filter out. To balance the feedback of direct supervision from pseudo-ground truth, we design a multi-view supervision framework, based on pairs of real and synthetic images with different head poses, generated by HeadGAN as described in Sec. 3.1." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "content": "Recovering dense 3D face coordinates and pose from images has recently been quite reliable [2,16,16,23]. 
Having a pair of images " + }, + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_1" + }, + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_2" + }, + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "content": " of the same" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DGazeNet" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 114, + 481, + 245 + ], + "blocks": [ + { + "bbox": [ + 133, + 114, + 481, + 245 + ], + "lines": [ + { + "bbox": [ + 133, + 114, + 481, + 245 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 481, + 245 + ], + "type": "image", + "image_path": "b9fc82b93804cf19ce076b1926b7e8f68517b8e3dcd15a8d209a06a59321fc04.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 254, + 482, + 342 + ], + "lines": [ + { + "bbox": [ + 130, + 254, + 482, + 342 + ], + "spans": [ + { + "bbox": [ + 130, + 254, + 482, + 342 + ], + "type": "text", + "content": "Fig. 4: Overview of the proposed method 3DGazeNet. a) During training we employ single images with ground-truth supervision or pairs of synthetic views of the same subject with pseudo-annotations and different head poses. Different sets of losses are employed depending on the type of supervision. b) Detailed demonstration of " + }, + { + "bbox": [ + 130, + 254, + 482, + 342 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{MV}" + }, + { + "bbox": [ + 130, + 254, + 482, + 342 + ], + "type": "text", + "content": ". 3D transformation " + }, + { + "bbox": [ + 130, + 254, + 482, + 342 + ], + "type": "inline_equation", + "content": "\\mathbf{P}" + }, + { + "bbox": [ + 130, + 254, + 482, + 342 + ], + "type": "text", + "content": " which maps view 1 to view 2, is employed to transform points " + }, + { + "bbox": [ + 130, + 254, + 482, + 342 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{l,1}" + }, + { + "bbox": [ + 130, + 254, + 482, + 342 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 254, + 482, + 342 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{r,1}" + }, + { + "bbox": [ + 130, + 254, + 482, + 342 + ], + "type": "text", + "content": ", before calculating an L1 distance loss against " + }, + { + "bbox": [ + 130, + 254, + 482, + 342 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{l,2}" + }, + { + "bbox": [ + 130, + 254, + 482, + 342 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 254, + 482, + 342 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{r,2}" + }, + { + "bbox": [ + 130, + 254, + 482, + 342 + ], + "type": "text", + "content": ". 
c) The base network (3DEyeNet) of our model consists of a ResNet-18 backbone and two fully connected layers leading to the 3D eye mesh and gaze vector outputs." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 388, + 482, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 388, + 482, + 496 + ], + "spans": [ + { + "bbox": [ + 130, + 388, + 482, + 496 + ], + "type": "text", + "content": "subject and their reconstructed 3D faces, we can compute a transformation matrix " + }, + { + "bbox": [ + 130, + 388, + 482, + 496 + ], + "type": "inline_equation", + "content": "\\mathbf{P} \\in \\mathbb{R}^{3 \\times 4}" + }, + { + "bbox": [ + 130, + 388, + 482, + 496 + ], + "type": "text", + "content": " which aligns the two faces in image space. Assuming that gaze direction in both images remains still relative to the face, as is the case with images created by HeadGAN, we are able to supervise 3D regression of eyes by restricting our model's predictions to be consistent over an image pair, as output vertices should coincide when transformation " + }, + { + "bbox": [ + 130, + 388, + 482, + 496 + ], + "type": "inline_equation", + "content": "\\mathbf{P}" + }, + { + "bbox": [ + 130, + 388, + 482, + 496 + ], + "type": "text", + "content": " is applied to one of the pair's outputs. A similar approach has been employed successfully for weakly-supervised body pose estimation [31,44,65]. Particularly, we form the vertex loss of a pair as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 198, + 498, + 481, + 533 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 498, + 481, + 533 + ], + "spans": [ + { + "bbox": [ + 198, + 498, + 481, + 533 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {M V, v e r t e x} = \\frac {1}{N _ {v}} \\sum_ {j = \\{l, r \\}} \\sum_ {i = 1} ^ {N _ {v}} \\| \\mathbf {V} _ {1, j, i} \\mathbf {P} ^ {T} - \\mathbf {V} _ {2, j, i} \\| _ {1}, \\tag {5}", + "image_path": "0bb372aca05ffb0eee2beae225901baf20b36750373a8ec393d940322bb18e8d.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "spans": [ + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{1,j},\\mathbf{V}_{2,j}\\in \\mathbb{R}^{N_v\\times 4}" + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "inline_equation", + "content": "j = \\{l,r\\}" + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "text", + "content": " are the output matrices for left and right eyes, which correspond to input images " + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_1" + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_2" + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{V}_{1,j,i},\\mathbf{V}_{2,j,i}\\in \\mathbb{R}^4" + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "text", + "content": " are the specific homogeneous 3D coordinates indexed by " + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "text", + "content": " in the above matrices. To enforce consistency constraints to the gaze head of our model, we analyse matrix " + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{P}" + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "text", + "content": " to scale " + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "text", + "content": ", rotation " + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{R}" + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "text", + "content": " and translation " + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{t}" + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "text", + "content": " components and employ " + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "inline_equation", + "content": "\\mathbf{R}" + }, + { + "bbox": [ + 130, + 536, + 482, + 609 + ], + "type": "text", + "content": " in a gaze consistency loss within a pair:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 219, + 611, + 481, + 625 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 611, + 481, + 625 + ], + "spans": [ + { + "bbox": [ + 219, + 611, + 481, + 625 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {M V, g a z e} = (1 8 0 / \\pi) \\arccos \\left(\\left(\\mathbf {g} _ {1} ^ {T} \\mathbf {R} ^ {T}\\right) \\mathbf {g} _ {2}\\right), \\tag {6}", + "image_path": "4e821c2be8ad845c8ed3910ffdb5f1fabefad90df6b6e0fb075a678c47941b53.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{g}_1" + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{g}_2" + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "content": " are the normalized model outputs for input images " + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_1" + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_2" + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "content": " respectively. We combine losses of Eqs. 
(5) and (6) in a single loss function to enforce multi-view consistency in mesh and gaze vector regression, between" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "type": "text", + "content": "E. Ververas et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 455, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 455, + 128 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 455, + 128 + ], + "type": "text", + "content": "model outputs coming from pairs of input images. This loss is written as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 212, + 133, + 480, + 146 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 133, + 480, + 146 + ], + "spans": [ + { + "bbox": [ + 212, + 133, + 480, + 146 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {M V} = \\lambda_ {M V, v} \\mathcal {L} _ {M V, v e r t e x} + \\lambda_ {M V, g} \\mathcal {L} _ {M V, g a z e}, \\tag {7}", + "image_path": "83c020a63719effddef672069f77eafc42c1f7b40e768a0781fea43f93c1153d.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 131, + 149, + 481, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 149, + 481, + 209 + ], + "spans": [ + { + "bbox": [ + 131, + 149, + 481, + 209 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 149, + 481, + 209 + ], + "type": "inline_equation", + "content": "\\lambda_{MV,v}" + }, + { + "bbox": [ + 131, + 149, + 481, + 209 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 149, + 481, + 209 + ], + "type": "inline_equation", + "content": "\\lambda_{MV,g}" + }, + { + "bbox": [ + 131, + 149, + 481, + 209 + ], + "type": "text", + "content": " are parameters which regularize the contribution of the loss terms in the overall loss. In our experiments, we have selected their values to be " + }, + { + "bbox": [ + 131, + 149, + 481, + 209 + ], + "type": "inline_equation", + "content": "\\lambda_{MV,v} = 0.1" + }, + { + "bbox": [ + 131, + 149, + 481, + 209 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 149, + 481, + 209 + ], + "type": "inline_equation", + "content": "\\lambda_{MV,g} = 1" + }, + { + "bbox": [ + 131, + 149, + 481, + 209 + ], + "type": "text", + "content": ". To train models with all supervision signals, i.e. 
ground truth " + }, + { + "bbox": [ + 131, + 149, + 481, + 209 + ], + "type": "inline_equation", + "content": "(\\mathcal{L}_{GT})" + }, + { + "bbox": [ + 131, + 149, + 481, + 209 + ], + "type": "text", + "content": ", pseudo-ground truth " + }, + { + "bbox": [ + 131, + 149, + 481, + 209 + ], + "type": "inline_equation", + "content": "(\\mathcal{L}_{PGT})" + }, + { + "bbox": [ + 131, + 149, + 481, + 209 + ], + "type": "text", + "content": " and multi-view supervision " + }, + { + "bbox": [ + 131, + 149, + 481, + 209 + ], + "type": "inline_equation", + "content": "(\\mathcal{L}_{MV})" + }, + { + "bbox": [ + 131, + 149, + 481, + 209 + ], + "type": "text", + "content": ", we utilize the following overall loss function:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 218, + 214, + 480, + 228 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 218, + 214, + 480, + 228 + ], + "spans": [ + { + "bbox": [ + 218, + 214, + 480, + 228 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\lambda_ {G T} \\mathcal {L} _ {G T} + \\lambda_ {P G T} \\mathcal {L} _ {P G T} + \\lambda_ {M V} \\mathcal {L} _ {M V}, \\tag {8}", + "image_path": "2cc139827b7d18973266cfd5b11b1286d7feebd0b9033f301aa8890c794fa90c.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 131, + 230, + 481, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 230, + 481, + 255 + ], + "spans": [ + { + "bbox": [ + 131, + 230, + 481, + 255 + ], + "type": "text", + "content": "with parameters " + }, + { + "bbox": [ + 131, + 230, + 481, + 255 + ], + "type": "inline_equation", + "content": "\\lambda_{GT} = \\lambda_{PGT} = \\lambda_{MV} = 1" + }, + { + "bbox": [ + 131, + 230, + 481, + 255 + ], + "type": "text", + "content": ". Implementation details are included in the supplemental material. An overview of 3DGazeNet is presented in Fig. 4." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 272, + 230, + 285 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 272, + 230, + 285 + ], + "spans": [ + { + "bbox": [ + 132, + 272, + 230, + 285 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 297, + 205, + 307 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 297, + 205, + 307 + ], + "spans": [ + { + "bbox": [ + 132, + 297, + 205, + 307 + ], + "type": "text", + "content": "4.1 Datasets" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 131, + 316, + 482, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 316, + 482, + 495 + ], + "spans": [ + { + "bbox": [ + 131, + 316, + 482, + 495 + ], + "type": "text", + "content": "Gaze Datasets Captured in a lab environment, ETH-XGaze (EXG) [79] consists of 756K frames of 80 subjects and includes large head pose and gaze variation. Collected in uncontrolled indoor environments with mobile devices, MPI-IFaceGaze (MPII) [81] includes smaller head pose and gaze variation and consists of 45K images of 15 subjects, while GazeCapture (GC) [42] contains almost 2M frontal face images of 1474 subjects. In contrast to the above datasets, Gaze360 (G360) [35] is the only gaze dataset captured both indoors and outdoors and consists of 127K training sequences from 365 subjects. The large variation in head pose, gaze, and environmental conditions of Gaze360 makes it the most challenging yet appropriate benchmark for in-the-wild gaze estimation, available in literature. 
For our experiments, we normalized the above datasets based on [80], except for Gaze360 which we process to get normalized face crops. Additionally, we employ the predefined training-test splits, while for Gaze360 we only use the frontal facing images with head pose yaw angle up to " + }, + { + "bbox": [ + 131, + 316, + 482, + 495 + ], + "type": "inline_equation", + "content": "90^{\\circ}" + }, + { + "bbox": [ + 131, + 316, + 482, + 495 + ], + "type": "text", + "content": ". The head pose and gaze distributions of the above datasets are presented in Fig. 5." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 498, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 498, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 131, + 498, + 482, + 666 + ], + "type": "text", + "content": "In-The-Wild Face Datasets In-the-wild face datasets consist of significantly more unique subjects and capturing environments. For our experiments, we employed four publicly-available datasets FFHQ [33] (70K images), AFLW [38] (25K images), AVA [25,48,49] and CMU-Panoptic [32]. FFHQ and AFLW are in-the-wild face datasets commonly used for face analysis, AVA is a large-scale in-the-wild human activity dataset annotated under the Looking-At-Each-Other condition and CMU-Panoptic is collected in lab conditions and captures interactions of multiple people in the same scene. FFHQ and AFLW include one face per image and thus are only processed to get normalized face crops. AVA and CMU-Panoptic include frames with multiple faces, from which we randomly select 80K faces from each dataset with a maximum head pose of " + }, + { + "bbox": [ + 131, + 498, + 482, + 666 + ], + "type": "inline_equation", + "content": "90^o" + }, + { + "bbox": [ + 131, + 498, + 482, + 666 + ], + "type": "text", + "content": ". Similarly to [40], for CMU we employed only frames captured with cameras in eye height. We name this collection of 255K images as the \"In-The-Wild Gaze\" dataset (ITWG). Lastly, to enforce multi-view supervision as described in Sec. 3.4, we synthesized" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DGazeNet" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 114, + 306, + 202 + ], + "blocks": [ + { + "bbox": [ + 133, + 114, + 306, + 202 + ], + "lines": [ + { + "bbox": [ + 133, + 114, + 306, + 202 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 306, + 202 + ], + "type": "image", + "image_path": "c6c73a25e4b5a9ae40439184f52c5dd9b3d75210945144df0a4045cf7e0139df.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 210, + 482, + 244 + ], + "lines": [ + { + "bbox": [ + 130, + 210, + 482, + 244 + ], + "spans": [ + { + "bbox": [ + 130, + 210, + 482, + 244 + ], + "type": "text", + "content": "Fig. 
5: Distributions of the head pose (top row) and gaze (bottom row) of the gaze datasets (red) and the face datasets (blue). Wide distribution datasets CMU, AVA, FFHQ, and AFLW are exploited to close the gap between diverse image domains." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 307, + 113, + 482, + 203 + ], + "blocks": [ + { + "bbox": [ + 307, + 113, + 482, + 203 + ], + "lines": [ + { + "bbox": [ + 307, + 113, + 482, + 203 + ], + "spans": [ + { + "bbox": [ + 307, + 113, + 482, + 203 + ], + "type": "image", + "image_path": "897e2f78b2617c6a38b20bff068c56f4ef5fce8c4e766aeff5be652f83433fcc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 268, + 479, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 268, + 479, + 328 + ], + "spans": [ + { + "bbox": [ + 130, + 268, + 479, + 328 + ], + "type": "text", + "content": "novel views from images of ITWG using HeadGAN, sampling the pitch and yaw angles from Gaussians " + }, + { + "bbox": [ + 130, + 268, + 479, + 328 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(0,20)" + }, + { + "bbox": [ + 130, + 268, + 479, + 328 + ], + "type": "text", + "content": ", relatively to the original head pose. We name this collection of images as \"Multi-View In-The-Wild Gaze\" dataset (ITWG-MV) and employ it to improve the generalization of gaze estimation. The head pose and gaze distributions of the above datasets are presented in Fig. 5." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 347, + 263, + 358 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 347, + 263, + 358 + ], + "spans": [ + { + "bbox": [ + 132, + 347, + 263, + 358 + ], + "type": "text", + "content": "4.2 Gaze Generalization" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 368, + 480, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 368, + 480, + 416 + ], + "spans": [ + { + "bbox": [ + 130, + 368, + 480, + 416 + ], + "type": "text", + "content": "In this section, we evaluate 3DGazeNet in within-dataset and cross-dataset experiments. We believe that [40] is the most closely related method to ours, as it is the only method using 3D geometric cues of the scene to generalize gaze from arbitrary face data." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 419, + 482, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 419, + 482, + 563 + ], + "spans": [ + { + "bbox": [ + 130, + 419, + 482, + 563 + ], + "type": "text", + "content": "Cross-dataset Evaluation We design two cross-dataset experiments to test the generalization of our method on G360 and report the results on Tab. 1(a) and (b). Particularly, the experiments are: a) we train our method on the CMU, AVA, and ITWG-MV datasets utilizing only our pseudo-labels and multi-view supervision and b) we additionally employ ground truth supervision from GC and EXG. From the results of the above experiments, it becomes obvious that our geometry-aware pseudo-labels employed within our multi-view supervision training effectively generalize gaze estimation to unseen domains, even without any available ground truth. 
In particular, in experiment a) our method outperforms [40] by " + }, + { + "bbox": [ + 130, + 419, + 482, + 563 + ], + "type": "inline_equation", + "content": "23\\%" + }, + { + "bbox": [ + 130, + 419, + 482, + 563 + ], + "type": "text", + "content": " with AVA, " + }, + { + "bbox": [ + 130, + 419, + 482, + 563 + ], + "type": "inline_equation", + "content": "22\\%" + }, + { + "bbox": [ + 130, + 419, + 482, + 563 + ], + "type": "text", + "content": " with CMU, " + }, + { + "bbox": [ + 130, + 419, + 482, + 563 + ], + "type": "inline_equation", + "content": "12.5\\%" + }, + { + "bbox": [ + 130, + 419, + 482, + 563 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 130, + 419, + 482, + 563 + ], + "type": "inline_equation", + "content": "\\mathrm{AVA + CMU}" + }, + { + "bbox": [ + 130, + 419, + 482, + 563 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 419, + 482, + 563 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 130, + 419, + 482, + 563 + ], + "type": "text", + "content": " with our large-scale ITWG-MV. Similarly, in experiment b) 3DGazeNet outperforms [40] by " + }, + { + "bbox": [ + 130, + 419, + 482, + 563 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 130, + 419, + 482, + 563 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 419, + 482, + 563 + ], + "type": "inline_equation", + "content": "9\\%" + }, + { + "bbox": [ + 130, + 419, + 482, + 563 + ], + "type": "text", + "content": " with GC and EXG respectively." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 566, + 482, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 566, + 482, + 638 + ], + "spans": [ + { + "bbox": [ + 130, + 566, + 482, + 638 + ], + "type": "text", + "content": "Within-dataset Evaluation Here we compare our method against state-of-the-art within-dataset gaze estimation on G360. Similarly to [40], we employ AVA for additional supervision, while we also examine the effect of the larger-scale ITWG-MV. The results, presented in Tab. 1 (c), show that multi-view supervision from AVA does not improve performance (which is in line with the compared method), but the large-scale ITWG-MV does." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 641, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 482, + 666 + ], + "type": "text", + "content": "Comparison with state-of-the-art We further compare 3DGazeNet against recent methods for gaze generalization. The works in [5,70] are developed with a" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "type": "text", + "content": "E. Ververas et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 179, + 480, + 237 + ], + "blocks": [ + { + "bbox": [ + 130, + 114, + 482, + 169 + ], + "lines": [ + { + "bbox": [ + 130, + 114, + 482, + 169 + ], + "spans": [ + { + "bbox": [ + 130, + 114, + 482, + 169 + ], + "type": "text", + "content": "Table 1: Weakly-supervised method evaluation in cross- and within-dataset experiments. In all cases, we calculate gaze error in degrees (lower is better), on the test set of Gaze360. CMU and AVA correspond to subsets of ITWG-MV (i.e. augmented for multi-view supervision), providing a clearer comparison with [40]. Our method trained with ITWG-MV outperforms the baselines in all cases. 3DGN refers to 3DGazeNet" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 179, + 480, + 237 + ], + "lines": [ + { + "bbox": [ + 135, + 179, + 480, + 237 + ], + "spans": [ + { + "bbox": [ + 135, + 179, + 480, + 237 + ], + "type": "table", + "html": "
<table>
<tr><td colspan=3>(a) Cross-dataset Synthetic Views</td><td colspan=4>(b) Cross-dataset Ground Truth + Synthetic Views</td><td colspan=4>(c) Within-dataset Ground Truth + Synthetic Views</td></tr>
<tr><td>Dataset</td><td>[40]</td><td>3DGN</td><td>Dataset</td><td>[79]</td><td>[40]</td><td>3DGN</td><td>Dataset</td><td>[79]</td><td>[40]</td><td>3DGN</td></tr>
<tr><td>AVA</td><td>29.0</td><td>22.4</td><td>GC</td><td>30.2</td><td>29.2</td><td>27.5</td><td>EXG</td><td>27.3</td><td>20.5</td><td>22.1</td></tr>
<tr><td>CMU</td><td>26.0</td><td>20.3</td><td>GC+AVA</td><td>-</td><td>19.5</td><td>18.9</td><td>EXG+AVA</td><td>-</td><td>16.9</td><td>17.1</td></tr>
<tr><td>CMU+AVA</td><td>22.5</td><td>19.7</td><td>GC+AVA+CMU</td><td>-</td><td>-</td><td>18.4</td><td>EXG+AVA+CMU</td><td>-</td><td>-</td><td>16.7</td></tr>
<tr><td>ITWG-MV</td><td>-</td><td>18.1</td><td>GC+ITWG-MV</td><td>-</td><td>-</td><td>17.6</td><td>EXG+ITWG-MV</td><td>-</td><td>-</td><td>15.4</td></tr>
</table>
", + "image_path": "5410cbb40f8a8fc0ae6edc4b06fa38a4999a12a44239087209db83e03e098cf0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 134, + 310, + 479, + 384 + ], + "blocks": [ + { + "bbox": [ + 130, + 266, + 479, + 300 + ], + "lines": [ + { + "bbox": [ + 130, + 266, + 479, + 300 + ], + "spans": [ + { + "bbox": [ + 130, + 266, + 479, + 300 + ], + "type": "text", + "content": "Table 2: Comparison with state-of-the-art in domain generalization for gaze estimation. In all experiments our model outperforms the compared methods. Gaze error is in degrees (lower is better)." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 134, + 310, + 479, + 384 + ], + "lines": [ + { + "bbox": [ + 134, + 310, + 479, + 384 + ], + "spans": [ + { + "bbox": [ + 134, + 310, + 479, + 384 + ], + "type": "table", + "html": "
<table>
<tr><td></td><td colspan=8>Stage 1 (Gaze Generalization Models)</td><td colspan=4>+ Stage 2 (Adaptation/Fine Tuning)</td></tr>
<tr><td></td><td colspan=2>EXG</td><td colspan=2>EXG+ITWG-MV</td><td colspan=2>G360</td><td colspan=2>G360+ITWG-MV</td><td colspan=2>EXG+ITWG-MV</td><td colspan=2>G360+ITWG-MV</td></tr>
<tr><td>Method</td><td>MPII</td><td>GC</td><td>MPII</td><td>GC</td><td>MPII</td><td>GC</td><td>MPII</td><td>GC</td><td>MPII</td><td>GC</td><td>MPII</td><td>GC</td></tr>
<tr><td>RAT/RUDA [5]</td><td>7.1</td><td>8.4</td><td>7.0</td><td>8.2</td><td>9.3</td><td>9.0</td><td>9.1</td><td>8.5</td><td>6.8</td><td>8.1</td><td>7.9</td><td>8.3</td></tr>
<tr><td>CDG/CRGA [70]</td><td>6.7</td><td>9.2</td><td>6.9</td><td>9.5</td><td>7.0</td><td>8.3</td><td>8.1</td><td>8.9</td><td>7.4</td><td>9.0</td><td>7.6</td><td>8.7</td></tr>
<tr><td>PureGaze [11]</td><td>7.9</td><td>8.7</td><td>7.7</td><td>9.3</td><td>7.6</td><td>8.3</td><td>7.4</td><td>8.6</td><td>6.6</td><td>8.0</td><td>7.2</td><td>8.3</td></tr>
<tr><td>3DGazeNet</td><td>7.7</td><td>10.7</td><td>6.0</td><td>7.8</td><td>9.1</td><td>12.1</td><td>6.3</td><td>8.0</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
</table>
", + "image_path": "d49bfc6c1a1b2e0785d69bf565e147c80fc2a40614fbcc8e51cb74098ea8b83a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 426, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 426, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 426, + 482, + 666 + ], + "type": "text", + "content": "focus on domain adaptation for gaze estimation and encompass two-stage training schemes, both training feature invariant models at the first stage. That is, in the first training stage RUDA [5] trains gaze estimation model invariant to image rotations, while CRGA [70] uses a contrastive loss to separate image features according to gaze. The second stage of the above methods is focused on adapting the initially trained models to specific target domains. As our method aims to train general gaze estimation models without knowledge of specific target domains, we implement the first-stage models of the above methods, namely RAT [5], CDG [70] and compare them with 3DGazeNet in cross-dataset experiments. Additionally, we compare against PureGaze [11] which is a gaze generalization method that purifies face features to achieve higher gaze estimation performance. To follow the evaluation protocol in the above works, we train all methods on EXG and G360 (+ITWG-MV) and test on MPII and GC. For completeness, we include results of the full models RUDA and CRGA after using ITWG-MV according to their domain adaptation schemes. For PureGaze, ITWG-MV was used for fine-tuning. Tab. 2 shows that the proposed method outperforms the baselines for gaze generalization when ITWG-MV is employed. The compared methods do not include regularization for the noisy labels of ITWG-MV, resulting in similar or worse performance, while our method exploits them through " + }, + { + "bbox": [ + 130, + 426, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{MV}" + }, + { + "bbox": [ + 130, + 426, + 482, + 666 + ], + "type": "text", + "content": ", benefiting from the extended variation." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DGazeNet" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 187, + 157, + 427, + 224 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 482, + 148 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 482, + 148 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 482, + 148 + ], + "type": "text", + "content": "Table 3: Comparison between training targets Vector(V), Mesh(M) and Mesh+Vector(M+V) in within-dataset experiments (using only " + }, + { + "bbox": [ + 132, + 114, + 482, + 148 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{GT}" + }, + { + "bbox": [ + 132, + 114, + 482, + 148 + ], + "type": "text", + "content": "). Target M+V leads to lower errors than state-of-the-art. Error is in degrees (lower is better)." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 187, + 157, + 427, + 224 + ], + "lines": [ + { + "bbox": [ + 187, + 157, + 427, + 224 + ], + "spans": [ + { + "bbox": [ + 187, + 157, + 427, + 224 + ], + "type": "table", + "html": "
<table>
<tr><td>Dataset</td><td colspan=8>Compared Methods</td><td colspan=3>3DGazeNet</td></tr>
<tr><td></td><td>[51]</td><td>[13]</td><td>[1]</td><td>[15,82]</td><td>[53]</td><td>[15,35]</td><td>[40]</td><td>[79]</td><td>V</td><td>M</td><td>M+V</td></tr>
<tr><td>MPII</td><td>4.04</td><td>4.00</td><td>3.92</td><td>4.9</td><td>5.3</td><td>4.06</td><td>-</td><td>4.8</td><td>4.1</td><td>4.2</td><td>4.0</td></tr>
<tr><td>G360</td><td>10.7</td><td>10.6</td><td>10.4</td><td>14.9</td><td>-</td><td>11.1</td><td>10.1</td><td>-</td><td>9.8</td><td>9.8</td><td>9.6</td></tr>
<tr><td>GC</td><td>-</td><td>-</td><td>-</td><td>-</td><td>3.5</td><td>-</td><td>-</td><td>3.3</td><td>3.2</td><td>3.3</td><td>3.1</td></tr>
<tr><td>EXG</td><td>-</td><td>-</td><td>-</td><td>7.3</td><td>-</td><td>-</td><td>-</td><td>4.5</td><td>4.2</td><td>4.4</td><td>4.2</td></tr>
</table>
", + "image_path": "070547e17f6a4bbe2a377b9eedc95f39d6d7ffe6ebc6171b32c0519c58ced832.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 247, + 243, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 247, + 243, + 258 + ], + "spans": [ + { + "bbox": [ + 132, + 247, + 243, + 258 + ], + "type": "text", + "content": "4.3 Ablation studies" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 267, + 481, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 267, + 481, + 422 + ], + "spans": [ + { + "bbox": [ + 130, + 267, + 481, + 422 + ], + "type": "text", + "content": "Gaze Estimation via 3D Eye Mesh Regression Here we experimentally evaluate our suggestion that gaze estimation benefits from replacing the training target from gaze vectors or angles to dense 3D eye coordinates. To this end, we employ the fully supervised version of our model, utilizing data with exact ground truth and " + }, + { + "bbox": [ + 130, + 267, + 481, + 422 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{GT}" + }, + { + "bbox": [ + 130, + 267, + 481, + 422 + ], + "type": "text", + "content": " for training. We conduct within-dataset experiments on MPII, GC, G360 and EXG for which specific training-testing subsets are provided. We compare against state-of-the-art methods [1,13,15,35,40,51,53,79,82] and report the results in Tab. 3. In almost all cases, our model outperforms the baselines, while combining the two modalities, i.e. dense 3D meshes and gaze vectors " + }, + { + "bbox": [ + 130, + 267, + 481, + 422 + ], + "type": "inline_equation", + "content": "(\\mathrm{M} + \\mathrm{V})" + }, + { + "bbox": [ + 130, + 267, + 481, + 422 + ], + "type": "text", + "content": ", improves performance compared to training with vector targets (V) or 3D mesh targets (M) alone. This is possibly due to the distinct nature of the two modalities, i.e. the vectors provide exact label supervision, while meshes provide a robust representation which limits sparse prediction errors." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 423, + 481, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 423, + 481, + 579 + ], + "spans": [ + { + "bbox": [ + 130, + 423, + 481, + 579 + ], + "type": "text", + "content": "The main benefit of dense coordinate regression over pose parameters or sparse points prediction is that individual parameter errors have limited effect on the total outcome making them more robust to prediction inaccuracies [16]. This effect is particularly useful for our multi-view training scheme in which introducing consistency of dense correspondences between images rather than only vector consistency, offers stronger regularization. We validate this argument in gaze generalization experiments in G360, GC, EXG, and MPII, presented in Tab. 4. For this experiment, we consider three versions of 3DGazeNet: one which predicts only gaze vectors and no coordinates (Vector), one which predicts 8 3D iris landmarks instead of dense eye meshes (Iris+Vector), to highlight the effect of dense coordinate prediction, and the full 3DGazeNet (Mesh+Vector). The results show that employing combined training targets always benefits performance, while replacing dense 3D eye meshes with iris landmarks highly limits this effect." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 581, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 581, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 581, + 481, + 666 + ], + "type": "text", + "content": "The Effect of Gaze Pseudo-Labels and Multi-View Supervision Here we examine the contribution of our automatic geometry-aware pseudo-labels and the multi-view supervision loss of our approach. To this end, we consider three training scenarios which are the following: a) training with ITWG and its pseudo-labels as ground truth " + }, + { + "bbox": [ + 130, + 581, + 481, + 666 + ], + "type": "inline_equation", + "content": "(\\mathcal{L}_{PGT})" + }, + { + "bbox": [ + 130, + 581, + 481, + 666 + ], + "type": "text", + "content": ", b) training with ITWG-MV utilizing only the multi-view consistency constraints and no pseudo-labels " + }, + { + "bbox": [ + 130, + 581, + 481, + 666 + ], + "type": "inline_equation", + "content": "(\\mathcal{L}_{MV})" + }, + { + "bbox": [ + 130, + 581, + 481, + 666 + ], + "type": "text", + "content": " and c) training with ITWG-MV while employing both pseudo-labels and the multi-view" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "type": "text", + "content": "E. Ververas et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 168, + 478, + 258 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 482, + 159 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 482, + 159 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 482, + 159 + ], + "type": "text", + "content": "Table 4: Comparison between training targets Vector, Iris+Vector and Mesh+Vector for domain generalization when employing our full model (Eq. (8)). For the target Vector, we remove all mesh terms from the employed losses. In all experiments, the target Mesh+Vector results in a lower error. Gaze error is in degrees (lower is better)." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 168, + 478, + 258 + ], + "lines": [ + { + "bbox": [ + 135, + 168, + 478, + 258 + ], + "spans": [ + { + "bbox": [ + 135, + 168, + 478, + 258 + ], + "type": "table", + "html": "
<table>
<tr><td>Training Dataset</td><td colspan=4>Vector</td><td colspan=4>Iris+Vector</td><td colspan=4>Mesh+Vector</td></tr>
<tr><td></td><td>G360</td><td>GC</td><td>EXG</td><td>MPII</td><td>G360</td><td>GC</td><td>EXG</td><td>MPII</td><td>G360</td><td>GC</td><td>EXG</td><td>MPII</td></tr>
<tr><td>ITWG-MV</td><td>19.1</td><td>10.1</td><td>16.7</td><td>8.5</td><td>18.8</td><td>9.9</td><td>16.7</td><td>8.2</td><td>18.1</td><td>9.0</td><td>16.7</td><td>7.6</td></tr>
<tr><td>G360+ITWG-MV</td><td>10.1</td><td>10.2</td><td>15.1</td><td>7.0</td><td>9.7</td><td>9.4</td><td>15.0</td><td>6.8</td><td>9.3</td><td>8.0</td><td>14.6</td><td>6.3</td></tr>
<tr><td>GC+ITWG-MV</td><td>18.2</td><td>3.1</td><td>16.0</td><td>6.1</td><td>18.0</td><td>3.0</td><td>15.9</td><td>6.2</td><td>17.6</td><td>3.0</td><td>15.5</td><td>6.1</td></tr>
<tr><td>EXG+ITWG-MV</td><td>16.5</td><td>10.2</td><td>4.5</td><td>6.6</td><td>16.3</td><td>9.6</td><td>4.5</td><td>6.4</td><td>15.4</td><td>7.8</td><td>4.3</td><td>6.0</td></tr>
<tr><td>MPII+ITWG-MV</td><td>17.8</td><td>8.2</td><td>15.2</td><td>4.8</td><td>17.9</td><td>7.6</td><td>15.0</td><td>4.6</td><td>17.6</td><td>6.8</td><td>14.9</td><td>4.2</td></tr>
</table>
", + "image_path": "6173df637b2093e13e088c15f3ddc4f41a7b5e5492dd1f1863400b988435524e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 187, + 310, + 427, + 412 + ], + "blocks": [ + { + "bbox": [ + 132, + 267, + 480, + 301 + ], + "lines": [ + { + "bbox": [ + 132, + 267, + 480, + 301 + ], + "spans": [ + { + "bbox": [ + 132, + 267, + 480, + 301 + ], + "type": "text", + "content": "Table 5: The effect of incorporating pseudo-ground truth and multi-view supervision during training. Both components contribute towards improving results in cross-dataset gaze estimation experiments. Gaze error is in degrees (lower is better)." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 187, + 310, + 427, + 412 + ], + "lines": [ + { + "bbox": [ + 187, + 310, + 427, + 412 + ], + "spans": [ + { + "bbox": [ + 187, + 310, + 427, + 412 + ], + "type": "table", + "html": "
Dataset\\( {\\mathcal{L}}_{GT} \\)\\( {\\mathcal{L}}_{PGT} \\)\\( {\\mathcal{L}}_{MV} \\)G360GCEXGMPII
ITWG--23.114.824.313.6
ITWG-MV--47.433.241.132.8
ITWG-MV-18.19.016.77.6
GC--27.53.128.410.4
GC+ITWG-21.43.223.79.1
GC+ITWG-MV-24.73.526.210.1
GC+ITWG-MV17.63.015.56.1
", + "image_path": "affe50515f33289a3b98a13bd04fcad04260e448f78a40b0fbe803f1a081403e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 434, + 482, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 434, + 482, + 530 + ], + "spans": [ + { + "bbox": [ + 130, + 434, + 482, + 530 + ], + "type": "text", + "content": "consistency loss " + }, + { + "bbox": [ + 130, + 434, + 482, + 530 + ], + "type": "inline_equation", + "content": "(\\mathcal{L}_{PGT} + \\mathcal{L}_{MV})" + }, + { + "bbox": [ + 130, + 434, + 482, + 530 + ], + "type": "text", + "content": ". To further evaluate the effect of the pseudolabels and multi-view loss, we repeat the above experiments by adding ground truth supervision from GC " + }, + { + "bbox": [ + 130, + 434, + 482, + 530 + ], + "type": "inline_equation", + "content": "(+ \\mathcal{L}_{GT})" + }, + { + "bbox": [ + 130, + 434, + 482, + 530 + ], + "type": "text", + "content": ". We test our models on the test set of G360, GC, EXG, and MPII, and report the results in Tab. 5. In all cases, combining our pseudo-labels and multi-view loss yields the lowest error in degrees. Lastly, utilizing only " + }, + { + "bbox": [ + 130, + 434, + 482, + 530 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{MV}" + }, + { + "bbox": [ + 130, + 434, + 482, + 530 + ], + "type": "text", + "content": " on ITWG-MV leads to very high errors which is reasonable as no supervision for the eyeball topology exists, thus, the model outputs cannot follow the spherical shape of the eyeball template." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 533, + 482, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 641 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 641 + ], + "type": "text", + "content": "The Effect of Head Pose Distribution of ITWG Head pose distribution difference between the train and test set is one of the main reasons that gaze-estimation models fail in cross-dataset situations. To close the gap between different training and testing scenarios, we have designed ITWG, a large-scale dataset with widespread variation in head pose and gaze angles. To study the effect of the head pose variation of ITWG in our experiments, we employ different subsets of ITWG with various levels of head pose variation and conduct cross-dataset experiments with them. In particular, we consider four subsets of ITWG, with maximum yaw angles of " + }, + { + "bbox": [ + 130, + 533, + 482, + 641 + ], + "type": "inline_equation", + "content": "5^o" + }, + { + "bbox": [ + 130, + 533, + 482, + 641 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 533, + 482, + 641 + ], + "type": "inline_equation", + "content": "20^o" + }, + { + "bbox": [ + 130, + 533, + 482, + 641 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 533, + 482, + 641 + ], + "type": "inline_equation", + "content": "40^o" + }, + { + "bbox": [ + 130, + 533, + 482, + 641 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 533, + 482, + 641 + ], + "type": "inline_equation", + "content": "90^o" + }, + { + "bbox": [ + 130, + 533, + 482, + 641 + ], + "type": "text", + "content": " (all) respectively." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "content": "We train 3DGazeNet with ground truth supervision from MPII as well as pseudo-labels and multi-view supervision from the four versions of ITWG-MV." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DGazeNet" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 186, + 115, + 427, + 251 + ], + "blocks": [ + { + "bbox": [ + 186, + 115, + 427, + 251 + ], + "lines": [ + { + "bbox": [ + 186, + 115, + 427, + 251 + ], + "spans": [ + { + "bbox": [ + 186, + 115, + 427, + 251 + ], + "type": "image", + "image_path": "8e5c305ff8fd9108a272c436f6fc78dfc297cff71cffa33dc0809c624ae7ac3e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 261, + 482, + 297 + ], + "lines": [ + { + "bbox": [ + 130, + 261, + 482, + 297 + ], + "spans": [ + { + "bbox": [ + 130, + 261, + 482, + 297 + ], + "type": "text", + "content": "Fig. 6: Gaze error of G360 across head poses when training with MPII and subsets of ITWG-MV. Wider range of head poses in the ITWG-MV data, lead to significantly lower errors in large poses." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 319, + 482, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 319, + 482, + 439 + ], + "spans": [ + { + "bbox": [ + 130, + 319, + 482, + 439 + ], + "type": "text", + "content": "The results of testing on G360 are presented in Fig. 6. The resulting curves clearly demonstrate the effect of the available head pose variation in the training data. Specifically, utilizing the entirety of ITWG-MV leads to the lowest errors which are relatively consistent across the head pose range. As expected, decreasing the available head pose variation, increasingly affects model performance with the worst case being training with MPII alone. Based on the above finding we argue that the gap between small and wide distribution gaze datasets (regarding head pose) can effectively close by employing similarly large distribution unlabeled face datasets, which is crucial for training plug-n-play gaze estimation models that can be directly employed in applications." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 459, + 317, + 472 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 459, + 317, + 472 + ], + "spans": [ + { + "bbox": [ + 132, + 459, + 317, + 472 + ], + "type": "text", + "content": "5 Limitations and Conclusion" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 485, + 481, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 485, + 481, + 544 + ], + "spans": [ + { + "bbox": [ + 130, + 485, + 481, + 544 + ], + "type": "text", + "content": "In Sec. 4, we shown that pseudo-ground truth can be effectively utilized in gaze estimation. Nevertheless, a limitation of our method is that pseudo-annotation accuracy is related to the accuracy of 3D face and 2D iris alignment. In addition, our current method cannot operate on images without a visible face (when the face is looking away from the camera)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "type": "text", + "content": "Overall, In this work, we present a novel weakly-supervised method for gaze generalization, based on dense 3D eye mesh regression. We demonstrate that by utilizing both 3D eye coordinates and gaze labels during training, instead of just gaze labels, we can achieve lower prediction errors. Moreover, we explore the possibility of exploiting the abundantly available in-the-wild face data for improving gaze estimation generalization. To that end, we propose a novel methodology to generate robust, 3D geometry-aware pseudo ground truth labels, as well as a multi-view weak-supervision framework for effective training. By enforcing these constraints, we are able to successfully utilize in-the-wild face data and achieve improvements in cross-dataset and within-dataset experiments." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 238, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 238, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 238, + 100 + ], + "type": "text", + "content": "E. Ververas et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 480, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 480, + 140 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 480, + 140 + ], + "type": "text", + "content": "Acknowledgments. S. Zafeiriou was supported by EPSRC Project DEFORM (EP/S010203/1) and GNOMON (EP/X011364)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 160, + 197, + 172 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 160, + 197, + 172 + ], + "spans": [ + { + "bbox": [ + 132, + 160, + 197, + 172 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 186, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 138, + 186, + 479, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 186, + 479, + 220 + ], + "spans": [ + { + "bbox": [ + 138, + 186, + 479, + 220 + ], + "type": "text", + "content": "1. Abdelrahman, A.A., Hempel, T., Khalifa, A., Al-Hamadi, A., Dinges, L.: L2cs-net: Fine-grained gaze estimation in unconstrained environments. In: ICFSP. pp. 98-102. IEEE (2023)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 220, + 480, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 220, + 480, + 243 + ], + "spans": [ + { + "bbox": [ + 138, + 220, + 480, + 243 + ], + "type": "text", + "content": "2. Albiero, V., Chen, X., Yin, X., Pang, G., Hassner, T.: img2pose: Face alignment and detection via 6dof, face pose estimation. In: CVPR (2021)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 243, + 480, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 243, + 480, + 265 + ], + "spans": [ + { + "bbox": [ + 138, + 243, + 480, + 265 + ], + "type": "text", + "content": "3. Alp Guler, R., Trigeorgis, G., Antonakos, E., Snape, P., Zafeiriou, S., Kokkinos, I.: Densereg: Fully convolutional dense shape regression in-the-wild. In: CVPR (2017)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 266, + 480, + 287 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 266, + 480, + 287 + ], + "spans": [ + { + "bbox": [ + 138, + 266, + 480, + 287 + ], + "type": "text", + "content": "4. Andrist, S., Tan, X.Z., Gleicher, M., Mutlu, B.: Conversational gaze aversion for humanlike robots. In: HRI (2014)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 288, + 480, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 288, + 480, + 309 + ], + "spans": [ + { + "bbox": [ + 138, + 288, + 480, + 309 + ], + "type": "text", + "content": "5. Bao, Y., Liu, Y., Wang, H., Lu, F.: Generalizing gaze estimation with rotation consistency. In: CVPR (2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 310, + 480, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 310, + 480, + 342 + ], + "spans": [ + { + "bbox": [ + 138, + 310, + 480, + 342 + ], + "type": "text", + "content": "6. Burova, A., Mäkelä, J., Hakulinen, J., Keskinen, T., Heinonen, H., Siltanen, S., Turunen, M.: Utilizing vr and gaze tracking to develop ar solutions for industrial maintenance. In: CHI (2020)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 343, + 480, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 343, + 480, + 365 + ], + "spans": [ + { + "bbox": [ + 138, + 343, + 480, + 365 + ], + "type": "text", + "content": "7. Cai, X., Zeng, J., Shan, S., Chen, X.: Source-free adaptive gaze estimation by uncertainty reduction. In: CVPR. pp. 
22035-22045 (2023)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 366, + 480, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 366, + 480, + 388 + ], + "spans": [ + { + "bbox": [ + 138, + 366, + 480, + 388 + ], + "type": "text", + "content": "8. Cai, Y., Ge, L., Cai, J., Yuan, J.: Weakly-supervised 3d hand pose estimation from monocular rgb images. In: ECCV (2018)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 388, + 480, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 388, + 480, + 421 + ], + "spans": [ + { + "bbox": [ + 138, + 388, + 480, + 421 + ], + "type": "text", + "content": "9. Castner, N., Kuebler, T.C., Scheiter, K., Richter, J., Eder, T., Hützig, F., Keutel, C., Kasneci, E.: Deep semantic gaze embedding and scanpath comparison for expertise classification during opt viewing. In: ACM ETRA (2020)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 422, + 480, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 422, + 480, + 443 + ], + "spans": [ + { + "bbox": [ + 138, + 422, + 480, + 443 + ], + "type": "text", + "content": "10. Chen, M., Jin, Y., Goodall, T., Yu, X., Bovik, A.C.: Study of 3d virtual reality picture quality. IEEE Journal of Selected Topics in Signal Processing (2020)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 444, + 480, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 444, + 480, + 464 + ], + "spans": [ + { + "bbox": [ + 138, + 444, + 480, + 464 + ], + "type": "text", + "content": "11. Cheng, Y., Bao, Y., Lu, F.: Puregaze: Purifying gaze feature for generalizable gaze estimation. In: AAAI (2022)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 465, + 480, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 465, + 480, + 487 + ], + "spans": [ + { + "bbox": [ + 138, + 465, + 480, + 487 + ], + "type": "text", + "content": "12. Cheng, Y., Huang, S., Wang, F., Qian, C., Lu, F.: A coarse-to-fine adaptive network for appearance-based gaze estimation. In: AAAI (2020)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 488, + 438, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 488, + 438, + 498 + ], + "spans": [ + { + "bbox": [ + 138, + 488, + 438, + 498 + ], + "type": "text", + "content": "13. Cheng, Y., Lu, F.: Gaze estimation using transformer. In: ICPR (2022)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 499, + 480, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 499, + 480, + 521 + ], + "spans": [ + { + "bbox": [ + 138, + 499, + 480, + 521 + ], + "type": "text", + "content": "14. Cheng, Y., Lu, F., Zhang, X.: Appearance-based gaze estimation via evaluation-guided asymmetric regression. In: ECCV (2018)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 138, + 521, + 480, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 521, + 480, + 544 + ], + "spans": [ + { + "bbox": [ + 138, + 521, + 480, + 544 + ], + "type": "text", + "content": "15. Cheng, Y., Wang, H., Bao, Y., Lu, F.: Appearance-based gaze estimation with deep learning: A review and benchmark. 
arXiv preprint arXiv:2104.12668 (2021)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 138, + 544, + 480, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 544, + 480, + 565 + ], + "spans": [ + { + "bbox": [ + 138, + 544, + 480, + 565 + ], + "type": "text", + "content": "6. Deng, J., Guo, J., Ververas, E., Kotsia, I., Zafeiriou, S.: Retinaface: Single-shot multi-level face localisation in the wild. In: CVPR (2020)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 138, + 566, + 480, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 566, + 480, + 599 + ], + "spans": [ + { + "bbox": [ + 138, + 566, + 480, + 599 + ], + "type": "text", + "content": "7. Deng, Y., Yang, J., Xu, S., Chen, D., Jia, Y., Tong, X.: Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In: CVPR Workshops (2019)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 138, + 600, + 480, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 600, + 480, + 621 + ], + "spans": [ + { + "bbox": [ + 138, + 600, + 480, + 621 + ], + "type": "text", + "content": "8. Doukas, M.C., Koujan, M.R., Sharmanska, V., Roussos, A., Zafeiriou, S.: Head2head++: Deep facial attributes re-targeting. T-BIOM (2021)" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 138, + 622, + 480, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 622, + 480, + 643 + ], + "spans": [ + { + "bbox": [ + 138, + 622, + 480, + 643 + ], + "type": "text", + "content": "9. Doukas, M.C., Zafeiriou, S., Sharmanska, V.: Headgan: One-shot neural head synthesis and editing. In: ICCV (2021)" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 138, + 644, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 644, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 644, + 480, + 665 + ], + "type": "text", + "content": "20. Fischer, T., Chang, H.J., Demiris, Y.: Rt-gene: Real-time eye gaze estimation in natural environments. In: ECCV (2018)" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DGazeNet" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 132, + 116, + 480, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 480, + 149 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 480, + 149 + ], + "type": "text", + "content": "21. Fuhl, W., Kasneci, G., Kasneci, E.: Teyed: Over 20 million real-world eye images with pupil, eyelid, and iris 2d and 3d segmentations, 2d and 3d landmarks, 3d eyeball, gaze vector, and eye movement types. 
ISMAR (2021)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 150, + 480, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 150, + 480, + 182 + ], + "spans": [ + { + "bbox": [ + 132, + 150, + 480, + 182 + ], + "type": "text", + "content": "22. Funes Mora, K.A., Monay, F., Odobez, J.M.: Eyediap: A database for the development and evaluation of gaze estimation algorithms from rgb and rgb-d cameras. In: ACM ETRA (2014)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 183, + 480, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 183, + 480, + 205 + ], + "spans": [ + { + "bbox": [ + 132, + 183, + 480, + 205 + ], + "type": "text", + "content": "23. Gecer, B., Ploumpis, S., Kotsia, I., Zafeiriou, S.: Ganfit: Generative adversarial network fitting for high fidelity 3d face reconstruction. In: CVPR (2019)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 205, + 480, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 205, + 480, + 226 + ], + "spans": [ + { + "bbox": [ + 132, + 205, + 480, + 226 + ], + "type": "text", + "content": "24. Ghosh, S., Hayat, M., Dhall, A., Knibbe, J.: Mtgls: Multi-task gaze estimation with limited supervision. In: WACV (2022)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 228, + 480, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 228, + 480, + 258 + ], + "spans": [ + { + "bbox": [ + 132, + 228, + 480, + 258 + ], + "type": "text", + "content": "25. Gu, C., Sun, C., Ross, D.A., Vondrick, C., Pantofaru, C., Li, Y., Vijayanarasimhan, S., Toderici, G., Ricco, S., Sukthankar, R., Schmid, C., Malik, J.: Ava: A video dataset of spatio-temporally localized atomic visual actions. In: CVPR (2018)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 260, + 480, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 260, + 480, + 281 + ], + "spans": [ + { + "bbox": [ + 132, + 260, + 480, + 281 + ], + "type": "text", + "content": "26. Guler, R.A., Kokkinos, I.: Holopose: Holistic 3d human reconstruction in-the-wild. In: CVPR (2019)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 282, + 480, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 282, + 480, + 304 + ], + "spans": [ + { + "bbox": [ + 132, + 282, + 480, + 304 + ], + "type": "text", + "content": "27. Guo, Z., Yuan, Z., Zhang, C., Chi, W., Ling, Y., Zhang, S.: Domain adaptation gaze estimation by embedding with prediction consistency. In: ACCV (2020)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 304, + 480, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 304, + 480, + 325 + ], + "spans": [ + { + "bbox": [ + 132, + 304, + 480, + 325 + ], + "type": "text", + "content": "28. Hao, Z., Mallya, A., Belongie, S., Liu, M.Y.: GANcraft: Unsupervised 3D Neural Rendering of Minecraft Worlds. In: ICCV (2021)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 326, + 480, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 326, + 480, + 357 + ], + "spans": [ + { + "bbox": [ + 132, + 326, + 480, + 357 + ], + "type": "text", + "content": "29. He, J., Pham, K., Valliappan, N., Xu, P., Roberts, C., Lagun, D., Navalpakkam, V.: On-device few-shot personalization for real-time gaze estimation. 
In: ICCV Workshops (2019)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 358, + 480, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 358, + 480, + 379 + ], + "spans": [ + { + "bbox": [ + 132, + 358, + 480, + 379 + ], + "type": "text", + "content": "30. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 380, + 480, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 380, + 480, + 402 + ], + "spans": [ + { + "bbox": [ + 132, + 380, + 480, + 402 + ], + "type": "text", + "content": "31. Iqbal, U., Molchanov, P., Kautz, J.: Weakly-supervised 3d human pose learning via multi-view images in the wild. In: CVPR (2020)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 403, + 480, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 403, + 480, + 434 + ], + "spans": [ + { + "bbox": [ + 132, + 403, + 480, + 434 + ], + "type": "text", + "content": "32. Joo, H., Liu, H., Tan, L., Gui, L., Nabbe, B., Matthews, I., Kanade, T., Nobuhara, S., Sheikh, Y.: Panoptic studio: A massively multiview system for social motion capture. In: ICCV (2015)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 435, + 480, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 435, + 480, + 456 + ], + "spans": [ + { + "bbox": [ + 132, + 435, + 480, + 456 + ], + "type": "text", + "content": "33. Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: CVPR (2019)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 457, + 480, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 457, + 480, + 478 + ], + "spans": [ + { + "bbox": [ + 132, + 457, + 480, + 478 + ], + "type": "text", + "content": "34. Kasahara, I., Stent, S., Park, H.S.: Look both ways: Self-supervising driver gaze estimation and road scene saliency. In: ECCV (2022)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 479, + 480, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 479, + 480, + 501 + ], + "spans": [ + { + "bbox": [ + 132, + 479, + 480, + 501 + ], + "type": "text", + "content": "35. Kellnhofer, P., Recasens, A., Stent, S., Matusik, W.,, Torralba, A.: Gaze360: Physically unconstrained gaze estimation in the wild. In: ICCV (2019)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 502, + 480, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 502, + 480, + 522 + ], + "spans": [ + { + "bbox": [ + 132, + 502, + 480, + 522 + ], + "type": "text", + "content": "36. Kim, H., Garrido, P., Tewari, A., Xu, W., Thies, J., Nießner, M., Pérez, P., Richardt, C., Zolloffer, M., Theobalt, C.: Deep video portraits. TOG (2018)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 523, + 480, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 523, + 480, + 544 + ], + "spans": [ + { + "bbox": [ + 132, + 523, + 480, + 544 + ], + "type": "text", + "content": "37. Kleinke, C.L.: Gaze and eye contact: a research review. 
Psychological bulletin (1986)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 132, + 545, + 480, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 545, + 480, + 577 + ], + "spans": [ + { + "bbox": [ + 132, + 545, + 480, + 577 + ], + "type": "text", + "content": "38. Koestinger, M., Wohlhart, P., Roth, P.M., Bischof, H.: Annotated facial landmarks in the wild: A large-scale, real-world database for facial landmark localization. In: ICCVW (2011)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 132, + 578, + 480, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 578, + 480, + 599 + ], + "spans": [ + { + "bbox": [ + 132, + 578, + 480, + 599 + ], + "type": "text", + "content": "39. Konrad, R., Angelopoulos, A., Wetzstein, G.: Gaze-contingent ocular parallax rendering for virtual reality. In: TOG (2019)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 132, + 600, + 480, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 600, + 480, + 621 + ], + "spans": [ + { + "bbox": [ + 132, + 600, + 480, + 621 + ], + "type": "text", + "content": "40. Kothari, R., De Mello, S., Iqbal, U., Byeon, W., Park, S., Kautz, J.: Weakly-supervised physically unconstrained gaze estimation. In: CVPR (2021)" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 132, + 622, + 480, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 622, + 480, + 643 + ], + "spans": [ + { + "bbox": [ + 132, + 622, + 480, + 643 + ], + "type": "text", + "content": "41. Koujan, M.R., Doukas, M.C., Roussos, A., Zafeiriou, S.: Head2head: Video-based neural head synthesis. In: FG (2020)" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 132, + 643, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 643, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 643, + 480, + 665 + ], + "type": "text", + "content": "42. Krafka, K., Khosla, A., Kellnhofer, P., Kannan, H., Bhandarkar, S., Matusik, W., Torralba, A.: Eye tracking for everyone. In: CVPR (2016)" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "type": "text", + "content": "E. Ververas et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 665 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 138 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 138 + ], + "type": "text", + "content": "43. Kulon, D., Guler, R.A., Kokkinos, I., Bronstein, M.M., Zafeiriou, S.: Weakly-supervised mesh-convolutional hand reconstruction in the wild. 
In: CVPR (2020)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 138, + 482, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 138, + 482, + 160 + ], + "spans": [ + { + "bbox": [ + 130, + 138, + 482, + 160 + ], + "type": "text", + "content": "44. Li, Y., Li, K., Jiang, S., Zhang, Z., Huang, C., Xu, R.Y.D.: Geometry-driven self-supervised method for 3d human pose estimation. In: AAAI (2020)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 160, + 482, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 160, + 482, + 182 + ], + "spans": [ + { + "bbox": [ + 130, + 160, + 482, + 182 + ], + "type": "text", + "content": "45. Liu, G., Yu, Y., Mora, K., Odobez, J.: A differential approach for gaze estimation with calibration. In: BMVC (2018)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 182, + 482, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 182, + 482, + 205 + ], + "spans": [ + { + "bbox": [ + 130, + 182, + 482, + 205 + ], + "type": "text", + "content": "46. Liu, G., Yu, Y., Mora, K.A.F., Odobez, J.M.: A differential approach for gaze estimation with calibration. In: BMVC (2018)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 205, + 482, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 205, + 482, + 226 + ], + "spans": [ + { + "bbox": [ + 130, + 205, + 482, + 226 + ], + "type": "text", + "content": "47. Liu, Y., Liu, R., Wang, H., Lu, F.: Generalizing gaze estimation with outlier-guided collaborative adaptation. In: ICCV (2021)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 226, + 482, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 226, + 482, + 248 + ], + "spans": [ + { + "bbox": [ + 130, + 226, + 482, + 248 + ], + "type": "text", + "content": "48. Marín-Jiménez, M.J., Kalogeiton, V., Medina-Suárez, P., , Zisserman, A.: LAEO-Net++: revisiting people Looking At Each Other in videos. TPAMI (2021)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 248, + 482, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 248, + 482, + 270 + ], + "spans": [ + { + "bbox": [ + 130, + 248, + 482, + 270 + ], + "type": "text", + "content": "49. Marin-Jimenez, M.J., Kalogeiton, V., Medina-Suarez, P., Zisserman, A.: Laeo-net: Revisiting people looking at each other in videos. In: CVPR (2019)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 270, + 482, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 270, + 482, + 293 + ], + "spans": [ + { + "bbox": [ + 130, + 270, + 482, + 293 + ], + "type": "text", + "content": "50. Mavely, A.G., Judith, J.E., Sahal, P.A., Kuruvilla, S.A.: Eye gaze tracking based driver monitoring system. In: ICCS (2017)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 293, + 482, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 293, + 482, + 315 + ], + "spans": [ + { + "bbox": [ + 130, + 293, + 482, + 315 + ], + "type": "text", + "content": "51. O Oh, J., Chang, H.J., Choi, S.I.: Self-attention with convolution and deconvolution for efficient eye gaze estimation from a full face image. 
In: CVPRW (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 315, + 482, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 315, + 482, + 335 + ], + "spans": [ + { + "bbox": [ + 130, + 315, + 482, + 335 + ], + "type": "text", + "content": "52. Park, S., Aksan, E., Zhang, X., Hilliges, O.: Towards end-to-end video-based eyetracking. In: ECCV (2020)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 335, + 482, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 335, + 482, + 357 + ], + "spans": [ + { + "bbox": [ + 130, + 335, + 482, + 357 + ], + "type": "text", + "content": "53. Park, S., Mello, S.D., Molchanov, P., Iqbal, U., Hilliges, O., Kautz, J.: Few-shot adaptive gaze estimation. In: ICCV (2019)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 357, + 482, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 357, + 482, + 369 + ], + "spans": [ + { + "bbox": [ + 130, + 357, + 482, + 369 + ], + "type": "text", + "content": "54. Park, S., Spurr, A., Hilliges, O.: Deep pictorial gaze estimation. In: ECCV (2018)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 130, + 369, + 482, + 391 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 369, + 482, + 391 + ], + "spans": [ + { + "bbox": [ + 130, + 369, + 482, + 391 + ], + "type": "text", + "content": "55. Park, S., Zhang, X., Bulling, A., Hilliges, O.: Learning to find eye region landmarks for remote gaze estimation in unconstrained settings. In: ACM ETRA (2018)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 391, + 482, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 391, + 482, + 413 + ], + "spans": [ + { + "bbox": [ + 130, + 391, + 482, + 413 + ], + "type": "text", + "content": "56. Park, T., Liu, M.Y., Wang, T.C., Zhu, J.Y.: Semantic image synthesis with spatially-adaptive normalization. In: CVPR (2019)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 130, + 413, + 482, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 413, + 482, + 435 + ], + "spans": [ + { + "bbox": [ + 130, + 413, + 482, + 435 + ], + "type": "text", + "content": "57. Richard, A., Lea, C., Ma, S., Gall, J., de la Torre, F., Sheikh, Y.: Audio- and gaze-driven facial animation of codec avatars. In: WACV (2021)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 130, + 435, + 482, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 435, + 482, + 456 + ], + "spans": [ + { + "bbox": [ + 130, + 435, + 482, + 456 + ], + "type": "text", + "content": "58. Riza Alp Guler, Natalia Neverova, I.K.: Densesepose: Dense human pose estimation in the wild. In: CVPR (2018)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 130, + 456, + 482, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 456, + 482, + 479 + ], + "spans": [ + { + "bbox": [ + 130, + 456, + 482, + 479 + ], + "type": "text", + "content": "59. Smith, B., Yin, Q., Feiner, S., Nayar, S.: Gaze Locking: Passive Eye Contact Detection for Human? Object Interaction. In: ACM UIST (2013)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 130, + 479, + 482, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 479, + 482, + 501 + ], + "spans": [ + { + "bbox": [ + 130, + 479, + 482, + 501 + ], + "type": "text", + "content": "60. 
Sugano, Y., Matsushita, Y., Sato, Y.: Learning-by-synthesis for appearance-based 3d gaze estimation. In: CVPR (2014)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 130, + 501, + 482, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 501, + 482, + 533 + ], + "spans": [ + { + "bbox": [ + 130, + 501, + 482, + 533 + ], + "type": "text", + "content": "61. Sun, J., Wang, X., Shi, Y., Wang, L., Wang, J., Liu, Y.: Ide-3d: Interactive disentangled editing for high-resolution 3d-aware portrait synthesis. ACM TOG 41(6), 1-10 (2022)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 130, + 533, + 482, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 555 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 555 + ], + "type": "text", + "content": "62. Sun, J., Wang, X., Wang, L., Li, X., Zhang, Y., Zhang, H., Liu, Y.: Next3d: Generative neural texture rasterization for 3d-aware head avatars. In: CVPR (2023)" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 130, + 555, + 482, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 555, + 482, + 578 + ], + "spans": [ + { + "bbox": [ + 130, + 555, + 482, + 578 + ], + "type": "text", + "content": "63. Sun, Y., Zeng, J., Shan, S., Chen, X.: Cross-encoder for unsupervised gaze representation learning. In: ICCV (2021)" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 130, + 578, + 482, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 578, + 482, + 599 + ], + "spans": [ + { + "bbox": [ + 130, + 578, + 482, + 599 + ], + "type": "text", + "content": "64. Vidal, M., Turner, J., Bulling, A., Gellersen, H.: Wearable eye tracking for mental health monitoring. Computer Communications (2012)" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 130, + 599, + 482, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 599, + 482, + 621 + ], + "spans": [ + { + "bbox": [ + 130, + 599, + 482, + 621 + ], + "type": "text", + "content": "65. Wandt, B., Rudolph, M., Zell, P., Rhodin, H., Rosenhahn, B.: Canonpose: Self-supervised monocular 3d human pose estimation in the wild. In: CVPR (2021)" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 130, + 621, + 482, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 621, + 482, + 643 + ], + "spans": [ + { + "bbox": [ + 130, + 621, + 482, + 643 + ], + "type": "text", + "content": "66. Wang, K., Ji, Q.: Real time eye gaze tracking with 3d deformable eye-face model. In: ICCV (2017)" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 130, + 643, + 482, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 643, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 643, + 482, + 665 + ], + "type": "text", + "content": "67. Wang, K., Zhao, R., Ji, Q.: A hierarchical generative model for eye image synthesis and eye gaze estimation. 
In: CVPR (2018)" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 399, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DGazeNet" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 117, + 480, + 512 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 132, + 117, + 480, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 117, + 480, + 138 + ], + "spans": [ + { + "bbox": [ + 132, + 117, + 480, + 138 + ], + "type": "text", + "content": "68. Wang, K., Zhao, R., Su, H., Ji, Q.: Generalizing eye tracking with bayesian adversarial learning. In: CVPR (2019)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 139, + 480, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 139, + 480, + 160 + ], + "spans": [ + { + "bbox": [ + 132, + 139, + 480, + 160 + ], + "type": "text", + "content": "69. Wang, T.C., Liu, M.Y., Tao, A., Liu, G., Kautz, J., Catanzaro, B.: Few-shot video-to-video synthesis. In: NeurIPS (2019)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 161, + 480, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 161, + 480, + 182 + ], + "spans": [ + { + "bbox": [ + 132, + 161, + 480, + 182 + ], + "type": "text", + "content": "70. Wang, Y., Jiang, Y., Li, J., Ni, B., Dai, W., Li, C., Xiong, H., Li, T.: Contrastive regression for domain adaptation on gaze estimation. In: CVPR (2022)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 183, + 480, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 183, + 480, + 205 + ], + "spans": [ + { + "bbox": [ + 132, + 183, + 480, + 205 + ], + "type": "text", + "content": "71. Wood, E., Baltrusaitis, T., Morency, L.P., Robinson, P., Bulling, A.: A 3d morphable eye region model for gaze estimation. In: ECCV (2016)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 205, + 480, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 205, + 480, + 226 + ], + "spans": [ + { + "bbox": [ + 132, + 205, + 480, + 226 + ], + "type": "text", + "content": "72. Yu, Y., Liu, G., Odobez, J.M.: Deep multitask gaze estimation with a constrained landmark-gaze model. In: ECCV Workshops (2018)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 227, + 480, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 227, + 480, + 248 + ], + "spans": [ + { + "bbox": [ + 132, + 227, + 480, + 248 + ], + "type": "text", + "content": "73. Yu, Y., Liu, G., Odobez, J.M.: Improving few-shot user-specific gaze adaptation via gaze redirection synthesis. 
In: CVPR (2019)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 249, + 480, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 249, + 480, + 270 + ], + "spans": [ + { + "bbox": [ + 132, + 249, + 480, + 270 + ], + "type": "text", + "content": "74. Yu, Y., Odobez, J.M.: Unsupervised representation learning for gaze estimation. In: CVPR (2020)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 271, + 480, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 271, + 480, + 293 + ], + "spans": [ + { + "bbox": [ + 132, + 271, + 480, + 293 + ], + "type": "text", + "content": "75. Zakharov, E., Shysheya, A., Burkov, E., Lempitsky, V.: Few-shot adversarial learning of realistic neural talking head models. ICCV (2019)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 293, + 480, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 293, + 480, + 314 + ], + "spans": [ + { + "bbox": [ + 132, + 293, + 480, + 314 + ], + "type": "text", + "content": "76. Zakharov, E., Ivakhnenko, A., Shysheya, A., Lempitsky, V.: Fast bi-layer neural synthesis of one-shot realistic head avatars. In: ECCV (2020)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 315, + 480, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 315, + 480, + 346 + ], + "spans": [ + { + "bbox": [ + 132, + 315, + 480, + 346 + ], + "type": "text", + "content": "77. Zhang, J., Chen, J., Tang, H., Wang, W., Yan, Y., Sangineto, E., Sebe, N.: Dual in-painting model for unsupervised gaze correction and animation in the wild. In: ACM MM (2020)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 347, + 480, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 347, + 480, + 369 + ], + "spans": [ + { + "bbox": [ + 132, + 347, + 480, + 369 + ], + "type": "text", + "content": "78. Zhang, M., Liu, Y., Lu, F.: Gazeonce: Real-time multi-person gaze estimation. In: CVPR (2022)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 369, + 480, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 369, + 480, + 402 + ], + "spans": [ + { + "bbox": [ + 132, + 369, + 480, + 402 + ], + "type": "text", + "content": "79. Zhang, X., Park, S., Beeler, T., Bradley, D., Tang, S., Hilliges, O.: Eth-xgaze: A large scale dataset for gaze estimation under extreme head pose and gaze variation. In: ECCV (2020)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 403, + 480, + 424 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 403, + 480, + 424 + ], + "spans": [ + { + "bbox": [ + 132, + 403, + 480, + 424 + ], + "type": "text", + "content": "80. Zhang, X., Sugano, Y., Bulling, A.: Revisiting data normalization for appearance-based gaze estimation. In: ACM ETRA (2018)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 425, + 480, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 425, + 480, + 445 + ], + "spans": [ + { + "bbox": [ + 132, + 425, + 480, + 445 + ], + "type": "text", + "content": "81. Zhang, X., Sugano, Y., Fritz, M., Bulling, A.: Appearance-based gaze estimation in the wild. 
In: CVPR (2015)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 447, + 480, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 447, + 480, + 468 + ], + "spans": [ + { + "bbox": [ + 132, + 447, + 480, + 468 + ], + "type": "text", + "content": "82. Zhang, X., Sugano, Y., Fritz, M., Bulling, A.: It's written all over your face: Fullface appearance-based gaze estimation. In: CVPRW (2017)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 468, + 480, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 468, + 480, + 490 + ], + "spans": [ + { + "bbox": [ + 132, + 468, + 480, + 490 + ], + "type": "text", + "content": "83. Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: ICCV (2017)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 491, + 480, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 491, + 480, + 512 + ], + "spans": [ + { + "bbox": [ + 132, + 491, + 480, + 512 + ], + "type": "text", + "content": "84. Zhu, X., Liu, Y., Li, J., Wan, T., Qin, Z.: Emotion classification with data augmentation using generative adversarial networks. In: PAKDD (2018)" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 100 + ], + "type": "text", + "content": "E. Ververas et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/61874178-0339-4f9c-84c1-e29c381e8d91_content_list.json b/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/61874178-0339-4f9c-84c1-e29c381e8d91_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..d7fd70edd578b186ed514b27fdaa50c2aa2d5b21 --- /dev/null +++ b/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/61874178-0339-4f9c-84c1-e29c381e8d91_content_list.json @@ -0,0 +1,1597 @@ +[ + { + "type": "text", + "text": "3DSA :Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms", + "text_level": 1, + "bbox": [ + 232, + 140, + 772, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Pohan Chen and Chiachi Tsi", + "bbox": [ + 372, + 212, + 627, + 227 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "National Cheng Kung University n28111089,cctsai@gs.ncku.edu.tw", + "bbox": [ + 379, + 239, + 622, + 267 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. In this study, we introduce the 3D space attention module (3DSA) as a novel approach to address the drawback of multi-view 3D human pose estimation methods, which fail to recognize the object's significance from diverse viewpoints. Specifically, we utilize the 3D space subdivision algorithm to divide the feature volume into multiple regions. 
Predicted 3D space attention scores are assigned to the different regions to construct the feature volume with space attention. The purpose of the 3D space attention module is to distinguish the significance of individual regions within the feature volume by applying weighted attention adjustments derived from corresponding viewpoints. We conduct experiments on existing voxel-based methods, VoxelPose and Faster VoxelPose. By incorporating the space attention module, both achieve state-of-the-art performance on the CMU Panoptic Studio dataset.", + "bbox": [ + 261, + 306, + 743, + 487 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: : 3D Human Pose Estimation $\\cdot$ 3D space attention", + "bbox": [ + 261, + 500, + 686, + 515 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 217, + 541, + 375, + 556 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Estimating multiple 3D human poses simultaneously from multiple camera views is an enduring challenge in computer vision. The aim is to determine the 3D locations of the body joints for all people present in a scene. It is a task that offers benefits to numerous real-world applications, including intelligent sports analysis [5] and retail monitoring [35].", + "bbox": [ + 212, + 573, + 787, + 648 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In 2D-3D lifting approaches [9,10,42], a monocular pose estimator identifies 2D bounding boxes and 2D poses for individuals in each view. A multi-view matching algorithm then establishes consistent correspondences between the 2D poses across different views. Finally, the matched 2D poses are lifted to 3D using geometry models such as triangulation [15] or Pictorial Structure Models [2,3,14].", + "bbox": [ + 212, + 648, + 787, + 724 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "As shown in Fig. 1(a), the voxel-based method [35] constructs the discretized feature volume from the detected heatmaps through 2D-3D projection. Based on the identified per-person proposal, the 3D pose for each person is individually estimated by feeding the feature volume into 3D-CNNs. To reduce the computational cost, another voxel-based approach [38] re-projects the feature volume to three two-dimensional coordinate planes and replaces the 3D-CNNs", + "bbox": [ + 212, + 724, + 787, + 816 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Corresponding author: Chiachi Tsi, cctsai@gs.ncku.edu.tw", + "bbox": [ + 217, + 824, + 620, + 839 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0099411d7b1de9091d8834d13380852dc604cb21e991da6a25bf5023f0621061.jpg", + "image_caption": [ + "Fig. 1: Comparison between our method and the existing voxel-based method. The primary distinction is that: (a) The existing method simply projects heatmaps into 3D space. (b) We enhance feature volumes using space attention, maintaining 3D information, and emphasizing critical regions within the feature volume." + ], + "image_footnote": [], + "bbox": [ + 276, + 148, + 723, + 329 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "with 2D-CNNs, which increases inference speed. The existing direct prediction method [40] uses the Transformer architecture to regress multi-person 3D poses directly, bypassing the need for intermediate tasks. 
However, owing to the constraints of the transformer architecture, the inference speed of the method still falls behind the Voxel-based 2D-CNNs method [38].", + "bbox": [ + 212, + 430, + 787, + 506 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Existing multi-view approaches often fail to consider an important visual phenomenon: the visible parts of the same object should differ when observed from various angles. As depicted in Fig. 2, the four people in the scene are visible in Views 1 to 4. However, in View 5, only three are visible in the image due to obstruction by one of the people. To address this issue, we propose the 3D space attention module (referred to as 3DSA) and apply it to two open-source voxel-based methods [35,38]. Fig. 1(b) shows the overview of our proposed method. We added the space attention layers to the end of backbone network to predict the space attention scores. Directly estimating the importance of each voxel within the feature volume could lead to excessive computational demands. Therefore, we employed the 3D space subdivision algorithm to divide the feature volume into multiple regions. The voxels within each region were treated as a group, and the space attention scores were assigned to the group, representing the importance of the region. Finally, the feature volume with space attention was constructed, retaining the 3D information and paying more attention to crucial regions.", + "bbox": [ + 212, + 507, + 787, + 733 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We have conducted extensive experiments on the 3D human pose benchmark, Panoptic [21], to evaluate the efficacy of our space attention module. By applying the space attention module into the VoxelPose [35] and Faster VoxelPose [38] methods, our models show significant improvements of $20.93\\%$ and $20.32\\%$ in MPJPE respectively, both models achieve the state-of-the-art results. The voxel-based methods undermine their performance on the $AP_{25}$ metric when compared to other multi-view methods. Our space attention module addresses", + "bbox": [ + 212, + 734, + 787, + 839 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Chen et al.", + "bbox": [ + 271, + 114, + 346, + 126 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/4ae930174a96d7868b535b8f28cbcc2f9f53b0ca98ad2aaa3004032716fef757.jpg", + "image_caption": [ + "View1" + ], + "image_footnote": [], + "bbox": [ + 220, + 148, + 331, + 189 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/da8188c0bf4ff0b7464527a8d3d41219bfbb47c7deec6a6d55179331f0895941.jpg", + "image_caption": [ + "View2", + "Fig. 2: The visual phenomenon in the 3D space. Due to differences in camera viewing angles and obstruction issues, the visibility of the same person changes across different viewpoints. As shown by the red arrow in the figure, we can clearly observe the person in the images from Views 1 to 4, but they are not visible in View 5. This visual phenomenon is commonly encountered in multi-view human pose estimation tasks." 
+ ], + "image_footnote": [], + "bbox": [ + 333, + 148, + 442, + 189 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/a73242bfe87ad7e534792de37e4b1b0dd95a2f74e2406c7bf15fc17ca483faff.jpg", + "image_caption": [ + "View3" + ], + "image_footnote": [], + "bbox": [ + 444, + 148, + 557, + 189 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/c228b5cd8a029bbfde48814c37f7b41dd74204e74ac268f50ba1884d2175f41c.jpg", + "image_caption": [ + "View4" + ], + "image_footnote": [], + "bbox": [ + 558, + 148, + 669, + 189 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/1101992fbba6d1a3a4f9082ca880d0fa9fe61b2e1c4cb3983e1ea5b54f09255c.jpg", + "image_caption": [ + "View5" + ], + "image_footnote": [], + "bbox": [ + 671, + 148, + 784, + 189 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "this weakness, resulting in our model achieving $94.2\\%$ and $94.22\\%$ on the metric. Compared to the baseline model [35, 38], these scores demonstrate a significant improvement, surpassing all existing multi-view approaches.", + "bbox": [ + 212, + 310, + 782, + 356 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our contributions are summarized as follows: (1) We proposed the 3D space attention module (3DSA), which addresses the drawbacks of the existing multiview 3D human pose estimation methods and validates its effectiveness on existing voxel-based methods [35, 38]. (2) We introduced a 3D space subdivision algorithm to reduce the computational complexity of the module. (3) By incorporating our space attention module into existing voxel-based methods [35, 38], both models achieve state-of-the-art results on the Panoptic benchmark, demonstrating the effectiveness of this attention mechanism.", + "bbox": [ + 212, + 356, + 784, + 476 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related work", + "text_level": 1, + "bbox": [ + 215, + 498, + 382, + 513 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Multi-view 3D human pose estimation", + "text_level": 1, + "bbox": [ + 214, + 529, + 578, + 545 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Unlike monocular 3D human pose estimation [8, 13, 33, 34], multi-view human pose estimation leverages image information from different viewpoints. This approach not only effectively overcomes challenges such as occlusion and depth ambiguity but also ensures a richer and more accurate depiction of the 3D pose. Existing methods can be categorized into three types: (1)2D to 3D lifting methods [1-3,5,9,10,18,25,42] (2)Voxel-based methods [6,7,19,20,27,30,32,35,38,41] (3)Direct regression method [40].", + "bbox": [ + 212, + 551, + 784, + 657 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2D to 3D lifting method Firstly, a monocular pose estimator is utilized to estimate the 2D joints of each person in each view, through triangulation [15] and a 3D pictorial model [14], the 3D pose of each person is reconstructed from the associated 2D poses. Dong et al. [9,10] propose MvPose. MvPose utilizes a human pose detector to generate and cluster 2D bounding boxes and associated poses for each view. Each cluster represents the same person from different views. The 3D pose of each person is then reconstructed from these clusters using triangulation and a 3D pictorial model. 
The drawback of this 2D to 3D pose lifting method is its significant dependence on the preceding steps of 2D pose estimation and cross-view matching, as their quality directly influences the results.", + "bbox": [ + 212, + 657, + 785, + 809 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Voxel-based method In contrast to the 2D to 3D lifting methods, which require establishing cross-view correspondence based on noisy and incomplete 2D", + "bbox": [ + 214, + 809, + 785, + 839 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "3DSA", + "bbox": [ + 689, + 114, + 730, + 126 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 114, + 785, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "pose estimates, the voxel-based method directly operates in the 3D space and therefore avoids making incorrect decisions in each camera view. Tu et al. [35] propose VoxelPose, the method that discretizes the 3D space into voxels and uses 2D heatmaps to construct a 3D feature volume. 3D-CNNs process this volume to locate human proposals and regress the 3D joint. Since the voxel-based method heavily relies on 3D convolutions, it requires higher computational cost and inference time to predict 3D joints. To enhance the model efficiency, Ye et al. [38] proposed Faster VoxelPose, an optimization method based on orthographic projection. This method projects the 3D feature volume to three mutually perpendicular planes and then utilizes 2D-CNNs to locate the center proposal and regress the 3D joint. By doing this, it eliminates the need for time-consuming 3D convolutions. Choudhury et al. [7] proposed TEMPO, which utilizes temporal context to enhance pose estimation, delivering smoother and more accurate human poses by integrating spatiotemporal information.", + "bbox": [ + 212, + 146, + 787, + 358 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "It has been observed that the voxel-based methods generally yield lower scores on the $AP_{25}$ metric in Panoptic datasets when compared to other methods. In this paper, we introduce a novel 3D space attention module, which applies weighted attention adjustments to the feature volume from corresponding viewpoints. This attention mechanism guides the network to focus more effectively on crucial feature regions and yields significant improvements in the $AP_{25}$ metric.", + "bbox": [ + 212, + 362, + 787, + 454 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Direct regression method In contrast to previous methods, Zhang et al. [40] proposed MvP, which leverages the Transformer architecture to regress multi-person 3D poses directly, thus eliminating the need for intermediate tasks. MvP achieved impressive results on the Panoptic [21] datasets. It showed significant progress (8%) on the most stringent $AP_{25}$ compared to the Voxel-based methods [35, 38] and is more robust and accurate than previous models. However, due to the limitations of the transformer architecture, the inference speed of MvP still can't compete with 2D CNN-based voxel method [38], which is not conducive to its deployment in practical applications.", + "bbox": [ + 212, + 458, + 789, + 595 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 Multi-view 3D body mesh estimation", + "text_level": 1, + "bbox": [ + 215, + 638, + 570, + 655 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Multi-view 3D body mesh estimation [11,20,24,31,39] is a task closely related to 3D pose estimation. 
Instead of directly estimating joint positions, this task involves predicting the parameters of SMPL [26] or employing a fitting method [4] to align the SMPL model with detected joint positions. Yu et al. [39] use neural networks to directly predict local attention, assigning importance to visual features across views. Our method focuses on using space subdivision and space attention to address the varying importance of different viewpoints in the same 3D space. Directly predicting the space attention and projecting to the 3D space will result in equal attention values along the projection ray, which prevents the model from accurately identifying depth information.", + "bbox": [ + 212, + 688, + 787, + 840 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Chen et al.", + "bbox": [ + 271, + 114, + 346, + 127 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.3 Attention mechanisms", + "text_level": 1, + "bbox": [ + 215, + 146, + 446, + 159 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The methodology of predicting attention scores from input features and then using these scores to enhance discriminative feature learning has been adopted by numerous studies [17, 23, 36]. The most famous is SENet proposed by Hu et al. [17], which employs attention mechanisms to adaptively recalibrate channelwise features by modeling inter-channel dependencies. Ma et al. proposed global attention in ContextPose [28], which focus on features within each voxel by estimating confidence scores for each joint, effectively reducing interference from non-human body voxels and improving joint estimation accuracy. Regarding merging 3D features extracted from different 2D viewpoints, the inherent physical characteristics of imaging result in varying importance of different viewpoints for the same 3D space. Therefore, we introduced the space attention module to solve this problem in a voxel-represented 3D space.", + "bbox": [ + 212, + 167, + 787, + 349 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 215, + 369, + 330, + 386 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 The drawback of existing multi-view 3D human pose method", + "text_level": 1, + "bbox": [ + 212, + 398, + 764, + 414 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Despite the impressive achievements of the existing multi-view 3D human pose methods, they ignore an important visual phenomenon: the visible parts of an object could vary when observed from different viewpoints. Specifically, an object's visibility can differ dramatically across various viewpoints, for instance, an object may be distinctly visible from viewpoint A, yet as we transition to viewpoint B, its visibility may diminish or even vanish due to interposing obstacles or occluded persons.", + "bbox": [ + 212, + 420, + 787, + 526 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this work, we introduce the space attention module to address the drawback, and we validate its effectiveness on existing voxel-based methods [35,38]. The existing methods merely project heatmaps into 3D space. As depicted in Fig. 1(b), our approach leverages the space attention module to enhance feature volumes. This not only preserves 3D information but also emphasizes crucial regions inside the feature volume. 
The objective of this attention mechanism is to focus on significant regions within the feature volume, by applying weighted attention adjustments to the feature volume from corresponding viewpoints.", + "bbox": [ + 212, + 527, + 787, + 647 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 Network architecture", + "text_level": 1, + "bbox": [ + 215, + 666, + 439, + 681 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Heatmap and space attention prediction. As shown in Fig. 3 (a), our model adopts a simple multi-layer design with a backbone and two additional layers. In the heatmap layer, the probability of a 2D pose heatmaps for the corresponding view is predicted. Meanwhile, in the space attention layer, the attention scores of the feature volume are determined. The attention scores are dynamically adjusted based on the input image, emphasizing regions with higher visibility in the 3D space.", + "bbox": [ + 212, + 688, + 787, + 794 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Space attention with person proposal generation. As shown in Fig. 3 (b), by projecting the output heatmaps to the 3D space, the discretized feature volume $\\{\\mathbf{G} \\in \\mathbb{R}^{80 \\times 80 \\times 20}\\}$ is constructed. Following [35], the 3D space is discretized", + "bbox": [ + 212, + 795, + 787, + 842 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "3DSA", + "bbox": [ + 687, + 114, + 730, + 126 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 785, + 126 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/0ad11621644ac5f435ec30ae3f68f88ba4d0d510f328687dbf244a9de56dd28b.jpg", + "image_caption": [ + "Fig. 3: Overview of network architecture. (a) Given the multi-view image as input, the backbone network predicted both the heatmaps and the space attention scores for each view. Each heatmap is projected to a 3D space, which is physically shared but independent for each view, constructing the feature volume. The space attention scores for each view are assigned to the different regions in the shared 3D space. (b) By performing an element-wise multiplication of the raw feature volume with the space attention scores, we produce a feature volume infused with spatial attention. Subsequently, this attention-enhanced feature volume is fed into 3D-CNNs to locate the per-person proposal. (c) A more detailed feature volume corresponding to the proposal was generated. By calculating the spatial relationship between the proposal and the feature volume, space attention scores for the proposal were sampled from the attention in 3D space. Finally, the human pose was estimated." + ], + "image_footnote": [], + "bbox": [ + 222, + 148, + 782, + 368 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "into $X \\times Y \\times Z$ locations. Based on observations from the space $[2, 21], X, Y$ and $Z$ are set to be 80, 80, and 20 respectively to maintain a good balance between speed and precision. Let the 2D heatmap of a view be denoted as $M_v \\in \\mathbb{R}^{K \\times w \\times h}$ , where $K$ is the number of person's joints. For each voxel location $G^{X,Y,Z}$ , the projected location in 2D view $\\mathbf{V}$ is represented as $P_v^{X,Y,Z}$ . The heatmap values at $P_v^{X,Y,Z}$ is denoted as $M_v^{X,Y,Z} \\in \\mathbb{R}^K$ . 
$\\mathbf{v} \\in \\mathbb{R}^V$ represents one view from total $\\mathbf{V}$ views.", + "bbox": [ + 212, + 590, + 787, + 696 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Directly predicting the importance of each voxel in the feature volume would result in an overwhelming computational burden (Given that the output dimension of the model equals the number of voxels in the feature volume, which is 128,000). To reduce computational complexity, we use a 3D space subdivision algorithm to divide the feature volume $\\{G_V \\in \\mathbb{R}^{80 \\times 80 \\times 20}\\}$ from each view, $V$ into several regions $\\{Div\\overline{G}_V \\in \\mathbb{R}^{80 \\times 80 \\times 20}\\}$ . Subsequently, the space attention scores predicted from the model are assigned to each region in the divided feature volume to compute the attention of the feature volume $\\{V_v^{X,Y,Z} \\in \\mathbb{R}^{80 \\times 80 \\times 20}\\}$ , which represents the attention scores for view $v$ .", + "bbox": [ + 212, + 702, + 787, + 840 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Chen et al.", + "bbox": [ + 271, + 114, + 346, + 126 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "After that, an element-wise multiplication is performed between the space attention $\\{V_{\\pmb{v}}\\}$ and the raw feature volume $\\{M_{\\pmb{v}}\\}$ to obtain a feature volume with space attention on view $\\pmb{v}$ , denoted as $MV_{\\pmb{v}}$ . Following this, the feature volumes (with space attention) constructed from multi-view images are fused on average to obtain the aggregated feature volume $\\{\\pmb{F} \\in \\mathbb{R}^{80 \\times 80 \\times 20}\\}$ :", + "bbox": [ + 212, + 146, + 787, + 223 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nF = \\frac {1}{V} \\sum_ {v = 1} ^ {V} M _ {v} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 449, + 234, + 785, + 273 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\mathbf{V}$ represents the number of cameras. $F$ represents the likelihood of $K$ joints in $G$ . Through applying space attention to the feature volume, 3D information is retained while emphasizing important voxels. Finally, the aggregated feature volume $\\mathbf{F}$ is input into the 3D convolutional network to determine the per-person likelihood in the 3D discretized feature volume.", + "bbox": [ + 212, + 282, + 784, + 356 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Space attention with per-person pose regression. In the final step, the completed 3D human pose corresponding to the proposal is predicted, as illustrated in Fig. 3 (c). For a fair evaluation of the effect of the space attention module, [35] is adopted to build an individual fine-grained feature volume centered at each predicted proposal. The size of the fine-grained feature volume is set to be $2000\\mathrm{mm} \\times 2000\\mathrm{mm} \\times 2000\\mathrm{mm}$ , and the feature volume is divided into a discrete grid with $X' \\times Y' \\times Z'$ voxel where $X', Y', Z'$ equal to 64. Each feature volume under a particular perspective will only have one space attention score to indicate its importance. In this work, we sample the attention score for each proposal by analyzing the spatial relationship between the proposal and the feature volume. Then, we employ a nearest neighbor sampling method to precisely calculate the attention scores for each proposal. 
The aggregated fine-grained feature volume is computed based on the descriptions from the previous stage. Finally, the 3D heatmap is estimated and the complete 3D human poses of the persons in the space are regressed.", + "bbox": [ + 212, + 358, + 787, + 584 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3 3D space subdivision algorithm", + "text_level": 1, + "bbox": [ + 215, + 604, + 519, + 619 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As mentioned in Sec. 3.2, the 3D space subdivision algorithm is crucial to the implementation of our space attention module. Computational challenges arise when directly predicting the significance of each voxel in the feature volume. Inspired by Lai et al. [22] utilizing the cell subdivision search algorithm to reduce the computational complexity associated with searching through a large amount of data points, we employ a 3D space subdivision algorithm to divide the feature volume into distinct regions. Specifically, the voxels within each region are considered as a group, and attention scores are assigned to these groups to signify the importance of each region. Through the backbone network, the weight of each region is predicted, representing the importance of corresponding areas within the same viewpoint in the feature volume. If voxels within a specific region exhibit higher confidence levels, this indicates their relative importance. Conversely, lower confidence levels in voxels, caused by obstructions, occlusion, or other factors, suggest that they are less significant within that region. As", + "bbox": [ + 212, + 628, + 787, + 840 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "3DSA", + "bbox": [ + 689, + 114, + 730, + 126 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 116, + 785, + 126 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/65486a35377e7c2042bd4f723ce23d11904ae7df6aecfdfc5ba7f4d58a6c9b93.jpg", + "image_caption": [ + "Fig. 4: Subdivision of the voxel within the feature volume. We utilize a 3D space subdivision algorithm to partition the feature volume into separate regions, with the voxels in each region being treated as a group." + ], + "image_footnote": [], + "bbox": [ + 390, + 155, + 591, + 300 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Fig. 4 depicts, the feature volume in 3D space $\\pmb{G}$ is divided into several cells along the $x$ , $y$ , and $z$ axes. Assume $l_{i}$ , $i \\in \\{x,y,z\\}$ represents the length, width, and height of the feature volume, while $\\delta_{i}$ , $i \\in \\{x,y,z\\}$ represents the cell length along a particular axis. The relationship between $l_{i}$ and $\\delta_{i}$ can be expressed as follows:", + "bbox": [ + 214, + 392, + 787, + 467 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\delta_ {i} = \\operatorname {i n t} \\left(\\frac {l _ {i}}{n _ {i}}\\right) + 1 \\quad i \\in \\{x, y, z \\} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 476, + 785, + 510 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $n_i$ represents the number of regions divided along the $i$ -axis. 
The total number of regions $n_{total}$ in the 3D space is given by the product of", + "bbox": [ + 214, + 520, + 787, + 551 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nn _ {\\text {T o t a l}} = n _ {x} \\times n _ {y} \\times n _ {z} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 423, + 563, + 785, + 580 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Let the position vector of a voxel be $\\mathbf{V} = [v_x, v_y, v_z]^\\top$ . Then, the region that $\\mathbf{V}$ resides in can be computed using the following equation:", + "bbox": [ + 214, + 592, + 787, + 625 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\ni _ {j} = \\operatorname {f l o o r} \\left(\\frac {\\left(v _ {j} - j _ {\\min }\\right)}{\\delta_ {j}}\\right) + 1 \\quad j \\in \\{x, y, z \\} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 633, + 785, + 667 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $i_j$ represents the indices of the voxel in $x, y, z$ directions, floor() is used to round down to integer representation, and $j_{\\mathrm{min}}$ represents the minimum coordinates in $x, y$ and $z$ directions of the voxel within the feature volume. Finally, the region id of the voxel (Voxel_id) within the feature volume can be calculated by the following formula:", + "bbox": [ + 214, + 678, + 787, + 753 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nV o x e l _ {i d} = i _ {z} \\times \\left(n _ {x} \\times n _ {y}\\right) + i _ {y} \\times n _ {x} + i _ {x} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 766, + 785, + 782 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The ID of each voxel can be calculated according to the formulas, however, in practical applications, the total number of voxels in the feature volume is substantial, which could lead to excessive computation times. To tackle this", + "bbox": [ + 214, + 794, + 787, + 840 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Chen et al.", + "bbox": [ + 271, + 114, + 346, + 127 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8492bfdd2fa95289084140bf90f1958609de002fbe8bd8a5253a09b1867a01a3.jpg", + "image_caption": [ + "Fig. 5: Detailed architecture of space attention module." + ], + "image_footnote": [], + "bbox": [ + 290, + 150, + 718, + 354 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "challenge, we have optimized the weight assignment process within the space attention module, adopting the following Python code (Algorithm 1). Compared Eq. (2) to Eq. (5), our approach is better adapted to practical applications, achieving the same objectives and results more efficiently.", + "bbox": [ + 212, + 416, + 784, + 477 + ], + "page_idx": 8 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 1 Weight assignment algorithm" + ], + "code_body": "Suppose we have 3 intervals along x, y, and z axis $x,y,z = [0,27,54,80],[0,27,54,80],[0,7,14,20]$ # Assign space attention value to the tensor for one view. 
\n subdivision num $= 0$ \nfor i in range (3): for j in range (3): for k in range (3): space attention[x [i]: x [i+1],y [j]: y [j+1],z [k]: z [k+1] $\\equiv$ attention value[subdivision num] subdivision num $=$ subdivision num+1", + "guess_lang": "txt", + "bbox": [ + 215, + 522, + 661, + 664 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "3.4 Implementation of space attention module", + "text_level": 1, + "bbox": [ + 215, + 720, + 609, + 736 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In implementations, only the following adjustments were made: (1) A simple branch was derived from the backbone network [16] to predict the space attention scores. (2) We executed an element-wise multiplication of the raw feature volume with the space attention scores calculated by Algorithm 1. (3) The attention scores of the proposal are computed by analyzing the positional relationship between the proposal and the feature volume.", + "bbox": [ + 212, + 750, + 784, + 839 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "3DSA", + "bbox": [ + 689, + 114, + 730, + 126 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 785, + 126 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The space attention module can be easily applied to existing multi-person voxel-based human pose methods [7, 35, 38, 41]. However, since some of these methods are not open-sourced, it prevents us from performing validation. Consequently, we chose to validate our method using the two open-sourced voxel-based methods [35, 38].", + "bbox": [ + 212, + 146, + 782, + 222 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "It is important to emphasize that for a fair evaluation of the impact of the space attention module on existing voxel-based methods [35,38], the network architecture [29] used for locating the person proposal and regressing the 3D pose remained unaltered. For the model's loss function and hyperparameter configuration, the original design proposed by [35,38] has remained.", + "bbox": [ + 212, + 222, + 784, + 297 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The architecture of the space attention layer is presented in Fig. 5. It is a straightforward and lightweight design, which uses a simple convolutional block followed by global average pooling and the sigmoid activation function to estimate the space attention scores of the corresponding image. The purpose of the global average pooling is to replace the traditional fully connected layers, thereby reducing the number of parameters. The output dimensions of the space attention layer are equal to the number of regions in the feature volume. The space attention scores $S \\in \\mathbb{R}^n$ represent the $n$ space attention values, indicating that the feature volume is divided into $n$ regions.", + "bbox": [ + 212, + 297, + 785, + 434 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 215, + 454, + 375, + 472 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.1 Implementation detail", + "text_level": 1, + "bbox": [ + 215, + 484, + 444, + 500 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Training and evaluation datasets. CMU Panoptic [21] is a 3D dataset with multi-view images. To evaluate and analyze our approach, we conducted extensive experiments on the Panoptic dataset. Following VoxelPose [35], the same data sequences were used for both training and evaluating our model. 
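For readability, the weight-assignment loop of Algorithm 1 above is restated here in runnable form, since the extracted listing is hard to follow. The interval boundaries [0, 27, 54, 80], [0, 27, 54, 80], [0, 7, 14, 20] and the triple loop are taken directly from the listing (a 3x3x3 subdivision of the 80x80x20 grid); the function name and the use of NumPy are illustrative assumptions rather than the authors' exact implementation.

```python
import numpy as np

# 3x3x3 subdivision of the 80x80x20 feature volume, as in Algorithm 1.
x = [0, 27, 54, 80]
y = [0, 27, 54, 80]
z = [0, 7, 14, 20]

def assign_space_attention(attention_value):
    """Broadcast the 27 predicted region scores onto the voxel grid for one view."""
    space_attention = np.empty((80, 80, 20), dtype=np.float32)
    subdivision_num = 0
    for i in range(3):
        for j in range(3):
            for k in range(3):
                space_attention[x[i]:x[i + 1], y[j]:y[j + 1], z[k]:z[k + 1]] = \
                    attention_value[subdivision_num]
                subdivision_num += 1
    return space_attention

# Example: scores for the 3*3*3 = 27 regions predicted by the space attention layer.
scores = np.random.rand(27)
volume_attention = assign_space_attention(scores)
```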
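The space attention layer itself (Fig. 5, Sec. 3.4) is described as a simple convolutional block followed by global average pooling and a sigmoid, with as many outputs as there are regions. A PyTorch-style sketch consistent with that description is given below; the channel width, the single convolution, and the BatchNorm are assumptions, not the published configuration. Each view's region scores produced this way would then be broadcast onto the voxel grid with the assignment loop above before the element-wise weighting of Eq. (1).

```python
import torch
import torch.nn as nn

class SpaceAttentionHead(nn.Module):
    """Predicts n_regions attention scores in [0, 1] from backbone feature maps."""
    def __init__(self, in_channels: int, n_regions: int):
        super().__init__()
        # Simple convolutional block; global average pooling replaces fully connected layers.
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, n_regions, kernel_size=3, padding=1),
            nn.BatchNorm2d(n_regions),
            nn.ReLU(inplace=True),
        )
        self.gap = nn.AdaptiveAvgPool2d(1)

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        scores = self.gap(self.conv(features)).flatten(1)  # (batch, n_regions)
        return torch.sigmoid(scores)

# Example: 27 region scores (3x3x3 subdivision) from assumed 256-channel backbone features.
head = SpaceAttentionHead(in_channels=256, n_regions=27)
attention_scores = head(torch.randn(2, 256, 128, 240))
```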
Our experiments were conducted using five HD cameras with camera IDs 3, 6, 12, 13, 23. Shelf and Campus [2] are two datasets that are commonly used in multi-view and multi-person research. We evaluated our method using the same data setup as in [35].", + "bbox": [ + 212, + 507, + 784, + 628 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Evaluation metric. For the Panoptic datasets [21], we adopt the Average Precision $(AP^K)$ and Mean Per Joint Position Error (MPJPE) as metrics that demonstrate the robustness and accuracy of multi-person 3D pose estimation. To assess the influence of the space attention module on model size and computational complexity, we consider key metrics such as MACs and model parameters. For both the Campus and Shelf datasets, we present the results in terms of the Percentage of Correct Parts (PCP).", + "bbox": [ + 212, + 628, + 784, + 734 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Training details. For the Panoptic datasets, we use an off-the-shelf pose estimation model constructed based on ResNet-50 [16] to extract features from multi-view images. The difference from VoxelPose [35] is that since our backbone network needs to predict the space attention scores, the parameters of the model are updated throughout the training iteration.", + "bbox": [ + 212, + 734, + 784, + 809 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Due to the incomplete data annotation in the Campus and Shelf datasets [2], Tu et al. [35] use synthetic 3D poses to train the network. To implement the", + "bbox": [ + 214, + 809, + 784, + 840 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Chen et al.", + "bbox": [ + 271, + 114, + 346, + 127 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "space attention module, we use the synthetic heatmap as the input feature to predict the space attention scores. In summary, the space attention module has two modes: the first predicts the space attention scores from the ground truth multi-view image, referred to as Image-based input; the second predicts the space attention scores from the synthetic heatmaps, referred to as Heatmap-based input.", + "bbox": [ + 212, + 146, + 787, + 238 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.2 Comparisons to Existing Methods", + "text_level": 1, + "bbox": [ + 214, + 258, + 545, + 273 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Panoptic. We first evaluate our model on the Panoptic dataset [21] and compare it with the state-of-the-art model. As illustrated in Tab. 1, by incorporating the space attention module $(10 \\times 10 \\times 3$ configuration) with two voxel-based methods, VoxelPose [35] and Faster VoxelPose [38], our model achieves $94.2\\%$ and $94.22\\%$ on the most strict evaluation metric $AP_{25}$ , outperforming the transformer model MvP [40]. Our proposed method shows inferior performance in terms of $AP@50,100,150$ when compared to VoxelPose, and this $0.5\\%$ performance gap is generally attributed to model variation. It particularly emphasizes that in terms of the $AP_{25}$ metric, our method has significantly improved, outperforming VoxelPose by $12.69\\%$ and Faster VoxelPose by $10.56\\%$ . Remarkably, both methods achieved much lower MPJPE with values of 13.98 and 14.55, outperforming the TEMPO [7] and achieving the SOTA records. 
This demonstrates the effectiveness of our space attention module.", + "bbox": [ + 212, + 284, + 789, + 481 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/bed8890e7df110761af63834d924d97f4479666c579fb0093052d4887ab1fdd6.jpg", + "table_caption": [ + "Table 1: Comparison with existing methods on the Panoptic datasets." + ], + "table_footnote": [], + "table_body": "
MethodAP25AP50AP100AP150MPJPE
VoxelPose [35]83.5998.3399.7699.9117.68mm
Faster VoxelPose [38]85.2298.0899.3299.4818.26mm
PlaneSweep Pose [25]92.1298.9699.8199.8416.75mm
RPGN [37]----15.84mm
MvP [40]92.2896.697.4597.6915.76mm
TEMPO [7]89.0199.0899.7699.9314.68mm
VoxelPose + 3DSA94.298.4999.2199.3113.98mm
Faster VoxelPose + 3DSA94.2298.6599.4999.7514.55mm
", + "bbox": [ + 250, + 547, + 743, + 688 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Campus and Shelf. The quantitative evaluation results on Shelf and Campus datasets [2] are presented in Tab. 2. Our proposed method (VoxelPose [35] with space attention, $10 \\times 10 \\times 3$ configuration) remains competitive on both datasets. The performance of space attention is not as outstanding on Panoptic datasets [21], and we believe this is related to the Heatmap-based input. Since the heatmap lacks image information, the model is hard to determine the importance of different regions in 3D space from the heatmap. We will detail our research on this issue in the subsequent ablation study.", + "bbox": [ + 212, + 718, + 787, + 840 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "3DSA", + "bbox": [ + 689, + 114, + 730, + 126 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 114, + 784, + 126 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/67f322dcc48367f815059c40111c684184b7a19bc2284eb9c76fb1b477c8b6df.jpg", + "table_caption": [ + "Table 2: Quantitative results on Shelf and Campus datasets." + ], + "table_footnote": [], + "table_body": "
MethodShelfCampus
Actor1Actor2Actor3AverageActor1Actor2Actor3Average
Ershadi et al. [12]93.375.994.88894.292.984.690.6
Dong et al. [10]98.894.197.896.997.693.39896.3
MvP [40]99.394.197.897.498.294.197.496.6
TEMPO [7]99.395.197.897.497.795.597.997.3
Faster VoxelPose. [38]99.49697.597.696.594.197.996.2
VoxelPose [35]99.394.197.69797.693.898.896.7
Ours99.495.497.697.59893.498.696.7
", + "bbox": [ + 218, + 171, + 781, + 320 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/74d00a6df15ec6e72e81e282f54af9c27c0684495e4d8d86d1888a932f11c536.jpg", + "table_caption": [ + "Table 3: Space subdivision and efficiency analysis on the Panoptic dataset" + ], + "table_footnote": [], + "table_body": "
VoxelPose incorporate with space attention
Space subdivisionAP25AP100MPJPEMACs(G)Parameter(M)
Tu et al. [35]83.5999.7617.68178.8840.62
3 × 3 × 392.7399.5814.78179.0940.64
7 × 7 × 393.7199.3314.41180.0440.77
10 × 10 × 394.299.2113.98181.2440.92
15 × 15 × 694.3399.113.97193.2442.47
20 × 20 × 994.4499.4413.94221.5846.15
Faster VoxelPose incorporate with space attention
Space subdivisionAP25AP100MPJPEMACs(G)Parameter(M)
Ye et al. [38]85.2299.3218.26106.8736.37
3 × 3 × 392.5799.6115.54107.0836.39
7 × 7 × 393.7599.5414.88108.0336.52
10 × 10 × 394.2299.4914.55109.2336.67
", + "bbox": [ + 267, + 358, + 730, + 588 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.3 Ablation studies", + "text_level": 1, + "bbox": [ + 215, + 614, + 398, + 628 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In this section, we conduct ablative experiments to analyze a variety of factors within our approach.", + "bbox": [ + 212, + 642, + 785, + 672 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Individual contributions of the space attention module and the 3D space subdivision algorithm. By comparing the results in Tab. 3, we can see that the finer the subdivision of the 3D space, the model's accuracy and precision improve correspondingly. However, the model's performance tends to converge after subdividing into $10 \\times 10 \\times 3$ regions. The result demonstrates the critical importance of the space subdivision algorithm within the space attention module. The direct prediction of all voxels does not result in significant improvements in performance.", + "bbox": [ + 212, + 672, + 787, + 792 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Efficiency analysis. In this work, we focus on comparing our method with existing voxel-based methods [35, 38]. Tab. 3 demonstrates that incorporating the space attention module into the voxel-based approach resulted in a slight", + "bbox": [ + 212, + 794, + 787, + 840 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Chen et al.", + "bbox": [ + 271, + 114, + 346, + 127 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "increase in the model's complexity. Regarding the model we eventually selected (VoxelPose with $10 \\times 10 \\times 3$ space attention module), MACs increased by $1.32\\%$ and parameters by $0.74\\%$ when compared to the VoxelPose method. As previously mentioned, excessively increasing the number of spatial subdivisions does not enhance performance but significantly increases the model's complexity. For instance, subdividing the space into $20 \\times 20 \\times 9$ regions resulted in a $23.8\\%$ increase in the model's MACs and a $13.6\\%$ increase in parameters. This further demonstrates the importance of the space subdivision algorithm in improving the efficiency of the space attention module. To strike a balance between performance and efficiency, we adopt the $10 \\times 10 \\times 3$ space attention configuration on VoxelPose [35] to study the impact of the individual factors.", + "bbox": [ + 212, + 146, + 787, + 313 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Number of cameras. We compared our method with existing 3D Pose methods [7,35,38,40]. Tab. 4 shows that the feature volume representation is diminished with fewer camera views, leading to a drop in accuracy. The improvement in both $AP$ and $MPJPE$ metrics over other models, as the number of cameras increases, underscores the significance of multi-view images for enhancing the space attention module's performance.", + "bbox": [ + 212, + 315, + 787, + 406 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Image-based input /Heatmap-based input. To further validate the impact of different inputs on the space attention module, we conducted experiments on the Panoptic dataset [2]. As shown in Tab. 
5, although the space attention with Heatmap-based input shows an improvement compared to the baseline model [35], it is noticeably inferior to the space attention with Image-based input. We consider that this disparity occurs because heatmaps lack spatial and depth information in comparison to images.", + "bbox": [ + 212, + 409, + 787, + 513 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/2b94fac917455989a8aa3d679aa6357923e6086abda6fb1d3c59b2693e010dcd.jpg", + "table_caption": [ + "Table 4: Number of cameras analysis on the Panoptic dataset" + ], + "table_footnote": [], + "table_body": "
MethodCam\\(AP_{25}\\)\\(AP_{50}\\)\\(AP_{100}\\)\\(AP_{150}\\)MPJPE
Faster VoxelPose [38]73.9597.0299.2199.3521.12
MvP [40]484.1-96.7-19.3
TEMPO [7]----17.34
ours88.498.199.5999.716.78
VoxelPose [35]58.9493.8898.4599.3224.29
Faster VoxelPose [38]53.6891.8997.498.326.13
MvP [40]371.8-95.1-21.1
TEMPO [7]----19.22
ours73.0695.2398.6499.2519.03
MvP [40]37.7-93-34.8
TEMPO [7]2----32.13
ours47.9588.7497.8498.827.35
", + "bbox": [ + 295, + 587, + 704, + 801 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "3DSA", + "bbox": [ + 689, + 114, + 730, + 126 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/6b5dda47d93cd4ac0d345aaa282b13917febf0b36f845e436c2f5620b38b9c0b.jpg", + "table_caption": [ + "Table 5: Effect of different inputs on space attention module" + ], + "table_footnote": [], + "table_body": "
Image-based input / Heatmap-based input
InputAP25AP50AP100AP150MPJPE
Image94.298.4999.2199.3113.98
Heatmap86.9798.399.299 9.3817.21
", + "bbox": [ + 346, + 171, + 651, + 243 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "4.4 3D space attention visualization", + "text_level": 1, + "bbox": [ + 215, + 273, + 526, + 287 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In Fig. 6, we provide the space attention visualization results on Panoptic datasets. Red regions indicate attention scores above 0.8, while blue for below 0.8. Observing the spatial distribution of attention in 3D space (1st row), most key attention areas are focused where people are present. In view 5, an obscured person is not visible from that angle, resulting in lower attention scores in that area. This result aligns with our hypothesis, confirming that the space attention mechanism discriminates the importance of different regions in the feature volume based on visibility. More visualization results are provided in the supplementary material.", + "bbox": [ + 212, + 301, + 792, + 422 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/743321a5f7b19114d7b2d7911667a821f6ebc7b7a187fd4d96786f2abe27f9ba.jpg", + "image_caption": [ + "Fig. 6: 3D space attention visualization. We marked areas with scores above 0.8 (red regions) in 3D space (1st row) and projected them onto the corresponding 2D image (2nd row)." + ], + "image_footnote": [], + "bbox": [ + 218, + 469, + 784, + 608 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 215, + 699, + 359, + 714 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this paper, we present the novel space attention module for the voxel-based multi-view 3D pose estimation method. We learn the space attention scores from the input image and utilize the 3D space subdivision algorithm to divide the feature volume, finally constructing the feature volumes with space attention. By integrating our space attention module into two existing voxel-based methods, both models achieve the state-of-the-art results on the panoptic benchmarks.", + "bbox": [ + 212, + 733, + 787, + 825 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Chen et al.", + "bbox": [ + 271, + 114, + 346, + 127 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 217, + 143, + 401, + 162 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This work is supported and National Science and Technology Council (NSTC), Taiwan R.O.C. projects with grants 112-2222-E-006-009-, 113-2218-E-035-001-, 113-2425-H-006-007- and NSTC 113-2627-M-006-005 -.", + "bbox": [ + 215, + 175, + 784, + 220 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 217, + 243, + 321, + 258 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Amin, S., Andriluka, M., Rohrbach, M., Schiele, B.: Multi-view pictorial structures for 3d human pose estimation. In: BMvc. vol. 1. Bristol, UK (2013)", + "2. Belagiannis, V., Amin, S., Andriluka, M., Schiele, B., Navab, N., Ilic, S.: 3d pictorial structures for multiple human pose estimation. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1669-1676 (2014)", + "3. Belagiannis, V., Amin, S., Andriluka, M., Schiele, B., Navab, N., Ilic, S.: 3d pictorial structures revisited: Multiple human pose estimation. 
IEEE transactions on pattern analysis and machine intelligence 38(10), 1929-1942 (2015)", + "4. Bogo, F., Kanazawa, A., Lassner, C., Gehler, P., Romero, J., Black, M.J.: Keep it smpl: Automatic estimation of 3d human pose and shape from a single image. In: Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V 14. pp. 561-578. Springer (2016)", + "5. Bridgeman, L., Volino, M., Guillemaut, J.Y., Hilton, A.: Multi-person 3d pose estimation and tracking in sports. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops. pp. 0-0 (2019)", + "6. Chen, Y., Gu, R., Huang, O., Jia, G.: Vtp: volumetric transformer for multi-view multi-person 3d pose estimation. Applied Intelligence 53(22), 26568-26579 (2023)", + "7. Choudhury, R., Kitani, K.M., Jeni, L.A.: Tempo: Efficient multi-view pose estimation, tracking, and forecasting. In: 2023 IEEE/CVF International Conference on Computer Vision (ICCV). pp. 14704-14714 (2023). https://doi.org/10.1109/ICCV51070.2023.01355", + "8. Dabral, R., Mundhada, A., Kusupati, U., Afaque, S., Sharma, A., Jain, A.: Learning 3d human pose from structure and motion. In: Proceedings of the European conference on computer vision (ECCV). pp. 668-683 (2018)", + "9. Dong, J., Fang, Q., Jiang, W., Yang, Y., Huang, Q., Bao, H., Zhou, X.: Fast and robust multi-person 3d pose estimation and tracking from multiple views. IEEE Transactions on Pattern Analysis and Machine Intelligence 44(10), 6981-6992 (2021)", + "0. Dong, J., Jiang, W., Huang, Q., Bao, H., Zhou, X.: Fast and robust multi-person 3d pose estimation from multiple views. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 7792-7801 (2019)", + "1. Dong, Z., Song, J., Chen, X., Guo, C., Hilliges, O.: Shape-aware multi-person pose estimation from multi-view images. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 11158-11168 (2021)", + "2. Ershadi-Nasab, S., Noury, E., Kasaei, S., Sanaei, E.: Multiple human 3d pose estimation from multiview images. Multimedia Tools and Applications 77, 15573-15601 (2018)", + "3. Fang, H.S., Xie, S., Tai, Y.W., Lu, C.: Rmpe: Regional multi-person pose estimation. In: Proceedings of the IEEE international conference on computer vision. pp. 2334-2343 (2017)" + ], + "bbox": [ + 225, + 273, + 784, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "3DSA", + "bbox": [ + 689, + 114, + 730, + 126 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 785, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "14. Fischler, M.A., Elschlager, R.A.: The representation and matching of pictorial structures. IEEE Transactions on computers 100(1), 67-92 (1973)", + "15. Hartley, R., Zisserman, A.: Multiple view geometry in computer vision. Cambridge university press (2003)", + "16. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)", + "17. Hu, J., Shen, L., Sun, G.: Squeeze-and-excitation networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 7132-7141 (2018)", + "18. Huang, C., Jiang, S., Li, Y., Zhang, Z., Traish, J., Deng, C., Ferguson, S., Da Xu, R.Y.: End-to-end dynamic matching network for multi-view multi-person 3d pose estimation. 
In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXVIII 16. pp. 477-493. Springer (2020)", + "19. Iskakov, K., Burkov, E., Lempitsky, V., Malkov, Y.: Learnable triangulation of human pose. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 7718-7727 (2019)", + "20. Iskakov, K., Burkov, E., Lempitsky, V., Malkov, Y.: Learnable triangulation of human pose. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 7718-7727 (2019)", + "21. Joo, H., Liu, H., Tan, L., Gui, L., Nabbe, B., Matthews, I., Kanade, T., Nobuhara, S., Sheikh, Y.: Panoptic studio: A massively multiview system for social motion capture. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 3334-3342 (2015)", + "22. Lai, J.Y., Shu, S.H., Huang, Y.C.: A cell subdivision strategy for r-nearest neighbors computation. Journal of the Chinese Institute of Engineers 29(6), 953-965 (2006)", + "23. Li, X., Wang, W., Hu, X., Yang, J.: Selective kernel networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 510-519 (2019)", + "24. Li, Z., Oskarsson, M., Heyden, A.: 3d human pose and shape estimation through collaborative learning and multi-view model-fitting. In: Proceedings of the IEEE/CVF winter conference on applications of computer vision. pp. 1888-1897 (2021)", + "25. Lin, J., Lee, G.H.: Multi-view multi-person 3d pose estimation with plane sweep stereo. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11886-11895 (2021)", + "26. Loper, M., Mahmood, N., Romero, J., Pons-Moll, G., Black, M.J.: Spl: A skinned multi-person linear model. In: Seminal Graphics Papers: Pushing the Boundaries, Volume 2, pp. 851-866 (2023)", + "27. Ma, X., Su, J., Wang, C., Ci, H., Wang, Y.: Context modeling in 3d human pose estimation: A unified perspective. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 6238-6247 (2021)", + "28. Ma, X., Su, J., Wang, C., Ci, H., Wang, Y.: Context modeling in 3d human pose estimation: A unified perspective. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 6238-6247 (2021)", + "29. Moon, G., Chang, J.Y., Lee, K.M.: V2v-posenet: Voxel-to-voxel prediction network for accurate 3d hand and human pose estimation from a single depth map. In: Proceedings of the IEEE conference on computer vision and pattern Recognition. pp. 5079-5088 (2018)" + ], + "bbox": [ + 217, + 147, + 784, + 839 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Chen et al.", + "bbox": [ + 271, + 114, + 346, + 126 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "30. Reddy, N.D., Guigues, L., Pishchulin, L., Eledath, J., Narasimhan, S.G.: Tessen-track: End-to-end learnable multi-person articulated 3d pose tracking. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15190-15200 (2021)", + "31. Shin, S., Halilaj, E.: Multi-view human pose and shape estimation using learnable volumetric aggregation. arXiv preprint arXiv:2011.13427 (2020)", + "32. Su, J., Wang, C., Ma, X., Zeng, W., Wang, Y.: Virtualpose: Learning generalizable 3d human pose models from virtual data. In: European Conference on Computer Vision. pp. 55-71. Springer (2022)", + "33. 
Sun, Y., Bao, Q., Liu, W., Fu, Y., Black, M.J., Mei, T.: Monocular, one-stage, regression of multiple 3d people. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11179-11188 (2021)", + "34. Sun, Y., Liu, W., Bao, Q., Fu, Y., Mei, T., Black, M.J.: Putting people in their place: Monocular regression of 3d people in depth. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13243-13252 (2022)", + "35. Tu, H., Wang, C., Zeng, W.: Voxelpos: Towards multi-camera 3d human pose estimation in wild environment. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part I 16. pp. 197-212. Springer (2020)", + "36. Woo, S., Park, J., Lee, J.Y., Kweon, I.S.: Cbam: Convolutional block attention module. In: Proceedings of the European conference on computer vision (ECCV). pp. 3-19 (2018)", + "37. Wu, S., Jin, S., Liu, W., Bai, L., Qian, C., Liu, D., Ouyang, W.: Graph-based 3d multi-person pose estimation using multi-view images. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11148-11157 (2021)", + "38. Ye, H., Zhu, W., Wang, C., Wu, R., Wang, Y.: Faster voxelpose: Real-time 3d human pose estimation by orthographic projection. In: European Conference on Computer Vision. pp. 142-159. Springer (2022)", + "39. Yu, Z., Zhang, L., Xu, Y., Tang, C., Tran, L., Keskin, C., Park, H.S.: Multiview human body reconstruction from uncalibrated cameras. Advances in Neural Information Processing Systems 35, 7879-7891 (2022)", + "40. Zhang, J., Cai, Y., Yan, S., Feng, J., et al.: Direct multi-view multi-person 3d pose estimation. Advances in Neural Information Processing Systems 34, 13153-13164 (2021)", + "41. Zhang, Y., Wang, C., Wang, X., Liu, W., Zeng, W.: Voxeltrack: Multi-person 3d human pose estimation and tracking in the wild. IEEE Transactions on Pattern Analysis and Machine Intelligence 45(2), 2613-2626 (2022)", + "42. Zhang, Y., An, L., Yu, T., Li, X., Li, K., Liu, Y.: 4d association graph for realtime multi-person motion capture using multiple video cameras. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 
1324-1333 (2020)" + ], + "bbox": [ + 212, + 146, + 787, + 729 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "3DSA", + "bbox": [ + 689, + 114, + 730, + 126 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 16 + } +] \ No newline at end of file diff --git a/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/61874178-0339-4f9c-84c1-e29c381e8d91_model.json b/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/61874178-0339-4f9c-84c1-e29c381e8d91_model.json new file mode 100644 index 0000000000000000000000000000000000000000..3ed258b8bb1082c22651b6c93b4c9c984c178fac --- /dev/null +++ b/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/61874178-0339-4f9c-84c1-e29c381e8d91_model.json @@ -0,0 +1,2137 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.233, + 0.141, + 0.773, + 0.187 + ], + "angle": 0, + "content": "3DSA :Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms" + }, + { + "type": "text", + "bbox": [ + 0.374, + 0.213, + 0.629, + 0.228 + ], + "angle": 0, + "content": "Pohan Chen and Chiachi Tsi" + }, + { + "type": "text", + "bbox": [ + 0.38, + 0.241, + 0.623, + 0.268 + ], + "angle": 0, + "content": "National Cheng Kung University n28111089,cctsai@gs.ncku.edu.tw" + }, + { + "type": "text", + "bbox": [ + 0.263, + 0.307, + 0.744, + 0.488 + ], + "angle": 0, + "content": "Abstract. In this study, we introduce the 3D space attention module (3DSA) as a novel approach to address the drawback of multi-view 3D human pose estimation methods, which fail to recognize the object's significance from diverse viewpoints. Specifically, we utilize the 3D space subdivision algorithm to divide the feature volume into multiple regions. Predicted 3D space attention scores are assigned to the different regions to construct the feature volume with space attention. The purpose of the 3D space attention module is to distinguish the significance of individual regions within the feature volume by applying weighted attention adjustments derived from corresponding viewpoints. We conduct experiments on existing voxel-based methods, VoxelPose and Faster VoxelPose. By incorporating the space attention module, both achieve state-of-the-art performance on the CMU Panoptic Studio dataset." + }, + { + "type": "text", + "bbox": [ + 0.263, + 0.501, + 0.687, + 0.516 + ], + "angle": 0, + "content": "Keywords: : 3D Human Pose Estimation \\(\\cdot\\) 3D space attention" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.542, + 0.377, + 0.558 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.574, + 0.788, + 0.65 + ], + "angle": 0, + "content": "Estimating multiple 3D human poses simultaneously from multiple camera views is an enduring challenge in computer vision. The aim is to determine the 3D locations of the body joints for all people present in a scene. It is a task that offers benefits to numerous real-world applications, including intelligent sports analysis [5] and retail monitoring [35]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.65, + 0.788, + 0.726 + ], + "angle": 0, + "content": "In 2D-3D lifting approaches [9,10,42], a monocular pose estimator identifies 2D bounding boxes and 2D poses for individuals in each view. A multi-view matching algorithm then establishes consistent correspondences between the 2D poses across different views. 
Finally, the matched 2D poses are lifted to 3D using geometry models such as triangulation [15] or Pictorial Structure Models [2,3,14]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.726, + 0.788, + 0.817 + ], + "angle": 0, + "content": "As shown in Fig. 1(a), the voxel-based method [35] constructs the discretized feature volume from the detected heatmaps through 2D-3D projection. Based on the identified per-person proposal, the 3D pose for each person is individually estimated by feeding the feature volume into 3D-CNNs. To reduce the computational cost, another voxel-based approach [38] re-projects the feature volume to three two-dimensional coordinate planes and replaces the 3D-CNNs" + }, + { + "type": "page_footnote", + "bbox": [ + 0.218, + 0.825, + 0.621, + 0.84 + ], + "angle": 0, + "content": "* Corresponding author: Chiachi Tsi, cctsai@gs.ncku.edu.tw" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.347, + 0.127 + ], + "angle": 0, + "content": "Chen et al." + }, + { + "type": "image", + "bbox": [ + 0.277, + 0.149, + 0.725, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.343, + 0.788, + 0.4 + ], + "angle": 0, + "content": "Fig. 1: Comparison between our method and the existing voxel-based method. The primary distinction is that: (a) The existing method simply projects heatmaps into 3D space. (b) We enhance feature volumes using space attention, maintaining 3D information, and emphasizing critical regions within the feature volume." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.431, + 0.788, + 0.507 + ], + "angle": 0, + "content": "with 2D-CNNs, which increases inference speed. The existing direct prediction method [40] uses the Transformer architecture to regress multi-person 3D poses directly, bypassing the need for intermediate tasks. However, owing to the constraints of the transformer architecture, the inference speed of the method still falls behind the Voxel-based 2D-CNNs method [38]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.508, + 0.788, + 0.734 + ], + "angle": 0, + "content": "Existing multi-view approaches often fail to consider an important visual phenomenon: the visible parts of the same object should differ when observed from various angles. As depicted in Fig. 2, the four people in the scene are visible in Views 1 to 4. However, in View 5, only three are visible in the image due to obstruction by one of the people. To address this issue, we propose the 3D space attention module (referred to as 3DSA) and apply it to two open-source voxel-based methods [35,38]. Fig. 1(b) shows the overview of our proposed method. We added the space attention layers to the end of backbone network to predict the space attention scores. Directly estimating the importance of each voxel within the feature volume could lead to excessive computational demands. Therefore, we employed the 3D space subdivision algorithm to divide the feature volume into multiple regions. The voxels within each region were treated as a group, and the space attention scores were assigned to the group, representing the importance of the region. Finally, the feature volume with space attention was constructed, retaining the 3D information and paying more attention to crucial regions." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.735, + 0.788, + 0.84 + ], + "angle": 0, + "content": "We have conducted extensive experiments on the 3D human pose benchmark, Panoptic [21], to evaluate the efficacy of our space attention module. By applying the space attention module into the VoxelPose [35] and Faster VoxelPose [38] methods, our models show significant improvements of \\(20.93\\%\\) and \\(20.32\\%\\) in MPJPE respectively, both models achieve the state-of-the-art results. The voxel-based methods undermine their performance on the \\(AP_{25}\\) metric when compared to other multi-view methods. Our space attention module addresses" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.69, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DSA" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.787, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "image", + "bbox": [ + 0.221, + 0.149, + 0.333, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.262, + 0.191, + 0.29, + 0.2 + ], + "angle": 0, + "content": "View1" + }, + { + "type": "image", + "bbox": [ + 0.334, + 0.149, + 0.444, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.375, + 0.191, + 0.403, + 0.2 + ], + "angle": 0, + "content": "View2" + }, + { + "type": "image", + "bbox": [ + 0.445, + 0.149, + 0.558, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.487, + 0.191, + 0.515, + 0.2 + ], + "angle": 0, + "content": "View3" + }, + { + "type": "image", + "bbox": [ + 0.559, + 0.149, + 0.671, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.6, + 0.191, + 0.628, + 0.2 + ], + "angle": 0, + "content": "View4" + }, + { + "type": "image", + "bbox": [ + 0.673, + 0.149, + 0.785, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.717, + 0.191, + 0.745, + 0.2 + ], + "angle": 0, + "content": "View5" + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.213, + 0.785, + 0.283 + ], + "angle": 0, + "content": "Fig. 2: The visual phenomenon in the 3D space. Due to differences in camera viewing angles and obstruction issues, the visibility of the same person changes across different viewpoints. As shown by the red arrow in the figure, we can clearly observe the person in the images from Views 1 to 4, but they are not visible in View 5. This visual phenomenon is commonly encountered in multi-view human pose estimation tasks." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.311, + 0.784, + 0.357 + ], + "angle": 0, + "content": "this weakness, resulting in our model achieving \\(94.2\\%\\) and \\(94.22\\%\\) on the metric. Compared to the baseline model [35, 38], these scores demonstrate a significant improvement, surpassing all existing multi-view approaches." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.357, + 0.785, + 0.477 + ], + "angle": 0, + "content": "Our contributions are summarized as follows: (1) We proposed the 3D space attention module (3DSA), which addresses the drawbacks of the existing multiview 3D human pose estimation methods and validates its effectiveness on existing voxel-based methods [35, 38]. (2) We introduced a 3D space subdivision algorithm to reduce the computational complexity of the module. 
(3) By incorporating our space attention module into existing voxel-based methods [35, 38], both models achieve state-of-the-art results on the Panoptic benchmark, demonstrating the effectiveness of this attention mechanism." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.499, + 0.383, + 0.515 + ], + "angle": 0, + "content": "2 Related work" + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.53, + 0.579, + 0.546 + ], + "angle": 0, + "content": "2.1 Multi-view 3D human pose estimation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.553, + 0.785, + 0.659 + ], + "angle": 0, + "content": "Unlike monocular 3D human pose estimation [8, 13, 33, 34], multi-view human pose estimation leverages image information from different viewpoints. This approach not only effectively overcomes challenges such as occlusion and depth ambiguity but also ensures a richer and more accurate depiction of the 3D pose. Existing methods can be categorized into three types: (1)2D to 3D lifting methods [1-3,5,9,10,18,25,42] (2)Voxel-based methods [6,7,19,20,27,30,32,35,38,41] (3)Direct regression method [40]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.659, + 0.786, + 0.81 + ], + "angle": 0, + "content": "2D to 3D lifting method Firstly, a monocular pose estimator is utilized to estimate the 2D joints of each person in each view, through triangulation [15] and a 3D pictorial model [14], the 3D pose of each person is reconstructed from the associated 2D poses. Dong et al. [9,10] propose MvPose. MvPose utilizes a human pose detector to generate and cluster 2D bounding boxes and associated poses for each view. Each cluster represents the same person from different views. The 3D pose of each person is then reconstructed from these clusters using triangulation and a 3D pictorial model. The drawback of this 2D to 3D pose lifting method is its significant dependence on the preceding steps of 2D pose estimation and cross-view matching, as their quality directly influences the results." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.81, + 0.786, + 0.84 + ], + "angle": 0, + "content": "Voxel-based method In contrast to the 2D to 3D lifting methods, which require establishing cross-view correspondence based on noisy and incomplete 2D" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.348, + 0.128 + ], + "angle": 0, + "content": "Chen et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.359 + ], + "angle": 0, + "content": "pose estimates, the voxel-based method directly operates in the 3D space and therefore avoids making incorrect decisions in each camera view. Tu et al. [35] propose VoxelPose, the method that discretizes the 3D space into voxels and uses 2D heatmaps to construct a 3D feature volume. 3D-CNNs process this volume to locate human proposals and regress the 3D joint. Since the voxel-based method heavily relies on 3D convolutions, it requires higher computational cost and inference time to predict 3D joints. To enhance the model efficiency, Ye et al. [38] proposed Faster VoxelPose, an optimization method based on orthographic projection. This method projects the 3D feature volume to three mutually perpendicular planes and then utilizes 2D-CNNs to locate the center proposal and regress the 3D joint. By doing this, it eliminates the need for time-consuming 3D convolutions. Choudhury et al. 
[7] proposed TEMPO, which utilizes temporal context to enhance pose estimation, delivering smoother and more accurate human poses by integrating spatiotemporal information." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.363, + 0.788, + 0.455 + ], + "angle": 0, + "content": "It has been observed that the voxel-based methods generally yield lower scores on the \\(AP_{25}\\) metric in Panoptic datasets when compared to other methods. In this paper, we introduce a novel 3D space attention module, which applies weighted attention adjustments to the feature volume from corresponding viewpoints. This attention mechanism guides the network to focus more effectively on crucial feature regions and yields significant improvements in the \\(AP_{25}\\) metric." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.459, + 0.79, + 0.596 + ], + "angle": 0, + "content": "Direct regression method In contrast to previous methods, Zhang et al. [40] proposed MvP, which leverages the Transformer architecture to regress multi-person 3D poses directly, thus eliminating the need for intermediate tasks. MvP achieved impressive results on the Panoptic [21] datasets. It showed significant progress (8%) on the most stringent \\(AP_{25}\\) compared to the Voxel-based methods [35, 38] and is more robust and accurate than previous models. However, due to the limitations of the transformer architecture, the inference speed of MvP still can't compete with 2D CNN-based voxel method [38], which is not conducive to its deployment in practical applications." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.64, + 0.571, + 0.656 + ], + "angle": 0, + "content": "2.2 Multi-view 3D body mesh estimation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.689, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Multi-view 3D body mesh estimation [11,20,24,31,39] is a task closely related to 3D pose estimation. Instead of directly estimating joint positions, this task involves predicting the parameters of SMPL [26] or employing a fitting method [4] to align the SMPL model with detected joint positions. Yu et al. [39] use neural networks to directly predict local attention, assigning importance to visual features across views. Our method focuses on using space subdivision and space attention to address the varying importance of different viewpoints in the same 3D space. Directly predicting the space attention and projecting to the 3D space will result in equal attention values along the projection ray, which prevents the model from accurately identifying depth information." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.689, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DSA" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.147, + 0.447, + 0.16 + ], + "angle": 0, + "content": "2.3 Attention mechanisms" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.169, + 0.788, + 0.351 + ], + "angle": 0, + "content": "The methodology of predicting attention scores from input features and then using these scores to enhance discriminative feature learning has been adopted by numerous studies [17, 23, 36]. The most famous is SENet proposed by Hu et al. [17], which employs attention mechanisms to adaptively recalibrate channelwise features by modeling inter-channel dependencies. Ma et al. 
proposed global attention in ContextPose [28], which focus on features within each voxel by estimating confidence scores for each joint, effectively reducing interference from non-human body voxels and improving joint estimation accuracy. Regarding merging 3D features extracted from different 2D viewpoints, the inherent physical characteristics of imaging result in varying importance of different viewpoints for the same 3D space. Therefore, we introduced the space attention module to solve this problem in a voxel-represented 3D space." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.371, + 0.331, + 0.387 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "title", + "bbox": [ + 0.214, + 0.399, + 0.766, + 0.415 + ], + "angle": 0, + "content": "3.1 The drawback of existing multi-view 3D human pose method" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.421, + 0.788, + 0.527 + ], + "angle": 0, + "content": "Despite the impressive achievements of the existing multi-view 3D human pose methods, they ignore an important visual phenomenon: the visible parts of an object could vary when observed from different viewpoints. Specifically, an object's visibility can differ dramatically across various viewpoints, for instance, an object may be distinctly visible from viewpoint A, yet as we transition to viewpoint B, its visibility may diminish or even vanish due to interposing obstacles or occluded persons." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.528, + 0.788, + 0.648 + ], + "angle": 0, + "content": "In this work, we introduce the space attention module to address the drawback, and we validate its effectiveness on existing voxel-based methods [35,38]. The existing methods merely project heatmaps into 3D space. As depicted in Fig. 1(b), our approach leverages the space attention module to enhance feature volumes. This not only preserves 3D information but also emphasizes crucial regions inside the feature volume. The objective of this attention mechanism is to focus on significant regions within the feature volume, by applying weighted attention adjustments to the feature volume from corresponding viewpoints." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.667, + 0.44, + 0.683 + ], + "angle": 0, + "content": "3.2 Network architecture" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.689, + 0.788, + 0.795 + ], + "angle": 0, + "content": "Heatmap and space attention prediction. As shown in Fig. 3 (a), our model adopts a simple multi-layer design with a backbone and two additional layers. In the heatmap layer, the probability of a 2D pose heatmaps for the corresponding view is predicted. Meanwhile, in the space attention layer, the attention scores of the feature volume are determined. The attention scores are dynamically adjusted based on the input image, emphasizing regions with higher visibility in the 3D space." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.796, + 0.788, + 0.843 + ], + "angle": 0, + "content": "Space attention with person proposal generation. As shown in Fig. 3 (b), by projecting the output heatmaps to the 3D space, the discretized feature volume \\(\\{\\mathbf{G} \\in \\mathbb{R}^{80 \\times 80 \\times 20}\\}\\) is constructed. Following [35], the 3D space is discretized" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.347, + 0.127 + ], + "angle": 0, + "content": "Chen et al." 
+ }, + { + "type": "image", + "bbox": [ + 0.223, + 0.15, + 0.784, + 0.369 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.383, + 0.788, + 0.552 + ], + "angle": 0, + "content": "Fig. 3: Overview of network architecture. (a) Given the multi-view image as input, the backbone network predicted both the heatmaps and the space attention scores for each view. Each heatmap is projected to a 3D space, which is physically shared but independent for each view, constructing the feature volume. The space attention scores for each view are assigned to the different regions in the shared 3D space. (b) By performing an element-wise multiplication of the raw feature volume with the space attention scores, we produce a feature volume infused with spatial attention. Subsequently, this attention-enhanced feature volume is fed into 3D-CNNs to locate the per-person proposal. (c) A more detailed feature volume corresponding to the proposal was generated. By calculating the spatial relationship between the proposal and the feature volume, space attention scores for the proposal were sampled from the attention in 3D space. Finally, the human pose was estimated." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.591, + 0.788, + 0.697 + ], + "angle": 0, + "content": "into \\(X \\times Y \\times Z\\) locations. Based on observations from the space \\([2, 21], X, Y\\) and \\(Z\\) are set to be 80, 80, and 20 respectively to maintain a good balance between speed and precision. Let the 2D heatmap of a view be denoted as \\(M_v \\in \\mathbb{R}^{K \\times w \\times h}\\), where \\(K\\) is the number of person's joints. For each voxel location \\(G^{X,Y,Z}\\), the projected location in 2D view \\(\\mathbf{V}\\) is represented as \\(P_v^{X,Y,Z}\\). The heatmap values at \\(P_v^{X,Y,Z}\\) is denoted as \\(M_v^{X,Y,Z} \\in \\mathbb{R}^K\\). \\(\\mathbf{v} \\in \\mathbb{R}^V\\) represents one view from total \\(\\mathbf{V}\\) views." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.703, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Directly predicting the importance of each voxel in the feature volume would result in an overwhelming computational burden (Given that the output dimension of the model equals the number of voxels in the feature volume, which is 128,000). To reduce computational complexity, we use a 3D space subdivision algorithm to divide the feature volume \\(\\{G_V \\in \\mathbb{R}^{80 \\times 80 \\times 20}\\}\\) from each view, \\(V\\) into several regions \\(\\{Div\\overline{G}_V \\in \\mathbb{R}^{80 \\times 80 \\times 20}\\}\\). Subsequently, the space attention scores predicted from the model are assigned to each region in the divided feature volume to compute the attention of the feature volume \\(\\{V_v^{X,Y,Z} \\in \\mathbb{R}^{80 \\times 80 \\times 20}\\}\\), which represents the attention scores for view \\(v\\)." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.69, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DSA" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.224 + ], + "angle": 0, + "content": "After that, an element-wise multiplication is performed between the space attention \\(\\{V_{\\pmb{v}}\\}\\) and the raw feature volume \\(\\{M_{\\pmb{v}}\\}\\) to obtain a feature volume with space attention on view \\(\\pmb{v}\\), denoted as \\(MV_{\\pmb{v}}\\). 
Following this, the feature volumes (with space attention) constructed from multi-view images are fused on average to obtain the aggregated feature volume \\(\\{\\pmb{F} \\in \\mathbb{R}^{80 \\times 80 \\times 20}\\}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.45, + 0.235, + 0.786, + 0.274 + ], + "angle": 0, + "content": "\\[\nF = \\frac {1}{V} \\sum_ {v = 1} ^ {V} M _ {v} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.283, + 0.785, + 0.357 + ], + "angle": 0, + "content": "where \\(\\mathbf{V}\\) represents the number of cameras. \\(F\\) represents the likelihood of \\(K\\) joints in \\(G\\). Through applying space attention to the feature volume, 3D information is retained while emphasizing important voxels. Finally, the aggregated feature volume \\(\\mathbf{F}\\) is input into the 3D convolutional network to determine the per-person likelihood in the 3D discretized feature volume." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.359, + 0.788, + 0.585 + ], + "angle": 0, + "content": "Space attention with per-person pose regression. In the final step, the completed 3D human pose corresponding to the proposal is predicted, as illustrated in Fig. 3 (c). For a fair evaluation of the effect of the space attention module, [35] is adopted to build an individual fine-grained feature volume centered at each predicted proposal. The size of the fine-grained feature volume is set to be \\(2000\\mathrm{mm} \\times 2000\\mathrm{mm} \\times 2000\\mathrm{mm}\\), and the feature volume is divided into a discrete grid with \\(X' \\times Y' \\times Z'\\) voxel where \\(X', Y', Z'\\) equal to 64. Each feature volume under a particular perspective will only have one space attention score to indicate its importance. In this work, we sample the attention score for each proposal by analyzing the spatial relationship between the proposal and the feature volume. Then, we employ a nearest neighbor sampling method to precisely calculate the attention scores for each proposal. The aggregated fine-grained feature volume is computed based on the descriptions from the previous stage. Finally, the 3D heatmap is estimated and the complete 3D human poses of the persons in the space are regressed." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.606, + 0.521, + 0.621 + ], + "angle": 0, + "content": "3.3 3D space subdivision algorithm" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.629, + 0.788, + 0.841 + ], + "angle": 0, + "content": "As mentioned in Sec. 3.2, the 3D space subdivision algorithm is crucial to the implementation of our space attention module. Computational challenges arise when directly predicting the significance of each voxel in the feature volume. Inspired by Lai et al. [22] utilizing the cell subdivision search algorithm to reduce the computational complexity associated with searching through a large amount of data points, we employ a 3D space subdivision algorithm to divide the feature volume into distinct regions. Specifically, the voxels within each region are considered as a group, and attention scores are assigned to these groups to signify the importance of each region. Through the backbone network, the weight of each region is predicted, representing the importance of corresponding areas within the same viewpoint in the feature volume. If voxels within a specific region exhibit higher confidence levels, this indicates their relative importance. 
Conversely, lower confidence levels in voxels, caused by obstructions, occlusion, or other factors, suggest that they are less significant within that region. As" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.348, + 0.128 + ], + "angle": 0, + "content": "Chen et al." + }, + { + "type": "image", + "bbox": [ + 0.391, + 0.156, + 0.592, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.321, + 0.788, + 0.364 + ], + "angle": 0, + "content": "Fig. 4: Subdivision of the voxel within the feature volume. We utilize a 3D space subdivision algorithm to partition the feature volume into separate regions, with the voxels in each region being treated as a group." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.393, + 0.789, + 0.468 + ], + "angle": 0, + "content": "Fig. 4 depicts, the feature volume in 3D space \\(\\pmb{G}\\) is divided into several cells along the \\(x\\), \\(y\\), and \\(z\\) axes. Assume \\(l_{i}\\), \\(i \\in \\{x,y,z\\}\\) represents the length, width, and height of the feature volume, while \\(\\delta_{i}\\), \\(i \\in \\{x,y,z\\}\\) represents the cell length along a particular axis. The relationship between \\(l_{i}\\) and \\(\\delta_{i}\\) can be expressed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.384, + 0.477, + 0.787, + 0.511 + ], + "angle": 0, + "content": "\\[\n\\delta_ {i} = \\operatorname {i n t} \\left(\\frac {l _ {i}}{n _ {i}}\\right) + 1 \\quad i \\in \\{x, y, z \\} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.521, + 0.788, + 0.552 + ], + "angle": 0, + "content": "where \\( n_i \\) represents the number of regions divided along the \\( i \\)-axis. The total number of regions \\( n_{total} \\) in the 3D space is given by the product of" + }, + { + "type": "equation", + "bbox": [ + 0.424, + 0.564, + 0.787, + 0.581 + ], + "angle": 0, + "content": "\\[\nn _ {\\text {T o t a l}} = n _ {x} \\times n _ {y} \\times n _ {z} \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.593, + 0.788, + 0.625 + ], + "angle": 0, + "content": "Let the position vector of a voxel be \\(\\mathbf{V} = [v_x, v_y, v_z]^\\top\\). Then, the region that \\(\\mathbf{V}\\) resides in can be computed using the following equation:" + }, + { + "type": "equation", + "bbox": [ + 0.347, + 0.634, + 0.787, + 0.668 + ], + "angle": 0, + "content": "\\[\ni _ {j} = \\operatorname {f l o o r} \\left(\\frac {\\left(v _ {j} - j _ {\\min }\\right)}{\\delta_ {j}}\\right) + 1 \\quad j \\in \\{x, y, z \\} \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.679, + 0.788, + 0.755 + ], + "angle": 0, + "content": "where \\( i_j \\) represents the indices of the voxel in \\( x, y, z \\) directions, floor() is used to round down to integer representation, and \\( j_{\\mathrm{min}} \\) represents the minimum coordinates in \\( x, y \\) and \\( z \\) directions of the voxel within the feature volume. 
Finally, the region id of the voxel (Voxel_id) within the feature volume can be calculated by the following formula:" + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.767, + 0.787, + 0.784 + ], + "angle": 0, + "content": "\\[\nV o x e l _ {i d} = i _ {z} \\times \\left(n _ {x} \\times n _ {y}\\right) + i _ {y} \\times n _ {x} + i _ {x} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.795, + 0.788, + 0.841 + ], + "angle": 0, + "content": "The ID of each voxel can be calculated according to the formulas, however, in practical applications, the total number of voxels in the feature volume is substantial, which could lead to excessive computation times. To tackle this" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.69, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DSA" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "image", + "bbox": [ + 0.291, + 0.151, + 0.72, + 0.355 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.314, + 0.372, + 0.687, + 0.385 + ], + "angle": 0, + "content": "Fig. 5: Detailed architecture of space attention module." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.417, + 0.785, + 0.478 + ], + "angle": 0, + "content": "challenge, we have optimized the weight assignment process within the space attention module, adopting the following Python code (Algorithm 1). Compared Eq. (2) to Eq. (5), our approach is better adapted to practical applications, achieving the same objectives and results more efficiently." + }, + { + "type": "code_caption", + "bbox": [ + 0.218, + 0.506, + 0.535, + 0.521 + ], + "angle": 0, + "content": "Algorithm 1 Weight assignment algorithm" + }, + { + "type": "code", + "bbox": [ + 0.217, + 0.523, + 0.663, + 0.665 + ], + "angle": 0, + "content": "Suppose we have 3 intervals along x, y, and z axis \\(x,y,z = [0,27,54,80],[0,27,54,80],[0,7,14,20]\\) # Assign space attention value to the tensor for one view. \n subdivision num \\(= 0\\) \nfor i in range (3): for j in range (3): for k in range (3): space attention[x [i]: x [i+1],y [j]: y [j+1],z [k]: z [k+1] \\(\\equiv\\) attention value[subdivision num] subdivision num \\(=\\) subdivision num+1" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.722, + 0.611, + 0.737 + ], + "angle": 0, + "content": "3.4 Implementation of space attention module" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.785, + 0.84 + ], + "angle": 0, + "content": "In implementations, only the following adjustments were made: (1) A simple branch was derived from the backbone network [16] to predict the space attention scores. (2) We executed an element-wise multiplication of the raw feature volume with the space attention scores calculated by Algorithm 1. (3) The attention scores of the proposal are computed by analyzing the positional relationship between the proposal and the feature volume." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.348, + 0.128 + ], + "angle": 0, + "content": "Chen et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.223 + ], + "angle": 0, + "content": "The space attention module can be easily applied to existing multi-person voxel-based human pose methods [7, 35, 38, 41]. 
However, since some of these methods are not open-sourced, it prevents us from performing validation. Consequently, we chose to validate our method using the two open-sourced voxel-based methods [35, 38]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.223, + 0.785, + 0.298 + ], + "angle": 0, + "content": "It is important to emphasize that for a fair evaluation of the impact of the space attention module on existing voxel-based methods [35,38], the network architecture [29] used for locating the person proposal and regressing the 3D pose remained unaltered. For the model's loss function and hyperparameter configuration, the original design proposed by [35,38] has remained." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.299, + 0.787, + 0.435 + ], + "angle": 0, + "content": "The architecture of the space attention layer is presented in Fig. 5. It is a straightforward and lightweight design, which uses a simple convolutional block followed by global average pooling and the sigmoid activation function to estimate the space attention scores of the corresponding image. The purpose of the global average pooling is to replace the traditional fully connected layers, thereby reducing the number of parameters. The output dimensions of the space attention layer are equal to the number of regions in the feature volume. The space attention scores \\( S \\in \\mathbb{R}^n \\) represent the \\( n \\) space attention values, indicating that the feature volume is divided into \\( n \\) regions." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.455, + 0.376, + 0.473 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.485, + 0.446, + 0.5 + ], + "angle": 0, + "content": "4.1 Implementation detail" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.508, + 0.785, + 0.629 + ], + "angle": 0, + "content": "Training and evaluation datasets. CMU Panoptic [21] is a 3D dataset with multi-view images. To evaluate and analyze our approach, we conducted extensive experiments on the Panoptic dataset. Following VoxelPose [35], the same data sequences were used for both training and evaluating our model. Our experiments were conducted using five HD cameras with camera IDs 3, 6, 12, 13, 23. Shelf and Campus [2] are two datasets that are commonly used in multi-view and multi-person research. We evaluated our method using the same data setup as in [35]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.63, + 0.785, + 0.735 + ], + "angle": 0, + "content": "Evaluation metric. For the Panoptic datasets [21], we adopt the Average Precision \\((AP^K)\\) and Mean Per Joint Position Error (MPJPE) as metrics that demonstrate the robustness and accuracy of multi-person 3D pose estimation. To assess the influence of the space attention module on model size and computational complexity, we consider key metrics such as MACs and model parameters. For both the Campus and Shelf datasets, we present the results in terms of the Percentage of Correct Parts (PCP)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.735, + 0.785, + 0.81 + ], + "angle": 0, + "content": "Training details. For the Panoptic datasets, we use an off-the-shelf pose estimation model constructed based on ResNet-50 [16] to extract features from multi-view images. The difference from VoxelPose [35] is that since our backbone network needs to predict the space attention scores, the parameters of the model are updated throughout the training iteration." 
+ }, + { + "type": "text", + "bbox": [ + 0.215, + 0.81, + 0.785, + 0.842 + ], + "angle": 0, + "content": "Due to the incomplete data annotation in the Campus and Shelf datasets [2], Tu et al. [35] use synthetic 3D poses to train the network. To implement the" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.69, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DSA" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.239 + ], + "angle": 0, + "content": "space attention module, we use the synthetic heatmap as the input feature to predict the space attention scores. In summary, the space attention module has two modes: the first predicts the space attention scores from the ground truth multi-view image, referred to as Image-based input; the second predicts the space attention scores from the synthetic heatmaps, referred to as Heatmap-based input." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.26, + 0.546, + 0.275 + ], + "angle": 0, + "content": "4.2 Comparisons to Existing Methods" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.285, + 0.79, + 0.482 + ], + "angle": 0, + "content": "Panoptic. We first evaluate our model on the Panoptic dataset [21] and compare it with the state-of-the-art model. As illustrated in Tab. 1, by incorporating the space attention module \\((10 \\times 10 \\times 3\\) configuration) with two voxel-based methods, VoxelPose [35] and Faster VoxelPose [38], our model achieves \\(94.2\\%\\) and \\(94.22\\%\\) on the most strict evaluation metric \\(AP_{25}\\), outperforming the transformer model MvP [40]. Our proposed method shows inferior performance in terms of \\(AP@50,100,150\\) when compared to VoxelPose, and this \\(0.5\\%\\) performance gap is generally attributed to model variation. It particularly emphasizes that in terms of the \\(AP_{25}\\) metric, our method has significantly improved, outperforming VoxelPose by \\(12.69\\%\\) and Faster VoxelPose by \\(10.56\\%\\). Remarkably, both methods achieved much lower MPJPE with values of 13.98 and 14.55, outperforming the TEMPO [7] and achieving the SOTA records. This demonstrates the effectiveness of our space attention module." + }, + { + "type": "table_caption", + "bbox": [ + 0.264, + 0.522, + 0.74, + 0.537 + ], + "angle": 0, + "content": "Table 1: Comparison with existing methods on the Panoptic datasets." + }, + { + "type": "table", + "bbox": [ + 0.251, + 0.548, + 0.744, + 0.689 + ], + "angle": 0, + "content": "
| Method | AP25 | AP50 | AP100 | AP150 | MPJPE |
| --- | --- | --- | --- | --- | --- |
| VoxelPose [35] | 83.59 | 98.33 | 99.76 | 99.91 | 17.68mm |
| Faster VoxelPose [38] | 85.22 | 98.08 | 99.32 | 99.48 | 18.26mm |
| PlaneSweep Pose [25] | 92.12 | 98.96 | 99.81 | 99.84 | 16.75mm |
| RPGN [37] | - | - | - | - | 15.84mm |
| MvP [40] | 92.28 | 96.6 | 97.45 | 97.69 | 15.76mm |
| TEMPO [7] | 89.01 | 99.08 | 99.76 | 99.93 | 14.68mm |
| VoxelPose + 3DSA | 94.2 | 98.49 | 99.21 | 99.31 | 13.98mm |
| Faster VoxelPose + 3DSA | 94.22 | 98.65 | 99.49 | 99.75 | 14.55mm |
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.719, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Campus and Shelf. The quantitative evaluation results on Shelf and Campus datasets [2] are presented in Tab. 2. Our proposed method (VoxelPose [35] with space attention, \\(10 \\times 10 \\times 3\\) configuration) remains competitive on both datasets. The performance of space attention is not as outstanding on Panoptic datasets [21], and we believe this is related to the Heatmap-based input. Since the heatmap lacks image information, the model is hard to determine the importance of different regions in 3D space from the heatmap. We will detail our research on this issue in the subsequent ablation study." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.348, + 0.128 + ], + "angle": 0, + "content": "Chen et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.295, + 0.145, + 0.707, + 0.16 + ], + "angle": 0, + "content": "Table 2: Quantitative results on Shelf and Campus datasets." + }, + { + "type": "table", + "bbox": [ + 0.22, + 0.172, + 0.782, + 0.321 + ], + "angle": 0, + "content": "
| Method | Shelf Actor1 | Shelf Actor2 | Shelf Actor3 | Shelf Average | Campus Actor1 | Campus Actor2 | Campus Actor3 | Campus Average |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Ershadi et al. [12] | 93.3 | 75.9 | 94.8 | 88 | 94.2 | 92.9 | 84.6 | 90.6 |
| Dong et al. [10] | 98.8 | 94.1 | 97.8 | 96.9 | 97.6 | 93.3 | 98 | 96.3 |
| MvP [40] | 99.3 | 94.1 | 97.8 | 97.4 | 98.2 | 94.1 | 97.4 | 96.6 |
| TEMPO [7] | 99.3 | 95.1 | 97.8 | 97.4 | 97.7 | 95.5 | 97.9 | 97.3 |
| Faster VoxelPose [38] | 99.4 | 96 | 97.5 | 97.6 | 96.5 | 94.1 | 97.9 | 96.2 |
| VoxelPose [35] | 99.3 | 94.1 | 97.6 | 97 | 97.6 | 93.8 | 98.8 | 96.7 |
| Ours | 99.4 | 95.4 | 97.6 | 97.5 | 98 | 93.4 | 98.6 | 96.7 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.25, + 0.333, + 0.753, + 0.348 + ], + "angle": 0, + "content": "Table 3: Space subdivision and efficiency analysis on the Panoptic dataset" + }, + { + "type": "table", + "bbox": [ + 0.269, + 0.359, + 0.732, + 0.589 + ], + "angle": 0, + "content": "
VoxelPose incorporated with space attention

| Space subdivision | AP25 | AP100 | MPJPE | MACs(G) | Parameter(M) |
| --- | --- | --- | --- | --- | --- |
| Tu et al. [35] | 83.59 | 99.76 | 17.68 | 178.88 | 40.62 |
| 3 × 3 × 3 | 92.73 | 99.58 | 14.78 | 179.09 | 40.64 |
| 7 × 7 × 3 | 93.71 | 99.33 | 14.41 | 180.04 | 40.77 |
| 10 × 10 × 3 | 94.2 | 99.21 | 13.98 | 181.24 | 40.92 |
| 15 × 15 × 6 | 94.33 | 99.1 | 13.97 | 193.24 | 42.47 |
| 20 × 20 × 9 | 94.44 | 99.44 | 13.94 | 221.58 | 46.15 |

Faster VoxelPose incorporated with space attention

| Space subdivision | AP25 | AP100 | MPJPE | MACs(G) | Parameter(M) |
| --- | --- | --- | --- | --- | --- |
| Ye et al. [38] | 85.22 | 99.32 | 18.26 | 106.87 | 36.37 |
| 3 × 3 × 3 | 92.57 | 99.61 | 15.54 | 107.08 | 36.39 |
| 7 × 7 × 3 | 93.75 | 99.54 | 14.88 | 108.03 | 36.52 |
| 10 × 10 × 3 | 94.22 | 99.49 | 14.55 | 109.23 | 36.67 |
" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.616, + 0.4, + 0.63 + ], + "angle": 0, + "content": "4.3 Ablation studies" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.643, + 0.787, + 0.673 + ], + "angle": 0, + "content": "In this section, we conduct ablative experiments to analyze a variety of factors within our approach." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.674, + 0.788, + 0.794 + ], + "angle": 0, + "content": "Individual contributions of the space attention module and the 3D space subdivision algorithm. By comparing the results in Tab. 3, we can see that the finer the subdivision of the 3D space, the model's accuracy and precision improve correspondingly. However, the model's performance tends to converge after subdividing into \\(10 \\times 10 \\times 3\\) regions. The result demonstrates the critical importance of the space subdivision algorithm within the space attention module. The direct prediction of all voxels does not result in significant improvements in performance." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Efficiency analysis. In this work, we focus on comparing our method with existing voxel-based methods [35, 38]. Tab. 3 demonstrates that incorporating the space attention module into the voxel-based approach resulted in a slight" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.69, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DSA" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.314 + ], + "angle": 0, + "content": "increase in the model's complexity. Regarding the model we eventually selected (VoxelPose with \\(10 \\times 10 \\times 3\\) space attention module), MACs increased by \\(1.32\\%\\) and parameters by \\(0.74\\%\\) when compared to the VoxelPose method. As previously mentioned, excessively increasing the number of spatial subdivisions does not enhance performance but significantly increases the model's complexity. For instance, subdividing the space into \\(20 \\times 20 \\times 9\\) regions resulted in a \\(23.8\\%\\) increase in the model's MACs and a \\(13.6\\%\\) increase in parameters. This further demonstrates the importance of the space subdivision algorithm in improving the efficiency of the space attention module. To strike a balance between performance and efficiency, we adopt the \\(10 \\times 10 \\times 3\\) space attention configuration on VoxelPose [35] to study the impact of the individual factors." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.316, + 0.788, + 0.407 + ], + "angle": 0, + "content": "Number of cameras. We compared our method with existing 3D Pose methods [7,35,38,40]. Tab. 4 shows that the feature volume representation is diminished with fewer camera views, leading to a drop in accuracy. The improvement in both \\(AP\\) and \\(MPJPE\\) metrics over other models, as the number of cameras increases, underscores the significance of multi-view images for enhancing the space attention module's performance." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.41, + 0.788, + 0.515 + ], + "angle": 0, + "content": "Image-based input /Heatmap-based input. To further validate the impact of different inputs on the space attention module, we conducted experiments on the Panoptic dataset [2]. As shown in Tab. 
5, although the space attention with Heatmap-based input shows an improvement compared to the baseline model [35], it is noticeably inferior to the space attention with Image-based input. We consider that this disparity occurs because heatmaps lack spatial and depth information in comparison to images." + }, + { + "type": "table_caption", + "bbox": [ + 0.292, + 0.56, + 0.712, + 0.574 + ], + "angle": 0, + "content": "Table 4: Number of cameras analysis on the Panoptic dataset" + }, + { + "type": "table", + "bbox": [ + 0.296, + 0.588, + 0.705, + 0.803 + ], + "angle": 0, + "content": "
| Method | Cam | \(AP_{25}\) | \(AP_{50}\) | \(AP_{100}\) | \(AP_{150}\) | MPJPE |
| --- | --- | --- | --- | --- | --- | --- |
| Faster VoxelPose [38] | 4 | 73.95 | 97.02 | 99.21 | 99.35 | 21.12 |
| MvP [40] | 4 | 84.1 | - | 96.7 | - | 19.3 |
| TEMPO [7] | 4 | - | - | - | - | 17.34 |
| ours | 4 | 88.4 | 98.1 | 99.59 | 99.7 | 16.78 |
| VoxelPose [35] | 3 | 58.94 | 93.88 | 98.45 | 99.32 | 24.29 |
| Faster VoxelPose [38] | 3 | 53.68 | 91.89 | 97.4 | 98.3 | 26.13 |
| MvP [40] | 3 | 71.8 | - | 95.1 | - | 21.1 |
| TEMPO [7] | 3 | - | - | - | - | 19.22 |
| ours | 3 | 73.06 | 95.23 | 98.64 | 99.25 | 19.03 |
| MvP [40] | 2 | 37.7 | - | 93 | - | 34.8 |
| TEMPO [7] | 2 | - | - | - | - | 32.13 |
| ours | 2 | 47.95 | 88.74 | 97.84 | 98.8 | 27.35 |
" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.347, + 0.128 + ], + "angle": 0, + "content": "Chen et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.295, + 0.145, + 0.707, + 0.159 + ], + "angle": 0, + "content": "Table 5: Effect of different inputs on space attention module" + }, + { + "type": "table", + "bbox": [ + 0.348, + 0.172, + 0.652, + 0.244 + ], + "angle": 0, + "content": "
Image-based input / Heatmap-based input

| Input | AP25 | AP50 | AP100 | AP150 | MPJPE |
| --- | --- | --- | --- | --- | --- |
| Image | 94.2 | 98.49 | 99.21 | 99.31 | 13.98 |
| Heatmap | 86.97 | 98.3 | 99.29 | 99.38 | 17.21 |
" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.274, + 0.527, + 0.289 + ], + "angle": 0, + "content": "4.4 3D space attention visualization" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.302, + 0.794, + 0.424 + ], + "angle": 0, + "content": "In Fig. 6, we provide the space attention visualization results on Panoptic datasets. Red regions indicate attention scores above 0.8, while blue for below 0.8. Observing the spatial distribution of attention in 3D space (1st row), most key attention areas are focused where people are present. In view 5, an obscured person is not visible from that angle, resulting in lower attention scores in that area. This result aligns with our hypothesis, confirming that the space attention mechanism discriminates the importance of different regions in the feature volume based on visibility. More visualization results are provided in the supplementary material." + }, + { + "type": "image", + "bbox": [ + 0.219, + 0.47, + 0.785, + 0.609 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.622, + 0.788, + 0.665 + ], + "angle": 0, + "content": "Fig. 6: 3D space attention visualization. We marked areas with scores above 0.8 (red regions) in 3D space (1st row) and projected them onto the corresponding 2D image (2nd row)." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.7, + 0.36, + 0.715 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.734, + 0.788, + 0.826 + ], + "angle": 0, + "content": "In this paper, we present the novel space attention module for the voxel-based multi-view 3D pose estimation method. We learn the space attention scores from the input image and utilize the 3D space subdivision algorithm to divide the feature volume, finally constructing the feature volumes with space attention. By integrating our space attention module into two existing voxel-based methods, both models achieve the state-of-the-art results on the panoptic benchmarks." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.69, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DSA" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.145, + 0.403, + 0.163 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.176, + 0.785, + 0.221 + ], + "angle": 0, + "content": "This work is supported and National Science and Technology Council (NSTC), Taiwan R.O.C. projects with grants 112-2222-E-006-009-, 113-2218-E-035-001-, 113-2425-H-006-007- and NSTC 113-2627-M-006-005 -." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.244, + 0.323, + 0.26 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.274, + 0.785, + 0.302 + ], + "angle": 0, + "content": "1. Amin, S., Andriluka, M., Rohrbach, M., Schiele, B.: Multi-view pictorial structures for 3d human pose estimation. In: BMvc. vol. 1. Bristol, UK (2013)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.303, + 0.785, + 0.344 + ], + "angle": 0, + "content": "2. Belagiannis, V., Amin, S., Andriluka, M., Schiele, B., Navab, N., Ilic, S.: 3d pictorial structures for multiple human pose estimation. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1669-1676 (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.344, + 0.785, + 0.385 + ], + "angle": 0, + "content": "3. 
Belagiannis, V., Amin, S., Andriluka, M., Schiele, B., Navab, N., Ilic, S.: 3d pictorial structures revisited: Multiple human pose estimation. IEEE transactions on pattern analysis and machine intelligence 38(10), 1929-1942 (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.385, + 0.785, + 0.454 + ], + "angle": 0, + "content": "4. Bogo, F., Kanazawa, A., Lassner, C., Gehler, P., Romero, J., Black, M.J.: Keep it smpl: Automatic estimation of 3d human pose and shape from a single image. In: Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V 14. pp. 561-578. Springer (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.455, + 0.785, + 0.495 + ], + "angle": 0, + "content": "5. Bridgeman, L., Volino, M., Guillemaut, J.Y., Hilton, A.: Multi-person 3d pose estimation and tracking in sports. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops. pp. 0-0 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.496, + 0.785, + 0.523 + ], + "angle": 0, + "content": "6. Chen, Y., Gu, R., Huang, O., Jia, G.: Vtp: volumetric transformer for multi-view multi-person 3d pose estimation. Applied Intelligence 53(22), 26568-26579 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.524, + 0.785, + 0.577 + ], + "angle": 0, + "content": "7. Choudhury, R., Kitani, K.M., Jeni, L.A.: Tempo: Efficient multi-view pose estimation, tracking, and forecasting. In: 2023 IEEE/CVF International Conference on Computer Vision (ICCV). pp. 14704-14714 (2023). https://doi.org/10.1109/ICCV51070.2023.01355" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.579, + 0.785, + 0.62 + ], + "angle": 0, + "content": "8. Dabral, R., Mundhada, A., Kusupati, U., Afaque, S., Sharma, A., Jain, A.: Learning 3d human pose from structure and motion. In: Proceedings of the European conference on computer vision (ECCV). pp. 668-683 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.621, + 0.785, + 0.674 + ], + "angle": 0, + "content": "9. Dong, J., Fang, Q., Jiang, W., Yang, Y., Huang, Q., Bao, H., Zhou, X.: Fast and robust multi-person 3d pose estimation and tracking from multiple views. IEEE Transactions on Pattern Analysis and Machine Intelligence 44(10), 6981-6992 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.675, + 0.785, + 0.716 + ], + "angle": 0, + "content": "0. Dong, J., Jiang, W., Huang, Q., Bao, H., Zhou, X.: Fast and robust multi-person 3d pose estimation from multiple views. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 7792-7801 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.717, + 0.785, + 0.758 + ], + "angle": 0, + "content": "1. Dong, Z., Song, J., Chen, X., Guo, C., Hilliges, O.: Shape-aware multi-person pose estimation from multi-view images. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 11158-11168 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.759, + 0.785, + 0.799 + ], + "angle": 0, + "content": "2. Ershadi-Nasab, S., Noury, E., Kasaei, S., Sanaei, E.: Multiple human 3d pose estimation from multiview images. Multimedia Tools and Applications 77, 15573-15601 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.799, + 0.785, + 0.84 + ], + "angle": 0, + "content": "3. Fang, H.S., Xie, S., Tai, Y.W., Lu, C.: Rmpe: Regional multi-person pose estimation. 
In: Proceedings of the IEEE international conference on computer vision. pp. 2334-2343 (2017)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.274, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.347, + 0.127 + ], + "angle": 0, + "content": "Chen et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.148, + 0.785, + 0.175 + ], + "angle": 0, + "content": "14. Fischler, M.A., Elschlager, R.A.: The representation and matching of pictorial structures. IEEE Transactions on computers 100(1), 67-92 (1973)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.177, + 0.785, + 0.203 + ], + "angle": 0, + "content": "15. Hartley, R., Zisserman, A.: Multiple view geometry in computer vision. Cambridge university press (2003)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.204, + 0.785, + 0.245 + ], + "angle": 0, + "content": "16. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.246, + 0.785, + 0.273 + ], + "angle": 0, + "content": "17. Hu, J., Shen, L., Sun, G.: Squeeze-and-excitation networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 7132-7141 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.274, + 0.785, + 0.342 + ], + "angle": 0, + "content": "18. Huang, C., Jiang, S., Li, Y., Zhang, Z., Traish, J., Deng, C., Ferguson, S., Da Xu, R.Y.: End-to-end dynamic matching network for multi-view multi-person 3d pose estimation. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXVIII 16. pp. 477-493. Springer (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.343, + 0.785, + 0.383 + ], + "angle": 0, + "content": "19. Iskakov, K., Burkov, E., Lempitsky, V., Malkov, Y.: Learnable triangulation of human pose. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 7718-7727 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.384, + 0.785, + 0.424 + ], + "angle": 0, + "content": "20. Iskakov, K., Burkov, E., Lempitsky, V., Malkov, Y.: Learnable triangulation of human pose. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 7718-7727 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.425, + 0.785, + 0.48 + ], + "angle": 0, + "content": "21. Joo, H., Liu, H., Tan, L., Gui, L., Nabbe, B., Matthews, I., Kanade, T., Nobuhara, S., Sheikh, Y.: Panoptic studio: A massively multiview system for social motion capture. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 3334-3342 (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.481, + 0.785, + 0.522 + ], + "angle": 0, + "content": "22. Lai, J.Y., Shu, S.H., Huang, Y.C.: A cell subdivision strategy for r-nearest neighbors computation. Journal of the Chinese Institute of Engineers 29(6), 953-965 (2006)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.523, + 0.785, + 0.563 + ], + "angle": 0, + "content": "23. Li, X., Wang, W., Hu, X., Yang, J.: Selective kernel networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 
510-519 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.565, + 0.785, + 0.618 + ], + "angle": 0, + "content": "24. Li, Z., Oskarsson, M., Heyden, A.: 3d human pose and shape estimation through collaborative learning and multi-view model-fitting. In: Proceedings of the IEEE/CVF winter conference on applications of computer vision. pp. 1888-1897 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.62, + 0.785, + 0.66 + ], + "angle": 0, + "content": "25. Lin, J., Lee, G.H.: Multi-view multi-person 3d pose estimation with plane sweep stereo. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11886-11895 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.661, + 0.785, + 0.702 + ], + "angle": 0, + "content": "26. Loper, M., Mahmood, N., Romero, J., Pons-Moll, G., Black, M.J.: Spl: A skinned multi-person linear model. In: Seminal Graphics Papers: Pushing the Boundaries, Volume 2, pp. 851-866 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.703, + 0.785, + 0.743 + ], + "angle": 0, + "content": "27. Ma, X., Su, J., Wang, C., Ci, H., Wang, Y.: Context modeling in 3d human pose estimation: A unified perspective. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 6238-6247 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.744, + 0.785, + 0.785 + ], + "angle": 0, + "content": "28. Ma, X., Su, J., Wang, C., Ci, H., Wang, Y.: Context modeling in 3d human pose estimation: A unified perspective. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 6238-6247 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.786, + 0.785, + 0.84 + ], + "angle": 0, + "content": "29. Moon, G., Chang, J.Y., Lee, K.M.: V2v-posenet: Voxel-to-voxel prediction network for accurate 3d hand and human pose estimation from a single depth map. In: Proceedings of the IEEE conference on computer vision and pattern Recognition. pp. 5079-5088 (2018)" + }, + { + "type": "list", + "bbox": [ + 0.218, + 0.148, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.69, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "3DSA" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.204 + ], + "angle": 0, + "content": "30. Reddy, N.D., Guigues, L., Pishchulin, L., Eledath, J., Narasimhan, S.G.: Tessen-track: End-to-end learnable multi-person articulated 3d pose tracking. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15190-15200 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.205, + 0.787, + 0.231 + ], + "angle": 0, + "content": "31. Shin, S., Halilaj, E.: Multi-view human pose and shape estimation using learnable volumetric aggregation. arXiv preprint arXiv:2011.13427 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.232, + 0.786, + 0.272 + ], + "angle": 0, + "content": "32. Su, J., Wang, C., Ma, X., Zeng, W., Wang, Y.: Virtualpose: Learning generalizable 3d human pose models from virtual data. In: European Conference on Computer Vision. pp. 55-71. Springer (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.273, + 0.787, + 0.314 + ], + "angle": 0, + "content": "33. 
Sun, Y., Bao, Q., Liu, W., Fu, Y., Black, M.J., Mei, T.: Monocular, one-stage, regression of multiple 3d people. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11179-11188 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.315, + 0.787, + 0.369 + ], + "angle": 0, + "content": "34. Sun, Y., Liu, W., Bao, Q., Fu, Y., Mei, T., Black, M.J.: Putting people in their place: Monocular regression of 3d people in depth. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13243-13252 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.37, + 0.787, + 0.424 + ], + "angle": 0, + "content": "35. Tu, H., Wang, C., Zeng, W.: Voxelpos: Towards multi-camera 3d human pose estimation in wild environment. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part I 16. pp. 197-212. Springer (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.425, + 0.787, + 0.467 + ], + "angle": 0, + "content": "36. Woo, S., Park, J., Lee, J.Y., Kweon, I.S.: Cbam: Convolutional block attention module. In: Proceedings of the European conference on computer vision (ECCV). pp. 3-19 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.468, + 0.787, + 0.508 + ], + "angle": 0, + "content": "37. Wu, S., Jin, S., Liu, W., Bai, L., Qian, C., Liu, D., Ouyang, W.: Graph-based 3d multi-person pose estimation using multi-view images. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11148-11157 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.509, + 0.787, + 0.55 + ], + "angle": 0, + "content": "38. Ye, H., Zhu, W., Wang, C., Wu, R., Wang, Y.: Faster voxelpose: Real-time 3d human pose estimation by orthographic projection. In: European Conference on Computer Vision. pp. 142-159. Springer (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.55, + 0.787, + 0.591 + ], + "angle": 0, + "content": "39. Yu, Z., Zhang, L., Xu, Y., Tang, C., Tran, L., Keskin, C., Park, H.S.: Multiview human body reconstruction from uncalibrated cameras. Advances in Neural Information Processing Systems 35, 7879-7891 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.592, + 0.787, + 0.632 + ], + "angle": 0, + "content": "40. Zhang, J., Cai, Y., Yan, S., Feng, J., et al.: Direct multi-view multi-person 3d pose estimation. Advances in Neural Information Processing Systems 34, 13153-13164 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.633, + 0.787, + 0.674 + ], + "angle": 0, + "content": "41. Zhang, Y., Wang, C., Wang, X., Liu, W., Zeng, W.: Voxeltrack: Multi-person 3d human pose estimation and tracking in the wild. IEEE Transactions on Pattern Analysis and Machine Intelligence 45(2), 2613-2626 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.675, + 0.787, + 0.73 + ], + "angle": 0, + "content": "42. Zhang, Y., An, L., Yu, T., Li, X., Li, K., Liu, Y.: 4d association graph for realtime multi-person motion capture using multiple video cameras. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 
1324-1333 (2020)" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.73 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/61874178-0339-4f9c-84c1-e29c381e8d91_origin.pdf b/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/61874178-0339-4f9c-84c1-e29c381e8d91_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..bfeaffdce74a0cf398cc6a65eb23169d0700adeb --- /dev/null +++ b/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/61874178-0339-4f9c-84c1-e29c381e8d91_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16a018f5168a42f8227c683c3069d91530e8dddbbc57d7258ffd2635ad41a0ec +size 2707669 diff --git a/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/full.md b/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/full.md new file mode 100644 index 0000000000000000000000000000000000000000..e94db4504084e4e84368eb1ec48c82385716860a --- /dev/null +++ b/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/full.md @@ -0,0 +1,272 @@ +# 3DSA :Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms + +Pohan Chen and Chiachi Tsi + +National Cheng Kung University n28111089,cctsai@gs.ncku.edu.tw + +Abstract. In this study, we introduce the 3D space attention module (3DSA) as a novel approach to address the drawback of multi-view 3D human pose estimation methods, which fail to recognize the object's significance from diverse viewpoints. Specifically, we utilize the 3D space subdivision algorithm to divide the feature volume into multiple regions. Predicted 3D space attention scores are assigned to the different regions to construct the feature volume with space attention. The purpose of the 3D space attention module is to distinguish the significance of individual regions within the feature volume by applying weighted attention adjustments derived from corresponding viewpoints. We conduct experiments on existing voxel-based methods, VoxelPose and Faster VoxelPose. By incorporating the space attention module, both achieve state-of-the-art performance on the CMU Panoptic Studio dataset. + +Keywords: : 3D Human Pose Estimation $\cdot$ 3D space attention + +# 1 Introduction + +Estimating multiple 3D human poses simultaneously from multiple camera views is an enduring challenge in computer vision. The aim is to determine the 3D locations of the body joints for all people present in a scene. It is a task that offers benefits to numerous real-world applications, including intelligent sports analysis [5] and retail monitoring [35]. + +In 2D-3D lifting approaches [9,10,42], a monocular pose estimator identifies 2D bounding boxes and 2D poses for individuals in each view. A multi-view matching algorithm then establishes consistent correspondences between the 2D poses across different views. Finally, the matched 2D poses are lifted to 3D using geometry models such as triangulation [15] or Pictorial Structure Models [2,3,14]. + +As shown in Fig. 1(a), the voxel-based method [35] constructs the discretized feature volume from the detected heatmaps through 2D-3D projection. Based on the identified per-person proposal, the 3D pose for each person is individually estimated by feeding the feature volume into 3D-CNNs. 
To reduce the computational cost, another voxel-based approach [38] re-projects the feature volume to three two-dimensional coordinate planes and replaces the 3D-CNNs + +![](images/0099411d7b1de9091d8834d13380852dc604cb21e991da6a25bf5023f0621061.jpg) +Fig. 1: Comparison between our method and the existing voxel-based method. The primary distinction is that: (a) The existing method simply projects heatmaps into 3D space. (b) We enhance feature volumes using space attention, maintaining 3D information, and emphasizing critical regions within the feature volume. + +with 2D-CNNs, which increases inference speed. The existing direct prediction method [40] uses the Transformer architecture to regress multi-person 3D poses directly, bypassing the need for intermediate tasks. However, owing to the constraints of the transformer architecture, the inference speed of the method still falls behind the Voxel-based 2D-CNNs method [38]. + +Existing multi-view approaches often fail to consider an important visual phenomenon: the visible parts of the same object should differ when observed from various angles. As depicted in Fig. 2, the four people in the scene are visible in Views 1 to 4. However, in View 5, only three are visible in the image due to obstruction by one of the people. To address this issue, we propose the 3D space attention module (referred to as 3DSA) and apply it to two open-source voxel-based methods [35,38]. Fig. 1(b) shows the overview of our proposed method. We added the space attention layers to the end of backbone network to predict the space attention scores. Directly estimating the importance of each voxel within the feature volume could lead to excessive computational demands. Therefore, we employed the 3D space subdivision algorithm to divide the feature volume into multiple regions. The voxels within each region were treated as a group, and the space attention scores were assigned to the group, representing the importance of the region. Finally, the feature volume with space attention was constructed, retaining the 3D information and paying more attention to crucial regions. + +We have conducted extensive experiments on the 3D human pose benchmark, Panoptic [21], to evaluate the efficacy of our space attention module. By applying the space attention module into the VoxelPose [35] and Faster VoxelPose [38] methods, our models show significant improvements of $20.93\%$ and $20.32\%$ in MPJPE respectively, both models achieve the state-of-the-art results. The voxel-based methods undermine their performance on the $AP_{25}$ metric when compared to other multi-view methods. Our space attention module addresses + +![](images/4ae930174a96d7868b535b8f28cbcc2f9f53b0ca98ad2aaa3004032716fef757.jpg) +View1 + +![](images/da8188c0bf4ff0b7464527a8d3d41219bfbb47c7deec6a6d55179331f0895941.jpg) +View2 +Fig. 2: The visual phenomenon in the 3D space. Due to differences in camera viewing angles and obstruction issues, the visibility of the same person changes across different viewpoints. As shown by the red arrow in the figure, we can clearly observe the person in the images from Views 1 to 4, but they are not visible in View 5. This visual phenomenon is commonly encountered in multi-view human pose estimation tasks. 
+ +![](images/a73242bfe87ad7e534792de37e4b1b0dd95a2f74e2406c7bf15fc17ca483faff.jpg) +View3 + +![](images/c228b5cd8a029bbfde48814c37f7b41dd74204e74ac268f50ba1884d2175f41c.jpg) +View4 + +![](images/1101992fbba6d1a3a4f9082ca880d0fa9fe61b2e1c4cb3983e1ea5b54f09255c.jpg) +View5 + +this weakness, resulting in our model achieving $94.2\%$ and $94.22\%$ on the metric. Compared to the baseline model [35, 38], these scores demonstrate a significant improvement, surpassing all existing multi-view approaches. + +Our contributions are summarized as follows: (1) We proposed the 3D space attention module (3DSA), which addresses the drawbacks of the existing multiview 3D human pose estimation methods and validates its effectiveness on existing voxel-based methods [35, 38]. (2) We introduced a 3D space subdivision algorithm to reduce the computational complexity of the module. (3) By incorporating our space attention module into existing voxel-based methods [35, 38], both models achieve state-of-the-art results on the Panoptic benchmark, demonstrating the effectiveness of this attention mechanism. + +# 2 Related work + +# 2.1 Multi-view 3D human pose estimation + +Unlike monocular 3D human pose estimation [8, 13, 33, 34], multi-view human pose estimation leverages image information from different viewpoints. This approach not only effectively overcomes challenges such as occlusion and depth ambiguity but also ensures a richer and more accurate depiction of the 3D pose. Existing methods can be categorized into three types: (1)2D to 3D lifting methods [1-3,5,9,10,18,25,42] (2)Voxel-based methods [6,7,19,20,27,30,32,35,38,41] (3)Direct regression method [40]. + +2D to 3D lifting method Firstly, a monocular pose estimator is utilized to estimate the 2D joints of each person in each view, through triangulation [15] and a 3D pictorial model [14], the 3D pose of each person is reconstructed from the associated 2D poses. Dong et al. [9,10] propose MvPose. MvPose utilizes a human pose detector to generate and cluster 2D bounding boxes and associated poses for each view. Each cluster represents the same person from different views. The 3D pose of each person is then reconstructed from these clusters using triangulation and a 3D pictorial model. The drawback of this 2D to 3D pose lifting method is its significant dependence on the preceding steps of 2D pose estimation and cross-view matching, as their quality directly influences the results. + +Voxel-based method In contrast to the 2D to 3D lifting methods, which require establishing cross-view correspondence based on noisy and incomplete 2D + +pose estimates, the voxel-based method directly operates in the 3D space and therefore avoids making incorrect decisions in each camera view. Tu et al. [35] propose VoxelPose, the method that discretizes the 3D space into voxels and uses 2D heatmaps to construct a 3D feature volume. 3D-CNNs process this volume to locate human proposals and regress the 3D joint. Since the voxel-based method heavily relies on 3D convolutions, it requires higher computational cost and inference time to predict 3D joints. To enhance the model efficiency, Ye et al. [38] proposed Faster VoxelPose, an optimization method based on orthographic projection. This method projects the 3D feature volume to three mutually perpendicular planes and then utilizes 2D-CNNs to locate the center proposal and regress the 3D joint. By doing this, it eliminates the need for time-consuming 3D convolutions. Choudhury et al. 
[7] proposed TEMPO, which utilizes temporal context to enhance pose estimation, delivering smoother and more accurate human poses by integrating spatiotemporal information. + +It has been observed that the voxel-based methods generally yield lower scores on the $AP_{25}$ metric in Panoptic datasets when compared to other methods. In this paper, we introduce a novel 3D space attention module, which applies weighted attention adjustments to the feature volume from corresponding viewpoints. This attention mechanism guides the network to focus more effectively on crucial feature regions and yields significant improvements in the $AP_{25}$ metric. + +Direct regression method In contrast to previous methods, Zhang et al. [40] proposed MvP, which leverages the Transformer architecture to regress multi-person 3D poses directly, thus eliminating the need for intermediate tasks. MvP achieved impressive results on the Panoptic [21] datasets. It showed significant progress (8%) on the most stringent $AP_{25}$ compared to the Voxel-based methods [35, 38] and is more robust and accurate than previous models. However, due to the limitations of the transformer architecture, the inference speed of MvP still can't compete with 2D CNN-based voxel method [38], which is not conducive to its deployment in practical applications. + +# 2.2 Multi-view 3D body mesh estimation + +Multi-view 3D body mesh estimation [11,20,24,31,39] is a task closely related to 3D pose estimation. Instead of directly estimating joint positions, this task involves predicting the parameters of SMPL [26] or employing a fitting method [4] to align the SMPL model with detected joint positions. Yu et al. [39] use neural networks to directly predict local attention, assigning importance to visual features across views. Our method focuses on using space subdivision and space attention to address the varying importance of different viewpoints in the same 3D space. Directly predicting the space attention and projecting to the 3D space will result in equal attention values along the projection ray, which prevents the model from accurately identifying depth information. + +# 2.3 Attention mechanisms + +The methodology of predicting attention scores from input features and then using these scores to enhance discriminative feature learning has been adopted by numerous studies [17, 23, 36]. The most famous is SENet proposed by Hu et al. [17], which employs attention mechanisms to adaptively recalibrate channelwise features by modeling inter-channel dependencies. Ma et al. proposed global attention in ContextPose [28], which focus on features within each voxel by estimating confidence scores for each joint, effectively reducing interference from non-human body voxels and improving joint estimation accuracy. Regarding merging 3D features extracted from different 2D viewpoints, the inherent physical characteristics of imaging result in varying importance of different viewpoints for the same 3D space. Therefore, we introduced the space attention module to solve this problem in a voxel-represented 3D space. + +# 3 Method + +# 3.1 The drawback of existing multi-view 3D human pose method + +Despite the impressive achievements of the existing multi-view 3D human pose methods, they ignore an important visual phenomenon: the visible parts of an object could vary when observed from different viewpoints. 
Specifically, an object's visibility can differ dramatically across various viewpoints, for instance, an object may be distinctly visible from viewpoint A, yet as we transition to viewpoint B, its visibility may diminish or even vanish due to interposing obstacles or occluded persons. + +In this work, we introduce the space attention module to address the drawback, and we validate its effectiveness on existing voxel-based methods [35,38]. The existing methods merely project heatmaps into 3D space. As depicted in Fig. 1(b), our approach leverages the space attention module to enhance feature volumes. This not only preserves 3D information but also emphasizes crucial regions inside the feature volume. The objective of this attention mechanism is to focus on significant regions within the feature volume, by applying weighted attention adjustments to the feature volume from corresponding viewpoints. + +# 3.2 Network architecture + +Heatmap and space attention prediction. As shown in Fig. 3 (a), our model adopts a simple multi-layer design with a backbone and two additional layers. In the heatmap layer, the probability of a 2D pose heatmaps for the corresponding view is predicted. Meanwhile, in the space attention layer, the attention scores of the feature volume are determined. The attention scores are dynamically adjusted based on the input image, emphasizing regions with higher visibility in the 3D space. + +Space attention with person proposal generation. As shown in Fig. 3 (b), by projecting the output heatmaps to the 3D space, the discretized feature volume $\{\mathbf{G} \in \mathbb{R}^{80 \times 80 \times 20}\}$ is constructed. Following [35], the 3D space is discretized + +![](images/0ad11621644ac5f435ec30ae3f68f88ba4d0d510f328687dbf244a9de56dd28b.jpg) +Fig. 3: Overview of network architecture. (a) Given the multi-view image as input, the backbone network predicted both the heatmaps and the space attention scores for each view. Each heatmap is projected to a 3D space, which is physically shared but independent for each view, constructing the feature volume. The space attention scores for each view are assigned to the different regions in the shared 3D space. (b) By performing an element-wise multiplication of the raw feature volume with the space attention scores, we produce a feature volume infused with spatial attention. Subsequently, this attention-enhanced feature volume is fed into 3D-CNNs to locate the per-person proposal. (c) A more detailed feature volume corresponding to the proposal was generated. By calculating the spatial relationship between the proposal and the feature volume, space attention scores for the proposal were sampled from the attention in 3D space. Finally, the human pose was estimated. + +into $X \times Y \times Z$ locations. Based on observations from the space $[2, 21], X, Y$ and $Z$ are set to be 80, 80, and 20 respectively to maintain a good balance between speed and precision. Let the 2D heatmap of a view be denoted as $M_v \in \mathbb{R}^{K \times w \times h}$ , where $K$ is the number of person's joints. For each voxel location $G^{X,Y,Z}$ , the projected location in 2D view $\mathbf{V}$ is represented as $P_v^{X,Y,Z}$ . The heatmap values at $P_v^{X,Y,Z}$ is denoted as $M_v^{X,Y,Z} \in \mathbb{R}^K$ . $\mathbf{v} \in \mathbb{R}^V$ represents one view from total $\mathbf{V}$ views. 
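To make the projection step above concrete, here is a minimal NumPy sketch of building one view's feature volume by sampling its 2D heatmap at the projected voxel centers. The function name, the pinhole camera model with intrinsics `K_mat` and extrinsics `(R, t)`, the example space extent and center, and the nearest-pixel sampling are illustrative assumptions for this sketch only; the released voxel-based implementations [35, 38] handle these details differently (e.g., bilinear sampling on GPU tensors).

```python
import numpy as np

def build_feature_volume(heatmap, K_mat, R, t, space_size, space_center,
                         grid=(80, 80, 20)):
    """Sample a (K, h, w) heatmap at the 2D projection of every voxel center."""
    n_joints, h, w = heatmap.shape
    # Voxel-center world coordinates (in mm) of the discretized 3D space.
    axes = [np.linspace(c - s / 2.0, c + s / 2.0, n)
            for c, s, n in zip(space_center, space_size, grid)]
    xs, ys, zs = np.meshgrid(*axes, indexing="ij")
    pts = np.stack([xs, ys, zs], axis=-1).reshape(-1, 3)            # (N, 3)

    # Pinhole projection of the voxel centers into this view's image plane.
    cam = R @ pts.T + t[:, None]                                    # (3, N)
    proj = K_mat @ cam
    uv = proj[:2] / np.clip(proj[2], 1e-6, None)                    # (2, N)

    # Nearest-pixel sampling of the heatmap (bilinear sampling in practice).
    u = np.clip(np.round(uv[0]).astype(int), 0, w - 1)
    v = np.clip(np.round(uv[1]).astype(int), 0, h - 1)
    vol = heatmap[:, v, u]                                          # (K, N)
    return vol.reshape(n_joints, *grid)                             # (K, 80, 80, 20)

if __name__ == "__main__":
    # Toy call with K = 15 joints and made-up camera parameters.
    hm = np.random.rand(15, 512, 960)
    K_mat = np.array([[1000.0, 0.0, 480.0], [0.0, 1000.0, 256.0], [0.0, 0.0, 1.0]])
    R, t = np.eye(3), np.array([0.0, 0.0, 4000.0])
    vol = build_feature_volume(hm, K_mat, R, t,
                               space_size=(8000.0, 8000.0, 2000.0),
                               space_center=(0.0, 0.0, 1000.0))
    print(vol.shape)  # (15, 80, 80, 20)
```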
Directly predicting the importance of each voxel in the feature volume would result in an overwhelming computational burden, given that the output dimension of the model would then equal the number of voxels in the feature volume, i.e., 128,000. To reduce the computational complexity, we use a 3D space subdivision algorithm to divide the feature volume $\{G_v \in \mathbb{R}^{80 \times 80 \times 20}\}$ of each view $v$ into several regions $\{DivG_v \in \mathbb{R}^{80 \times 80 \times 20}\}$. Subsequently, the space attention scores predicted by the model are assigned to each region of the divided feature volume to form the attention of the feature volume $\{V_v^{X,Y,Z} \in \mathbb{R}^{80 \times 80 \times 20}\}$, which represents the attention scores for view $v$.

After that, an element-wise multiplication is performed between the space attention $\{V_{v}\}$ and the raw feature volume $\{M_{v}\}$ to obtain a feature volume with space attention for view $v$, denoted as $MV_{v}$. Following this, the feature volumes (with space attention) constructed from the multi-view images are fused by averaging to obtain the aggregated feature volume $\{F \in \mathbb{R}^{80 \times 80 \times 20}\}$:

$$
F = \frac{1}{V} \sum_{v = 1}^{V} MV_{v} \tag{1}
$$

where $V$ represents the number of cameras and $F$ represents the likelihood of the $K$ joints in $G$. By applying space attention to the feature volume, 3D information is retained while important voxels are emphasized. Finally, the aggregated feature volume $F$ is input into the 3D convolutional network to determine the per-person likelihood in the 3D discretized feature volume.

Space attention with per-person pose regression. In the final step, the complete 3D human pose corresponding to each proposal is predicted, as illustrated in Fig. 3 (c). For a fair evaluation of the effect of the space attention module, [35] is adopted to build an individual fine-grained feature volume centered at each predicted proposal. The size of the fine-grained feature volume is set to $2000\mathrm{mm} \times 2000\mathrm{mm} \times 2000\mathrm{mm}$, and it is divided into a discrete grid of $X' \times Y' \times Z'$ voxels, where $X'$, $Y'$, and $Z'$ are all set to 64. Under a particular viewpoint, each fine-grained feature volume has only one space attention score to indicate its importance. In this work, we sample the attention score for each proposal by analyzing the spatial relationship between the proposal and the feature volume, employing nearest-neighbor sampling to obtain the score. The aggregated fine-grained feature volume is then computed as described in the previous stage. Finally, the 3D heatmap is estimated and the complete 3D human poses of the persons in the space are regressed.
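As a concrete illustration of the fusion in Eq. (1), here is a minimal sketch (not the released code) of weighting each raw volume by its space attention and averaging over views; the dense per-view attention volume $V_v$ is assumed to have already been assembled from the predicted region scores.

```python
import numpy as np

def fuse_with_space_attention(raw_volumes, attention_volumes):
    """raw_volumes: list of V arrays (K, X, Y, Z); attention_volumes: list of V arrays (1, X, Y, Z).

    Each raw volume M_v is modulated element-wise by its space attention V_v, and the
    attention-weighted volumes MV_v are averaged over the V views, as in Eq. (1).
    """
    weighted = [m * a for m, a in zip(raw_volumes, attention_volumes)]
    return np.mean(weighted, axis=0)

# toy usage: 5 views, 15 joints, an 80 x 80 x 20 grid (sizes follow the paper, data is random)
raw = [np.random.rand(15, 80, 80, 20) for _ in range(5)]
att = [np.random.rand(1, 80, 80, 20) for _ in range(5)]   # broadcast over the joint axis
F = fuse_with_space_attention(raw, att)                    # aggregated volume, shape (15, 80, 80, 20)
```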
# 3.3 3D space subdivision algorithm

As mentioned in Sec. 3.2, the 3D space subdivision algorithm is crucial to the implementation of our space attention module. Computational challenges arise when directly predicting the significance of each voxel in the feature volume. Inspired by Lai et al. [22], who utilize a cell subdivision search strategy to reduce the computational complexity of searching through a large number of data points, we employ a 3D space subdivision algorithm to divide the feature volume into distinct regions. Specifically, the voxels within each region are treated as a group, and attention scores are assigned to these groups to signify the importance of each region. Through the backbone network, the weight of each region is predicted, representing the importance of the corresponding area of the feature volume under the same viewpoint. If the voxels within a specific region exhibit higher confidence levels, this indicates their relative importance. Conversely, lower confidence levels in voxels, caused by obstructions, occlusion, or other factors, suggest that they are less significant within that region.

![](images/65486a35377e7c2042bd4f723ce23d11904ae7df6aecfdfc5ba7f4d58a6c9b93.jpg)
Fig. 4: Subdivision of the voxels within the feature volume. We utilize a 3D space subdivision algorithm to partition the feature volume into separate regions, with the voxels in each region being treated as a group.

As Fig. 4 depicts, the feature volume in 3D space $\pmb{G}$ is divided into several cells along the $x$, $y$, and $z$ axes. Let $l_{i}$, $i \in \{x,y,z\}$ denote the length, width, and height of the feature volume, while $\delta_{i}$, $i \in \{x,y,z\}$ denotes the cell length along a particular axis. The relationship between $l_{i}$ and $\delta_{i}$ can be expressed as follows:

$$
\delta_{i} = \operatorname{int}\left(\frac{l_{i}}{n_{i}}\right) + 1 \quad i \in \{x, y, z\} \tag{2}
$$

where $n_i$ represents the number of regions along the $i$-axis. The total number of regions $n_{total}$ in the 3D space is given by the product

$$
n_{total} = n_{x} \times n_{y} \times n_{z} \tag{3}
$$

Let the position vector of a voxel be $\mathbf{V} = [v_x, v_y, v_z]^\top$. Then, the region that $\mathbf{V}$ resides in can be computed using the following equation:

$$
i_{j} = \operatorname{floor}\left(\frac{v_{j} - j_{\min}}{\delta_{j}}\right) + 1 \quad j \in \{x, y, z\} \tag{4}
$$

where $i_j$ represents the index of the voxel along the $j$-axis, $\operatorname{floor}(\cdot)$ rounds down to the nearest integer, and $j_{\min}$ represents the minimum coordinate of the voxels within the feature volume along the $j$ direction. Finally, the region id of the voxel ($Voxel_{id}$) within the feature volume can be calculated by the following formula:

$$
Voxel_{id} = i_{z} \times \left(n_{x} \times n_{y}\right) + i_{y} \times n_{x} + i_{x} \tag{5}
$$

The id of each voxel can be calculated according to these formulas; however, in practical applications, the total number of voxels in the feature volume is substantial, which could lead to excessive computation times. To tackle this challenge, we optimize the weight assignment process within the space attention module, adopting the following Python code (Algorithm 1). Compared with Eqs. (2) to (5), this approach is better adapted to practical applications, achieving the same result more efficiently.

![](images/8492bfdd2fa95289084140bf90f1958609de002fbe8bd8a5253a09b1867a01a3.jpg)
Fig. 5: Detailed architecture of the space attention module.

Algorithm 1 Weight assignment algorithm
```python
import numpy as np

# Suppose we have 3 intervals along the x, y, and z axes (a 3 x 3 x 3 subdivision).
x, y, z = [0, 27, 54, 80], [0, 27, 54, 80], [0, 7, 14, 20]

# attention_value holds the 27 predicted region scores for one view;
# space_attention is the dense 80 x 80 x 20 attention volume for that view.
attention_value = np.random.rand(27)      # placeholder for the network output
space_attention = np.empty((80, 80, 20))

# Assign each region's space attention value to the corresponding block of the tensor.
subdivision_num = 0
for i in range(3):
    for j in range(3):
        for k in range(3):
            space_attention[x[i]:x[i + 1], y[j]:y[j + 1], z[k]:z[k + 1]] = attention_value[subdivision_num]
            subdivision_num += 1
```
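To make Eqs. (2)-(5) concrete, the following is a small, purely illustrative computation for the 3 × 3 × 3 subdivision used above; the sample voxel position is an assumption, and the 1-based region indices follow the paper's convention.

```python
import math

l = {"x": 80, "y": 80, "z": 20}    # feature volume size (80 x 80 x 20 voxels)
n = {"x": 3, "y": 3, "z": 3}       # number of regions per axis

delta = {a: int(l[a] / n[a]) + 1 for a in "xyz"}     # Eq. (2): {'x': 27, 'y': 27, 'z': 7}
n_total = n["x"] * n["y"] * n["z"]                   # Eq. (3): 27 regions

voxel = {"x": 30, "y": 60, "z": 10}                  # an example voxel position (j_min = 0 here)
idx = {a: math.floor(voxel[a] / delta[a]) + 1 for a in "xyz"}            # Eq. (4): {'x': 2, 'y': 3, 'z': 2}
voxel_id = idx["z"] * (n["x"] * n["y"]) + idx["y"] * n["x"] + idx["x"]   # Eq. (5): 29
```

The cell lengths from Eq. (2) match the interval boundaries hard-coded in Algorithm 1 (0, 27, 54 along x and y, and 0, 7, 14 along z, with the last cell clamped to the volume size).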
# 3.4 Implementation of space attention module

In our implementation, only the following adjustments are made: (1) A simple branch is derived from the backbone network [16] to predict the space attention scores. (2) An element-wise multiplication of the raw feature volume with the space attention scores assigned by Algorithm 1 is performed. (3) The attention scores of each proposal are computed by analyzing the positional relationship between the proposal and the feature volume.

The space attention module can easily be applied to existing multi-person voxel-based human pose methods [7, 35, 38, 41]. However, some of these methods are not open-sourced, which prevents us from validating on them. Consequently, we chose to validate our method using the two open-sourced voxel-based methods [35, 38].

It is important to emphasize that, for a fair evaluation of the impact of the space attention module on the existing voxel-based methods [35, 38], the network architecture [29] used for locating the person proposals and regressing the 3D poses remains unaltered. For the loss function and hyperparameter configuration, the original design proposed in [35, 38] is retained.

The architecture of the space attention layer is presented in Fig. 5. It is a straightforward and lightweight design, which uses a simple convolutional block followed by global average pooling and a sigmoid activation function to estimate the space attention scores of the corresponding image. The purpose of the global average pooling is to replace the traditional fully connected layers, thereby reducing the number of parameters. The output dimension of the space attention layer is equal to the number of regions in the feature volume. The space attention scores $S \in \mathbb{R}^n$ represent the $n$ space attention values, indicating that the feature volume is divided into $n$ regions.
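For concreteness, the following is a minimal PyTorch sketch of one plausible reading of this layer (a convolutional block, global average pooling in place of fully connected layers, and a sigmoid); the channel width, kernel size, and input feature shape are assumptions rather than the authors' exact configuration.

```python
import torch
import torch.nn as nn

class SpaceAttentionLayer(nn.Module):
    """Predicts n space attention scores (one per region) from backbone features."""

    def __init__(self, in_channels: int, num_regions: int):
        super().__init__()
        self.conv = nn.Sequential(                 # simple convolutional block
            nn.Conv2d(in_channels, num_regions, kernel_size=3, padding=1),
            nn.BatchNorm2d(num_regions),
            nn.ReLU(inplace=True),
        )
        self.pool = nn.AdaptiveAvgPool2d(1)        # global average pooling instead of FC layers

    def forward(self, feats: torch.Tensor) -> torch.Tensor:
        s = self.pool(self.conv(feats)).flatten(1)  # (B, n)
        return torch.sigmoid(s)                     # attention scores in (0, 1)

# toy usage: a 10 x 10 x 3 subdivision gives n = 300 scores per view
layer = SpaceAttentionLayer(in_channels=2048, num_regions=300)
scores = layer(torch.randn(1, 2048, 8, 8))          # shape (1, 300)
```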
# 4 Experiments

# 4.1 Implementation detail

Training and evaluation datasets. CMU Panoptic [21] is a 3D dataset with multi-view images. To evaluate and analyze our approach, we conducted extensive experiments on the Panoptic dataset. Following VoxelPose [35], the same data sequences are used for training and evaluating our model. Our experiments are conducted using five HD cameras with camera IDs 3, 6, 12, 13, and 23. Shelf and Campus [2] are two datasets commonly used in multi-view, multi-person research. We evaluate our method using the same data setup as in [35].

Evaluation metric. For the Panoptic dataset [21], we adopt the Average Precision ($AP_K$) and the Mean Per Joint Position Error (MPJPE) as metrics that demonstrate the robustness and accuracy of multi-person 3D pose estimation. To assess the influence of the space attention module on model size and computational complexity, we also report MACs and model parameters. For both the Campus and Shelf datasets, we present the results in terms of the Percentage of Correct Parts (PCP).

Training details. For the Panoptic dataset, we use an off-the-shelf pose estimation model built on ResNet-50 [16] to extract features from the multi-view images. The difference from VoxelPose [35] is that, since our backbone network needs to predict the space attention scores, the parameters of the backbone are updated throughout training.

Due to the incomplete data annotation in the Campus and Shelf datasets [2], Tu et al. [35] use synthetic 3D poses to train the network. To implement the space attention module on these datasets, we use the synthetic heatmaps as the input features to predict the space attention scores. In summary, the space attention module has two modes: the first predicts the space attention scores from the ground-truth multi-view images, referred to as Image-based input; the second predicts the space attention scores from the synthetic heatmaps, referred to as Heatmap-based input.

# 4.2 Comparisons to Existing Methods

Panoptic. We first evaluate our model on the Panoptic dataset [21] and compare it with the state-of-the-art models. As illustrated in Tab. 1, by incorporating the space attention module ($10 \times 10 \times 3$ configuration) into the two voxel-based methods, VoxelPose [35] and Faster VoxelPose [38], our models achieve $94.2\%$ and $94.22\%$ on the strictest evaluation metric, $AP_{25}$, outperforming the transformer model MvP [40]. Our proposed method shows slightly inferior performance in terms of $AP_{50}$, $AP_{100}$, and $AP_{150}$ when compared to VoxelPose, and this roughly $0.5\%$ gap is generally attributed to model variation. Notably, in terms of the $AP_{25}$ metric, our method improves significantly, outperforming VoxelPose by $12.69\%$ and Faster VoxelPose by $10.56\%$. Remarkably, both models achieve much lower MPJPE values of 13.98mm and 14.55mm, outperforming TEMPO [7] and setting new state-of-the-art records. This demonstrates the effectiveness of our space attention module.

Table 1: Comparison with existing methods on the Panoptic dataset.
| Method | $AP_{25}$ | $AP_{50}$ | $AP_{100}$ | $AP_{150}$ | MPJPE |
| --- | --- | --- | --- | --- | --- |
| VoxelPose [35] | 83.59 | 98.33 | 99.76 | 99.91 | 17.68mm |
| Faster VoxelPose [38] | 85.22 | 98.08 | 99.32 | 99.48 | 18.26mm |
| PlaneSweep Pose [25] | 92.12 | 98.96 | 99.81 | 99.84 | 16.75mm |
| RPGN [37] | - | - | - | - | 15.84mm |
| MvP [40] | 92.28 | 96.6 | 97.45 | 97.69 | 15.76mm |
| TEMPO [7] | 89.01 | 99.08 | 99.76 | 99.93 | 14.68mm |
| VoxelPose + 3DSA | 94.2 | 98.49 | 99.21 | 99.31 | 13.98mm |
| Faster VoxelPose + 3DSA | 94.22 | 98.65 | 99.49 | 99.75 | 14.55mm |
Campus and Shelf. The quantitative evaluation results on the Shelf and Campus datasets [2] are presented in Tab. 2. Our proposed method (VoxelPose [35] with space attention, $10 \times 10 \times 3$ configuration) remains competitive on both datasets. The gain from space attention is not as outstanding as on the Panoptic dataset [21], and we believe this is related to the Heatmap-based input: since the heatmaps lack image information, it is hard for the model to determine the importance of different regions of the 3D space from the heatmaps alone. We detail our study of this issue in the subsequent ablation study.

Table 2: Quantitative results on the Shelf and Campus datasets (PCP).
| Method | Shelf Actor1 | Shelf Actor2 | Shelf Actor3 | Shelf Average | Campus Actor1 | Campus Actor2 | Campus Actor3 | Campus Average |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Ershadi et al. [12] | 93.3 | 75.9 | 94.8 | 88 | 94.2 | 92.9 | 84.6 | 90.6 |
| Dong et al. [10] | 98.8 | 94.1 | 97.8 | 96.9 | 97.6 | 93.3 | 98 | 96.3 |
| MvP [40] | 99.3 | 94.1 | 97.8 | 97.4 | 98.2 | 94.1 | 97.4 | 96.6 |
| TEMPO [7] | 99.3 | 95.1 | 97.8 | 97.4 | 97.7 | 95.5 | 97.9 | 97.3 |
| Faster VoxelPose [38] | 99.4 | 96 | 97.5 | 97.6 | 96.5 | 94.1 | 97.9 | 96.2 |
| VoxelPose [35] | 99.3 | 94.1 | 97.6 | 97 | 97.6 | 93.8 | 98.8 | 96.7 |
| Ours | 99.4 | 95.4 | 97.6 | 97.5 | 98 | 93.4 | 98.6 | 96.7 |
Table 3: Space subdivision and efficiency analysis on the Panoptic dataset.
VoxelPose incorporated with the space attention module:

| Space subdivision | $AP_{25}$ | $AP_{100}$ | MPJPE | MACs (G) | Parameters (M) |
| --- | --- | --- | --- | --- | --- |
| Tu et al. [35] | 83.59 | 99.76 | 17.68 | 178.88 | 40.62 |
| 3 × 3 × 3 | 92.73 | 99.58 | 14.78 | 179.09 | 40.64 |
| 7 × 7 × 3 | 93.71 | 99.33 | 14.41 | 180.04 | 40.77 |
| 10 × 10 × 3 | 94.2 | 99.21 | 13.98 | 181.24 | 40.92 |
| 15 × 15 × 6 | 94.33 | 99.1 | 13.97 | 193.24 | 42.47 |
| 20 × 20 × 9 | 94.44 | 99.44 | 13.94 | 221.58 | 46.15 |

Faster VoxelPose incorporated with the space attention module:

| Space subdivision | $AP_{25}$ | $AP_{100}$ | MPJPE | MACs (G) | Parameters (M) |
| --- | --- | --- | --- | --- | --- |
| Ye et al. [38] | 85.22 | 99.32 | 18.26 | 106.87 | 36.37 |
| 3 × 3 × 3 | 92.57 | 99.61 | 15.54 | 107.08 | 36.39 |
| 7 × 7 × 3 | 93.75 | 99.54 | 14.88 | 108.03 | 36.52 |
| 10 × 10 × 3 | 94.22 | 99.49 | 14.55 | 109.23 | 36.67 |
# 4.3 Ablation studies

In this section, we conduct ablative experiments to analyze a variety of factors within our approach.

Individual contributions of the space attention module and the 3D space subdivision algorithm. By comparing the results in Tab. 3, we can see that the finer the subdivision of the 3D space, the more the model's accuracy and precision improve. However, the model's performance tends to converge after subdividing the space into $10 \times 10 \times 3$ regions. This result demonstrates the critical importance of the space subdivision algorithm within the space attention module: directly predicting attention for every voxel would not bring significant additional gains.

Efficiency analysis. In this work, we focus on comparing our method with the existing voxel-based methods [35, 38]. Tab. 3 shows that incorporating the space attention module into the voxel-based approaches results in only a slight increase in model complexity. For the model we eventually selected (VoxelPose with the $10 \times 10 \times 3$ space attention module), MACs increase by $1.32\%$ and parameters by $0.74\%$ compared to the original VoxelPose. As previously mentioned, excessively increasing the number of spatial subdivisions does not enhance performance but significantly increases the model's complexity. For instance, subdividing the space into $20 \times 20 \times 9$ regions results in a $23.8\%$ increase in MACs and a $13.6\%$ increase in parameters. This further demonstrates the importance of the space subdivision algorithm in keeping the space attention module efficient. To strike a balance between performance and efficiency, we adopt the $10 \times 10 \times 3$ space attention configuration on VoxelPose [35] to study the impact of the individual factors.

Number of cameras. We compare our method with existing 3D pose methods [7, 35, 38, 40]. Tab. 4 shows that the feature volume representation is degraded with fewer camera views, leading to a drop in accuracy. The growing improvement in both $AP$ and MPJPE over the other models as the number of cameras increases underscores the significance of multi-view images for the space attention module's performance.

Image-based input / Heatmap-based input. To further validate the impact of different inputs on the space attention module, we conducted experiments on the Panoptic dataset [21]. As shown in Tab. 5, although the space attention with Heatmap-based input shows an improvement compared to the baseline model [35], it is noticeably inferior to the space attention with Image-based input. We consider that this disparity occurs because heatmaps lack spatial and depth information in comparison to images.

Table 4: Number of cameras analysis on the Panoptic dataset.
| Method | Cam | $AP_{25}$ | $AP_{50}$ | $AP_{100}$ | $AP_{150}$ | MPJPE |
| --- | --- | --- | --- | --- | --- | --- |
| Faster VoxelPose [38] | 4 | 73.95 | 97.02 | 99.21 | 99.35 | 21.12 |
| MvP [40] | 4 | 84.1 | - | 96.7 | - | 19.3 |
| TEMPO [7] | 4 | - | - | - | - | 17.34 |
| ours | 4 | 88.4 | 98.1 | 99.59 | 99.7 | 16.78 |
| VoxelPose [35] | 3 | 58.94 | 93.88 | 98.45 | 99.32 | 24.29 |
| Faster VoxelPose [38] | 3 | 53.68 | 91.89 | 97.4 | 98.3 | 26.13 |
| MvP [40] | 3 | 71.8 | - | 95.1 | - | 21.1 |
| TEMPO [7] | 3 | - | - | - | - | 19.22 |
| ours | 3 | 73.06 | 95.23 | 98.64 | 99.25 | 19.03 |
| MvP [40] | 2 | 37.7 | - | 93 | - | 34.8 |
| TEMPO [7] | 2 | - | - | - | - | 32.13 |
| ours | 2 | 47.95 | 88.74 | 97.84 | 98.8 | 27.35 |
Table 5: Effect of different inputs on the space attention module.
| Input | $AP_{25}$ | $AP_{50}$ | $AP_{100}$ | $AP_{150}$ | MPJPE |
| --- | --- | --- | --- | --- | --- |
| Image-based | 94.2 | 98.49 | 99.21 | 99.31 | 13.98 |
| Heatmap-based | 86.97 | 98.3 | 99.29 | 99.38 | 17.21 |
# 4.4 3D space attention visualization

In Fig. 6, we provide space attention visualization results on the Panoptic dataset. Red regions indicate attention scores above 0.8, while blue regions indicate scores below 0.8. Observing the spatial distribution of attention in 3D space (1st row), most key attention areas are focused where people are present. In view 5, an obscured person is not visible from that angle, resulting in lower attention scores in that area. This result aligns with our hypothesis, confirming that the space attention mechanism discriminates the importance of different regions in the feature volume based on visibility. More visualization results are provided in the supplementary material.

![](images/743321a5f7b19114d7b2d7911667a821f6ebc7b7a187fd4d96786f2abe27f9ba.jpg)
Fig. 6: 3D space attention visualization. We mark areas with scores above 0.8 (red regions) in 3D space (1st row) and project them onto the corresponding 2D images (2nd row).

# 5 Conclusion

In this paper, we present a novel space attention module for voxel-based multi-view 3D pose estimation methods. We learn the space attention scores from the input images and utilize the 3D space subdivision algorithm to divide the feature volume, finally constructing the feature volumes with space attention. By integrating our space attention module into two existing voxel-based methods, both models achieve state-of-the-art results on the Panoptic benchmark.

# Acknowledgements

This work is supported by National Science and Technology Council (NSTC), Taiwan, R.O.C. projects with grants 112-2222-E-006-009-, 113-2218-E-035-001-, 113-2425-H-006-007-, and NSTC 113-2627-M-006-005-.

# References

1. Amin, S., Andriluka, M., Rohrbach, M., Schiele, B.: Multi-view pictorial structures for 3d human pose estimation. In: BMVC. vol. 1. Bristol, UK (2013)
2. Belagiannis, V., Amin, S., Andriluka, M., Schiele, B., Navab, N., Ilic, S.: 3d pictorial structures for multiple human pose estimation. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1669-1676 (2014)
3. Belagiannis, V., Amin, S., Andriluka, M., Schiele, B., Navab, N., Ilic, S.: 3d pictorial structures revisited: Multiple human pose estimation. IEEE transactions on pattern analysis and machine intelligence 38(10), 1929-1942 (2015)
4. Bogo, F., Kanazawa, A., Lassner, C., Gehler, P., Romero, J., Black, M.J.: Keep it smpl: Automatic estimation of 3d human pose and shape from a single image. In: Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V 14. pp. 561-578. Springer (2016)
5. Bridgeman, L., Volino, M., Guillemaut, J.Y., Hilton, A.: Multi-person 3d pose estimation and tracking in sports. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops. pp. 0-0 (2019)
6. Chen, Y., Gu, R., Huang, O., Jia, G.: Vtp: volumetric transformer for multi-view multi-person 3d pose estimation. Applied Intelligence 53(22), 26568-26579 (2023)
7. Choudhury, R., Kitani, K.M., Jeni, L.A.: Tempo: Efficient multi-view pose estimation, tracking, and forecasting. In: 2023 IEEE/CVF International Conference on Computer Vision (ICCV). pp. 14704-14714 (2023). https://doi.org/10.1109/ICCV51070.2023.01355
8. Dabral, R., Mundhada, A., Kusupati, U., Afaque, S., Sharma, A., Jain, A.: Learning 3d human pose from structure and motion. In: Proceedings of the European conference on computer vision (ECCV). pp. 668-683 (2018)
9. Dong, J., Fang, Q., Jiang, W., Yang, Y., Huang, Q., Bao, H., Zhou, X.: Fast and robust multi-person 3d pose estimation and tracking from multiple views. IEEE Transactions on Pattern Analysis and Machine Intelligence 44(10), 6981-6992 (2021)
10. Dong, J., Jiang, W., Huang, Q., Bao, H., Zhou, X.: Fast and robust multi-person 3d pose estimation from multiple views. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 7792-7801 (2019)
11. Dong, Z., Song, J., Chen, X., Guo, C., Hilliges, O.: Shape-aware multi-person pose estimation from multi-view images. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 11158-11168 (2021)
12. Ershadi-Nasab, S., Noury, E., Kasaei, S., Sanaei, E.: Multiple human 3d pose estimation from multiview images. Multimedia Tools and Applications 77, 15573-15601 (2018)
13. Fang, H.S., Xie, S., Tai, Y.W., Lu, C.: Rmpe: Regional multi-person pose estimation. In: Proceedings of the IEEE international conference on computer vision. pp. 2334-2343 (2017)
14. Fischler, M.A., Elschlager, R.A.: The representation and matching of pictorial structures. IEEE Transactions on computers 100(1), 67-92 (1973)
15. Hartley, R., Zisserman, A.: Multiple view geometry in computer vision. Cambridge university press (2003)
16. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)
17. Hu, J., Shen, L., Sun, G.: Squeeze-and-excitation networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 7132-7141 (2018)
18. Huang, C., Jiang, S., Li, Y., Zhang, Z., Traish, J., Deng, C., Ferguson, S., Da Xu, R.Y.: End-to-end dynamic matching network for multi-view multi-person 3d pose estimation. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXVIII 16. pp. 477-493. Springer (2020)
19. Iskakov, K., Burkov, E., Lempitsky, V., Malkov, Y.: Learnable triangulation of human pose. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 7718-7727 (2019)
20. Iskakov, K., Burkov, E., Lempitsky, V., Malkov, Y.: Learnable triangulation of human pose. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 7718-7727 (2019)
21. Joo, H., Liu, H., Tan, L., Gui, L., Nabbe, B., Matthews, I., Kanade, T., Nobuhara, S., Sheikh, Y.: Panoptic studio: A massively multiview system for social motion capture. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 3334-3342 (2015)
22. Lai, J.Y., Shu, S.H., Huang, Y.C.: A cell subdivision strategy for r-nearest neighbors computation. Journal of the Chinese Institute of Engineers 29(6), 953-965 (2006)
23. Li, X., Wang, W., Hu, X., Yang, J.: Selective kernel networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 510-519 (2019)
24. Li, Z., Oskarsson, M., Heyden, A.: 3d human pose and shape estimation through collaborative learning and multi-view model-fitting. In: Proceedings of the IEEE/CVF winter conference on applications of computer vision. pp. 1888-1897 (2021)
25. Lin, J., Lee, G.H.: Multi-view multi-person 3d pose estimation with plane sweep stereo. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11886-11895 (2021)
26. Loper, M., Mahmood, N., Romero, J., Pons-Moll, G., Black, M.J.: Smpl: A skinned multi-person linear model. In: Seminal Graphics Papers: Pushing the Boundaries, Volume 2, pp. 851-866 (2023)
27. Ma, X., Su, J., Wang, C., Ci, H., Wang, Y.: Context modeling in 3d human pose estimation: A unified perspective. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 6238-6247 (2021)
28. Ma, X., Su, J., Wang, C., Ci, H., Wang, Y.: Context modeling in 3d human pose estimation: A unified perspective. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 6238-6247 (2021)
29. Moon, G., Chang, J.Y., Lee, K.M.: V2v-posenet: Voxel-to-voxel prediction network for accurate 3d hand and human pose estimation from a single depth map. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 5079-5088 (2018)
30. Reddy, N.D., Guigues, L., Pishchulin, L., Eledath, J., Narasimhan, S.G.: Tessetrack: End-to-end learnable multi-person articulated 3d pose tracking. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15190-15200 (2021)
31. Shin, S., Halilaj, E.: Multi-view human pose and shape estimation using learnable volumetric aggregation. arXiv preprint arXiv:2011.13427 (2020)
32. Su, J., Wang, C., Ma, X., Zeng, W., Wang, Y.: Virtualpose: Learning generalizable 3d human pose models from virtual data. In: European Conference on Computer Vision. pp. 55-71. Springer (2022)
33. Sun, Y., Bao, Q., Liu, W., Fu, Y., Black, M.J., Mei, T.: Monocular, one-stage, regression of multiple 3d people. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11179-11188 (2021)
34. Sun, Y., Liu, W., Bao, Q., Fu, Y., Mei, T., Black, M.J.: Putting people in their place: Monocular regression of 3d people in depth. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13243-13252 (2022)
35. Tu, H., Wang, C., Zeng, W.: Voxelpose: Towards multi-camera 3d human pose estimation in wild environment. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part I 16. pp. 197-212. Springer (2020)
36. Woo, S., Park, J., Lee, J.Y., Kweon, I.S.: Cbam: Convolutional block attention module. In: Proceedings of the European conference on computer vision (ECCV). pp. 3-19 (2018)
37. Wu, S., Jin, S., Liu, W., Bai, L., Qian, C., Liu, D., Ouyang, W.: Graph-based 3d multi-person pose estimation using multi-view images. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11148-11157 (2021)
38. Ye, H., Zhu, W., Wang, C., Wu, R., Wang, Y.: Faster voxelpose: Real-time 3d human pose estimation by orthographic projection. In: European Conference on Computer Vision. pp. 142-159. Springer (2022)
39. Yu, Z., Zhang, L., Xu, Y., Tang, C., Tran, L., Keskin, C., Park, H.S.: Multiview human body reconstruction from uncalibrated cameras. Advances in Neural Information Processing Systems 35, 7879-7891 (2022)
40. Zhang, J., Cai, Y., Yan, S., Feng, J., et al.: Direct multi-view multi-person 3d pose estimation. Advances in Neural Information Processing Systems 34, 13153-13164 (2021)
41. Zhang, Y., Wang, C., Wang, X., Liu, W., Zeng, W.: Voxeltrack: Multi-person 3d human pose estimation and tracking in the wild. IEEE Transactions on Pattern Analysis and Machine Intelligence 45(2), 2613-2626 (2022)
42.
Zhang, Y., An, L., Yu, T., Li, X., Li, K., Liu, Y.: 4d association graph for realtime multi-person motion capture using multiple video cameras. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 1324-1333 (2020) \ No newline at end of file diff --git a/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/images.zip b/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..1d7e0899defbc0e2297d99b04a22e3a82c8e4e91 --- /dev/null +++ b/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:868c142b37bb9412f5417b38b3d26f37ac68486375aa165bbfd9ad69d089b5b6 +size 476701 diff --git a/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/layout.json b/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..bac8605fb6a96b5f9f374c7f298e91f96be96170 --- /dev/null +++ b/2024/3DSA_Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms/layout.json @@ -0,0 +1,8403 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 142, + 111, + 473, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 111, + 473, + 148 + ], + "spans": [ + { + "bbox": [ + 142, + 111, + 473, + 148 + ], + "type": "text", + "content": "3DSA :Multi-View 3D Human Pose Estimation With 3D Space Attention Mechanisms" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 228, + 168, + 384, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 168, + 384, + 180 + ], + "spans": [ + { + "bbox": [ + 228, + 168, + 384, + 180 + ], + "type": "text", + "content": "Pohan Chen and Chiachi Tsi" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 232, + 190, + 381, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 190, + 381, + 212 + ], + "spans": [ + { + "bbox": [ + 232, + 190, + 381, + 212 + ], + "type": "text", + "content": "National Cheng Kung University n28111089,cctsai@gs.ncku.edu.tw" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 160, + 243, + 455, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 243, + 455, + 386 + ], + "spans": [ + { + "bbox": [ + 160, + 243, + 455, + 386 + ], + "type": "text", + "content": "Abstract. In this study, we introduce the 3D space attention module (3DSA) as a novel approach to address the drawback of multi-view 3D human pose estimation methods, which fail to recognize the object's significance from diverse viewpoints. Specifically, we utilize the 3D space subdivision algorithm to divide the feature volume into multiple regions. Predicted 3D space attention scores are assigned to the different regions to construct the feature volume with space attention. The purpose of the 3D space attention module is to distinguish the significance of individual regions within the feature volume by applying weighted attention adjustments derived from corresponding viewpoints. We conduct experiments on existing voxel-based methods, VoxelPose and Faster VoxelPose. By incorporating the space attention module, both achieve state-of-the-art performance on the CMU Panoptic Studio dataset." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 160, + 396, + 420, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 396, + 420, + 408 + ], + "spans": [ + { + "bbox": [ + 160, + 396, + 420, + 408 + ], + "type": "text", + "content": "Keywords: : 3D Human Pose Estimation " + }, + { + "bbox": [ + 160, + 396, + 420, + 408 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 396, + 420, + 408 + ], + "type": "text", + "content": " 3D space attention" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 133, + 429, + 230, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 429, + 230, + 441 + ], + "spans": [ + { + "bbox": [ + 133, + 429, + 230, + 441 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 454, + 482, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 454, + 482, + 514 + ], + "spans": [ + { + "bbox": [ + 130, + 454, + 482, + 514 + ], + "type": "text", + "content": "Estimating multiple 3D human poses simultaneously from multiple camera views is an enduring challenge in computer vision. The aim is to determine the 3D locations of the body joints for all people present in a scene. It is a task that offers benefits to numerous real-world applications, including intelligent sports analysis [5] and retail monitoring [35]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 514, + 482, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 514, + 482, + 574 + ], + "spans": [ + { + "bbox": [ + 130, + 514, + 482, + 574 + ], + "type": "text", + "content": "In 2D-3D lifting approaches [9,10,42], a monocular pose estimator identifies 2D bounding boxes and 2D poses for individuals in each view. A multi-view matching algorithm then establishes consistent correspondences between the 2D poses across different views. Finally, the matched 2D poses are lifted to 3D using geometry models such as triangulation [15] or Pictorial Structure Models [2,3,14]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 574, + 482, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 574, + 482, + 647 + ], + "spans": [ + { + "bbox": [ + 130, + 574, + 482, + 647 + ], + "type": "text", + "content": "As shown in Fig. 1(a), the voxel-based method [35] constructs the discretized feature volume from the detected heatmaps through 2D-3D projection. Based on the identified per-person proposal, the 3D pose for each person is individually estimated by feeding the feature volume into 3D-CNNs. 
To reduce the computational cost, another voxel-based approach [38] re-projects the feature volume to three two-dimensional coordinate planes and replaces the 3D-CNNs" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 653, + 380, + 665 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 653, + 380, + 665 + ], + "spans": [ + { + "bbox": [ + 133, + 653, + 380, + 665 + ], + "type": "text", + "content": "* Corresponding author: Chiachi Tsi, cctsai@gs.ncku.edu.tw" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 169, + 118, + 443, + 261 + ], + "blocks": [ + { + "bbox": [ + 169, + 118, + 443, + 261 + ], + "lines": [ + { + "bbox": [ + 169, + 118, + 443, + 261 + ], + "spans": [ + { + "bbox": [ + 169, + 118, + 443, + 261 + ], + "type": "image", + "image_path": "0099411d7b1de9091d8834d13380852dc604cb21e991da6a25bf5023f0621061.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 271, + 482, + 316 + ], + "lines": [ + { + "bbox": [ + 130, + 271, + 482, + 316 + ], + "spans": [ + { + "bbox": [ + 130, + 271, + 482, + 316 + ], + "type": "text", + "content": "Fig. 1: Comparison between our method and the existing voxel-based method. The primary distinction is that: (a) The existing method simply projects heatmaps into 3D space. (b) We enhance feature volumes using space attention, maintaining 3D information, and emphasizing critical regions within the feature volume." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 341, + 482, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 341, + 482, + 401 + ], + "spans": [ + { + "bbox": [ + 130, + 341, + 482, + 401 + ], + "type": "text", + "content": "with 2D-CNNs, which increases inference speed. The existing direct prediction method [40] uses the Transformer architecture to regress multi-person 3D poses directly, bypassing the need for intermediate tasks. However, owing to the constraints of the transformer architecture, the inference speed of the method still falls behind the Voxel-based 2D-CNNs method [38]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 402, + 482, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 402, + 482, + 581 + ], + "spans": [ + { + "bbox": [ + 130, + 402, + 482, + 581 + ], + "type": "text", + "content": "Existing multi-view approaches often fail to consider an important visual phenomenon: the visible parts of the same object should differ when observed from various angles. As depicted in Fig. 2, the four people in the scene are visible in Views 1 to 4. However, in View 5, only three are visible in the image due to obstruction by one of the people. To address this issue, we propose the 3D space attention module (referred to as 3DSA) and apply it to two open-source voxel-based methods [35,38]. Fig. 1(b) shows the overview of our proposed method. We added the space attention layers to the end of backbone network to predict the space attention scores. Directly estimating the importance of each voxel within the feature volume could lead to excessive computational demands. Therefore, we employed the 3D space subdivision algorithm to divide the feature volume into multiple regions. 
The voxels within each region were treated as a group, and the space attention scores were assigned to the group, representing the importance of the region. Finally, the feature volume with space attention was constructed, retaining the 3D information and paying more attention to crucial regions." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 582, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 582, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 582, + 482, + 665 + ], + "type": "text", + "content": "We have conducted extensive experiments on the 3D human pose benchmark, Panoptic [21], to evaluate the efficacy of our space attention module. By applying the space attention module into the VoxelPose [35] and Faster VoxelPose [38] methods, our models show significant improvements of " + }, + { + "bbox": [ + 130, + 582, + 482, + 665 + ], + "type": "inline_equation", + "content": "20.93\\%" + }, + { + "bbox": [ + 130, + 582, + 482, + 665 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 582, + 482, + 665 + ], + "type": "inline_equation", + "content": "20.32\\%" + }, + { + "bbox": [ + 130, + 582, + 482, + 665 + ], + "type": "text", + "content": " in MPJPE respectively, both models achieve the state-of-the-art results. The voxel-based methods undermine their performance on the " + }, + { + "bbox": [ + 130, + 582, + 482, + 665 + ], + "type": "inline_equation", + "content": "AP_{25}" + }, + { + "bbox": [ + 130, + 582, + 482, + 665 + ], + "type": "text", + "content": " metric when compared to other multi-view methods. Our space attention module addresses" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "type": "text", + "content": "Chen et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 135, + 118, + 203, + 150 + ], + "blocks": [ + { + "bbox": [ + 135, + 118, + 203, + 150 + ], + "lines": [ + { + "bbox": [ + 135, + 118, + 203, + 150 + ], + "spans": [ + { + "bbox": [ + 135, + 118, + 203, + 150 + ], + "type": "image", + "image_path": "4ae930174a96d7868b535b8f28cbcc2f9f53b0ca98ad2aaa3004032716fef757.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 160, + 151, + 177, + 158 + ], + "lines": [ + { + "bbox": [ + 160, + 151, + 177, + 158 + ], + "spans": [ + { + "bbox": [ + 160, + 151, + 177, + 158 + ], + "type": "text", + "content": "View1" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 204, + 118, + 271, + 150 + ], + "blocks": [ + { + "bbox": [ + 204, + 118, + 271, + 150 + ], + "lines": [ + { + "bbox": [ + 204, + 118, + 271, + 150 + ], + "spans": [ + { + "bbox": [ + 204, + 118, + 271, + 150 + ], + "type": "image", + "image_path": "da8188c0bf4ff0b7464527a8d3d41219bfbb47c7deec6a6d55179331f0895941.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 229, + 151, + 246, + 158 + ], + "lines": [ + { + "bbox": [ + 229, + 151, + 246, + 158 + ], + "spans": [ + { + "bbox": [ + 229, + 151, + 246, + 158 + ], + "type": "text", + "content": "View2" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 130, + 168, + 480, + 224 + ], + "lines": [ + { + "bbox": [ + 130, + 168, + 480, + 224 + ], + "spans": [ + { + "bbox": [ + 130, + 168, + 480, + 224 + ], + "type": "text", + "content": "Fig. 2: The visual phenomenon in the 3D space. Due to differences in camera viewing angles and obstruction issues, the visibility of the same person changes across different viewpoints. As shown by the red arrow in the figure, we can clearly observe the person in the images from Views 1 to 4, but they are not visible in View 5. This visual phenomenon is commonly encountered in multi-view human pose estimation tasks." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 272, + 118, + 341, + 150 + ], + "blocks": [ + { + "bbox": [ + 272, + 118, + 341, + 150 + ], + "lines": [ + { + "bbox": [ + 272, + 118, + 341, + 150 + ], + "spans": [ + { + "bbox": [ + 272, + 118, + 341, + 150 + ], + "type": "image", + "image_path": "a73242bfe87ad7e534792de37e4b1b0dd95a2f74e2406c7bf15fc17ca483faff.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 298, + 151, + 315, + 158 + ], + "lines": [ + { + "bbox": [ + 298, + 151, + 315, + 158 + ], + "spans": [ + { + "bbox": [ + 298, + 151, + 315, + 158 + ], + "type": "text", + "content": "View3" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 342, + 118, + 410, + 150 + ], + "blocks": [ + { + "bbox": [ + 342, + 118, + 410, + 150 + ], + "lines": [ + { + "bbox": [ + 342, + 118, + 410, + 150 + ], + "spans": [ + { + "bbox": [ + 342, + 118, + 410, + 150 + ], + "type": "image", + "image_path": "c228b5cd8a029bbfde48814c37f7b41dd74204e74ac268f50ba1884d2175f41c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 367, + 151, + 384, + 158 + ], + "lines": [ + { + "bbox": [ + 367, + 151, + 384, + 158 + ], + "spans": [ + { + "bbox": [ + 367, + 151, + 384, + 158 + ], + "type": "text", + "content": "View4" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 411, + 118, + 480, + 150 + ], + "blocks": [ + { + "bbox": [ + 411, + 118, + 480, + 150 + ], + "lines": [ + { + "bbox": [ + 411, + 118, + 480, + 150 + ], + "spans": [ + { + "bbox": [ + 411, + 118, + 480, + 150 + ], + "type": "image", + "image_path": "1101992fbba6d1a3a4f9082ca880d0fa9fe61b2e1c4cb3983e1ea5b54f09255c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 438, + 151, + 455, + 158 + ], + "lines": [ + { + "bbox": [ + 438, + 151, + 455, + 158 + ], + "spans": [ + { + "bbox": [ + 438, + 151, + 455, + 158 + ], + "type": "text", + "content": "View5" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 246, + 479, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 246, + 479, + 282 + ], + "spans": [ + { + "bbox": [ + 130, + 246, + 479, + 282 + ], + "type": "text", + "content": "this weakness, resulting in our model achieving " + }, + { + "bbox": [ + 130, + 246, + 479, + 282 + ], + "type": "inline_equation", + "content": "94.2\\%" + }, + { + "bbox": [ + 130, + 246, + 479, + 282 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 246, + 479, + 282 + ], + "type": "inline_equation", + "content": "94.22\\%" + }, + { + "bbox": [ + 130, + 246, + 479, + 282 + ], + "type": "text", + "content": " on the metric. Compared to the baseline model [35, 38], these scores demonstrate a significant improvement, surpassing all existing multi-view approaches." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 130, + 282, + 480, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 282, + 480, + 377 + ], + "spans": [ + { + "bbox": [ + 130, + 282, + 480, + 377 + ], + "type": "text", + "content": "Our contributions are summarized as follows: (1) We proposed the 3D space attention module (3DSA), which addresses the drawbacks of the existing multiview 3D human pose estimation methods and validates its effectiveness on existing voxel-based methods [35, 38]. (2) We introduced a 3D space subdivision algorithm to reduce the computational complexity of the module. (3) By incorporating our space attention module into existing voxel-based methods [35, 38], both models achieve state-of-the-art results on the Panoptic benchmark, demonstrating the effectiveness of this attention mechanism." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 395, + 234, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 395, + 234, + 407 + ], + "spans": [ + { + "bbox": [ + 132, + 395, + 234, + 407 + ], + "type": "text", + "content": "2 Related work" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 131, + 419, + 354, + 432 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 419, + 354, + 432 + ], + "spans": [ + { + "bbox": [ + 131, + 419, + 354, + 432 + ], + "type": "text", + "content": "2.1 Multi-view 3D human pose estimation" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 130, + 437, + 480, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 437, + 480, + 521 + ], + "spans": [ + { + "bbox": [ + 130, + 437, + 480, + 521 + ], + "type": "text", + "content": "Unlike monocular 3D human pose estimation [8, 13, 33, 34], multi-view human pose estimation leverages image information from different viewpoints. This approach not only effectively overcomes challenges such as occlusion and depth ambiguity but also ensures a richer and more accurate depiction of the 3D pose. Existing methods can be categorized into three types: (1)2D to 3D lifting methods [1-3,5,9,10,18,25,42] (2)Voxel-based methods [6,7,19,20,27,30,32,35,38,41] (3)Direct regression method [40]." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 130, + 521, + 481, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 521, + 481, + 641 + ], + "spans": [ + { + "bbox": [ + 130, + 521, + 481, + 641 + ], + "type": "text", + "content": "2D to 3D lifting method Firstly, a monocular pose estimator is utilized to estimate the 2D joints of each person in each view, through triangulation [15] and a 3D pictorial model [14], the 3D pose of each person is reconstructed from the associated 2D poses. Dong et al. [9,10] propose MvPose. MvPose utilizes a human pose detector to generate and cluster 2D bounding boxes and associated poses for each view. Each cluster represents the same person from different views. The 3D pose of each person is then reconstructed from these clusters using triangulation and a 3D pictorial model. The drawback of this 2D to 3D pose lifting method is its significant dependence on the preceding steps of 2D pose estimation and cross-view matching, as their quality directly influences the results." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "type": "text", + "content": "Voxel-based method In contrast to the 2D to 3D lifting methods, which require establishing cross-view correspondence based on noisy and incomplete 2D" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DSA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 284 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 284 + ], + "type": "text", + "content": "pose estimates, the voxel-based method directly operates in the 3D space and therefore avoids making incorrect decisions in each camera view. Tu et al. [35] propose VoxelPose, the method that discretizes the 3D space into voxels and uses 2D heatmaps to construct a 3D feature volume. 3D-CNNs process this volume to locate human proposals and regress the 3D joint. Since the voxel-based method heavily relies on 3D convolutions, it requires higher computational cost and inference time to predict 3D joints. To enhance the model efficiency, Ye et al. [38] proposed Faster VoxelPose, an optimization method based on orthographic projection. This method projects the 3D feature volume to three mutually perpendicular planes and then utilizes 2D-CNNs to locate the center proposal and regress the 3D joint. By doing this, it eliminates the need for time-consuming 3D convolutions. Choudhury et al. [7] proposed TEMPO, which utilizes temporal context to enhance pose estimation, delivering smoother and more accurate human poses by integrating spatiotemporal information." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 287, + 482, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 287, + 482, + 360 + ], + "spans": [ + { + "bbox": [ + 130, + 287, + 482, + 360 + ], + "type": "text", + "content": "It has been observed that the voxel-based methods generally yield lower scores on the " + }, + { + "bbox": [ + 130, + 287, + 482, + 360 + ], + "type": "inline_equation", + "content": "AP_{25}" + }, + { + "bbox": [ + 130, + 287, + 482, + 360 + ], + "type": "text", + "content": " metric in Panoptic datasets when compared to other methods. In this paper, we introduce a novel 3D space attention module, which applies weighted attention adjustments to the feature volume from corresponding viewpoints. This attention mechanism guides the network to focus more effectively on crucial feature regions and yields significant improvements in the " + }, + { + "bbox": [ + 130, + 287, + 482, + 360 + ], + "type": "inline_equation", + "content": "AP_{25}" + }, + { + "bbox": [ + 130, + 287, + 482, + 360 + ], + "type": "text", + "content": " metric." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 363, + 483, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 363, + 483, + 472 + ], + "spans": [ + { + "bbox": [ + 130, + 363, + 483, + 472 + ], + "type": "text", + "content": "Direct regression method In contrast to previous methods, Zhang et al. [40] proposed MvP, which leverages the Transformer architecture to regress multi-person 3D poses directly, thus eliminating the need for intermediate tasks. MvP achieved impressive results on the Panoptic [21] datasets. It showed significant progress (8%) on the most stringent " + }, + { + "bbox": [ + 130, + 363, + 483, + 472 + ], + "type": "inline_equation", + "content": "AP_{25}" + }, + { + "bbox": [ + 130, + 363, + 483, + 472 + ], + "type": "text", + "content": " compared to the Voxel-based methods [35, 38] and is more robust and accurate than previous models. However, due to the limitations of the transformer architecture, the inference speed of MvP still can't compete with 2D CNN-based voxel method [38], which is not conducive to its deployment in practical applications." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 506, + 349, + 519 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 506, + 349, + 519 + ], + "spans": [ + { + "bbox": [ + 132, + 506, + 349, + 519 + ], + "type": "text", + "content": "2.2 Multi-view 3D body mesh estimation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "type": "text", + "content": "Multi-view 3D body mesh estimation [11,20,24,31,39] is a task closely related to 3D pose estimation. Instead of directly estimating joint positions, this task involves predicting the parameters of SMPL [26] or employing a fitting method [4] to align the SMPL model with detected joint positions. Yu et al. [39] use neural networks to directly predict local attention, assigning importance to visual features across views. Our method focuses on using space subdivision and space attention to address the varying importance of different viewpoints in the same 3D space. Directly predicting the space attention and projecting to the 3D space will result in equal attention values along the projection ray, which prevents the model from accurately identifying depth information." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "type": "text", + "content": "Chen et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 273, + 126 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 273, + 126 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 273, + 126 + ], + "type": "text", + "content": "2.3 Attention mechanisms" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 133, + 482, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 133, + 482, + 277 + ], + "spans": [ + { + "bbox": [ + 130, + 133, + 482, + 277 + ], + "type": "text", + "content": "The methodology of predicting attention scores from input features and then using these scores to enhance discriminative feature learning has been adopted by numerous studies [17, 23, 36]. The most famous is SENet proposed by Hu et al. [17], which employs attention mechanisms to adaptively recalibrate channelwise features by modeling inter-channel dependencies. Ma et al. proposed global attention in ContextPose [28], which focus on features within each voxel by estimating confidence scores for each joint, effectively reducing interference from non-human body voxels and improving joint estimation accuracy. Regarding merging 3D features extracted from different 2D viewpoints, the inherent physical characteristics of imaging result in varying importance of different viewpoints for the same 3D space. Therefore, we introduced the space attention module to solve this problem in a voxel-represented 3D space." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 293, + 202, + 306 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 293, + 202, + 306 + ], + "spans": [ + { + "bbox": [ + 132, + 293, + 202, + 306 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 316, + 468, + 328 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 316, + 468, + 328 + ], + "spans": [ + { + "bbox": [ + 130, + 316, + 468, + 328 + ], + "type": "text", + "content": "3.1 The drawback of existing multi-view 3D human pose method" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 333, + 482, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 333, + 482, + 417 + ], + "spans": [ + { + "bbox": [ + 130, + 333, + 482, + 417 + ], + "type": "text", + "content": "Despite the impressive achievements of the existing multi-view 3D human pose methods, they ignore an important visual phenomenon: the visible parts of an object could vary when observed from different viewpoints. Specifically, an object's visibility can differ dramatically across various viewpoints, for instance, an object may be distinctly visible from viewpoint A, yet as we transition to viewpoint B, its visibility may diminish or even vanish due to interposing obstacles or occluded persons." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 418, + 482, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 418, + 482, + 513 + ], + "spans": [ + { + "bbox": [ + 130, + 418, + 482, + 513 + ], + "type": "text", + "content": "In this work, we introduce the space attention module to address the drawback, and we validate its effectiveness on existing voxel-based methods [35,38]. The existing methods merely project heatmaps into 3D space. As depicted in Fig. 1(b), our approach leverages the space attention module to enhance feature volumes. 
This not only preserves 3D information but also emphasizes crucial regions inside the feature volume. The objective of this attention mechanism is to focus on significant regions within the feature volume, by applying weighted attention adjustments to the feature volume from corresponding viewpoints." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 528, + 269, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 528, + 269, + 540 + ], + "spans": [ + { + "bbox": [ + 132, + 528, + 269, + 540 + ], + "type": "text", + "content": "3.2 Network architecture" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 545, + 482, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 545, + 482, + 629 + ], + "spans": [ + { + "bbox": [ + 130, + 545, + 482, + 629 + ], + "type": "text", + "content": "Heatmap and space attention prediction. As shown in Fig. 3 (a), our model adopts a simple multi-layer design with a backbone and two additional layers. In the heatmap layer, the probability of a 2D pose heatmaps for the corresponding view is predicted. Meanwhile, in the space attention layer, the attention scores of the feature volume are determined. The attention scores are dynamically adjusted based on the input image, emphasizing regions with higher visibility in the 3D space." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 630, + 482, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 630, + 482, + 667 + ], + "spans": [ + { + "bbox": [ + 130, + 630, + 482, + 667 + ], + "type": "text", + "content": "Space attention with person proposal generation. As shown in Fig. 3 (b), by projecting the output heatmaps to the 3D space, the discretized feature volume " + }, + { + "bbox": [ + 130, + 630, + 482, + 667 + ], + "type": "inline_equation", + "content": "\\{\\mathbf{G} \\in \\mathbb{R}^{80 \\times 80 \\times 20}\\}" + }, + { + "bbox": [ + 130, + 630, + 482, + 667 + ], + "type": "text", + "content": " is constructed. Following [35], the 3D space is discretized" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 421, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 421, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 421, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DSA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 136, + 118, + 479, + 292 + ], + "blocks": [ + { + "bbox": [ + 136, + 118, + 479, + 292 + ], + "lines": [ + { + "bbox": [ + 136, + 118, + 479, + 292 + ], + "spans": [ + { + "bbox": [ + 136, + 118, + 479, + 292 + ], + "type": "image", + "image_path": "0ad11621644ac5f435ec30ae3f68f88ba4d0d510f328687dbf244a9de56dd28b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 303, + 482, + 437 + ], + "lines": [ + { + "bbox": [ + 130, + 303, + 482, + 437 + ], + "spans": [ + { + "bbox": [ + 130, + 303, + 482, + 437 + ], + "type": "text", + "content": "Fig. 3: Overview of network architecture. 
(a) Given the multi-view image as input, the backbone network predicted both the heatmaps and the space attention scores for each view. Each heatmap is projected to a 3D space, which is physically shared but independent for each view, constructing the feature volume. The space attention scores for each view are assigned to the different regions in the shared 3D space. (b) By performing an element-wise multiplication of the raw feature volume with the space attention scores, we produce a feature volume infused with spatial attention. Subsequently, this attention-enhanced feature volume is fed into 3D-CNNs to locate the per-person proposal. (c) A more detailed feature volume corresponding to the proposal was generated. By calculating the spatial relationship between the proposal and the feature volume, space attention scores for the proposal were sampled from the attention in 3D space. Finally, the human pose was estimated." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "spans": [ + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "text", + "content": "into " + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "inline_equation", + "content": "X \\times Y \\times Z" + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "text", + "content": " locations. Based on observations from the space " + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "inline_equation", + "content": "[2, 21], X, Y" + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "inline_equation", + "content": "Z" + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "text", + "content": " are set to be 80, 80, and 20 respectively to maintain a good balance between speed and precision. Let the 2D heatmap of a view be denoted as " + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "inline_equation", + "content": "M_v \\in \\mathbb{R}^{K \\times w \\times h}" + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "text", + "content": " is the number of person's joints. For each voxel location " + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "inline_equation", + "content": "G^{X,Y,Z}" + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "text", + "content": ", the projected location in 2D view " + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "inline_equation", + "content": "\\mathbf{V}" + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "text", + "content": " is represented as " + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "inline_equation", + "content": "P_v^{X,Y,Z}" + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "text", + "content": ". 
The heatmap values at " + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "inline_equation", + "content": "P_v^{X,Y,Z}" + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "text", + "content": " is denoted as " + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "inline_equation", + "content": "M_v^{X,Y,Z} \\in \\mathbb{R}^K" + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "inline_equation", + "content": "\\mathbf{v} \\in \\mathbb{R}^V" + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "text", + "content": " represents one view from total " + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "inline_equation", + "content": "\\mathbf{V}" + }, + { + "bbox": [ + 130, + 468, + 482, + 552 + ], + "type": "text", + "content": " views." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 556, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 556, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 556, + 482, + 666 + ], + "type": "text", + "content": "Directly predicting the importance of each voxel in the feature volume would result in an overwhelming computational burden (Given that the output dimension of the model equals the number of voxels in the feature volume, which is 128,000). To reduce computational complexity, we use a 3D space subdivision algorithm to divide the feature volume " + }, + { + "bbox": [ + 130, + 556, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\{G_V \\in \\mathbb{R}^{80 \\times 80 \\times 20}\\}" + }, + { + "bbox": [ + 130, + 556, + 482, + 666 + ], + "type": "text", + "content": " from each view, " + }, + { + "bbox": [ + 130, + 556, + 482, + 666 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 130, + 556, + 482, + 666 + ], + "type": "text", + "content": " into several regions " + }, + { + "bbox": [ + 130, + 556, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\{Div\\overline{G}_V \\in \\mathbb{R}^{80 \\times 80 \\times 20}\\}" + }, + { + "bbox": [ + 130, + 556, + 482, + 666 + ], + "type": "text", + "content": ". Subsequently, the space attention scores predicted from the model are assigned to each region in the divided feature volume to compute the attention of the feature volume " + }, + { + "bbox": [ + 130, + 556, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\{V_v^{X,Y,Z} \\in \\mathbb{R}^{80 \\times 80 \\times 20}\\}" + }, + { + "bbox": [ + 130, + 556, + 482, + 666 + ], + "type": "text", + "content": ", which represents the attention scores for view " + }, + { + "bbox": [ + 130, + 556, + 482, + 666 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 130, + 556, + 482, + 666 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "type": "text", + "content": "Chen et al." 
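As a concrete reading of the projection step above, the following minimal NumPy sketch samples one view's 2D heatmaps at the projected voxel centers. Here `project_to_view` is a hypothetical stand-in for the calibrated camera projection used by voxel-based methods (it is not from the paper's code), and nearest-neighbour pixel lookup is an assumption.

import numpy as np

def build_feature_volume(heatmap, project_to_view, grid_xyz):
    # heatmap: (K, h, w) 2D joint heatmaps M_v for one view.
    # grid_xyz: (N, 3) world coordinates of the N = X*Y*Z voxel centers.
    # Returns (K, N): heatmap values sampled at each projected voxel center.
    K, h, w = heatmap.shape
    pix = project_to_view(grid_xyz)                       # (N, 2) pixel coords; assumed helper
    u = np.clip(np.rint(pix[:, 0]).astype(int), 0, w - 1)
    v = np.clip(np.rint(pix[:, 1]).astype(int), 0, h - 1)
    return heatmap[:, v, u]                               # nearest-neighbour lookup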
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "text", + "content": "After that, an element-wise multiplication is performed between the space attention " + }, + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "inline_equation", + "content": "\\{V_{\\pmb{v}}\\}" + }, + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "text", + "content": " and the raw feature volume " + }, + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "inline_equation", + "content": "\\{M_{\\pmb{v}}\\}" + }, + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "text", + "content": " to obtain a feature volume with space attention on view " + }, + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "inline_equation", + "content": "\\pmb{v}" + }, + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "text", + "content": ", denoted as " + }, + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "inline_equation", + "content": "MV_{\\pmb{v}}" + }, + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "text", + "content": ". Following this, the feature volumes (with space attention) constructed from multi-view images are fused on average to obtain the aggregated feature volume " + }, + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "inline_equation", + "content": "\\{\\pmb{F} \\in \\mathbb{R}^{80 \\times 80 \\times 20}\\}" + }, + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 275, + 186, + 481, + 217 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 275, + 186, + 481, + 217 + ], + "spans": [ + { + "bbox": [ + 275, + 186, + 481, + 217 + ], + "type": "interline_equation", + "content": "F = \\frac {1}{V} \\sum_ {v = 1} ^ {V} M _ {v} \\tag {1}", + "image_path": "cb8505055591996962c48642786cabcd5b9744a25db69b4e873b8f62020f5264.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 224, + 480, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 224, + 480, + 282 + ], + "spans": [ + { + "bbox": [ + 130, + 224, + 480, + 282 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 224, + 480, + 282 + ], + "type": "inline_equation", + "content": "\\mathbf{V}" + }, + { + "bbox": [ + 130, + 224, + 480, + 282 + ], + "type": "text", + "content": " represents the number of cameras. " + }, + { + "bbox": [ + 130, + 224, + 480, + 282 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 130, + 224, + 480, + 282 + ], + "type": "text", + "content": " represents the likelihood of " + }, + { + "bbox": [ + 130, + 224, + 480, + 282 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 130, + 224, + 480, + 282 + ], + "type": "text", + "content": " joints in " + }, + { + "bbox": [ + 130, + 224, + 480, + 282 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 130, + 224, + 480, + 282 + ], + "type": "text", + "content": ". Through applying space attention to the feature volume, 3D information is retained while emphasizing important voxels. 
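A minimal sketch of this weighting-and-averaging step, assuming the per-view attention scores have already been broadcast to the grid shape: it applies the element-wise product M_v * V_v and then takes the per-view average of Eq. (1).

import numpy as np

def fuse_volumes(raw_volumes, attention_volumes):
    # raw_volumes: list of per-view volumes M_v, each (K, 80, 80, 20).
    # attention_volumes: list of per-view attention grids V_v, each (80, 80, 20),
    # which broadcast against the K joint channels.
    weighted = [m * a for m, a in zip(raw_volumes, attention_volumes)]
    return np.mean(weighted, axis=0)  # aggregated feature volume F, Eq. (1)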
Finally, the aggregated feature volume " + }, + { + "bbox": [ + 130, + 224, + 480, + 282 + ], + "type": "inline_equation", + "content": "\\mathbf{F}" + }, + { + "bbox": [ + 130, + 224, + 480, + 282 + ], + "type": "text", + "content": " is input into the 3D convolutional network to determine the per-person likelihood in the 3D discretized feature volume." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 284, + 482, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 284, + 482, + 463 + ], + "spans": [ + { + "bbox": [ + 130, + 284, + 482, + 463 + ], + "type": "text", + "content": "Space attention with per-person pose regression. In the final step, the completed 3D human pose corresponding to the proposal is predicted, as illustrated in Fig. 3 (c). For a fair evaluation of the effect of the space attention module, [35] is adopted to build an individual fine-grained feature volume centered at each predicted proposal. The size of the fine-grained feature volume is set to be " + }, + { + "bbox": [ + 130, + 284, + 482, + 463 + ], + "type": "inline_equation", + "content": "2000\\mathrm{mm} \\times 2000\\mathrm{mm} \\times 2000\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 284, + 482, + 463 + ], + "type": "text", + "content": ", and the feature volume is divided into a discrete grid with " + }, + { + "bbox": [ + 130, + 284, + 482, + 463 + ], + "type": "inline_equation", + "content": "X' \\times Y' \\times Z'" + }, + { + "bbox": [ + 130, + 284, + 482, + 463 + ], + "type": "text", + "content": " voxel where " + }, + { + "bbox": [ + 130, + 284, + 482, + 463 + ], + "type": "inline_equation", + "content": "X', Y', Z'" + }, + { + "bbox": [ + 130, + 284, + 482, + 463 + ], + "type": "text", + "content": " equal to 64. Each feature volume under a particular perspective will only have one space attention score to indicate its importance. In this work, we sample the attention score for each proposal by analyzing the spatial relationship between the proposal and the feature volume. Then, we employ a nearest neighbor sampling method to precisely calculate the attention scores for each proposal. The aggregated fine-grained feature volume is computed based on the descriptions from the previous stage. Finally, the 3D heatmap is estimated and the complete 3D human poses of the persons in the space are regressed." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 479, + 318, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 479, + 318, + 491 + ], + "spans": [ + { + "bbox": [ + 132, + 479, + 318, + 491 + ], + "type": "text", + "content": "3.3 3D space subdivision algorithm" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 498, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 498, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 498, + 482, + 666 + ], + "type": "text", + "content": "As mentioned in Sec. 3.2, the 3D space subdivision algorithm is crucial to the implementation of our space attention module. Computational challenges arise when directly predicting the significance of each voxel in the feature volume. Inspired by Lai et al. [22] utilizing the cell subdivision search algorithm to reduce the computational complexity associated with searching through a large amount of data points, we employ a 3D space subdivision algorithm to divide the feature volume into distinct regions. 
Specifically, the voxels within each region are considered as a group, and attention scores are assigned to these groups to signify the importance of each region. Through the backbone network, the weight of each region is predicted, representing the importance of corresponding areas within the same viewpoint in the feature volume. If voxels within a specific region exhibit higher confidence levels, this indicates their relative importance. Conversely, lower confidence levels in voxels, caused by obstructions, occlusion, or other factors, suggest that they are less significant within that region. As" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DSA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 239, + 123, + 362, + 238 + ], + "blocks": [ + { + "bbox": [ + 239, + 123, + 362, + 238 + ], + "lines": [ + { + "bbox": [ + 239, + 123, + 362, + 238 + ], + "spans": [ + { + "bbox": [ + 239, + 123, + 362, + 238 + ], + "type": "image", + "image_path": "65486a35377e7c2042bd4f723ce23d11904ae7df6aecfdfc5ba7f4d58a6c9b93.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 254, + 482, + 288 + ], + "lines": [ + { + "bbox": [ + 131, + 254, + 482, + 288 + ], + "spans": [ + { + "bbox": [ + 131, + 254, + 482, + 288 + ], + "type": "text", + "content": "Fig. 4: Subdivision of the voxel within the feature volume. We utilize a 3D space subdivision algorithm to partition the feature volume into separate regions, with the voxels in each region being treated as a group." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "spans": [ + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "text", + "content": "Fig. 4 depicts, the feature volume in 3D space " + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "inline_equation", + "content": "\\pmb{G}" + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "text", + "content": " is divided into several cells along the " + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "text", + "content": " axes. 
Assume " + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "inline_equation", + "content": "l_{i}" + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "inline_equation", + "content": "i \\in \\{x,y,z\\}" + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "text", + "content": " represents the length, width, and height of the feature volume, while " + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "inline_equation", + "content": "\\delta_{i}" + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "inline_equation", + "content": "i \\in \\{x,y,z\\}" + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "text", + "content": " represents the cell length along a particular axis. The relationship between " + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "inline_equation", + "content": "l_{i}" + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "inline_equation", + "content": "\\delta_{i}" + }, + { + "bbox": [ + 131, + 311, + 482, + 370 + ], + "type": "text", + "content": " can be expressed as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 235, + 377, + 481, + 404 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 377, + 481, + 404 + ], + "spans": [ + { + "bbox": [ + 235, + 377, + 481, + 404 + ], + "type": "interline_equation", + "content": "\\delta_ {i} = \\operatorname {i n t} \\left(\\frac {l _ {i}}{n _ {i}}\\right) + 1 \\quad i \\in \\{x, y, z \\} \\tag {2}", + "image_path": "c9cf36551405d09d205a7bb9eff93a1676e47b71f26723810ce9d89cb4fbba36.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 131, + 412, + 482, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 412, + 482, + 437 + ], + "spans": [ + { + "bbox": [ + 131, + 412, + 482, + 437 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 412, + 482, + 437 + ], + "type": "inline_equation", + "content": "n_i" + }, + { + "bbox": [ + 131, + 412, + 482, + 437 + ], + "type": "text", + "content": " represents the number of regions divided along the " + }, + { + "bbox": [ + 131, + 412, + 482, + 437 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 131, + 412, + 482, + 437 + ], + "type": "text", + "content": "-axis. 
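As a quick arithmetic check of Eq. (2): with \(l_x = 80\) and \(n_x = 3\), \(\delta_x = \operatorname{int}(80/3) + 1 = 27\), giving cell boundaries [0, 27, 54, 80] along the x-axis; with \(l_z = 20\) and \(n_z = 3\), \(\delta_z = \operatorname{int}(20/3) + 1 = 7\), giving [0, 7, 14, 20]. These match the interval lists used in Algorithm 1 below.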
The total number of regions " + }, + { + "bbox": [ + 131, + 412, + 482, + 437 + ], + "type": "inline_equation", + "content": "n_{total}" + }, + { + "bbox": [ + 131, + 412, + 482, + 437 + ], + "type": "text", + "content": " in the 3D space is given by the product of" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 259, + 446, + 481, + 460 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 259, + 446, + 481, + 460 + ], + "spans": [ + { + "bbox": [ + 259, + 446, + 481, + 460 + ], + "type": "interline_equation", + "content": "n _ {\\text {T o t a l}} = n _ {x} \\times n _ {y} \\times n _ {z} \\tag {3}", + "image_path": "285e8d7fd4e91c44c73b234f5b0852c86be15823b236f122ce6a312916ef888d.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 131, + 469, + 482, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 469, + 482, + 495 + ], + "spans": [ + { + "bbox": [ + 131, + 469, + 482, + 495 + ], + "type": "text", + "content": "Let the position vector of a voxel be " + }, + { + "bbox": [ + 131, + 469, + 482, + 495 + ], + "type": "inline_equation", + "content": "\\mathbf{V} = [v_x, v_y, v_z]^\\top" + }, + { + "bbox": [ + 131, + 469, + 482, + 495 + ], + "type": "text", + "content": ". Then, the region that " + }, + { + "bbox": [ + 131, + 469, + 482, + 495 + ], + "type": "inline_equation", + "content": "\\mathbf{V}" + }, + { + "bbox": [ + 131, + 469, + 482, + 495 + ], + "type": "text", + "content": " resides in can be computed using the following equation:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 212, + 502, + 481, + 529 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 502, + 481, + 529 + ], + "spans": [ + { + "bbox": [ + 212, + 502, + 481, + 529 + ], + "type": "interline_equation", + "content": "i _ {j} = \\operatorname {f l o o r} \\left(\\frac {\\left(v _ {j} - j _ {\\min }\\right)}{\\delta_ {j}}\\right) + 1 \\quad j \\in \\{x, y, z \\} \\tag {4}", + "image_path": "c2978119093576a95799517215c72a246b7648301dcf3b5892b72be53443c680.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 537, + 482, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 537, + 482, + 597 + ], + "spans": [ + { + "bbox": [ + 131, + 537, + 482, + 597 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 537, + 482, + 597 + ], + "type": "inline_equation", + "content": "i_j" + }, + { + "bbox": [ + 131, + 537, + 482, + 597 + ], + "type": "text", + "content": " represents the indices of the voxel in " + }, + { + "bbox": [ + 131, + 537, + 482, + 597 + ], + "type": "inline_equation", + "content": "x, y, z" + }, + { + "bbox": [ + 131, + 537, + 482, + 597 + ], + "type": "text", + "content": " directions, floor() is used to round down to integer representation, and " + }, + { + "bbox": [ + 131, + 537, + 482, + 597 + ], + "type": "inline_equation", + "content": "j_{\\mathrm{min}}" + }, + { + "bbox": [ + 131, + 537, + 482, + 597 + ], + "type": "text", + "content": " represents the minimum coordinates in " + }, + { + "bbox": [ + 131, + 537, + 482, + 597 + ], + "type": "inline_equation", + "content": "x, y" + }, + { + "bbox": [ + 131, + 537, + 482, + 597 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 537, + 482, + 597 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 131, + 537, + 482, + 597 + ], + "type": "text", + "content": " directions of the voxel within the feature volume. 
Finally, the region id of the voxel (Voxel_id) within the feature volume can be calculated by the following formula:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 220, + 607, + 481, + 620 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 607, + 481, + 620 + ], + "spans": [ + { + "bbox": [ + 220, + 607, + 481, + 620 + ], + "type": "interline_equation", + "content": "V o x e l _ {i d} = i _ {z} \\times \\left(n _ {x} \\times n _ {y}\\right) + i _ {y} \\times n _ {x} + i _ {x} \\tag {5}", + "image_path": "64d6bef53d57b4af4fc49c5a777884a540154fb0ba41d11f604f5877b28e82de.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 131, + 629, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 629, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 131, + 629, + 482, + 666 + ], + "type": "text", + "content": "The ID of each voxel can be calculated according to the formulas, however, in practical applications, the total number of voxels in the feature volume is substantial, which could lead to excessive computation times. To tackle this" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "type": "text", + "content": "Chen et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 178, + 119, + 440, + 281 + ], + "blocks": [ + { + "bbox": [ + 178, + 119, + 440, + 281 + ], + "lines": [ + { + "bbox": [ + 178, + 119, + 440, + 281 + ], + "spans": [ + { + "bbox": [ + 178, + 119, + 440, + 281 + ], + "type": "image", + "image_path": "8492bfdd2fa95289084140bf90f1958609de002fbe8bd8a5253a09b1867a01a3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 192, + 294, + 420, + 304 + ], + "lines": [ + { + "bbox": [ + 192, + 294, + 420, + 304 + ], + "spans": [ + { + "bbox": [ + 192, + 294, + 420, + 304 + ], + "type": "text", + "content": "Fig. 5: Detailed architecture of space attention module." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 330, + 480, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 330, + 480, + 378 + ], + "spans": [ + { + "bbox": [ + 130, + 330, + 480, + 378 + ], + "type": "text", + "content": "challenge, we have optimized the weight assignment process within the space attention module, adopting the following Python code (Algorithm 1). Compared Eq. (2) to Eq. (5), our approach is better adapted to practical applications, achieving the same objectives and results more efficiently." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "code", + "bbox": [ + 132, + 414, + 405, + 526 + ], + "blocks": [ + { + "bbox": [ + 133, + 400, + 327, + 412 + ], + "lines": [ + { + "bbox": [ + 133, + 400, + 327, + 412 + ], + "spans": [ + { + "bbox": [ + 133, + 400, + 327, + 412 + ], + "type": "text", + "content": "Algorithm 1 Weight assignment algorithm" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 132, + 414, + 405, + 526 + ], + "lines": [ + { + "bbox": [ + 132, + 414, + 405, + 526 + ], + "spans": [ + { + "bbox": [ + 132, + 414, + 405, + 526 + ], + "type": "text", + "content": "Suppose we have 3 intervals along x, y, and z axis " + }, + { + "bbox": [ + 132, + 414, + 405, + 526 + ], + "type": "inline_equation", + "content": "x,y,z = [0,27,54,80],[0,27,54,80],[0,7,14,20]" + }, + { + "bbox": [ + 132, + 414, + 405, + 526 + ], + "type": "text", + "content": " # Assign space attention value to the tensor for one view. \n subdivision num " + }, + { + "bbox": [ + 132, + 414, + 405, + 526 + ], + "type": "inline_equation", + "content": "= 0" + }, + { + "bbox": [ + 132, + 414, + 405, + 526 + ], + "type": "text", + "content": " \nfor i in range (3): for j in range (3): for k in range (3): space attention[x [i]: x [i+1],y [j]: y [j+1],z [k]: z [k+1] " + }, + { + "bbox": [ + 132, + 414, + 405, + 526 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 132, + 414, + 405, + 526 + ], + "type": "text", + "content": " attention value[subdivision num] subdivision num " + }, + { + "bbox": [ + 132, + 414, + 405, + 526 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 132, + 414, + 405, + 526 + ], + "type": "text", + "content": " subdivision num+1" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 132, + 571, + 373, + 583 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 571, + 373, + 583 + ], + "spans": [ + { + "bbox": [ + 132, + 571, + 373, + 583 + ], + "type": "text", + "content": "3.4 Implementation of space attention module" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 594, + 480, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 480, + 665 + ], + "type": "text", + "content": "In implementations, only the following adjustments were made: (1) A simple branch was derived from the backbone network [16] to predict the space attention scores. (2) We executed an element-wise multiplication of the raw feature volume with the space attention scores calculated by Algorithm 1. (3) The attention scores of the proposal are computed by analyzing the positional relationship between the proposal and the feature volume." 
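The listing of Algorithm 1 above is garbled by extraction; the following is a runnable reading of it, a sketch that keeps the paper's interval boundaries and loop order while repairing the assignment syntax.

import numpy as np

def assign_space_attention(attention_value,
                           x=(0, 27, 54, 80), y=(0, 27, 54, 80), z=(0, 7, 14, 20)):
    # attention_value: flat array of predicted region scores for one view,
    # of length n_x * n_y * n_z (here 3 * 3 * 3 = 27).
    # Broadcasts each region's score over its block of voxels in the volume.
    space_attention = np.empty((x[-1], y[-1], z[-1]), dtype=np.float32)
    subdivision_num = 0
    for i in range(len(x) - 1):
        for j in range(len(y) - 1):
            for k in range(len(z) - 1):
                space_attention[x[i]:x[i+1], y[j]:y[j+1], z[k]:z[k+1]] = \
                    attention_value[subdivision_num]
                subdivision_num += 1
    return space_attention

For the 10 x 10 x 3 configuration adopted later, the interval lists would simply carry 10, 10, and 3 cells per axis instead of 3 each.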
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DSA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 176 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 176 + ], + "type": "text", + "content": "The space attention module can be easily applied to existing multi-person voxel-based human pose methods [7, 35, 38, 41]. However, since some of these methods are not open-sourced, it prevents us from performing validation. Consequently, we chose to validate our method using the two open-sourced voxel-based methods [35, 38]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 176, + 480, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 176, + 480, + 236 + ], + "spans": [ + { + "bbox": [ + 130, + 176, + 480, + 236 + ], + "type": "text", + "content": "It is important to emphasize that for a fair evaluation of the impact of the space attention module on existing voxel-based methods [35,38], the network architecture [29] used for locating the person proposal and regressing the 3D pose remained unaltered. For the model's loss function and hyperparameter configuration, the original design proposed by [35,38] has remained." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 236, + 481, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 236, + 481, + 344 + ], + "spans": [ + { + "bbox": [ + 130, + 236, + 481, + 344 + ], + "type": "text", + "content": "The architecture of the space attention layer is presented in Fig. 5. It is a straightforward and lightweight design, which uses a simple convolutional block followed by global average pooling and the sigmoid activation function to estimate the space attention scores of the corresponding image. The purpose of the global average pooling is to replace the traditional fully connected layers, thereby reducing the number of parameters. The output dimensions of the space attention layer are equal to the number of regions in the feature volume. The space attention scores " + }, + { + "bbox": [ + 130, + 236, + 481, + 344 + ], + "type": "inline_equation", + "content": "S \\in \\mathbb{R}^n" + }, + { + "bbox": [ + 130, + 236, + 481, + 344 + ], + "type": "text", + "content": " represent the " + }, + { + "bbox": [ + 130, + 236, + 481, + 344 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 130, + 236, + 481, + 344 + ], + "type": "text", + "content": " space attention values, indicating that the feature volume is divided into " + }, + { + "bbox": [ + 130, + 236, + 481, + 344 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 130, + 236, + 481, + 344 + ], + "type": "text", + "content": " regions." 
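A minimal PyTorch sketch of the space attention layer as described: a simple convolutional block, global average pooling in place of fully connected layers, and a sigmoid over the n region scores. The channel widths and kernel sizes here are assumptions, not the paper's exact configuration.

import torch
import torch.nn as nn

class SpaceAttentionLayer(nn.Module):
    def __init__(self, in_channels: int, n_regions: int):
        super().__init__()
        # Simple convolutional block; the 256-channel width is an assumption.
        self.block = nn.Sequential(
            nn.Conv2d(in_channels, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, n_regions, kernel_size=1),
        )
        # Global average pooling replaces fully connected layers,
        # keeping the parameter count low.
        self.pool = nn.AdaptiveAvgPool2d(1)

    def forward(self, feats: torch.Tensor) -> torch.Tensor:
        # feats: (B, C, H, W) backbone features for one view.
        scores = self.pool(self.block(feats)).flatten(1)  # (B, n_regions)
        return torch.sigmoid(scores)                      # attention scores in (0, 1)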
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 360, + 230, + 374 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 360, + 230, + 374 + ], + "spans": [ + { + "bbox": [ + 132, + 360, + 230, + 374 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 384, + 272, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 384, + 272, + 396 + ], + "spans": [ + { + "bbox": [ + 132, + 384, + 272, + 396 + ], + "type": "text", + "content": "4.1 Implementation detail" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 402, + 480, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 402, + 480, + 498 + ], + "spans": [ + { + "bbox": [ + 130, + 402, + 480, + 498 + ], + "type": "text", + "content": "Training and evaluation datasets. CMU Panoptic [21] is a 3D dataset with multi-view images. To evaluate and analyze our approach, we conducted extensive experiments on the Panoptic dataset. Following VoxelPose [35], the same data sequences were used for both training and evaluating our model. Our experiments were conducted using five HD cameras with camera IDs 3, 6, 12, 13, 23. Shelf and Campus [2] are two datasets that are commonly used in multi-view and multi-person research. We evaluated our method using the same data setup as in [35]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 498, + 480, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 498, + 480, + 582 + ], + "spans": [ + { + "bbox": [ + 130, + 498, + 480, + 582 + ], + "type": "text", + "content": "Evaluation metric. For the Panoptic datasets [21], we adopt the Average Precision " + }, + { + "bbox": [ + 130, + 498, + 480, + 582 + ], + "type": "inline_equation", + "content": "(AP^K)" + }, + { + "bbox": [ + 130, + 498, + 480, + 582 + ], + "type": "text", + "content": " and Mean Per Joint Position Error (MPJPE) as metrics that demonstrate the robustness and accuracy of multi-person 3D pose estimation. To assess the influence of the space attention module on model size and computational complexity, we consider key metrics such as MACs and model parameters. For both the Campus and Shelf datasets, we present the results in terms of the Percentage of Correct Parts (PCP)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 582, + 480, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 582, + 480, + 641 + ], + "spans": [ + { + "bbox": [ + 130, + 582, + 480, + 641 + ], + "type": "text", + "content": "Training details. For the Panoptic datasets, we use an off-the-shelf pose estimation model constructed based on ResNet-50 [16] to extract features from multi-view images. The difference from VoxelPose [35] is that since our backbone network needs to predict the space attention scores, the parameters of the model are updated throughout the training iteration." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 641, + 480, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 641, + 480, + 666 + ], + "spans": [ + { + "bbox": [ + 131, + 641, + 480, + 666 + ], + "type": "text", + "content": "Due to the incomplete data annotation in the Campus and Shelf datasets [2], Tu et al. [35] use synthetic 3D poses to train the network. 
To implement the" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "type": "text", + "content": "Chen et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 189 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 189 + ], + "type": "text", + "content": "space attention module, we use the synthetic heatmap as the input feature to predict the space attention scores. In summary, the space attention module has two modes: the first predicts the space attention scores from the ground truth multi-view image, referred to as Image-based input; the second predicts the space attention scores from the synthetic heatmaps, referred to as Heatmap-based input." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 131, + 205, + 334, + 217 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 205, + 334, + 217 + ], + "spans": [ + { + "bbox": [ + 131, + 205, + 334, + 217 + ], + "type": "text", + "content": "4.2 Comparisons to Existing Methods" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "spans": [ + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "text", + "content": "Panoptic. We first evaluate our model on the Panoptic dataset [21] and compare it with the state-of-the-art model. As illustrated in Tab. 1, by incorporating the space attention module " + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "inline_equation", + "content": "(10 \\times 10 \\times 3" + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "text", + "content": " configuration) with two voxel-based methods, VoxelPose [35] and Faster VoxelPose [38], our model achieves " + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "inline_equation", + "content": "94.2\\%" + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "inline_equation", + "content": "94.22\\%" + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "text", + "content": " on the most strict evaluation metric " + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "inline_equation", + "content": "AP_{25}" + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "text", + "content": ", outperforming the transformer model MvP [40]. 
Our proposed method shows inferior performance in terms of " + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "inline_equation", + "content": "AP@50,100,150" + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "text", + "content": " when compared to VoxelPose, and this " + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "inline_equation", + "content": "0.5\\%" + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "text", + "content": " performance gap is generally attributed to model variation. It particularly emphasizes that in terms of the " + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "inline_equation", + "content": "AP_{25}" + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "text", + "content": " metric, our method has significantly improved, outperforming VoxelPose by " + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "inline_equation", + "content": "12.69\\%" + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "text", + "content": " and Faster VoxelPose by " + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "inline_equation", + "content": "10.56\\%" + }, + { + "bbox": [ + 130, + 225, + 483, + 381 + ], + "type": "text", + "content": ". Remarkably, both methods achieved much lower MPJPE with values of 13.98 and 14.55, outperforming the TEMPO [7] and achieving the SOTA records. This demonstrates the effectiveness of our space attention module." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 153, + 434, + 455, + 545 + ], + "blocks": [ + { + "bbox": [ + 161, + 413, + 452, + 425 + ], + "lines": [ + { + "bbox": [ + 161, + 413, + 452, + 425 + ], + "spans": [ + { + "bbox": [ + 161, + 413, + 452, + 425 + ], + "type": "text", + "content": "Table 1: Comparison with existing methods on the Panoptic datasets." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 153, + 434, + 455, + 545 + ], + "lines": [ + { + "bbox": [ + 153, + 434, + 455, + 545 + ], + "spans": [ + { + "bbox": [ + 153, + 434, + 455, + 545 + ], + "type": "table", + "html": "
<table>
<tr><th>Method</th><th>AP25</th><th>AP50</th><th>AP100</th><th>AP150</th><th>MPJPE</th></tr>
<tr><td>VoxelPose [35]</td><td>83.59</td><td>98.33</td><td>99.76</td><td>99.91</td><td>17.68mm</td></tr>
<tr><td>Faster VoxelPose [38]</td><td>85.22</td><td>98.08</td><td>99.32</td><td>99.48</td><td>18.26mm</td></tr>
<tr><td>PlaneSweep Pose [25]</td><td>92.12</td><td>98.96</td><td>99.81</td><td>99.84</td><td>16.75mm</td></tr>
<tr><td>RPGN [37]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>15.84mm</td></tr>
<tr><td>MvP [40]</td><td>92.28</td><td>96.6</td><td>97.45</td><td>97.69</td><td>15.76mm</td></tr>
<tr><td>TEMPO [7]</td><td>89.01</td><td>99.08</td><td>99.76</td><td>99.93</td><td>14.68mm</td></tr>
<tr><td>VoxelPose + 3DSA</td><td>94.2</td><td>98.49</td><td>99.21</td><td>99.31</td><td>13.98mm</td></tr>
<tr><td>Faster VoxelPose + 3DSA</td><td>94.22</td><td>98.65</td><td>99.49</td><td>99.75</td><td>14.55mm</td></tr>
</table>
", + "image_path": "bed8890e7df110761af63834d924d97f4479666c579fb0093052d4887ab1fdd6.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 569, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 569, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 569, + 482, + 666 + ], + "type": "text", + "content": "Campus and Shelf. The quantitative evaluation results on Shelf and Campus datasets [2] are presented in Tab. 2. Our proposed method (VoxelPose [35] with space attention, " + }, + { + "bbox": [ + 130, + 569, + 482, + 666 + ], + "type": "inline_equation", + "content": "10 \\times 10 \\times 3" + }, + { + "bbox": [ + 130, + 569, + 482, + 666 + ], + "type": "text", + "content": " configuration) remains competitive on both datasets. The performance of space attention is not as outstanding on Panoptic datasets [21], and we believe this is related to the Heatmap-based input. Since the heatmap lacks image information, the model is hard to determine the importance of different regions in 3D space from the heatmap. We will detail our research on this issue in the subsequent ablation study." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DSA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 134, + 136, + 478, + 254 + ], + "blocks": [ + { + "bbox": [ + 180, + 114, + 432, + 126 + ], + "lines": [ + { + "bbox": [ + 180, + 114, + 432, + 126 + ], + "spans": [ + { + "bbox": [ + 180, + 114, + 432, + 126 + ], + "type": "text", + "content": "Table 2: Quantitative results on Shelf and Campus datasets." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 134, + 136, + 478, + 254 + ], + "lines": [ + { + "bbox": [ + 134, + 136, + 478, + 254 + ], + "spans": [ + { + "bbox": [ + 134, + 136, + 478, + 254 + ], + "type": "table", + "html": "
<table>
<tr><th rowspan="2">Method</th><th colspan="4">Shelf</th><th colspan="4">Campus</th></tr>
<tr><th>Actor1</th><th>Actor2</th><th>Actor3</th><th>Average</th><th>Actor1</th><th>Actor2</th><th>Actor3</th><th>Average</th></tr>
<tr><td>Ershadi et al. [12]</td><td>93.3</td><td>75.9</td><td>94.8</td><td>88</td><td>94.2</td><td>92.9</td><td>84.6</td><td>90.6</td></tr>
<tr><td>Dong et al. [10]</td><td>98.8</td><td>94.1</td><td>97.8</td><td>96.9</td><td>97.6</td><td>93.3</td><td>98</td><td>96.3</td></tr>
<tr><td>MvP [40]</td><td>99.3</td><td>94.1</td><td>97.8</td><td>97.4</td><td>98.2</td><td>94.1</td><td>97.4</td><td>96.6</td></tr>
<tr><td>TEMPO [7]</td><td>99.3</td><td>95.1</td><td>97.8</td><td>97.4</td><td>97.7</td><td>95.5</td><td>97.9</td><td>97.3</td></tr>
<tr><td>Faster VoxelPose [38]</td><td>99.4</td><td>96</td><td>97.5</td><td>97.6</td><td>96.5</td><td>94.1</td><td>97.9</td><td>96.2</td></tr>
<tr><td>VoxelPose [35]</td><td>99.3</td><td>94.1</td><td>97.6</td><td>97</td><td>97.6</td><td>93.8</td><td>98.8</td><td>96.7</td></tr>
<tr><td>Ours</td><td>99.4</td><td>95.4</td><td>97.6</td><td>97.5</td><td>98</td><td>93.4</td><td>98.6</td><td>96.7</td></tr>
</table>
", + "image_path": "67f322dcc48367f815059c40111c684184b7a19bc2284eb9c76fb1b477c8b6df.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 164, + 284, + 447, + 466 + ], + "blocks": [ + { + "bbox": [ + 153, + 263, + 460, + 275 + ], + "lines": [ + { + "bbox": [ + 153, + 263, + 460, + 275 + ], + "spans": [ + { + "bbox": [ + 153, + 263, + 460, + 275 + ], + "type": "text", + "content": "Table 3: Space subdivision and efficiency analysis on the Panoptic dataset" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 164, + 284, + 447, + 466 + ], + "lines": [ + { + "bbox": [ + 164, + 284, + 447, + 466 + ], + "spans": [ + { + "bbox": [ + 164, + 284, + 447, + 466 + ], + "type": "table", + "html": "
<table>
<tr><th colspan="6">VoxelPose incorporated with space attention</th></tr>
<tr><th>Space subdivision</th><th>AP25</th><th>AP100</th><th>MPJPE</th><th>MACs (G)</th><th>Parameters (M)</th></tr>
<tr><td>Tu et al. [35]</td><td>83.59</td><td>99.76</td><td>17.68</td><td>178.88</td><td>40.62</td></tr>
<tr><td>3 × 3 × 3</td><td>92.73</td><td>99.58</td><td>14.78</td><td>179.09</td><td>40.64</td></tr>
<tr><td>7 × 7 × 3</td><td>93.71</td><td>99.33</td><td>14.41</td><td>180.04</td><td>40.77</td></tr>
<tr><td>10 × 10 × 3</td><td>94.2</td><td>99.21</td><td>13.98</td><td>181.24</td><td>40.92</td></tr>
<tr><td>15 × 15 × 6</td><td>94.33</td><td>99.1</td><td>13.97</td><td>193.24</td><td>42.47</td></tr>
<tr><td>20 × 20 × 9</td><td>94.44</td><td>99.44</td><td>13.94</td><td>221.58</td><td>46.15</td></tr>
<tr><th colspan="6">Faster VoxelPose incorporated with space attention</th></tr>
<tr><th>Space subdivision</th><th>AP25</th><th>AP100</th><th>MPJPE</th><th>MACs (G)</th><th>Parameters (M)</th></tr>
<tr><td>Ye et al. [38]</td><td>85.22</td><td>99.32</td><td>18.26</td><td>106.87</td><td>36.37</td></tr>
<tr><td>3 × 3 × 3</td><td>92.57</td><td>99.61</td><td>15.54</td><td>107.08</td><td>36.39</td></tr>
<tr><td>7 × 7 × 3</td><td>93.75</td><td>99.54</td><td>14.88</td><td>108.03</td><td>36.52</td></tr>
<tr><td>10 × 10 × 3</td><td>94.22</td><td>99.49</td><td>14.55</td><td>109.23</td><td>36.67</td></tr>
</table>
", + "image_path": "74d00a6df15ec6e72e81e282f54af9c27c0684495e4d8d86d1888a932f11c536.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 487, + 244, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 487, + 244, + 498 + ], + "spans": [ + { + "bbox": [ + 132, + 487, + 244, + 498 + ], + "type": "text", + "content": "4.3 Ablation studies" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 509, + 481, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 509, + 481, + 533 + ], + "spans": [ + { + "bbox": [ + 130, + 509, + 481, + 533 + ], + "type": "text", + "content": "In this section, we conduct ablative experiments to analyze a variety of factors within our approach." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 533, + 482, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 628 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 628 + ], + "type": "text", + "content": "Individual contributions of the space attention module and the 3D space subdivision algorithm. By comparing the results in Tab. 3, we can see that the finer the subdivision of the 3D space, the model's accuracy and precision improve correspondingly. However, the model's performance tends to converge after subdividing into " + }, + { + "bbox": [ + 130, + 533, + 482, + 628 + ], + "type": "inline_equation", + "content": "10 \\times 10 \\times 3" + }, + { + "bbox": [ + 130, + 533, + 482, + 628 + ], + "type": "text", + "content": " regions. The result demonstrates the critical importance of the space subdivision algorithm within the space attention module. The direct prediction of all voxels does not result in significant improvements in performance." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "content": "Efficiency analysis. In this work, we focus on comparing our method with existing voxel-based methods [35, 38]. Tab. 3 demonstrates that incorporating the space attention module into the voxel-based approach resulted in a slight" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "type": "text", + "content": "Chen et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "text", + "content": "increase in the model's complexity. 
Regarding the model we eventually selected (VoxelPose with " + }, + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "inline_equation", + "content": "10 \\times 10 \\times 3" + }, + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "text", + "content": " space attention module), MACs increased by " + }, + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "inline_equation", + "content": "1.32\\%" + }, + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "text", + "content": " and parameters by " + }, + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "inline_equation", + "content": "0.74\\%" + }, + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "text", + "content": " when compared to the VoxelPose method. As previously mentioned, excessively increasing the number of spatial subdivisions does not enhance performance but significantly increases the model's complexity. For instance, subdividing the space into " + }, + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "inline_equation", + "content": "20 \\times 20 \\times 9" + }, + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "text", + "content": " regions resulted in a " + }, + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "inline_equation", + "content": "23.8\\%" + }, + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "text", + "content": " increase in the model's MACs and a " + }, + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "inline_equation", + "content": "13.6\\%" + }, + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "text", + "content": " increase in parameters. This further demonstrates the importance of the space subdivision algorithm in improving the efficiency of the space attention module. To strike a balance between performance and efficiency, we adopt the " + }, + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "inline_equation", + "content": "10 \\times 10 \\times 3" + }, + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "text", + "content": " space attention configuration on VoxelPose [35] to study the impact of the individual factors." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 250, + 482, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 250, + 482, + 322 + ], + "spans": [ + { + "bbox": [ + 130, + 250, + 482, + 322 + ], + "type": "text", + "content": "Number of cameras. We compared our method with existing 3D Pose methods [7,35,38,40]. Tab. 4 shows that the feature volume representation is diminished with fewer camera views, leading to a drop in accuracy. The improvement in both " + }, + { + "bbox": [ + 130, + 250, + 482, + 322 + ], + "type": "inline_equation", + "content": "AP" + }, + { + "bbox": [ + 130, + 250, + 482, + 322 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 250, + 482, + 322 + ], + "type": "inline_equation", + "content": "MPJPE" + }, + { + "bbox": [ + 130, + 250, + 482, + 322 + ], + "type": "text", + "content": " metrics over other models, as the number of cameras increases, underscores the significance of multi-view images for enhancing the space attention module's performance." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 324, + 482, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 324, + 482, + 407 + ], + "spans": [ + { + "bbox": [ + 130, + 324, + 482, + 407 + ], + "type": "text", + "content": "Image-based input /Heatmap-based input. 
To further validate the impact of different inputs on the space attention module, we conducted experiments on the Panoptic dataset [2]. As shown in Tab. 5, although the space attention with Heatmap-based input shows an improvement compared to the baseline model [35], it is noticeably inferior to the space attention with Image-based input. We consider that this disparity occurs because heatmaps lack spatial and depth information in comparison to images." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 181, + 465, + 431, + 635 + ], + "blocks": [ + { + "bbox": [ + 178, + 443, + 435, + 454 + ], + "lines": [ + { + "bbox": [ + 178, + 443, + 435, + 454 + ], + "spans": [ + { + "bbox": [ + 178, + 443, + 435, + 454 + ], + "type": "text", + "content": "Table 4: Number of cameras analysis on the Panoptic dataset" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 181, + 465, + 431, + 635 + ], + "lines": [ + { + "bbox": [ + 181, + 465, + 431, + 635 + ], + "spans": [ + { + "bbox": [ + 181, + 465, + 431, + 635 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Cam</td><td>\(AP_{25}\)</td><td>\(AP_{50}\)</td><td>\(AP_{100}\)</td><td>\(AP_{150}\)</td><td>MPJPE</td></tr>
<tr><td>Faster VoxelPose [38]</td><td rowspan=4>4</td><td>73.95</td><td>97.02</td><td>99.21</td><td>99.35</td><td>21.12</td></tr>
<tr><td>MvP [40]</td><td>84.1</td><td>-</td><td>96.7</td><td>-</td><td>19.3</td></tr>
<tr><td>TEMPO [7]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>17.34</td></tr>
<tr><td>ours</td><td>88.4</td><td>98.1</td><td>99.59</td><td>99.7</td><td>16.78</td></tr>
<tr><td>VoxelPose [35]</td><td rowspan=5>3</td><td>58.94</td><td>93.88</td><td>98.45</td><td>99.32</td><td>24.29</td></tr>
<tr><td>Faster VoxelPose [38]</td><td>53.68</td><td>91.89</td><td>97.4</td><td>98.3</td><td>26.13</td></tr>
<tr><td>MvP [40]</td><td>71.8</td><td>-</td><td>95.1</td><td>-</td><td>21.1</td></tr>
<tr><td>TEMPO [7]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>19.22</td></tr>
<tr><td>ours</td><td>73.06</td><td>95.23</td><td>98.64</td><td>99.25</td><td>19.03</td></tr>
<tr><td>MvP [40]</td><td rowspan=3>2</td><td>37.7</td><td>-</td><td>93</td><td>-</td><td>34.8</td></tr>
<tr><td>TEMPO [7]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>32.13</td></tr>
<tr><td>ours</td><td>47.95</td><td>88.74</td><td>97.84</td><td>98.8</td><td>27.35</td></tr></table>
", + "image_path": "2b94fac917455989a8aa3d679aa6357923e6086abda6fb1d3c59b2693e010dcd.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DSA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 212, + 136, + 399, + 193 + ], + "blocks": [ + { + "bbox": [ + 180, + 114, + 432, + 125 + ], + "lines": [ + { + "bbox": [ + 180, + 114, + 432, + 125 + ], + "spans": [ + { + "bbox": [ + 180, + 114, + 432, + 125 + ], + "type": "text", + "content": "Table 5: Effect of different inputs on space attention module" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 212, + 136, + 399, + 193 + ], + "lines": [ + { + "bbox": [ + 212, + 136, + 399, + 193 + ], + "spans": [ + { + "bbox": [ + 212, + 136, + 399, + 193 + ], + "type": "table", + "html": "
<table><tr><td colspan=6>Image-based input / Heatmap-based input</td></tr>
<tr><td>Input</td><td>AP25</td><td>AP50</td><td>AP100</td><td>AP150</td><td>MPJPE</td></tr>
<tr><td>Image</td><td>94.2</td><td>98.49</td><td>99.21</td><td>99.31</td><td>13.98</td></tr>
<tr><td>Heatmap</td><td>86.97</td><td>98.3</td><td>99.29</td><td>99.38</td><td>17.21</td></tr></table>
", + "image_path": "6b5dda47d93cd4ac0d345aaa282b13917febf0b36f845e436c2f5620b38b9c0b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 217, + 322, + 228 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 217, + 322, + 228 + ], + "spans": [ + { + "bbox": [ + 132, + 217, + 322, + 228 + ], + "type": "text", + "content": "4.4 3D space attention visualization" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 239, + 485, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 239, + 485, + 335 + ], + "spans": [ + { + "bbox": [ + 130, + 239, + 485, + 335 + ], + "type": "text", + "content": "In Fig. 6, we provide the space attention visualization results on Panoptic datasets. Red regions indicate attention scores above 0.8, while blue for below 0.8. Observing the spatial distribution of attention in 3D space (1st row), most key attention areas are focused where people are present. In view 5, an obscured person is not visible from that angle, resulting in lower attention scores in that area. This result aligns with our hypothesis, confirming that the space attention mechanism discriminates the importance of different regions in the feature volume based on visibility. More visualization results are provided in the supplementary material." + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 134, + 372, + 480, + 482 + ], + "blocks": [ + { + "bbox": [ + 134, + 372, + 480, + 482 + ], + "lines": [ + { + "bbox": [ + 134, + 372, + 480, + 482 + ], + "spans": [ + { + "bbox": [ + 134, + 372, + 480, + 482 + ], + "type": "image", + "image_path": "743321a5f7b19114d7b2d7911667a821f6ebc7b7a187fd4d96786f2abe27f9ba.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 492, + 482, + 526 + ], + "lines": [ + { + "bbox": [ + 130, + 492, + 482, + 526 + ], + "spans": [ + { + "bbox": [ + 130, + 492, + 482, + 526 + ], + "type": "text", + "content": "Fig. 6: 3D space attention visualization. We marked areas with scores above 0.8 (red regions) in 3D space (1st row) and projected them onto the corresponding 2D image (2nd row)." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 554, + 220, + 566 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 554, + 220, + 566 + ], + "spans": [ + { + "bbox": [ + 132, + 554, + 220, + 566 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 581, + 482, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 581, + 482, + 654 + ], + "spans": [ + { + "bbox": [ + 130, + 581, + 482, + 654 + ], + "type": "text", + "content": "In this paper, we present the novel space attention module for the voxel-based multi-view 3D pose estimation method. We learn the space attention scores from the input image and utilize the 3D space subdivision algorithm to divide the feature volume, finally constructing the feature volumes with space attention. By integrating our space attention module into two existing voxel-based methods, both models achieve the state-of-the-art results on the panoptic benchmarks." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 212, + 101 + ], + "type": "text", + "content": "Chen et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 139, + 480, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 139, + 480, + 175 + ], + "spans": [ + { + "bbox": [ + 132, + 139, + 480, + 175 + ], + "type": "text", + "content": "This work is supported and National Science and Technology Council (NSTC), Taiwan R.O.C. projects with grants 112-2222-E-006-009-, 113-2218-E-035-001-, 113-2425-H-006-007- and NSTC 113-2627-M-006-005 -." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 193, + 197, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 193, + 197, + 205 + ], + "spans": [ + { + "bbox": [ + 133, + 193, + 197, + 205 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 217, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 138, + 217, + 480, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 217, + 480, + 239 + ], + "spans": [ + { + "bbox": [ + 138, + 217, + 480, + 239 + ], + "type": "text", + "content": "1. Amin, S., Andriluka, M., Rohrbach, M., Schiele, B.: Multi-view pictorial structures for 3d human pose estimation. In: BMvc. vol. 1. Bristol, UK (2013)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 239, + 480, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 239, + 480, + 272 + ], + "spans": [ + { + "bbox": [ + 138, + 239, + 480, + 272 + ], + "type": "text", + "content": "2. Belagiannis, V., Amin, S., Andriluka, M., Schiele, B., Navab, N., Ilic, S.: 3d pictorial structures for multiple human pose estimation. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1669-1676 (2014)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 272, + 480, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 272, + 480, + 304 + ], + "spans": [ + { + "bbox": [ + 138, + 272, + 480, + 304 + ], + "type": "text", + "content": "3. Belagiannis, V., Amin, S., Andriluka, M., Schiele, B., Navab, N., Ilic, S.: 3d pictorial structures revisited: Multiple human pose estimation. IEEE transactions on pattern analysis and machine intelligence 38(10), 1929-1942 (2015)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 304, + 480, + 359 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 304, + 480, + 359 + ], + "spans": [ + { + "bbox": [ + 138, + 304, + 480, + 359 + ], + "type": "text", + "content": "4. 
Bogo, F., Kanazawa, A., Lassner, C., Gehler, P., Romero, J., Black, M.J.: Keep it smpl: Automatic estimation of 3d human pose and shape from a single image. In: Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V 14. pp. 561-578. Springer (2016)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 360, + 480, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 360, + 480, + 392 + ], + "spans": [ + { + "bbox": [ + 138, + 360, + 480, + 392 + ], + "type": "text", + "content": "5. Bridgeman, L., Volino, M., Guillemaut, J.Y., Hilton, A.: Multi-person 3d pose estimation and tracking in sports. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops. pp. 0-0 (2019)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 392, + 480, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 392, + 480, + 414 + ], + "spans": [ + { + "bbox": [ + 138, + 392, + 480, + 414 + ], + "type": "text", + "content": "6. Chen, Y., Gu, R., Huang, O., Jia, G.: Vtp: volumetric transformer for multi-view multi-person 3d pose estimation. Applied Intelligence 53(22), 26568-26579 (2023)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 415, + 480, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 415, + 480, + 456 + ], + "spans": [ + { + "bbox": [ + 138, + 415, + 480, + 456 + ], + "type": "text", + "content": "7. Choudhury, R., Kitani, K.M., Jeni, L.A.: Tempo: Efficient multi-view pose estimation, tracking, and forecasting. In: 2023 IEEE/CVF International Conference on Computer Vision (ICCV). pp. 14704-14714 (2023). https://doi.org/10.1109/ICCV51070.2023.01355" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 458, + 480, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 458, + 480, + 491 + ], + "spans": [ + { + "bbox": [ + 138, + 458, + 480, + 491 + ], + "type": "text", + "content": "8. Dabral, R., Mundhada, A., Kusupati, U., Afaque, S., Sharma, A., Jain, A.: Learning 3d human pose from structure and motion. In: Proceedings of the European conference on computer vision (ECCV). pp. 668-683 (2018)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 491, + 480, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 491, + 480, + 533 + ], + "spans": [ + { + "bbox": [ + 138, + 491, + 480, + 533 + ], + "type": "text", + "content": "9. Dong, J., Fang, Q., Jiang, W., Yang, Y., Huang, Q., Bao, H., Zhou, X.: Fast and robust multi-person 3d pose estimation and tracking from multiple views. IEEE Transactions on Pattern Analysis and Machine Intelligence 44(10), 6981-6992 (2021)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 534, + 480, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 534, + 480, + 567 + ], + "spans": [ + { + "bbox": [ + 138, + 534, + 480, + 567 + ], + "type": "text", + "content": "0. Dong, J., Jiang, W., Huang, Q., Bao, H., Zhou, X.: Fast and robust multi-person 3d pose estimation from multiple views. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 
7792-7801 (2019)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 567, + 480, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 567, + 480, + 600 + ], + "spans": [ + { + "bbox": [ + 138, + 567, + 480, + 600 + ], + "type": "text", + "content": "1. Dong, Z., Song, J., Chen, X., Guo, C., Hilliges, O.: Shape-aware multi-person pose estimation from multi-view images. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 11158-11168 (2021)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 601, + 480, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 601, + 480, + 632 + ], + "spans": [ + { + "bbox": [ + 138, + 601, + 480, + 632 + ], + "type": "text", + "content": "2. Ershadi-Nasab, S., Noury, E., Kasaei, S., Sanaei, E.: Multiple human 3d pose estimation from multiview images. Multimedia Tools and Applications 77, 15573-15601 (2018)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 632, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 632, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 632, + 480, + 665 + ], + "type": "text", + "content": "3. Fang, H.S., Xie, S., Tai, Y.W., Lu, C.: Rmpe: Regional multi-person pose estimation. In: Proceedings of the IEEE international conference on computer vision. pp. 2334-2343 (2017)" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DSA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 117, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 133, + 117, + 480, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 117, + 480, + 138 + ], + "spans": [ + { + "bbox": [ + 133, + 117, + 480, + 138 + ], + "type": "text", + "content": "14. Fischler, M.A., Elschlager, R.A.: The representation and matching of pictorial structures. IEEE Transactions on computers 100(1), 67-92 (1973)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 140, + 480, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 140, + 480, + 160 + ], + "spans": [ + { + "bbox": [ + 133, + 140, + 480, + 160 + ], + "type": "text", + "content": "15. Hartley, R., Zisserman, A.: Multiple view geometry in computer vision. Cambridge university press (2003)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 134, + 161, + 480, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 161, + 480, + 194 + ], + "spans": [ + { + "bbox": [ + 134, + 161, + 480, + 194 + ], + "type": "text", + "content": "16. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 
770-778 (2016)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 134, + 194, + 480, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 194, + 480, + 216 + ], + "spans": [ + { + "bbox": [ + 134, + 194, + 480, + 216 + ], + "type": "text", + "content": "17. Hu, J., Shen, L., Sun, G.: Squeeze-and-excitation networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 7132-7141 (2018)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 134, + 217, + 480, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 217, + 480, + 270 + ], + "spans": [ + { + "bbox": [ + 134, + 217, + 480, + 270 + ], + "type": "text", + "content": "18. Huang, C., Jiang, S., Li, Y., Zhang, Z., Traish, J., Deng, C., Ferguson, S., Da Xu, R.Y.: End-to-end dynamic matching network for multi-view multi-person 3d pose estimation. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXVIII 16. pp. 477-493. Springer (2020)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 134, + 271, + 480, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 271, + 480, + 303 + ], + "spans": [ + { + "bbox": [ + 134, + 271, + 480, + 303 + ], + "type": "text", + "content": "19. Iskakov, K., Burkov, E., Lempitsky, V., Malkov, Y.: Learnable triangulation of human pose. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 7718-7727 (2019)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 134, + 304, + 480, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 304, + 480, + 335 + ], + "spans": [ + { + "bbox": [ + 134, + 304, + 480, + 335 + ], + "type": "text", + "content": "20. Iskakov, K., Burkov, E., Lempitsky, V., Malkov, Y.: Learnable triangulation of human pose. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 7718-7727 (2019)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 134, + 336, + 480, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 336, + 480, + 380 + ], + "spans": [ + { + "bbox": [ + 134, + 336, + 480, + 380 + ], + "type": "text", + "content": "21. Joo, H., Liu, H., Tan, L., Gui, L., Nabbe, B., Matthews, I., Kanade, T., Nobuhara, S., Sheikh, Y.: Panoptic studio: A massively multiview system for social motion capture. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 3334-3342 (2015)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 134, + 380, + 480, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 380, + 480, + 413 + ], + "spans": [ + { + "bbox": [ + 134, + 380, + 480, + 413 + ], + "type": "text", + "content": "22. Lai, J.Y., Shu, S.H., Huang, Y.C.: A cell subdivision strategy for r-nearest neighbors computation. Journal of the Chinese Institute of Engineers 29(6), 953-965 (2006)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 134, + 414, + 480, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 414, + 480, + 445 + ], + "spans": [ + { + "bbox": [ + 134, + 414, + 480, + 445 + ], + "type": "text", + "content": "23. Li, X., Wang, W., Hu, X., Yang, J.: Selective kernel networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 
510-519 (2019)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 134, + 447, + 480, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 447, + 480, + 489 + ], + "spans": [ + { + "bbox": [ + 134, + 447, + 480, + 489 + ], + "type": "text", + "content": "24. Li, Z., Oskarsson, M., Heyden, A.: 3d human pose and shape estimation through collaborative learning and multi-view model-fitting. In: Proceedings of the IEEE/CVF winter conference on applications of computer vision. pp. 1888-1897 (2021)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 134, + 491, + 480, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 491, + 480, + 522 + ], + "spans": [ + { + "bbox": [ + 134, + 491, + 480, + 522 + ], + "type": "text", + "content": "25. Lin, J., Lee, G.H.: Multi-view multi-person 3d pose estimation with plane sweep stereo. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11886-11895 (2021)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 134, + 523, + 480, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 523, + 480, + 555 + ], + "spans": [ + { + "bbox": [ + 134, + 523, + 480, + 555 + ], + "type": "text", + "content": "26. Loper, M., Mahmood, N., Romero, J., Pons-Moll, G., Black, M.J.: Spl: A skinned multi-person linear model. In: Seminal Graphics Papers: Pushing the Boundaries, Volume 2, pp. 851-866 (2023)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 134, + 556, + 480, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 556, + 480, + 588 + ], + "spans": [ + { + "bbox": [ + 134, + 556, + 480, + 588 + ], + "type": "text", + "content": "27. Ma, X., Su, J., Wang, C., Ci, H., Wang, Y.: Context modeling in 3d human pose estimation: A unified perspective. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 6238-6247 (2021)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 134, + 589, + 480, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 589, + 480, + 621 + ], + "spans": [ + { + "bbox": [ + 134, + 589, + 480, + 621 + ], + "type": "text", + "content": "28. Ma, X., Su, J., Wang, C., Ci, H., Wang, Y.: Context modeling in 3d human pose estimation: A unified perspective. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 6238-6247 (2021)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 134, + 622, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 622, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 134, + 622, + 480, + 665 + ], + "type": "text", + "content": "29. Moon, G., Chang, J.Y., Lee, K.M.: V2v-posenet: Voxel-to-voxel prediction network for accurate 3d hand and human pose estimation from a single depth map. In: Proceedings of the IEEE conference on computer vision and pattern Recognition. pp. 
5079-5088 (2018)" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 212, + 100 + ], + "type": "text", + "content": "Chen et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 578 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 161 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 161 + ], + "type": "text", + "content": "30. Reddy, N.D., Guigues, L., Pishchulin, L., Eledath, J., Narasimhan, S.G.: Tessen-track: End-to-end learnable multi-person articulated 3d pose tracking. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15190-15200 (2021)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 162, + 481, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 162, + 481, + 182 + ], + "spans": [ + { + "bbox": [ + 130, + 162, + 481, + 182 + ], + "type": "text", + "content": "31. Shin, S., Halilaj, E.: Multi-view human pose and shape estimation using learnable volumetric aggregation. arXiv preprint arXiv:2011.13427 (2020)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 183, + 481, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 183, + 481, + 215 + ], + "spans": [ + { + "bbox": [ + 132, + 183, + 481, + 215 + ], + "type": "text", + "content": "32. Su, J., Wang, C., Ma, X., Zeng, W., Wang, Y.: Virtualpose: Learning generalizable 3d human pose models from virtual data. In: European Conference on Computer Vision. pp. 55-71. Springer (2022)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 216, + 481, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 216, + 481, + 248 + ], + "spans": [ + { + "bbox": [ + 132, + 216, + 481, + 248 + ], + "type": "text", + "content": "33. Sun, Y., Bao, Q., Liu, W., Fu, Y., Black, M.J., Mei, T.: Monocular, one-stage, regression of multiple 3d people. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11179-11188 (2021)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 249, + 481, + 292 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 249, + 481, + 292 + ], + "spans": [ + { + "bbox": [ + 132, + 249, + 481, + 292 + ], + "type": "text", + "content": "34. Sun, Y., Liu, W., Bao, Q., Fu, Y., Mei, T., Black, M.J.: Putting people in their place: Monocular regression of 3d people in depth. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13243-13252 (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 293, + 481, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 293, + 481, + 335 + ], + "spans": [ + { + "bbox": [ + 132, + 293, + 481, + 335 + ], + "type": "text", + "content": "35. 
Tu, H., Wang, C., Zeng, W.: Voxelpos: Towards multi-camera 3d human pose estimation in wild environment. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part I 16. pp. 197-212. Springer (2020)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 336, + 481, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 336, + 481, + 369 + ], + "spans": [ + { + "bbox": [ + 132, + 336, + 481, + 369 + ], + "type": "text", + "content": "36. Woo, S., Park, J., Lee, J.Y., Kweon, I.S.: Cbam: Convolutional block attention module. In: Proceedings of the European conference on computer vision (ECCV). pp. 3-19 (2018)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 370, + 481, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 370, + 481, + 402 + ], + "spans": [ + { + "bbox": [ + 132, + 370, + 481, + 402 + ], + "type": "text", + "content": "37. Wu, S., Jin, S., Liu, W., Bai, L., Qian, C., Liu, D., Ouyang, W.: Graph-based 3d multi-person pose estimation using multi-view images. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 11148-11157 (2021)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 403, + 481, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 403, + 481, + 435 + ], + "spans": [ + { + "bbox": [ + 132, + 403, + 481, + 435 + ], + "type": "text", + "content": "38. Ye, H., Zhu, W., Wang, C., Wu, R., Wang, Y.: Faster voxelpose: Real-time 3d human pose estimation by orthographic projection. In: European Conference on Computer Vision. pp. 142-159. Springer (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 435, + 481, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 435, + 481, + 468 + ], + "spans": [ + { + "bbox": [ + 132, + 435, + 481, + 468 + ], + "type": "text", + "content": "39. Yu, Z., Zhang, L., Xu, Y., Tang, C., Tran, L., Keskin, C., Park, H.S.: Multiview human body reconstruction from uncalibrated cameras. Advances in Neural Information Processing Systems 35, 7879-7891 (2022)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 468, + 481, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 468, + 481, + 500 + ], + "spans": [ + { + "bbox": [ + 132, + 468, + 481, + 500 + ], + "type": "text", + "content": "40. Zhang, J., Cai, Y., Yan, S., Feng, J., et al.: Direct multi-view multi-person 3d pose estimation. Advances in Neural Information Processing Systems 34, 13153-13164 (2021)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 501, + 481, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 501, + 481, + 533 + ], + "spans": [ + { + "bbox": [ + 132, + 501, + 481, + 533 + ], + "type": "text", + "content": "41. Zhang, Y., Wang, C., Wang, X., Liu, W., Zeng, W.: Voxeltrack: Multi-person 3d human pose estimation and tracking in the wild. IEEE Transactions on Pattern Analysis and Machine Intelligence 45(2), 2613-2626 (2022)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 534, + 481, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 534, + 481, + 578 + ], + "spans": [ + { + "bbox": [ + 132, + 534, + 481, + 578 + ], + "type": "text", + "content": "42. Zhang, Y., An, L., Yu, T., Li, X., Li, K., Liu, Y.: 4d association graph for realtime multi-person motion capture using multiple video cameras. 
In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 1324-1333 (2020)" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 422, + 91, + 447, + 100 + ], + "type": "text", + "content": "3DSA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/f281395b-e7ef-449c-8738-d5a976fca3fe_content_list.json b/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/f281395b-e7ef-449c-8738-d5a976fca3fe_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..e80ce5b795501ef7fe449f58918d74015c1a0280 --- /dev/null +++ b/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/f281395b-e7ef-449c-8738-d5a976fca3fe_content_list.json @@ -0,0 +1,1900 @@ +[ + { + "type": "text", + "text": "3R- INN: How to be climate friendly while consuming/delivering videos?", + "text_level": 1, + "bbox": [ + 259, + 141, + 743, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zoubida Ameur1, Claire-Hélène Demarty1, Daniel Ménard2, and Olivier Le Meur1", + "bbox": [ + 225, + 212, + 777, + 242 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 InterDigital, France, firstname.lastname@interdigital.com \n2 IETR, daniel.menard@insa-rennes.fr", + "bbox": [ + 284, + 253, + 712, + 282 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/65a53aa72741b3542eed667bb83f0b09595a3293d57832a896ad46c7ad59b915.jpg", + "image_caption": [ + "Fig. 1: 3R-INN: End-to-end energy-aware video distribution chain by Removing grain, Rescaling and Reducing display energy." + ], + "image_footnote": [], + "bbox": [ + 217, + 310, + 785, + 385 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. The consumption of a video requires a considerable amount of energy during the various stages of its life-cycle. With a billion hours of video consumed daily, this contributes significantly to the greenhouse gas (GHG) emission. Therefore, reducing the end-to-end carbon footprint of the video chain, while preserving the quality of experience at the user side, is of high importance. To contribute in an impactful manner, we propose 3R-INN, a single invertible network that does three tasks at once: given a high-resolution (HR) grainy image, it Rescales it to a lower resolution, Removes film grain and Reduces its power consumption when displayed. Providing such a minimum viable quality content contributes to reducing the energy consumption during encoding, transmission, decoding and display. 3R-INN also offers the possibility to restore either the HR grainy original image or a grain-free version, thanks to its invertibility and the disentanglement of the high frequency, and without transmitting auxiliary data. 
Experiments show that, 3R-INN enables significant energy savings for encoding (78%), decoding (77%) and rendering (5% to 20%), while outperforming state-of-the-art film grain removal and synthesis, energy-aware and downscaling methods on different test-sets.", + "bbox": [ + 259, + 426, + 740, + 689 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: Energy saving $\\cdot$ Invertible network $\\cdot$ Video distribution", + "bbox": [ + 261, + 703, + 715, + 717 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 744, + 375, + 758 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Over $75\\%$ of the world's global GHG emissions comes from energy production, particularly from fossil fuels. The growing energy consumption of the media and entertainment industry, in particular streaming, strongly contributes to climate change, with more than $1.3\\%$ of GHG in 2020 [41]. Therefore, this industry has to move towards decarbonisation, energy efficiency and sustainability in", + "bbox": [ + 212, + 763, + 787, + 840 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "all its stages, e.g., head-end (encoding), delivery (transmission) and end-user device (decoding and display). Taking apart the energy consumed while building the different necessary equipment, reduced energy consumption at the head-end translates into shorter encoding times and lower computing loads, while at the distribution level it translates into lower bit-rates. At the end-device level, significant gains can be achieved, as displays constitute the most power-hungry part of the whole chain [41]. In the specific case of emissive displays, e.g., organic light-emitting diodes (OLEDs), the power consumption is pixelwise and therefore directly dependent on the displayed content. Consequently, less energy-intensive images at display and shorter decoding times will also lead to lower energy consumption.", + "bbox": [ + 212, + 146, + 787, + 313 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The encoding and decoding times are related to the content resolution and complexity. Downscaling the content before encoding and upscaling it after decoding while preserving the same quality of experience [7] is one straightforward solution to reduce the computational burden. Additionally, removing and modeling artistic noise, such as film grain, before encoding and synthesizing it after decoding, not only reduces encoding and decoding times, but also significantly reduces the bit-rate [32], while still preserving the artistic intent at the user side. Finally, as displays consume the largest proportion of the energy, providing energy-aware content, i.e., that will consume less when displayed, is of significant importance, at least for OLED displays. Several studies addressed this issue by investigating how to reduce the content brightness [25, 26, 36].", + "bbox": [ + 212, + 327, + 787, + 494 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Because climate change is a pressing issue, we believe that having a global vision on the overall energy consumption in the video chain and an holistic approach on how to reduce it is of the utmost importance. Therefore, in this paper, we propose an end-to-end energy reduction of the video distribution chain, while preserving a good quality of experience at the user side, by leveraging a deep learning invertible neural network (INN)-based model, called 3R-INN. 
Prior to encoding a HR grainy image, our 3R-INN multi-task network Rescales it to a lower resolution, Removes film grain and Reduces its power consumption when displayed, by some reduction rate $R$ . While saving energy along the video chain, 3R-INN also provides a visually-pleasant content intended to be displayed, following the new paradigm proposed in [37], which promotes to target a minimum viable video quality for transported videos. Within this same paradigm, the possibility to recover the original content is encouraged, with the counter part that it will consume more. This is feasible thanks to the invertibility of 3R-INN which allows to retrieve the original HR image from the clean energy-aware LR one by running inversely the framework. Furthermore, thanks to the modeling and disentanglement of the lost information in the forward pass, two versions, grainy and clean, of the original HR image can be restored, without transmitting any auxiliary information. With the idea that the energy consumed when applying an energy reduction processing should not exceed the amount of energy saved, we designed 3R-INN to be a single network, that replaces three separate and potentially heavier processings, showing that", + "bbox": [ + 212, + 507, + 787, + 843 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Z. Ameur et al.", + "bbox": [ + 271, + 114, + 377, + 128 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "the use of 3R-INN results in a positive energy balance, as soon as a content is displayed multiple times. In summary, our main contributions are four-folds:", + "bbox": [ + 212, + 146, + 782, + 176 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- a first end-to-end solution for reducing the energy consumption of the video chain that exhibits a better energy balance compared to the sum of the corresponding three tasks in the state-of-the-art;", + "- a single network for the three tasks of rescaling, removing/synthesizing grain and reducing the energy at display, dedicated towards saving energy in the whole video chain;", + "- the provision of a visually pleasant, energy reduced version of the original image, and the capability to go back to the original HR grainy image with no transmission of additional metadata along the video chain;", + "- the best method so far for high-fidelity film grain synthesis, with no need of auxiliary data and the best method so far for downscaling and building energy-aware images." + ], + "bbox": [ + 214, + 178, + 782, + 358 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In the following, we first review the state-of-the-art for rescaling, film grain removal/synthesis and energy-aware images (Section 2), before detailing our proposed solution (Section 3). In Section 4, we provide a compared analysis of the quantitative, qualitative and energy performances of the use of 3R-INN, against state-of-the-art solutions. In Section 5, we draw conclusions and perspectives.", + "bbox": [ + 212, + 361, + 784, + 436 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related work", + "text_level": 1, + "bbox": [ + 214, + 440, + 382, + 455 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Rescaling The rescaling task helps saving resources, through the storage and transfer of downscaled versions of an original HR image/video. 
Recovering the original resolution while having pleasant LR content can be very challenging. For these purposes, to maximize the restoration performance while producing visually pleasant low-resolution (LR) content, several works learn jointly the two tasks, i.e., downscaling and upscaling. In [22], an auto-encoder-based framework learns the optimal LR image that maximizes the reconstruction performance of the HR image. In [40], an unsupervised downscaling method with consideration on the upscaling process but no assumption on how the HR image is downscaled, allows to learn the essential information for upscaling in an optimal way. Following a different paradigm, the method called IRN [42] models the down- and up-scaling processes using an invertible bijective transformation. In a forward pass, IRN performs the downscaling process by producing visually pleasing LR images while capturing the distribution of the lost information using a latent variable that follows a specified distribution. Meanwhile, the upscaling process is made tractable such that the HR image is reconstructed by inversely passing a randomly drawn latent variable with the LR image through the network. However, the reconstruction is not image-adaptive due to the case-agnostic latent variable.", + "bbox": [ + 212, + 460, + 785, + 746 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Film grain removal and synthesis To better preserve film grain while compressing video content efficiently, it is classically removed and modeled before encoding and restored after decoding [15, 32]. Hence, dedicated methods for film grain removal are proposed, based on either temporal filtering [10], spatiotemporal inter-color correlation filtering [19] or deep-learning encoder-decoder models [6]. On the other hand, several studies addressed the film grain synthesis", + "bbox": [ + 212, + 750, + 785, + 840 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "3R-INN: How to be climate friendly while consuming/delivering videos?", + "bbox": [ + 251, + 114, + 730, + 128 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "task. In [31], a Boolean in-homogeneous model [39] is used to model the grain, which corresponds to uniformly distributed disks. In AV1 codec [32], film grain is modeled by an autoregressive (AR) method as well as by an intensity-based function to adjust its strength. In VVC [34], a method based on frequency filtering is used. The grain pattern is first modeled thanks to a discrete cosine transform (DCT) applied to the grain blocks corresponding to smooth regions, and further scaled to the appropriate level, by using a step-wise scaling function. In [6], a conditional generative adversarial network (cGAN) that generates grain at different intensities is proposed. Yet, it does not perform any analysis on the original grain for a reliable synthesis. In [5], a deep-learning framework is proposed which consists of a style encoder for film grain style analysis, a mapping network for film grain style generation, and a synthesis network that generates and blends a specific grain style to a content in a content-adaptive manner.", + "bbox": [ + 212, + 146, + 787, + 344 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Energy-aware images Many works target the reduction of the energy consumption of images while displayed on screens, especially for OLED displays. 
A first set of methods reduces the luminance through clipping or equalizing histograms [20, 21]. Other works directly scale the pixel luminance [26, 36, 38]. The most promising methods leverage deep learning models, trained with a combination of loss functions that minimize the energy consumption while maintaining an acceptable perceptual quality. In [43], a deep learning model trained with a variational loss for simultaneously enhancing the visual quality and reducing the power consumption is proposed. Authors in [38] describe an adaptive contrast enhancement (ACE) convolutional neural network, that performs contrast enhancement of luminance scaled images. In [33], an improved version of ACE, called Residual-ACE (R-ACE), is proposed to infer an attenuation map instead of a reduced image. In [26], authors revisit R-ACE to significantly reduce the complexity without compromising the performance. Different from the above methods, an invertible energy-aware network (InvEAN) [25] produces invertible energy-aware images and allows to recover the original images if required.", + "bbox": [ + 212, + 357, + 787, + 599 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Invertible neural networks INNs learn the mapping $x = f(z)$ , which is fully invertible as $z = f^{-1}(x)$ , through a sequence of differentiable invertible mappings such as affine coupling layers [12] and invertible $1 \\times 1$ convolutional layers [24]. INNs have direct applications in ambiguous inverse problems by learning information-lossless mappings [13, 27, 44]. The lost information in the forward process is captured by additional latent output variables. Thus, the inverse process is learned implicitly. A first application is the steganography, i.e., concealing images into other images [9, 28]. In [44], an INN is used to produce invertible grayscale images, where the lost color information is encoded into a set of Gaussian distributed latent variables. The original color version can be recovered by using a new set of randomly sampled Gaussian distributed variables as input, together with the synthetic grayscale, through the reverse mapping. Similarly, an invertible denoising network (InvDN) transforms a noisy input into a LR clean image and a latent representation containing noise in [27]. To discard noise and restore the clean image, InvDN replaces the noisy latent representation with", + "bbox": [ + 212, + 613, + 787, + 840 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Z. Ameur et al.", + "bbox": [ + 271, + 114, + 377, + 128 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "another one sampled from a prior distribution during reversion. In [13], another INN further disentangles noise from the high frequency image information.", + "bbox": [ + 212, + 146, + 782, + 176 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 Proposed approach", + "text_level": 1, + "bbox": [ + 214, + 184, + 439, + 202 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "With the target of reducing the overall energy consumption of the video chain, our 3R-INN framework, run in a forward pass at the encoder side, performs three invertible tasks simultaneously: 1) film grain removal, 2) downscaling and 3) display energy reduction, as illustrated in Figure 1. 
From a HR grainy image $I_G \\in \\mathbb{R}^{H \\times W \\times 3}$ , 3R-INN outputs a visually pleasant grain-free LR energy-aware image $\\tilde{I}_{LR|R} \\in \\mathbb{R}^{\\frac{1}{2} H \\times \\frac{1}{2} W \\times 3}$ with $R \\in [0,1]$ being the energy reduction rate, and 2 being the scaling factor corresponding to the best compromise between quality of the LR images, framework complexity and energy savings in the video chain. To ensure the process invertibility and the bijective mapping, the lost information mainly due to grain removal and downscaling is captured in a latent variable $z$ distributed according to a standard Gaussian distribution $\\mathcal{N}(0,1)$ . This can be formulated as: $[\\tilde{I}_{LR|R}, z] = f_{\\theta}(I_G)$ where $\\theta$ is the set of trainable parameters of the 3R-INN network $f$ . $\\tilde{I}_{LR|R}$ is intended to be encoded, transmitted and displayed at the end-user device for an optimal energy consumption and quality of experience trade-off. The lost information $z$ is further disentangled into two parts inside 3R-INN, by setting $\\tilde{z}$ its internal representation as $\\tilde{z} = [\\tilde{z}_D, \\tilde{z}_G]$ with $\\tilde{z}_D$ and $\\tilde{z}_G$ representing losses due to downscaling and grain removal, respectively.", + "bbox": [ + 212, + 208, + 787, + 468 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In case the original content should be recovered, 3R-INN is run in an inverse pass at the decoder side (see Figure 1), as follows: $\\tilde{I}_G = f_\\theta^{-1}([\\tilde{I}_{LR|R},z])$ . The original HR grainy content is then reconstructed with no need to transmit any auxiliary information in the video chain thanks to the modeling of the lost information. Moreover, thanks to the film grain and high frequency loss disentanglement, $\\tilde{z} = [\\tilde{z}_D,\\tilde{z}_G]$ , 3R-INN is also able to generate a clean HR version $\\tilde{I}_C$ of the original content by setting $\\tilde{z}_G = 0$ . The overall architecture of the proposed framework is composed of three block types: one Haar Transformation block, several invertible blocks and a conditional latent encoding block, as illustrated in Figure 1.", + "bbox": [ + 212, + 472, + 787, + 625 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Haar transform As removing film grain and downscaling an image significantly impacts high frequencies, it seems natural to first decompose the input HR image into low and high-frequency components. For that purpose, we chose the dyadic Haar wavelet transformation, similarly to [42, 44], because of its simplicity, efficiency and invertibility. Specifically, the Haar transform decomposes an input feature $f_{in} \\in \\mathbb{R}^{H \\times W \\times C}$ into one low-frequency $f_{low} \\in \\mathbb{R}^{\\frac{1}{2} H \\times \\frac{1}{2} W \\times C}$ and three high-frequency $f_{high} \\in \\mathbb{R}^{\\frac{1}{2} H \\times \\frac{1}{2} W \\times 3C}$ sub-bands. $f_{low}$ , produced by an average pooling, represents the overall structure and coarse features of the image, while $f_{high}$ contains finer details in the vertical, horizontal and diagonal directions, corresponding to film grain and edges. This splitting strategy allows to separate very early in the process the low frequency components from the information we aim to suppress. 
$f_{low}$ and $f_{high}$ serve as input to the following invertible blocks.", + "bbox": [ + 212, + 643, + 787, + 825 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "3R-INN: How to be climate friendly while consuming/delivering videos?", + "bbox": [ + 251, + 113, + 730, + 130 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 114, + 785, + 127 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Invertible block As invertible blocks, we selected the coupling layer architecture proposed in [24]. A given input $h^i$ is composed of two parts $h_1^i$ and $h_2^i$ , representing the three low-frequency and the nine high-frequency sub-bands of the color input channels RGB, respectively. These sub-bands are then processed by the $i^{th}$ invertible block as follows:", + "bbox": [ + 212, + 146, + 787, + 220 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nh _ {1} ^ {i + 1} = h _ {1} ^ {i} + \\phi \\left(h _ {2} ^ {i}\\right) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 437, + 229, + 785, + 247 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nh _ {2} ^ {i + 1} = h _ {2} ^ {i} \\odot \\exp (\\psi (h _ {1} ^ {i + 1})) + \\eta (h _ {1} ^ {i + 1})\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 250, + 630, + 268 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\phi$ , $\\psi$ and $\\eta$ are dense blocks [18]. Given $[h_1^{i + 1}, h_2^{i + 1}]$ , the inverse transformation can be easily computed by:", + "bbox": [ + 214, + 277, + 785, + 309 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nh _ {2} ^ {i} = \\left(h _ {2} ^ {i + 1} - \\eta \\left(h _ {1} ^ {i + 1}\\right)\\right) / \\exp \\left(\\psi \\left(h _ {1} ^ {i + 1}\\right)\\right) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 316, + 785, + 334 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nh _ {1} ^ {i} = h _ {1} ^ {i + 1} - \\phi \\left(h _ {2} ^ {i}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 437, + 337, + 566, + 354 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Conditioned latent encoding block Invertible networks learn a bijective mapping between an input and an output distribution. In case of information loss, a latent variable $\\tilde{z}$ is added to ensure the invertible property. This latent variable is assumed to follow a known distribution i.e., a standard Gaussian distribution, to avoid transmitting additional information for the reconstruction, and to make the process case-agnostic. In our context, this would mean that the reconstruction of the HR grainy $(\\tilde{I}_G)$ or clean $(\\tilde{I}_C)$ images would not rely on the a priori knowledge of the LR image $\\tilde{I}_{LR|R}$ . To overcome this limitation and to enable an image-adaptive reconstruction during the inverse pass, the lost information $\\tilde{z}$ is transformed into a Gaussian distributed latent variable $z$ whose mean and variance are conditioned on $\\tilde{I}_{LR|R}$ . 
This is done through the use of a latent encoding block inspired from [44], whose structure is a one-side affine coupling layer that normalizes $\\tilde{z}$ into a standard Gaussian distributed variable $z$ as follows, with $\\phi_g$ and $\\theta_g$ being dense blocks:", + "bbox": [ + 214, + 363, + 787, + 575 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nz = (\\tilde {z} - \\phi_ {g} (\\tilde {I} _ {L R | R})) / \\exp \\left(\\theta_ {g} (\\tilde {I} _ {L R | R})\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 584, + 785, + 602 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The reverse mapping can be formulated as:", + "bbox": [ + 240, + 609, + 553, + 625 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {z} = z \\odot \\exp \\left(\\theta_ {g} \\left(\\tilde {I} _ {L R | R}\\right)\\right) + \\phi_ {g} \\left(\\tilde {I} _ {L R | R}\\right)) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 370, + 633, + 785, + 650 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Training objectives", + "text_level": 1, + "bbox": [ + 215, + 657, + 377, + 672 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3R-INN is first trained on the rescaling and film grain removal/synthesis tasks, before being fine-tuned on the energy reduction task.", + "bbox": [ + 212, + 672, + 785, + 702 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Rescaling and film grain removal/synthesis tasks The Forward Pass optimization is driven by a fidelity loss $\\mathcal{L}_{forw}$ to guarantee a visually pleasant clean LR image $\\tilde{I}_{LR}$ , and a regularization loss $\\mathcal{L}_{reg}$ to guarantee that the latent variable $z$ follows a standard Gaussian distribution. To guide $f_{\\theta}$ to generate $\\tilde{I}_{LR}$ , a downsampled image $I_{LR}$ of the HR clean image $I_C$ is computed by a bicubic filter, and used as ground-truth to minimize $\\mathcal{L}_{forw}$ :", + "bbox": [ + 212, + 705, + 787, + 799 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {f o r w}} \\left(\\tilde {I} _ {L R}, I _ {L R}\\right) = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left\\| \\tilde {I} _ {L R} - I _ {L R} \\right\\| _ {2} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 370, + 806, + 785, + 842 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Z. Ameur et al.", + "bbox": [ + 271, + 114, + 377, + 127 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "with $N$ the batch size. Second, the log-likelihood of the probability density function $p(z)$ of the standard Gaussian distribution is maximized, with $D = \\dim (z)$ :", + "bbox": [ + 214, + 146, + 787, + 179 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {r e g} = - \\log (p (z)) = - \\log \\left(\\frac {1}{(2 \\pi) ^ {D / 2}} \\exp (- \\frac {1}{2} | | z | | ^ {2})\\right) \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 326, + 189, + 785, + 218 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The Inverse Pass optimization consists of two fidelity losses $\\mathcal{L}_{\\text{back}_G}$ and $\\mathcal{L}_{\\text{back}_C}$ , to restore $\\tilde{I}_G$ and $\\tilde{I}_C$ , respectively. To this end, $z$ is first decoded into $\\tilde{z}$ by the latent encoding block conditioned by $\\tilde{I}_{LR}$ . 
Then the disentanglement of film grain $(G)$ and fine details $(D)$ is performed with $\\tilde{z} = [\\tilde{z}_D, \\tilde{z}_G]$ .", + "bbox": [ + 214, + 220, + 782, + 280 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "$\\tilde{I}_G$ is reconstructed by considering all the information contained in $\\tilde{z}$ , i.e., related to film grain and fine details:", + "bbox": [ + 214, + 281, + 782, + 310 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {b a c k} _ {G}} \\left(\\tilde {I} _ {G}, I _ {G}\\right) = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left| \\left| f _ {\\theta} ^ {- 1} \\left(\\tilde {I} _ {L R}, z \\right] \\right| _ {\\left| \\tilde {z} _ {D}, \\tilde {z} _ {G} \\right]} - I _ {G} \\right| | _ {1} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 328, + 320, + 785, + 356 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "$\\tilde{I}_C$ is restored by considering only the subset $\\tilde{z}_D$ of $\\tilde{z}$ , i.e., by using $\\tilde{z} = [\\tilde{z}_D, \\tilde{z}_G = 0]$ as follows:", + "bbox": [ + 215, + 366, + 782, + 398 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {b a c k} _ {C}} \\left(\\tilde {I} _ {C}, I _ {C}\\right) = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left\\| f _ {\\theta} ^ {- 1} \\left(\\tilde {I} _ {L R}, z\\right) _ {\\left| \\left[ \\tilde {z} _ {D}, 0 \\right] \\right.} - I _ {C} \\right\\| _ {1}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 333, + 407, + 785, + 444 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For both fidelity losses, the $\\ell_1$ norm is classically used as in [27, 42]. Finally, for the first two tasks, the following weighted sum is minimized:", + "bbox": [ + 214, + 452, + 782, + 482 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {t o t a l}} = \\lambda_ {1} \\mathcal {L} _ {\\text {f o r w}} + \\lambda_ {2} \\mathcal {L} _ {\\text {r e g}} + \\lambda_ {3} \\mathcal {L} _ {\\text {b a c k} _ {C}} + \\lambda_ {4} \\mathcal {L} _ {\\text {b a c k} _ {G}} \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 323, + 494, + 785, + 511 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Energy-aware task After 3R-INN learns the film grain removal/synthesis and rescaling tasks, it is fine-tuned during the forward pass with additional power and fidelity losses, $\\mathcal{L}_{pow}$ and $\\mathcal{L}_{SSIM}$ , to output an energy-aware grain-free LR image $\\tilde{I}_{LR|R}$ , i.e., its power consumption is reduced by $R$ compared to the power consumption of $I_{LR}$ . Contrary to most works computing energy aware images, assuming a linear relationship between the power consumption $P_Y$ of an image and its linearized luminance [33], we follow the model in [11] dedicated to RGBW OLED screens, and compute $P_{RGBW}$ as the sum of the powers consumed by the four R, G, B, W leds. As in [25], the following power loss is then minimized:", + "bbox": [ + 214, + 521, + 787, + 659 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {p o w} = \\left| \\left| \\tilde {P} _ {R G B W} - (1 - R) \\times P _ {R G B W} \\right| \\right| _ {1} \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 357, + 670, + 785, + 686 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "with $(1 - R) \\times P_{RGBW}$ the desired target power and $\\tilde{P}_{RGBW}$ the power of $\\tilde{I}_{LR|R}$ . 
To ensure a better visual quality of the energy-aware images, a structural similarity index measure (SSIM) loss is added and minimized as follows:", + "bbox": [ + 214, + 698, + 787, + 744 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{SSIM} = 1 - \\mathrm{SSIM}\\left(\\tilde{I}_{LR|R}, I_{LR}\\right) \\tag{11}\n$$\n", + "text_format": "latex", + "bbox": [ + 383, + 755, + 785, + 772 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As the inverse pass objectives remain exactly the same, the total loss minimized in the fine-tuning stage is:", + "bbox": [ + 214, + 782, + 782, + 813 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{\\text{finetuned}} = \\mathcal{L}_{\\text{total}} + \\lambda_{5} \\mathcal{L}_{pow} + \\lambda_{6} \\mathcal{L}_{SSIM} \\tag{12}\n$$\n", + "text_format": "latex", + "bbox": [ + 354, + 825, + 785, + 840 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "3R-INN: How to be climate friendly while consuming/delivering videos?", + "bbox": [ + 251, + 114, + 732, + 130 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 114, + 785, + 126 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Training details During training, we use the DIV2K training set [4] from the FilmGrainStyle740K dataset [5], which contains pairs of corresponding images with and without grain. To complement the DIV2K validation set, we evaluate 3R-INN on the BSDS300 test set [30] and Kodak24 dataset [14], which were augmented with grainy versions of the images by following the same process as in the FilmGrainStyle740K dataset3. Input images were randomly cropped to $144 \\times 144$ and augmented by applying random horizontal and vertical flips. Other training parameters are: Adam optimizer [23, 35] with $\\beta_{1} = 0.9$, $\\beta_{2} = 0.999$; mini-batch size of 16; 500k (training of the first two tasks) + 5k (energy-aware fine-tuning) iterations; learning rate initialized to 2e-4 and halved at [100k, 200k, 300k, 400k] mini-batch updates. Hyper-parameters are set to $(\\lambda_{1}, \\lambda_{2}, \\lambda_{3}, \\lambda_{4}, \\lambda_{5}, \\lambda_{6}) = (40, 1, 1, 1, 1e10, 1e4)$, and eight successive invertible blocks are used. Scale and shift coefficients are learned through a five-layer densely connected convolutional block. Each convolutional filter is of size $3 \\times 3$, with padding 1, followed by a leaky ReLU activation layer with negative slope set to 0.2. The intermediate channel number of the convolutional blocks is fixed to 32. Dimensions of $\\tilde{z}_{D}$ and $\\tilde{z}_{G}$ were set to (8, 1), respectively.", + "bbox": [ + 212, + 145, + 787, + 404 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 215, + 407, + 375, + 424 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The goal of our paper is to reduce the overall energy consumption along the video distribution system using 3R-INN, which primarily supports the display of a grain-free energy-aware LR image, and then offers the possibility to recover the original version, i.e., the grainy HR one, as well as a clean HR version as a third option. Thus, we adopt the following evaluation use case: we first assess the energy savings achieved by using 3R-INN along the video chain, and evaluate its energy needs. 
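As an aside, the fine-tuning objective of equations (10)-(12) can be sketched as follows. The `ssim` call assumes the third-party `pytorch_msssim` package, and `p_rgbw`/`p_rgbw_tilde` stand for per-image powers computed with the RGBW display model of [11], which is not reproduced here; all names are placeholders.

```python
import torch
from pytorch_msssim import ssim  # third-party package, assumed available

def finetune_loss(l_total, i_lr_r_tilde, i_lr, p_rgbw_tilde, p_rgbw,
                  r=0.2, lambda5=1e10, lambda6=1e4):
    """Eq. (12) sketch: base loss plus the power term (Eq. 10) and the
    SSIM term (Eq. 11). r is the target energy reduction rate R."""
    # Eq. (10): drive the output power toward (1 - R) of the input power
    l_pow = (p_rgbw_tilde - (1.0 - r) * p_rgbw).abs().mean()
    # Eq. (11): keep the energy-aware image structurally close to I_LR
    l_ssim = 1.0 - ssim(i_lr_r_tilde, i_lr, data_range=1.0)
    return l_total + lambda5 * l_pow + lambda6 * l_ssim
```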
Then, we assess its performance in terms of quality for the LR grain-free energy-aware images and the reconstructed HR grainy images against state-of-the-art methods. An evaluation of the reconstructed HR clean images and an ablation study are provided in the supplementary materials.", + "bbox": [ + 212, + 428, + 787, + 580 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.1 Energy consumption performance", + "text_level": 1, + "bbox": [ + 215, + 582, + 540, + 597 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Evaluation of energy savings To estimate the energy savings realized along the video distribution system (headend, delivery, decoding and display), we tested the full video transmission chain by applying 3R-INN on two JVET sequences RaceHorses (300 frames, $832 \\times 480$, 10s) and BasketBall (500 frames, HD) [8]. The LR clean energy-aware version at $R = 20\\%$ is encoded using VTM [3], in full intra mode. Although not reflecting real-world scenarios in which efficient hardware decoders are used, the choice of a non-optimized software-based VVC decoder (VTM) nevertheless makes it possible to demonstrate that 3R-INN results in consistent energy savings. Fixed broadband transmission was assumed. We then decoded and displayed the sequences on an OLED screen, with all TV options disabled, including the ambient light setting. Similarly to state-of-the-art methods, no video-specific optimization was conducted, as 3R-INN works in a frame-by-frame manner. In particular, film grain is known to be temporally", + "bbox": [ + 212, + 599, + 787, + 797 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Z. Ameur et al.", + "bbox": [ + 271, + 114, + 377, + 128 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "3 The additional dataset is proposed at www.interdigital.com/data_sets/filmgrainstyle740k-dataset", + "bbox": [ + 217, + 810, + 823, + 839 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9b0ced8cab9cdd2963178d402629ae8cba68606eead14ef36b13f6cbde313217.jpg", + "image_caption": [ + "Fig. 2: Bit-rate, encoding and decoding times with and without using 3R-INN in terms of QP for sequence RaceHorses." + ], + "image_footnote": [], + "bbox": [ + 248, + 146, + 415, + 273 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/2c337df963ffeacf269b9aa389832552d64ea35e4a17209bf1c4d78ac3dd1308.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 416, + 146, + 583, + 272 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/fe788dff4bd7434936a373e0c04d36bf4d40e05c294f17799ff96f9a635c8d2f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 591, + 146, + 751, + 273 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "uncorrelated, hence its frame-based analysis. Resulting videos are provided in the supplementary material, illustrating 3R-INN's capability to process temporal content.", + "bbox": [ + 212, + 308, + 782, + 354 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/e7eff9f174d0779a35da9aac4cd0015a979d735f4174c3b1247d6a70d7b1bfe2.jpg", + "image_caption": [ + "Fig. 3: Measured power consumption when displaying sequence RaceHorses. Left: Comparison between HR and LR versions at $\\mathrm{QP} = 22$. Right: Comparison between LR versions before and after encoding/decoding." 
+ ], + "image_footnote": [], + "bbox": [ + 279, + 385, + 491, + 494 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/cf36dc1fce3dba8e53b21e099df879a4ffa1f653ef2a957ec567b3c6bd48910f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 385, + 725, + 494 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Figure 2 reports the average encoding/decoding times and bit-rates, for different quantization parameters (QP), for the original HR clean and grainy RaceHorses sequences, and for the resulting LR versions with different $R \\in \\{5\\%, 20\\%, 40\\%, 60\\% \\}$ . Up to QP = 27, encoding and decoding the HR grainy video is more time and bit-rate demanding than for the HR clean version. For higher QPs, encoding time is still higher, however, bit-rate and decoding time are similar, because grain was removed during the encoding process. This confirms that compressing a grainy video while preserving film grain requires encoding at low QPs (which is far from the real-world scenario), leading to high and impractical bit-rates. On the contrary, encoding LR grain-free versions, whatever the value of $R$ , shows substantially lower times and bit-rates, and consequently reduces the energy at the head-end, transmission and decoding stages.", + "bbox": [ + 212, + 568, + 785, + 748 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Figure 3 presents actual measures of energy consumptions on an OLED LG-42C2 screen, for $R \\in \\{5\\%, 20\\%, 40\\%, 60\\% \\}$ , for the sequence RaceHorses. On the left plot, we compare the consumption of the encoded/decoded LR and HR clean sequences at $\\mathrm{QP} = 22$ . This proves that displaying an energy-aware video at different reduction rates significantly reduces the display power consumption. The average gains of power are $6.8\\%, 21.5\\%, 33.3\\%$ and $44.2\\%$ . The right", + "bbox": [ + 212, + 750, + 787, + 840 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "3R-INN: How to be climate friendly while consuming/delivering videos?", + "bbox": [ + 251, + 114, + 730, + 128 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "plot compares the consumption of the LR sequences for different $R$ , before and after encoding/decoding $(\\mathrm{QP} = 22)$ . For each $R$ , we observe a non-significant impact of the compression on the display consumption. This demonstrates that energy-aware images are to some extent robust to compression in terms of power values. Similar results are obtained for the sequence BasketBall (shown in the supplemental material).", + "bbox": [ + 212, + 146, + 787, + 236 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 1 illustrates the end-to-end energy savings at each level of the video chain, for the sequence RaceHorses, at QP22 and $R = 20\\%$ , according to the energy model in [17, 29]. Note that a range of power consumption values are considered for both encoding and decoding where the boundary values represent a very optimized vs. a non-optimized power consumption encoder, as well as hardware and software decoders, respectively (the detailed computation is provided in the supplemental material). 3R-INN allows $74\\%$ and $78\\%$ of total end-to-end savings with respectively highly power optimized and non-optimized encoders/decoders. This corresponds to savings of $78\\%$ for head-end, $19\\%$ for delivery and ca. $77\\%$ for decoding. 
From this, we draw several observations. Head-end: As expected, the encoding energy consumption $E_{c}$ significantly depends on the incoming resolution. The HR sequences are the most energy-demanding, particularly when they contain film grain, making encoding at a lower resolution a wise choice. Delivery: The transmission energy consumption $E_{t}$ does not strongly depend on bitrate, but rather on the power consumed by the infrastructure. Thus, transmitting LR instead of HR content results in a relatively small energy gain. Decoding: The gain in energy consumption for the decoding operation is significant, even if, in absolute value, it remains quite low. Display: Displaying the energy-aware clean LR video at $R = 20\\%$ results in an $11\\%$ reduction of energy consumption compared to the original grainy HR video. When the static consumption of the screen is removed, the achieved energy reduction is higher and nearly reaches the target rate of $20\\%$. In absolute value, the energy consumption of display $E_{D}$ is significant compared to that of the other components of the video chain, except for the encoder. However, since the content is encoded once and displayed several times, the display energy gains are further multiplied by the number of displays. Assuming that the sequence RaceHorses is viewed by $10\\%$ of Netflix subscribers [2] for one hour, using 3R-INN would save 156 GWh of energy, equivalent to the monthly consumption of 176 American citizens [1].", + "bbox": [ + 212, + 238, + 787, + 678 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Energy cost of using 3R-INN 3R-INN is a single network that replaces three separate NN architectures for the tasks of grain removal/synthesis, rescaling and building energy-aware images. In that sense, Table 2 reports a comparison of its complexity and energy performance against those of the sum of the", + "bbox": [ + 212, + 679, + 789, + 739 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/2e06f7703642759ca04914164c77b441f2880c452a70446b2d5ca353951b3675.jpg", + "table_caption": [ + "Table 1: End-to-end energy savings along the video chain for a fixed broadband access. Case study of the sequence RaceHorses (300 frames, $832 \\times 480$, 30fps, 10s)." + ], + "table_footnote": [], + "table_body": "
<tr><td></td><td>Headend</td><td>Delivery</td><td>Decoding</td><td>Display</td><td>Total</td></tr>
<tr><td></td><td>Encoding time (s), Ec</td><td>Bitrate (kbps) (QP=22), Et</td><td>Decoding time (s), Ed</td><td>Power (W), ED</td><td></td></tr>
<tr><td>Original (Grainy HR)</td><td>46682, [2.59W, 1167kWh]</td><td>16668, 0.0055Wh</td><td>23.2, [0.005W, 0.15Wh]</td><td>60.8, 0.168Wh</td><td>[2.7685W, 1167.0003kWh]</td></tr>
<tr><td>Original (Clean HR)</td><td>37901, [2.10W, 947kWh]</td><td>14516, 0.0053Wh</td><td>21.0, [0.004W, 0.14Wh]</td><td>59.7, 0.165Wh</td><td>[2.2743W, 947.0003kWh]</td></tr>
<tr><td>Ours (Energy-aware clean LR) R = 20%</td><td>10150, [0.56W, 253kWh]</td><td>4237, 0.0045Wh</td><td>5.7, [0.001W, 0.039Wh]</td><td>54.4, 0.151Wh</td><td>[0.7165W, 253.0001kWh]</td></tr>
<tr><td>Reduction in % Grainy HR vs Ours</td><td>[78%, 78%]</td><td>19%</td><td>[80%, 74%]</td><td>11%</td><td>[74%, 78%]</td></tr>
<tr><td>Reduction in energy Grainy HR vs Ours</td><td>[2.03W, 914kWh]</td><td>0.0010Wh</td><td>[0.004W, 0.11Wh]</td><td>0.017Wh</td><td>[2.052W, 914.0002kWh]</td></tr>
", + "bbox": [ + 215, + 772, + 787, + 837 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Z. Ameur et al.", + "bbox": [ + 271, + 114, + 377, + 128 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "best-performing networks for the three tasks, on sequence RaceHorses, in terms of number of parameters, number of Multiply-ACcumulate operations (MACs), power consumption and number of equivalent displays. Power consumption is approximated using the model in [16], which we assume is valid for all the networks used in this study. The number of equivalent displays represents the number of displays/users that are needed to counterbalance the needed energy to run the network(s). Computation details are provided in the supplementary materials.", + "bbox": [ + 212, + 146, + 787, + 253 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "While producing a clean, $20\\%$ energy-reduced LR image from an HR grainy one, the cost of running 3R-INN in a forward pass is compared with the successive use of three NN architectures: StyleFG analyzer module to model film grain, IRN to remove it and downscale the image, and InvEAN to reduce its energy consumption. From the first part of Table 2, 3R-INN counts significantly fewer parameters/operations and needs less power than the combination of the three networks. Moreover, solely comparing with the savings at the display side ( $\\approx 6$ Watts, see Table 1), the power gain is tangible as soon as the content is displayed 110 times for 3R-INN vs. 161 times for the combination.", + "bbox": [ + 212, + 256, + 787, + 393 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In case recovering an HR grainy image is required, 3R-INN is simply run in an inverse pass, thus, its complexity and power savings remain the same. In contrast, InvEAN and IRN are first run in an inverse pass to recover a clean HR version; then, film grain is synthesized using StyleFG synthesizer module. This time, 3R-INN use represents $58\\%$ less power than the combination (660 vs. 1573W), and requires fewer equivalent displays (110 vs. 262) to offset its power requirements.", + "bbox": [ + 212, + 397, + 787, + 503 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.2 Quantitative and qualitative evaluation of LR images", + "text_level": 1, + "bbox": [ + 214, + 508, + 699, + 525 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Film grain removal and downscaling The quantitative and qualitative evaluations of the LR clean image $\\tilde{I}_{LR|R=0}$ , i.e., corresponding to an energy reduction rate $R = 0$ , are given in Table 3 and Figure 4, respectively. The reference image is the bicubic rescaling of the HR clean image. Although quite similar to the experimental protocol used for IRN in [42], we here assess the ability of the network both to rescale and to remove film grain. Thus, for a fair comparison, we re-train IRN on our dataset to perform both tasks, given grainy HR images as input. The latter is designed and optimized for clean HR image reconstruction only, although it can reconstruct grainy HR images if the original high-frequencies $z$ are provided. However, in the video chain context, this would lead to the transmission of heavy metadata, whereas 3R-INN operates without any transmission of metadata. 
Results show that the proposed method outperforms IRN in terms of PSNR and SSIM. They also outline its good generalization, as even better performances are observed on the BSDS300 and Kodak24 datasets.", + "bbox": [ + 212, + 532, + 787, + 714 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/33dded2a8709c1b0b40c06823080df68e47e2da8efe0e3febf6fefaf53b30abe.jpg", + "table_caption": [ + "Table 2: Comparison of complexity and power performance of 3R-INN against the use of three independent neural networks for sequence RaceHorses. A corresponding power equivalent in terms of number of displays is also given." + ], + "table_footnote": [], + "table_body": "
<tr><td>Output</td><td>Network(s)</td><td>Parameters</td><td>GMACs</td><td>Power (W)</td><td>Display equivalent #</td></tr>
<tr><td>Clean LR at R=20%</td><td>StyleFG analyzer + IRN + InvEAN</td><td>23.3M</td><td>334</td><td>967</td><td>161</td></tr>
<tr><td></td><td>3R-INN (forward)</td><td>1.7M</td><td>230</td><td>660</td><td>110</td></tr>
<tr><td>Grainy HR</td><td>StyleFG synthesizer + IRN + InvEAN</td><td>36.3M</td><td>616</td><td>1573</td><td>262</td></tr>
<tr><td></td><td>3R-INN (inverse)</td><td>1.7M</td><td>230</td><td>660</td><td>110</td></tr>
", + "bbox": [ + 215, + 766, + 787, + 837 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "3R-INN: How to be climate friendly while consuming/delivering videos?", + "bbox": [ + 251, + 113, + 730, + 128 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 114, + 782, + 126 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/d4fea4b8981bf8f449912860edb64a3c16fe3d35a35f539dc9ef55eb9666eee2.jpg", + "table_caption": [ + "Table 3: Comparison between generated LR clean images $\\tilde{I}_{LR|R=0}$ and a bicubic rescaling of the HR clean image as ground-truth." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td colspan="2">DIV2K</td><td colspan="2">BSDS300</td><td colspan="2">Kodak24</td></tr>
<tr><td></td><td>PSNR ↑</td><td>SSIM ↑</td><td>PSNR ↑</td><td>SSIM ↑</td><td>PSNR ↑</td><td>SSIM ↑</td></tr>
<tr><td>IRN [42]</td><td>39.06</td><td>0.942</td><td>38.95</td><td>0.953</td><td>38.75</td><td>0.947</td></tr>
<tr><td>Ours</td><td>39.63</td><td>0.951</td><td>39.79</td><td>0.964</td><td>39.71</td><td>0.957</td></tr>
", + "bbox": [ + 326, + 159, + 671, + 220 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/afc44e58838d201eeec2a5a84e8eab8dda1352470b68e3613e635ceaf51711bc.jpg", + "image_caption": [ + "Ground-truth", + "(bicubic)", + "Fig. 4: Comparison between a bicubic downscaling, IRN and the clean LR $\\tilde{I}_{LR|R=0}$ . of PSNR and SSIM. They also outline its good generalization, as even better performances are observed on BSDS300 and Kodak24 datasets." + ], + "image_footnote": [], + "bbox": [ + 240, + 236, + 411, + 339 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/590af96f3e142ffec186d9df5b768322342eef5dd61c4deee9c0988207e255d1.jpg", + "image_caption": [ + "IRN", + "$\\mathrm{(PSNR = 43.10dB)}$" + ], + "image_footnote": [], + "bbox": [ + 415, + 237, + 586, + 339 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/141995e98c7a0f2b5a7a1ee961024f37a354b86f3d4f930898eea5e84a9a7e31.jpg", + "image_caption": [ + "Ours", + "$\\mathrm{(PSNR = 43.14dB)}$" + ], + "image_footnote": [], + "bbox": [ + 589, + 237, + 763, + 339 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Energy-aware images For $R > 0$ , we evaluate the visual quality of the LR clean energy-aware image $\\tilde{I}_{LR|R}$ against state-of-the-art energy-aware methods, i.e., a global linear scaling of the luminance (LS), R-ACE [33], DeepPVR [26] and InvEAN [25]. To solely evaluate the energy-aware task, and for a fair comparison, existing methods were evaluated while taking as input the output of our method after the fine tuning step with $R = 0$ . All evaluation metrics in the following were calculated with this image as reference. Table 4 reports PSNR-Y and SSIM metrics at four reduction rates, on three test sets. Two conclusions can be drawn. First, when the power consumption model $P_{Y}$ is used for a fair comparison with state-of-the-art methods, the proposed method outperforms LS and R-ACE methods, while being similar to DeepPVR and slightly below InvEAN. When the power consumption model $P_{RGBW}$ is used, the quality scores of 3R-INN are significantly better, and especially for the PSNR-Y. This can be explained by the fact that our model does not learn to reduce the image luminance, contrary to state-of-art methods. The latter in turn were not trained to optimize $P_{RGBW}$ ; this may explain their lower performances.", + "bbox": [ + 212, + 410, + 787, + 652 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "This trend is confirmed by Figure 6 which plots SSIM scores as function of the actual reduction rate, computed with $P_{RGBW}$ . PSNR plots are provided in the supplemental material. Figure 5 shows a qualitative comparison of energy-", + "bbox": [ + 212, + 654, + 787, + 700 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/c8c73ddab39180629d23339a74d67f780dc97dd4b4c6722293867c0fc6f6269a.jpg", + "table_caption": [ + "Table 4: PSNR-Y and SSIM quality scores for the energy-aware task for four reduction rates $R$ . 3R-INN results are presented for two power consumption models, i.e. $P_{Y}$ (for comparison with state-of-the-art methods) and $P_{RGBW}$ , corresponding to RGB and RGBW OLED screens, respectively. InvEAN model is not available at $R = 5\\%$ in [25]." + ], + "table_footnote": [], + "table_body": "
<tr><td>Method</td><td colspan="4">DIV2K</td><td colspan="4">BSDS</td><td colspan="4">Kodak24</td></tr>
<tr><td></td><td>R=5%</td><td>R=20%</td><td>R=40%</td><td>R=60%</td><td>R=5%</td><td>R=20%</td><td>R=40%</td><td>R=60%</td><td>R=5%</td><td>R=20%</td><td>R=40%</td><td>R=60%</td></tr>
<tr><td>LS</td><td>39.34/0.999</td><td>27.01/0.991</td><td>20.33/0.958</td><td>16.06/0.877</td><td>39.64/0.999</td><td>27.31/0.990</td><td>20.67/0.955</td><td>16.35/0.867</td><td>39.38/0.999</td><td>27.05/0.991</td><td>20.41/0.957</td><td>16.09/0.875</td></tr>
<tr><td>R-ACE [33]</td><td>41.53/0.995</td><td>26.59/0.967</td><td>20.05/0.901</td><td>15.92/0.788</td><td>40.55/0.997</td><td>26.90/0.978</td><td>20.24/0.915</td><td>16.12/0.806</td><td>40.70/0.997</td><td>26.74/0.983</td><td>20.08/0.930</td><td>15.98/0.830</td></tr>
<tr><td>DeepPVR [26]</td><td>39.37/0.996</td><td>27.12/0.983</td><td>21.04/0.952</td><td>15.81/0.890</td><td>39.63/0.997</td><td>27.53/0.989</td><td>21.13/0.959</td><td>16.36/0.894</td><td>39.27/0.997</td><td>27.17/0.989</td><td>20.61/0.955</td><td>16.00/0.892</td></tr>
<tr><td>InvEAN [25]</td><td>-</td><td>27.75/0.994</td><td>21.17/0.973</td><td>17.07/0.932</td><td>-</td><td>28.25/0.993</td><td>21.74/0.973</td><td>17.72/0.931</td><td>-</td><td>27.92/0.993</td><td>21.42/0.973</td><td>17.37/0.932</td></tr>
<tr><td>Ours ($P_Y$)</td><td>39.55/0.987</td><td>27.32/0.980</td><td>20.62/0.949</td><td>16.43/0.883</td><td>40.06/0.994</td><td>27.65/0.986</td><td>20.94/0.955</td><td>16.77/0.883</td><td>40.02/0.992</td><td>27.43/0.985</td><td>20.70/0.954</td><td>16.51/0.886</td></tr>
<tr><td>Ours ($P_{RGBW}$)</td><td>47.68/0.998</td><td>38.02/0.993</td><td>29.15/0.974</td><td>23.66/0.945</td><td>48.33/0.999</td><td>38.36/0.995</td><td>30.47/0.983</td><td>24.96/0.961</td><td>47.47/0.998</td><td>37.39/0.994</td><td>29.63/0.982</td><td>24.18/0.958</td></tr>
", + "bbox": [ + 215, + 773, + 784, + 837 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Z. Ameur et al.", + "bbox": [ + 271, + 114, + 375, + 126 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/0e836ad70e4fe81b8e75b9db052a23a3c859f8aff9ce4301c94af8dde956a27b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 233, + 143, + 320, + 191 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/12de44a2c6babce9606368f13791d14d7bd329ee3fb2ef7ac68d570d882abf7f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 233, + 191, + 320, + 237 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/9228acd3f65cb119f5880aabed0d050481134619268fb2b0adc277b5c19f9ca9.jpg", + "image_caption": [ + "Original" + ], + "image_footnote": [], + "bbox": [ + 233, + 237, + 320, + 282 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/8499ddd0eea85411321c447b70d152899b244e827b196272cb80e5ab1ca5fd28.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 143, + 410, + 190 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/9a55a29ec925a75151424a0050dcb22baf8510f0358c09db8ace0a2bb67fa878.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 321, + 190, + 410, + 237 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/c8dc217e3958352259ec21c415b1e07adba4ada3317669afe13f4f1a34426048.jpg", + "image_caption": [ + "LS" + ], + "image_footnote": [], + "bbox": [ + 321, + 237, + 410, + 282 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/298024752f2c570b5a2304bfb2668bd421157d22d537fb839a68d4dea8fb4897.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 411, + 143, + 500, + 190 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/abb7c855d56abda193c739564f63ab95a089ba580ace4a06bcac1341289eeb78.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 411, + 190, + 500, + 236 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/9d5b522312fe765cbb58901aa208da8d425dc7b9b410a776ab0340de5f807a94.jpg", + "image_caption": [ + "RACE", + "Fig. 5: Comparison of generated energy-aware images with the state-of-the-art, for $R \\in \\{5\\%, 20\\%, 40\\% \\}$ from first to third lines. Achieved rates computed by the power model in [11] are provided. InvEAN model is not available at $R = 5\\%$ in [25]." 
+ ], + "image_footnote": [], + "bbox": [ + 411, + 237, + 500, + 282 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/1ac6223cf4493bc5fbf77d2e3e898494b9a131c2975868da3cff4efa0839d86a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 143, + 589, + 190 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/709d471a3b2b8a99b3631a0e64ab66f830ecf12b85fcb9b6767412bb7f1867db.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 190, + 589, + 237 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/51376c1d436f64bb81967422f403c08cd985a116db5c3b990a413f3185134b9c.jpg", + "image_caption": [ + "DeepPVR" + ], + "image_footnote": [], + "bbox": [ + 501, + 237, + 589, + 282 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/cfe1d9e2a331c333ef646c748f89dbbd8388af6bb037572ab239a62c4b5e925b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 591, + 143, + 679, + 190 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/256ece244dc934c8e741968f8cf8119c4166d7048c4160be598295379e0a04ca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 591, + 190, + 679, + 236 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/12f53dabffcff583a69d06a970d47b5ebd5f0adc2b0e9549551241ba289c7185.jpg", + "image_caption": [ + "InvEAN" + ], + "image_footnote": [], + "bbox": [ + 591, + 237, + 679, + 282 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/2dfb3e5052e2321183f2def5c822ec13f542113eb7992ee4f4099313b04250d7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 143, + 769, + 190 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/d2d7988097960325aa2e3d9b963cb0740416b4fcdc61fcf55a5b8afae13ffbde.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 190, + 769, + 236 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/c9067f867d96f7659e4a5d4706e2481b7db6ea3a6f727168a4f6f16262781955.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 681, + 237, + 769, + 282 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "aware images. 3R-INN and LS respect the reduction rate targets better than other methods. Our method also exhibits a different behavior for high values of $R$ , once again keeping the luminance but modifying the colors. The subjective comparison is however difficult since the achieved energy reduction rates vary from one method to another. Although not fully dedicated to the energy-reduction task, 3R-INN performs well compared to existing methods and similarly to InvEAN, it offers the possibility to recover the original image without any side-information.", + "bbox": [ + 212, + 345, + 787, + 465 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.3 Quantitative and qualitative evaluation of HR grainy images", + "text_level": 1, + "bbox": [ + 214, + 470, + 759, + 486 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The reversibility property of 3R-INN is an important feature. To evaluate this property, we evaluated the HR grainy reconstruction with state-of-the-art film grain synthesis methods: VVC (Versatile Video Coding) implementation [34], Deep-FG [6] and Style-FG [6]. 
Table 5 summarizes the quantitative results for $R = 0$, in terms of fidelity of the synthesized grain, using the learned perceptual image patch similarity (LPIPS), JSD-NSS and the KL divergence (KLD) [45], these last two being computed between the histograms of ground-truth and HR grainy images. All methods perform analysis and synthesis except Deep-FG, for which we generated 5 versions of grain, one per available intensity level, and kept only the best-performing image for each metric in the comparison.", + "bbox": [ + 212, + 489, + 787, + 641 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Results show that the proposed method quantitatively outperforms VVC [34] and Deep-FG [6]. It also performs better than Style-FG [5] for the LPIPS and KLD", + "bbox": [ + 214, + 642, + 785, + 672 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/932e2ce72c3513e3263178d402629ae8cba68606eead14ef36b13f6cbde313217.jpg", + "image_caption": [ + "Fig. 6: SSIM scores as a function of the target power reduction, for the different energy-aware methods." + ], + "image_footnote": [], + "bbox": [ + 276, + 704, + 727, + 819 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "3R-INN: How to be climate friendly while consuming/delivering videos?", + "bbox": [ + 251, + 114, + 730, + 128 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/cf7a81f020ff91602fcaca59e5f22e4d82a1bcf3673c868115dca5f587cde288.jpg", + "table_caption": [ + "Table 5: Comparison between reconstructed HR grainy images and ground-truth for different methods on the DIV2K validation set." + ], + "table_footnote": [], + "table_body": "
<tr><td></td><td>Analysis</td><td>Auxiliary data</td><td>JSD-NSS ↓</td><td>LPIPS ↓</td><td>KLD ↓</td></tr>
<tr><td>VVC [34]</td><td>✓</td><td>set of params</td><td>0.0148</td><td>0.2981</td><td>0.0327</td></tr>
<tr><td>Deep-FG [6]</td><td>✗</td><td>✗</td><td>0.0134</td><td>0.3722</td><td>0.0260</td></tr>
<tr><td>Style-FG [5]</td><td>✓</td><td>style vector</td><td>0.0024</td><td>0.1592</td><td>0.0232</td></tr>
<tr><td>Ours</td><td>✓</td><td>none</td><td>0.0088</td><td>0.0445</td><td>0.0177</td></tr>
", + "bbox": [ + 328, + 157, + 669, + 224 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "metrics which are representative of the quality of generated grain. The lower JSD-NSS value for Style-FG [6] could be explained by the fact that it is a GAN-based network which models the data distribution at the expense of the output quality. The qualitative comparison in Figure 7 confirms these observations (additional results in the supplemental material). Another advantage of 3R-INN is that no auxiliary data is required for grain synthesis, unlike VVC and Style-FG, which transmit a set of parameters and a style vector respectively. Similar results are obtained for $R > 0$ and are presented as supplemental material.", + "bbox": [ + 212, + 258, + 787, + 380 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/94acb4fef121f6a6576c367eb884d717e2a55ff601e2ec677564155f3c266479.jpg", + "image_caption": [ + "Ground-truth" + ], + "image_footnote": [], + "bbox": [ + 220, + 412, + 330, + 491 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/d1d4065c6f2443ba0c46eb445fd13c2ca22fdb9bf5295614a7029a5a427a4fb9.jpg", + "image_caption": [ + "VVC (0.3343)", + "Fig. 7: Qualitative evaluation of HR synthesized grainy images for different methods, with LPIPS values between parenthesis." + ], + "image_footnote": [], + "bbox": [ + 334, + 412, + 442, + 491 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/d33342e3aa7e2cd67444a8367880bc08fb19ce30ba13240ed9da328eb11f911b.jpg", + "image_caption": [ + "DeepFG (0.3533)" + ], + "image_footnote": [], + "bbox": [ + 446, + 412, + 555, + 491 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/9199feb4d23090f8fa7b3c44a50fef6553598571e992eb083203eb50a08b87a6.jpg", + "image_caption": [ + "StyleFG (0.1693)" + ], + "image_footnote": [], + "bbox": [ + 558, + 412, + 666, + 491 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/3d217dfcfe541c0fdae42ceb7d563e2c108b6a263b16955e7b2a5216369f26f7.jpg", + "image_caption": [ + "Ours (0.0508)" + ], + "image_footnote": [], + "bbox": [ + 669, + 412, + 779, + 491 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 215, + 544, + 359, + 560 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This paper presents 3R-INN, the first network that enables to reduce the overall energy consumption in the video transmission chain. Given an HR grainy image, 3R-INN delivers a minimum viable quality, low-resolution, grain-free and energy-aware image, thus reducing the energy required for encoding, transmission, decoding and display. With multiple views of the same content, 3R-INN achieves a positive energy balance, far more efficient than current state-of-the-art systems. Furthermore it does not need to transmit auxiliary information to reconstruct the original grainy content, since all the lost information including details, film grain and brightness was encoded and disentangled in a standard Gaussian distribution, through a latent encoding block conditioned on the LR image. Experimental results demonstrate that 3R-INN outperforms the existing methods by a large margin for film grain synthesis, and achieves state-of-the-art performance in the rescaling and energy-aware tasks. For the latter, a fine-tuning for each value of energy reduction rate target $R$ was conducted. 
Conditioning the network on $R$, to avoid fine-tuning a separate network for each value of $R$, will therefore be investigated in the future, as an extension of the current work. Subjective tests will also be conducted to assess the acceptability by end users of the provided LR energy-aware images.", + "bbox": [ + 212, + 566, + 787, + 840 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Z. Ameur et al.", + "bbox": [ + 271, + 114, + 375, + 126 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 217, + 143, + 321, + 159 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Energy consumption household. https://www.energybot.com/blog/average-energy-consumption.html.", + "2. Netflix subscribers. https://www.usnews.com/news/business/articles/2024-01-23/netflixs-subscriber-growth-surges-as-streaming-service-unwraps-best-ever-holiday-season-results.", + "3. Vtm-19.0. https://vctgit.hhi.fraunhofer.de/jvet/VVCSoftware_VTM/~/tags/VTM-19.0", + "4. Agustsson, E., Timofte, R.: Ntire 2017 challenge on single image super-resolution: Dataset and study. In: Proceedings of the IEEE conference on computer vision and pattern recognition workshops. pp. 126-135 (2017)", + "5. Ameur, Z., Demarty, C.H., Le Meur, O., Menard, D., François, E.: Style-based film grain analysis and synthesis. In: Proceedings of the 14th Conference on ACM Multimedia Systems. pp. 229-238 (2023)", + "6. Ameur, Z., Hamidouche, W., François, E., Radosavljevic, M., Menard, D., Demarty, C.H.: Deep-based film grain removal and synthesis. IEEE Transactions on Image Processing (2023)", + "7. Bonniveau, C., Hamidouche, W., Travers, J.F., Déforges, O.: Versatile video coding and super-resolution for efficient delivery of 8k video with 4k backward-compatibility. In: ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). pp. 2048-2052 (2020). https://doi.org/10.1109/ICASSP40776.2020.9054716", + "8. Boyce, J., Suehring, K., Li, X., Seregin, V.: Jvet-j1010: Jvet common test conditions and software reference configurations. In: 10th Meeting of the Joint Video Experts Team. pp. JVET-J1010 (2018)", + "9. Chen, Z., Liu, T., Huang, J.J., Zhao, W., Bi, X., Wang, M.: Invertible mosaic image hiding network for very large capacity image steganography. arXiv preprint arXiv:2309.08987 (2023)", + "10. Dai, J., Au, O.C., Pang, C., Yang, W., Zou, F.: Film grain noise removal and synthesis in video coding. In: 2010 IEEE International Conference on Acoustics, Speech and Signal Processing. pp. 890-893. IEEE (2010)", + "11. Demarty, C.H., Blondé, L., Le Meur, O.: Display power modeling for energy consumption control. In: 2023 IEEE International Conference on Image Processing (ICIP). IEEE (2023)", + "12. Dinh, L., Sohl-Dickstein, J., Bengio, S.: Density estimation using real nvp. arXiv preprint arXiv:1605.08803 (2016)", + "13. Du, W., Chen, H., Zhang, Y., Yang, H.: Hierarchical disentangled representation for invertible image denoising and beyond. arXiv preprint arXiv:2301.13358 (2023)", + "14. Franzen, R.: Kodak lossless true color image suite. source: http://r0k.us/graphics/kodak 4(2), 9 (1999)", + "15. Gomila, C.: Sei message for film grain encoding. JVT document, May 2003 (2003)", + "16. 
Herglotz, C., Brand, F., Regensky, A., Rievel, F., Kaup, A.: Processing energy modeling for neural network based image compression. In: 2023 IEEE International Conference on Image Processing (ICIP). pp. 2390-2394. IEEE (2023)", + "17. Herglotz, C., Kränzler, M., Schober, R., Kaup, A.: Sweet streams are made of this: The system engineer's view on energy efficiency in video communications [feature]. IEEE Circuits and Systems Magazine 23(1), 57-77 (2023)", + "18. Huang, G., Liu, Z., Van Der Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 4700-4708 (2017)" + ], + "bbox": [ + 225, + 164, + 784, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "3R-INN: How to be climate friendly while consuming/delivering videos?", + "bbox": [ + 251, + 114, + 730, + 128 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "19. Hwang, I., Jeong, J., Choi, J., Choe, Y.: Enhanced film grain noise removal for high fidelity video coding. In: 2013 International Conference on Information Science and Cloud Computing Companion. pp. 668-674. IEEE (2013)", + "20. Kang, S.J.: Image-quality-based power control technique for organic light emitting diode displays. Journal of Display Technology 11(1), 104-109 (2015)", + "21. Kang, S.j., Kim, Y.H.: Image integrity-based gray-level error control for low power liquid crystal displays. IEEE Transactions on Consumer Electronics 55(4), 2401-2406 (2009). https://doi.org/10.1109/TCE.2009.5373816", + "22. Kim, H., Choi, M., Lim, B., Lee, K.M.: Task-aware image downscaling. In: Proceedings of the European conference on computer vision (ECCV). pp. 399-414 (2018)", + "23. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)", + "24. Kingma, D.P., Dhariwal, P.: Glow: Generative flow with invertible 1x1 convolutions. Advances in neural information processing systems 31 (2018)", + "25. Le Meur, O., Demarty, C.H.: Invertible energy-aware images. IEEE Signal Processing Letters (2023)", + "26. Le Meur, O., Demarty, C.H., Blondé, L.: Deep-learning-based energy aware images. In: 2023 IEEE International Conference on Image Processing (ICIP). pp. 590-594. IEEE (2023)", + "27. Liu, Y., Qin, Z., Anwar, S., Ji, P., Kim, D., Caldwell, S., Gedeon, T.: Invertible denoising network: A light solution for real noise removal. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 13365-13374 (2021)", + "28. Lu, S.P., Wang, R., Zhong, T., Rosin, P.L.: Large-capacity image steganography based on invertible neural networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 10816-10825 (2021)", + "29. Malmodin, J.: The power consumption of mobile and fixed network data services—the case of streaming video and downloading large files. In: Electronics Goes Green. vol. 2020 (2020)", + "30. Martin, D., Fowlkes, C., Tal, D., Malik, J.: A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In: Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001. vol. 2, pp. 416-423. IEEE (2001)", + "31. Newson, A., Delon, J., Galerne, B.: A stochastic film grain model for resolution-independent rendering. 
In: Computer Graphics Forum. vol. 36, pp. 684-699. Wiley Online Library (2017)", + "32. Norkin, A., Birkbeck, N.: Film grain synthesis for av1 video codec. In: 2018 Data Compression Conference. pp. 3-12. IEEE (2018)", + "33. Nugroho, K.A., Ruan, S.J.: R-ace network for oled image power saving. In: 2022 IEEE 4th Global Conference on Life Sciences and Technologies (LifeTech). pp. 284-285. IEEE (2022)", + "34. Radosavljevic, M., François, E., Reinhard, E., Hamidouche, W., Amestoy, T.: Implementation of film-grain technology within vvc. In: Applications of Digital Image Processing XLIV. vol. 11842, pp. 85-95. SPIE (2021)", + "35. Reddi, S.J., Kale, S., Kumar, S.: On the convergence of adam and beyond. arXiv preprint arXiv:1904.09237 (2019)", + "36. Reinhard, E., Demarty, C.H., Blondé, L.: Pixel value adjustment to reduce the energy requirements of display devices. SMPTE Motion Imaging Journal 132(7), 10-19 (2023)" + ], + "bbox": [ + 215, + 147, + 784, + 840 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Z. Ameur et al.", + "bbox": [ + 271, + 114, + 375, + 127 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "37. Robinson, D.: Greening of streaming: The less accord: Low energy sustainable streaming. In: Proceedings of the 2nd Mile-High Video Conference (MHV'23). p. 115 (2023)", + "38. Shin, Y.G., Park, S., Yoo, M.J., Ko, S.J.: Unsupervised deep power saving and contrast enhancement for oled displays. arXiv preprint arXiv:1905.05916 (2019)", + "39. Stoyan, D., Kendall, W.S., Chiu, S.N., Mecke, J.: Stochastic geometry and its applications. John Wiley & Sons (2013)", + "40. Sun, W., Chen, Z.: Learned image downscaling for upscaling using content adaptive resampler. IEEE Transactions on Image Processing 29, 4027-4040 (2020)", + "41. Trust, T.C.: Carbon impact of video streaming. https://www.carbontrust.com/eneu/node/1537 (2021)", + "42. Xiao, M., Zheng, S., Liu, C., Wang, Y., He, D., Ke, G., Bian, J., Lin, Z., Liu, T.Y.: Invertible image rescaling. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part I 16. pp. 126-144. Springer (2020)", + "43. Yin, J.L., Chen, B.H., Peng, Y.T., Tsai, C.C.: Deep battery saver: End-to-end learning for power constrained contrast enhancement. IEEE Transactions on Multimedia 23, 1049-1059 (2020)", + "44. Zhao, R., Liu, T., Xiao, J., Lun, D.P., Lam, K.M.: Invertible image decolorization. IEEE Transactions on Image Processing 30, 6081-6095 (2021)", + "45. Zhu, F., Chen, G., Hao, J., Heng, P.A.: Blind image denoising via dependent dirichlet process tree. 
IEEE Transactions on Pattern Analysis and Machine Intelligence 39(8), 1518-1531 (2016)" + ], + "bbox": [ + 215, + 147, + 784, + 465 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "3R-INN: How to be climate friendly while consuming/delivering videos?", + "bbox": [ + 251, + 114, + 730, + 128 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 16 + } +] \ No newline at end of file diff --git a/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/f281395b-e7ef-449c-8738-d5a976fca3fe_model.json b/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/f281395b-e7ef-449c-8738-d5a976fca3fe_model.json new file mode 100644 index 0000000000000000000000000000000000000000..b090a8ea35a565eb52fb55e15005a465f805dd91 --- /dev/null +++ b/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/f281395b-e7ef-449c-8738-d5a976fca3fe_model.json @@ -0,0 +1,2577 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.26, + 0.142, + 0.744, + 0.187 + ], + "angle": 0, + "content": "3R-INN: How to be climate friendly while consuming/delivering videos?" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.213, + 0.778, + 0.243 + ], + "angle": 0, + "content": "Zoubida Ameur1, Claire-Hélène Demarty1, Daniel Ménard2, and Olivier Le Meur1" + }, + { + "type": "text", + "bbox": [ + 0.285, + 0.255, + 0.713, + 0.283 + ], + "angle": 0, + "content": "1 InterDigital, France, firstname.lastname@interdigital.com \n2 IETR, daniel.menard@insa-rennes.fr" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.311, + 0.787, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.216, + 0.387, + 0.787, + 0.414 + ], + "angle": 0, + "content": "Fig. 1: 3R-INN: End-to-end energy-aware video distribution chain by Removing grain, Rescaling and Reducing display energy." + }, + { + "type": "text", + "bbox": [ + 0.261, + 0.427, + 0.741, + 0.69 + ], + "angle": 0, + "content": "Abstract. The consumption of a video requires a considerable amount of energy during the various stages of its life-cycle. With a billion hours of video consumed daily, this contributes significantly to greenhouse gas (GHG) emissions. Therefore, reducing the end-to-end carbon footprint of the video chain, while preserving the quality of experience at the user side, is of high importance. To contribute in an impactful manner, we propose 3R-INN, a single invertible network that does three tasks at once: given a high-resolution (HR) grainy image, it Rescales it to a lower resolution, Removes film grain and Reduces its power consumption when displayed. Providing such minimum viable quality content contributes to reducing the energy consumption during encoding, transmission, decoding and display. 3R-INN also offers the possibility to restore either the HR grainy original image or a grain-free version, thanks to its invertibility and the disentanglement of the high frequencies, and without transmitting auxiliary data. Experiments show that 3R-INN enables significant energy savings for encoding (78%), decoding (77%) and rendering (5% to 20%), while outperforming state-of-the-art film grain removal and synthesis, energy-aware and downscaling methods on different test-sets." 
+ }, + { + "type": "text", + "bbox": [ + 0.262, + 0.704, + 0.717, + 0.718 + ], + "angle": 0, + "content": "Keywords: Energy saving \\(\\cdot\\) Invertible network \\(\\cdot\\) Video distribution" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.745, + 0.377, + 0.76 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Over \\(75\\%\\) of the world's global GHG emissions come from energy production, particularly from fossil fuels. The growing energy consumption of the media and entertainment industry, in particular streaming, strongly contributes to climate change, accounting for more than \\(1.3\\%\\) of GHG emissions in 2020 [41]. Therefore, this industry has to move towards decarbonisation, energy efficiency and sustainability in" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.116, + 0.378, + 0.129 + ], + "angle": 0, + "content": "Z. Ameur et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.314 + ], + "angle": 0, + "content": "all its stages, e.g., head-end (encoding), delivery (transmission) and end-user device (decoding and display). Setting aside the energy consumed in building the necessary equipment, reduced energy consumption at the head-end translates into shorter encoding times and lower computing loads, while at the distribution level it translates into lower bit-rates. At the end-device level, significant gains can be achieved, as displays constitute the most power-hungry part of the whole chain [41]. In the specific case of emissive displays, e.g., organic light-emitting diodes (OLEDs), the power consumption is pixel-wise and therefore directly dependent on the displayed content. Consequently, less energy-intensive images at display and shorter decoding times will also lead to lower energy consumption." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.328, + 0.788, + 0.495 + ], + "angle": 0, + "content": "The encoding and decoding times are related to the content resolution and complexity. Downscaling the content before encoding and upscaling it after decoding, while preserving the same quality of experience [7], is one straightforward solution to reduce the computational burden. Additionally, removing and modeling artistic noise, such as film grain, before encoding and synthesizing it after decoding, not only reduces encoding and decoding times, but also significantly reduces the bit-rate [32], while still preserving the artistic intent at the user side. Finally, as displays consume the largest proportion of the energy, providing energy-aware content, i.e., content that will consume less when displayed, is of significant importance, at least for OLED displays. Several studies addressed this issue by investigating how to reduce the content brightness [25, 26, 36]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.508, + 0.788, + 0.844 + ], + "angle": 0, + "content": "Because climate change is a pressing issue, we believe that having a global vision of the overall energy consumption in the video chain and a holistic approach to how to reduce it is of the utmost importance. Therefore, in this paper, we propose an end-to-end energy reduction of the video distribution chain, while preserving a good quality of experience at the user side, by leveraging a deep learning invertible neural network (INN)-based model, called 3R-INN. 
Prior to encoding an HR grainy image, our 3R-INN multi-task network Rescales it to a lower resolution, Removes film grain and Reduces its power consumption when displayed, by some reduction rate \\( R \\). While saving energy along the video chain, 3R-INN also provides visually pleasant content intended to be displayed, following the new paradigm proposed in [37], which promotes targeting a minimum viable video quality for transported videos. Within this same paradigm, the possibility to recover the original content is encouraged, with the counterpart that it will consume more. This is feasible thanks to the invertibility of 3R-INN, which allows retrieving the original HR image from the clean energy-aware LR one by running the framework inversely. Furthermore, thanks to the modeling and disentanglement of the lost information in the forward pass, two versions, grainy and clean, of the original HR image can be restored, without transmitting any auxiliary information. With the idea that the energy consumed when applying an energy reduction processing should not exceed the amount of energy saved, we designed 3R-INN to be a single network that replaces three separate and potentially heavier processings, showing that" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.253, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3R-INN: How to be climate friendly while consuming/delivering videos?" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.177 + ], + "angle": 0, + "content": "the use of 3R-INN results in a positive energy balance, as soon as the content is displayed multiple times. In summary, our main contributions are four-fold:" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.179, + 0.784, + 0.223 + ], + "angle": 0, + "content": "- a first end-to-end solution for reducing the energy consumption of the video chain that exhibits a better energy balance compared to the sum of the corresponding three tasks in the state-of-the-art;" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.225, + 0.784, + 0.268 + ], + "angle": 0, + "content": "- a single network for the three tasks of rescaling, removing/synthesizing grain and reducing the energy at display, dedicated towards saving energy in the whole video chain;" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.27, + 0.784, + 0.314 + ], + "angle": 0, + "content": "- the provision of a visually pleasant, energy-reduced version of the original image, and the capability to go back to the original HR grainy image with no transmission of additional metadata along the video chain;" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.315, + 0.784, + 0.359 + ], + "angle": 0, + "content": "- the best method so far for high-fidelity film grain synthesis, with no need of auxiliary data, and the best method so far for downscaling and building energy-aware images." + }, + { + "type": "list", + "bbox": [ + 0.215, + 0.179, + 0.784, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.362, + 0.785, + 0.437 + ], + "angle": 0, + "content": "In the following, we first review the state-of-the-art for rescaling, film grain removal/synthesis and energy-aware images (Section 2), before detailing our proposed solution (Section 3). 
In Section 4, we provide a comparative analysis of the quantitative, qualitative and energy performance of 3R-INN against state-of-the-art solutions. In Section 5, we draw conclusions and perspectives." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.441, + 0.383, + 0.456 + ], + "angle": 0, + "content": "2 Related work" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.461, + 0.787, + 0.747 + ], + "angle": 0, + "content": "Rescaling The rescaling task helps save resources, through the storage and transfer of downscaled versions of an original HR image/video. Recovering the original resolution while providing pleasant LR content can be very challenging. To this end, i.e., to maximize the restoration performance while producing visually pleasant low-resolution (LR) content, several works jointly learn the two tasks, i.e., downscaling and upscaling. In [22], an auto-encoder-based framework learns the optimal LR image that maximizes the reconstruction performance of the HR image. In [40], an unsupervised downscaling method that takes the upscaling process into consideration, but makes no assumption on how the HR image is downscaled, makes it possible to learn the essential information for upscaling in an optimal way. Following a different paradigm, the method called IRN [42] models the down- and up-scaling processes using an invertible bijective transformation. In a forward pass, IRN performs the downscaling process by producing visually pleasing LR images while capturing the distribution of the lost information using a latent variable that follows a specified distribution. Meanwhile, the upscaling process is made tractable such that the HR image is reconstructed by inversely passing a randomly drawn latent variable with the LR image through the network. However, the reconstruction is not image-adaptive due to the case-agnostic latent variable." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.787, + 0.841 + ], + "angle": 0, + "content": "Film grain removal and synthesis To better preserve film grain while compressing video content efficiently, it is classically removed and modeled before encoding and restored after decoding [15, 32]. Hence, dedicated methods for film grain removal have been proposed, based on either temporal filtering [10], spatiotemporal inter-color correlation filtering [19] or deep-learning encoder-decoder models [6]. On the other hand, several studies addressed the film grain synthesis" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.378, + 0.129 + ], + "angle": 0, + "content": "Z. Ameur et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.345 + ], + "angle": 0, + "content": "task. In [31], a Boolean in-homogeneous model [39] is used to model the grain, which corresponds to uniformly distributed disks. In the AV1 codec [32], film grain is modeled by an autoregressive (AR) method as well as by an intensity-based function to adjust its strength. In VVC [34], a method based on frequency filtering is used. The grain pattern is first modeled thanks to a discrete cosine transform (DCT) applied to the grain blocks corresponding to smooth regions, and further scaled to the appropriate level by using a step-wise scaling function. In [6], a conditional generative adversarial network (cGAN) that generates grain at different intensities is proposed. 
Yet, it does not perform any analysis on the original grain for a reliable synthesis. In [5], a deep-learning framework is proposed which consists of a style encoder for film grain style analysis, a mapping network for film grain style generation, and a synthesis network that generates and blends a specific grain style to a content in a content-adaptive manner." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.358, + 0.788, + 0.6 + ], + "angle": 0, + "content": "Energy-aware images Many works target the reduction of the energy consumption of images while displayed on screens, especially for OLED displays. A first set of methods reduces the luminance through clipping or equalizing histograms [20, 21]. Other works directly scale the pixel luminance [26, 36, 38]. The most promising methods leverage deep learning models, trained with a combination of loss functions that minimize the energy consumption while maintaining an acceptable perceptual quality. In [43], a deep learning model trained with a variational loss for simultaneously enhancing the visual quality and reducing the power consumption is proposed. Authors in [38] describe an adaptive contrast enhancement (ACE) convolutional neural network, that performs contrast enhancement of luminance scaled images. In [33], an improved version of ACE, called Residual-ACE (R-ACE), is proposed to infer an attenuation map instead of a reduced image. In [26], authors revisit R-ACE to significantly reduce the complexity without compromising the performance. Different from the above methods, an invertible energy-aware network (InvEAN) [25] produces invertible energy-aware images and allows to recover the original images if required." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.614, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Invertible neural networks INNs learn the mapping \\( x = f(z) \\), which is fully invertible as \\( z = f^{-1}(x) \\), through a sequence of differentiable invertible mappings such as affine coupling layers [12] and invertible \\( 1 \\times 1 \\) convolutional layers [24]. INNs have direct applications in ambiguous inverse problems by learning information-lossless mappings [13, 27, 44]. The lost information in the forward process is captured by additional latent output variables. Thus, the inverse process is learned implicitly. A first application is the steganography, i.e., concealing images into other images [9, 28]. In [44], an INN is used to produce invertible grayscale images, where the lost color information is encoded into a set of Gaussian distributed latent variables. The original color version can be recovered by using a new set of randomly sampled Gaussian distributed variables as input, together with the synthetic grayscale, through the reverse mapping. Similarly, an invertible denoising network (InvDN) transforms a noisy input into a LR clean image and a latent representation containing noise in [27]. To discard noise and restore the clean image, InvDN replaces the noisy latent representation with" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.252, + 0.114, + 0.732, + 0.131 + ], + "angle": 0, + "content": "3R-INN: How to be climate friendly while consuming/delivering videos?" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.787, + 0.128 + ], + "angle": 0, + "content": "5" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.178 + ], + "angle": 0, + "content": "another one sampled from a prior distribution during reversion. 
In [13], another INN further disentangles noise from the high frequency image information." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.185, + 0.441, + 0.203 + ], + "angle": 0, + "content": "3 Proposed approach" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.209, + 0.788, + 0.469 + ], + "angle": 0, + "content": "With the target of reducing the overall energy consumption of the video chain, our 3R-INN framework, run in a forward pass at the encoder side, performs three invertible tasks simultaneously: 1) film grain removal, 2) downscaling and 3) display energy reduction, as illustrated in Figure 1. From a HR grainy image \\( I_G \\in \\mathbb{R}^{H \\times W \\times 3} \\), 3R-INN outputs a visually pleasant grain-free LR energy-aware image \\( \\tilde{I}_{LR|R} \\in \\mathbb{R}^{\\frac{1}{2} H \\times \\frac{1}{2} W \\times 3} \\) with \\( R \\in [0,1] \\) being the energy reduction rate, and 2 being the scaling factor corresponding to the best compromise between quality of the LR images, framework complexity and energy savings in the video chain. To ensure the process invertibility and the bijective mapping, the lost information mainly due to grain removal and downscaling is captured in a latent variable \\( z \\) distributed according to a standard Gaussian distribution \\( \\mathcal{N}(0,1) \\). This can be formulated as: \\( [\\tilde{I}_{LR|R}, z] = f_{\\theta}(I_G) \\) where \\( \\theta \\) is the set of trainable parameters of the 3R-INN network \\( f \\). \\( \\tilde{I}_{LR|R} \\) is intended to be encoded, transmitted and displayed at the end-user device for an optimal energy consumption and quality of experience trade-off. The lost information \\( z \\) is further disentangled into two parts inside 3R-INN, by setting \\( \\tilde{z} \\) its internal representation as \\( \\tilde{z} = [\\tilde{z}_D, \\tilde{z}_G] \\) with \\( \\tilde{z}_D \\) and \\( \\tilde{z}_G \\) representing losses due to downscaling and grain removal, respectively." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.473, + 0.788, + 0.625 + ], + "angle": 0, + "content": "In case the original content should be recovered, 3R-INN is run in an inverse pass at the decoder side (see Figure 1), as follows: \\(\\tilde{I}_G = f_\\theta^{-1}([\\tilde{I}_{LR|R},z])\\). The original HR grainy content is then reconstructed with no need to transmit any auxiliary information in the video chain thanks to the modeling of the lost information. Moreover, thanks to the film grain and high frequency loss disentanglement, \\(\\tilde{z} = [\\tilde{z}_D,\\tilde{z}_G]\\), 3R-INN is also able to generate a clean HR version \\(\\tilde{I}_C\\) of the original content by setting \\(\\tilde{z}_G = 0\\). The overall architecture of the proposed framework is composed of three block types: one Haar Transformation block, several invertible blocks and a conditional latent encoding block, as illustrated in Figure 1." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.644, + 0.788, + 0.827 + ], + "angle": 0, + "content": "Haar transform As removing film grain and downscaling an image significantly impacts high frequencies, it seems natural to first decompose the input HR image into low and high-frequency components. For that purpose, we chose the dyadic Haar wavelet transformation, similarly to [42, 44], because of its simplicity, efficiency and invertibility. 
Specifically, the Haar transform decomposes an input feature \\( f_{in} \\in \\mathbb{R}^{H \\times W \\times C} \\) into one low-frequency \\( f_{low} \\in \\mathbb{R}^{\\frac{1}{2} H \\times \\frac{1}{2} W \\times C} \\) and three high-frequency \\( f_{high} \\in \\mathbb{R}^{\\frac{1}{2} H \\times \\frac{1}{2} W \\times 3C} \\) sub-bands. \\( f_{low} \\), produced by an average pooling, represents the overall structure and coarse features of the image, while \\( f_{high} \\) contains finer details in the vertical, horizontal and diagonal directions, corresponding to film grain and edges. This splitting strategy allows to separate very early in the process the low frequency components from the information we aim to suppress. \\( f_{low} \\) and \\( f_{high} \\) serve as input to the following invertible blocks." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.378, + 0.128 + ], + "angle": 0, + "content": "Z. Ameur et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.222 + ], + "angle": 0, + "content": "Invertible block As invertible blocks, we selected the coupling layer architecture proposed in [24]. A given input \\( h^i \\) is composed of two parts \\( h_1^i \\) and \\( h_2^i \\), representing the three low-frequency and the nine high-frequency sub-bands of the color input channels RGB, respectively. These sub-bands are then processed by the \\( i^{th} \\) invertible block as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.438, + 0.231, + 0.786, + 0.248 + ], + "angle": 0, + "content": "\\[\nh _ {1} ^ {i + 1} = h _ {1} ^ {i} + \\phi \\left(h _ {2} ^ {i}\\right) \\tag {1}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.374, + 0.251, + 0.632, + 0.27 + ], + "angle": 0, + "content": "\\[\nh _ {2} ^ {i + 1} = h _ {2} ^ {i} \\odot \\exp (\\psi (h _ {1} ^ {i + 1})) + \\eta (h _ {1} ^ {i + 1})\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.278, + 0.786, + 0.31 + ], + "angle": 0, + "content": "where \\(\\phi\\), \\(\\psi\\) and \\(\\eta\\) are dense blocks [18]. Given \\([h_1^{i + 1}, h_2^{i + 1}]\\), the inverse transformation can be easily computed by:" + }, + { + "type": "equation", + "bbox": [ + 0.373, + 0.318, + 0.786, + 0.335 + ], + "angle": 0, + "content": "\\[\nh _ {2} ^ {i} = \\left(h _ {2} ^ {i + 1} - \\eta \\left(h _ {1} ^ {i + 1}\\right)\\right) / \\exp \\left(\\psi \\left(h _ {1} ^ {i + 1}\\right)\\right) \\tag {2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.438, + 0.338, + 0.568, + 0.355 + ], + "angle": 0, + "content": "\\[\nh _ {1} ^ {i} = h _ {1} ^ {i + 1} - \\phi \\left(h _ {2} ^ {i}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.364, + 0.788, + 0.576 + ], + "angle": 0, + "content": "Conditioned latent encoding block Invertible networks learn a bijective mapping between an input and an output distribution. In case of information loss, a latent variable \\(\\tilde{z}\\) is added to ensure the invertible property. This latent variable is assumed to follow a known distribution i.e., a standard Gaussian distribution, to avoid transmitting additional information for the reconstruction, and to make the process case-agnostic. In our context, this would mean that the reconstruction of the HR grainy \\((\\tilde{I}_G)\\) or clean \\((\\tilde{I}_C)\\) images would not rely on the a priori knowledge of the LR image \\(\\tilde{I}_{LR|R}\\). 
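The coupling step of Eqs. (1)-(2) is what makes each invertible block exactly reversible: the scale and shift applied to \(h_2\) depend only on the already-updated \(h_1\), so they can be undone in closed form. Below is a minimal NumPy sketch of this mechanism, given here only as an illustration: the dense blocks \(\phi\), \(\psi\) and \(\eta\) are replaced by fixed toy linear maps, and the tensor shapes are assumptions (in 3R-INN, \(h_1\) holds the 3 low-frequency and \(h_2\) the 9 high-frequency sub-bands).

```python
import numpy as np

rng = np.random.default_rng(0)

C1, C2 = 3, 9          # low-/high-frequency channel counts used in 3R-INN
W_phi = 0.1 * rng.normal(size=(C1, C2))   # toy linear maps standing in for the
W_psi = 0.1 * rng.normal(size=(C2, C1))   # learned dense blocks phi, psi, eta
W_eta = 0.1 * rng.normal(size=(C2, C1))

def phi(h2):   # maps the C2 high-frequency channels to C1 channels (added to h1)
    return np.einsum('oc,chw->ohw', W_phi, np.tanh(h2))

def psi(h1):   # maps the C1 channels to C2 channels (log-scale applied to h2)
    return np.einsum('oc,chw->ohw', W_psi, np.tanh(h1))

def eta(h1):   # maps the C1 channels to C2 channels (shift applied to h2)
    return np.einsum('oc,chw->ohw', W_eta, np.tanh(h1))

def coupling_forward(h1, h2):
    """Eq. (1): h1' = h1 + phi(h2);  h2' = h2 * exp(psi(h1')) + eta(h1')."""
    h1n = h1 + phi(h2)
    h2n = h2 * np.exp(psi(h1n)) + eta(h1n)
    return h1n, h2n

def coupling_inverse(h1n, h2n):
    """Eq. (2): h2 = (h2' - eta(h1')) / exp(psi(h1'));  h1 = h1' - phi(h2)."""
    h2 = (h2n - eta(h1n)) / np.exp(psi(h1n))
    h1 = h1n - phi(h2)
    return h1, h2

# toy check that the inverse pass recovers the input exactly
h1 = rng.normal(size=(C1, 16, 16))
h2 = rng.normal(size=(C2, 16, 16))
x1, x2 = coupling_inverse(*coupling_forward(h1, h2))
print(np.allclose(x1, h1), np.allclose(x2, h2))   # expected: True True
```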
To overcome this limitation and to enable an image-adaptive reconstruction during the inverse pass, the lost information \\(\\tilde{z}\\) is transformed into a Gaussian distributed latent variable \\(z\\) whose mean and variance are conditioned on \\(\\tilde{I}_{LR|R}\\). This is done through the use of a latent encoding block inspired from [44], whose structure is a one-side affine coupling layer that normalizes \\(\\tilde{z}\\) into a standard Gaussian distributed variable \\(z\\) as follows, with \\(\\phi_g\\) and \\(\\theta_g\\) being dense blocks:" + }, + { + "type": "equation", + "bbox": [ + 0.373, + 0.585, + 0.786, + 0.603 + ], + "angle": 0, + "content": "\\[\nz = (\\tilde {z} - \\phi_ {g} (\\tilde {I} _ {L R | R})) / \\exp \\left(\\theta_ {g} (\\tilde {I} _ {L R | R})\\right) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.61, + 0.555, + 0.626 + ], + "angle": 0, + "content": "The reverse mapping can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.372, + 0.634, + 0.786, + 0.651 + ], + "angle": 0, + "content": "\\[\n\\tilde {z} = z \\odot \\exp \\left(\\theta_ {g} \\left(\\tilde {I} _ {L R | R}\\right)\\right) + \\phi_ {g} \\left(\\tilde {I} _ {L R | R}\\right)) \\tag {4}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.659, + 0.378, + 0.674 + ], + "angle": 0, + "content": "Training objectives" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.674, + 0.786, + 0.703 + ], + "angle": 0, + "content": "3R-INN is first trained on the rescaling and film grain removal/synthesis tasks, before being fine-tuned on the energy reduction task." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.706, + 0.788, + 0.8 + ], + "angle": 0, + "content": "Rescaling and film grain removal/synthesis tasks The Forward Pass optimization is driven by a fidelity loss \\(\\mathcal{L}_{forw}\\) to guarantee a visually pleasant clean LR image \\(\\tilde{I}_{LR}\\), and a regularization loss \\(\\mathcal{L}_{reg}\\) to guarantee that the latent variable \\(z\\) follows a standard Gaussian distribution. To guide \\(f_{\\theta}\\) to generate \\(\\tilde{I}_{LR}\\), a downsampled image \\(I_{LR}\\) of the HR clean image \\(I_C\\) is computed by a bicubic filter, and used as ground-truth to minimize \\(\\mathcal{L}_{forw}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.371, + 0.808, + 0.786, + 0.843 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {f o r w}} \\left(\\tilde {I} _ {L R}, I _ {L R}\\right) = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left\\| \\tilde {I} _ {L R} - I _ {L R} \\right\\| _ {2} \\tag {5}\n\\]" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.252, + 0.115, + 0.733, + 0.131 + ], + "angle": 0, + "content": "3R-INN: How to be climate friendly while consuming/delivering videos?" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.787, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.147, + 0.788, + 0.18 + ], + "angle": 0, + "content": "with \\(N\\) the batch size. 
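As a companion to Eqs. (3)-(4) above, the following sketch shows how the lost information \(\tilde{z}\) can be normalized into a standard Gaussian variable conditioned on the LR image, and exactly recovered during the inverse pass. The conditioning functions `phi_g` and `theta_g` are toy stand-ins for the paper's learned dense blocks, and all shapes and scales are illustrative assumptions.

```python
import numpy as np

rng = np.random.default_rng(1)

# Toy stand-ins for the conditioning dense blocks phi_g (mean) and theta_g
# (log-std) of Eq. (3); in 3R-INN both are learned and take the LR image as input.
def phi_g(i_lr):
    return 0.3 * np.tanh(i_lr.mean(axis=0, keepdims=True))

def theta_g(i_lr):
    return 0.2 * np.tanh(i_lr.std(axis=0, keepdims=True))

def encode_latent(z_tilde, i_lr):
    """Eq. (3): normalize the lost information into a standard Gaussian variable."""
    return (z_tilde - phi_g(i_lr)) / np.exp(theta_g(i_lr))

def decode_latent(z, i_lr):
    """Eq. (4): reverse mapping, conditioned on the same LR image."""
    return z * np.exp(theta_g(i_lr)) + phi_g(i_lr)

i_lr = rng.uniform(size=(3, 72, 72))       # LR conditioning image (toy)
z_tilde = rng.normal(size=(9, 72, 72))     # lost information from the last block
z = encode_latent(z_tilde, i_lr)
print(np.allclose(decode_latent(z, i_lr), z_tilde))  # expected: True
```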
Second, the log-likelihood of the probability density function \\(p(z)\\) of the standard Gaussian distribution is maximized, with \\(D = \\dim (z)\\):" + }, + { + "type": "equation", + "bbox": [ + 0.327, + 0.19, + 0.786, + 0.219 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {r e g} = - \\log (p (z)) = - \\log \\left(\\frac {1}{(2 \\pi) ^ {D / 2}} \\exp (- \\frac {1}{2} | | z | | ^ {2})\\right) \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.221, + 0.784, + 0.281 + ], + "angle": 0, + "content": "The Inverse Pass optimization consists of two fidelity losses \\(\\mathcal{L}_{\\text{back}_G}\\) and \\(\\mathcal{L}_{\\text{back}_C}\\), to restore \\(\\tilde{I}_G\\) and \\(\\tilde{I}_C\\), respectively. To this end, \\(z\\) is first decoded into \\(\\tilde{z}\\) by the latent encoding block conditioned by \\(\\tilde{I}_{LR}\\). Then the disentanglement of film grain \\((G)\\) and fine details \\((D)\\) is performed with \\(\\tilde{z} = [\\tilde{z}_D, \\tilde{z}_G]\\)." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.282, + 0.784, + 0.311 + ], + "angle": 0, + "content": "\\(\\tilde{I}_G\\) is reconstructed by considering all the information contained in \\(\\tilde{z}\\), i.e., related to film grain and fine details:" + }, + { + "type": "equation", + "bbox": [ + 0.33, + 0.321, + 0.786, + 0.357 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {b a c k} _ {G}} \\left(\\tilde {I} _ {G}, I _ {G}\\right) = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left| \\left| f _ {\\theta} ^ {- 1} \\left(\\tilde {I} _ {L R}, z \\right] \\right| _ {\\left| \\tilde {z} _ {D}, \\tilde {z} _ {G} \\right]} - I _ {G} \\right| | _ {1} \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.367, + 0.784, + 0.399 + ], + "angle": 0, + "content": "\\(\\tilde{I}_C\\) is restored by considering only the subset \\(\\tilde{z}_D\\) of \\(\\tilde{z}\\), i.e., by using \\(\\tilde{z} = [\\tilde{z}_D, \\tilde{z}_G = 0]\\) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.334, + 0.409, + 0.786, + 0.445 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {b a c k} _ {C}} \\left(\\tilde {I} _ {C}, I _ {C}\\right) = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left\\| f _ {\\theta} ^ {- 1} \\left(\\tilde {I} _ {L R}, z\\right) _ {\\left| \\left[ \\tilde {z} _ {D}, 0 \\right] \\right.} - I _ {C} \\right\\| _ {1}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.453, + 0.784, + 0.483 + ], + "angle": 0, + "content": "For both fidelity losses, the \\(\\ell_1\\) norm is classically used as in [27, 42]. Finally, for the first two tasks, the following weighted sum is minimized:" + }, + { + "type": "equation", + "bbox": [ + 0.324, + 0.496, + 0.786, + 0.512 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {t o t a l}} = \\lambda_ {1} \\mathcal {L} _ {\\text {f o r w}} + \\lambda_ {2} \\mathcal {L} _ {\\text {r e g}} + \\lambda_ {3} \\mathcal {L} _ {\\text {b a c k} _ {C}} + \\lambda_ {4} \\mathcal {L} _ {\\text {b a c k} _ {G}} \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.522, + 0.788, + 0.66 + ], + "angle": 0, + "content": "Energy-aware task After 3R-INN learns the film grain removal/synthesis and rescaling tasks, it is fine-tuned during the forward pass with additional power and fidelity losses, \\(\\mathcal{L}_{pow}\\) and \\(\\mathcal{L}_{SSIM}\\), to output an energy-aware grain-free LR image \\(\\tilde{I}_{LR|R}\\), i.e., its power consumption is reduced by \\(R\\) compared to the power consumption of \\(I_{LR}\\). 
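The sketch below assembles the training objective of Eq. (9) from its four terms, Eqs. (5)-(8), for a single image (batch averaging omitted). The loss weights follow the values reported in the training details; the reconstructed HR images would in practice come from the inverse pass of the network, so the tensors used here are placeholders for illustration only.

```python
import numpy as np

def l_forw(lr_pred, lr_gt):
    """Eq. (5): L2 fidelity between the generated LR image and the bicubic LR."""
    return np.sqrt(((lr_pred - lr_gt) ** 2).sum())

def l_reg(z):
    """Eq. (6): negative log-likelihood of z under a standard Gaussian."""
    d = z.size
    return 0.5 * (z ** 2).sum() + 0.5 * d * np.log(2.0 * np.pi)

def l_back(hr_pred, hr_gt):
    """Eqs. (7)-(8): L1 fidelity for the reconstructed grainy / clean HR images."""
    return np.abs(hr_pred - hr_gt).sum()

def total_loss(lr_pred, lr_gt, z, hr_grainy_pred, hr_grainy_gt,
               hr_clean_pred, hr_clean_gt,
               lambdas=(40.0, 1.0, 1.0, 1.0)):
    """Eq. (9): weighted sum with (lambda1..lambda4) as given in the training details."""
    l1, l2, l3, l4 = lambdas
    return (l1 * l_forw(lr_pred, lr_gt)
            + l2 * l_reg(z)
            + l3 * l_back(hr_clean_pred, hr_clean_gt)
            + l4 * l_back(hr_grainy_pred, hr_grainy_gt))

if __name__ == "__main__":
    rng = np.random.default_rng(2)
    lr = rng.uniform(size=(3, 72, 72))
    hr = rng.uniform(size=(3, 144, 144))
    z = rng.normal(size=(12, 72, 72))
    print(total_loss(lr, lr, z, hr, hr, hr, hr))  # only the Gaussian term remains
```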
Contrary to most works computing energy aware images, assuming a linear relationship between the power consumption \\(P_Y\\) of an image and its linearized luminance [33], we follow the model in [11] dedicated to RGBW OLED screens, and compute \\(P_{RGBW}\\) as the sum of the powers consumed by the four R, G, B, W leds. As in [25], the following power loss is then minimized:" + }, + { + "type": "equation", + "bbox": [ + 0.359, + 0.671, + 0.786, + 0.688 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {p o w} = \\left| \\left| \\tilde {P} _ {R G B W} - (1 - R) \\times P _ {R G B W} \\right| \\right| _ {1} \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.699, + 0.788, + 0.745 + ], + "angle": 0, + "content": "with \\((1 - R) \\times P_{RGBW}\\) the desired target power and \\(\\tilde{P}_{RGBW}\\) the power of \\(\\tilde{I}_{LR|R}\\). To ensure a better visual quality of the energy-aware images, a structural similarity index measure (SSIM) loss is added and minimized as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.385, + 0.756, + 0.786, + 0.773 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {S S I M} = 1 - S S I M \\left(\\tilde {I} _ {L R | R}, I _ {L R}\\right) \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.784, + 0.784, + 0.814 + ], + "angle": 0, + "content": "As the inverse pass objectives remain exactly the same, the total loss minimized in the fine-tuning stage is:" + }, + { + "type": "equation", + "bbox": [ + 0.356, + 0.827, + 0.786, + 0.842 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {f i n e t u n e d}} = \\mathcal {L} _ {\\text {t o t a l}} + \\lambda_ {5} \\mathcal {L} _ {\\text {p o w}} + \\lambda_ {6} \\mathcal {L} _ {\\text {S S I M}} \\tag {12}\n\\]" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.378, + 0.129 + ], + "angle": 0, + "content": "Z. Ameur et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.146, + 0.788, + 0.405 + ], + "angle": 0, + "content": "Training details During training, we use the DIV2K training set [4] from the FilmGrainStyle740K dataset [5], which contains pairs of corresponding images with and without grain. To complement the DIV2K validation set, we evaluate 3R-INN on the BSDS300 test set [30] and Kodak24 dataset [14], which were augmented to add grainy versions of the images, by following the same process as in the FilmGrainStyle740K dataset3. Input images were randomly cropped into \\(144 \\times 144\\) and augmented by applying random horizontal and vertical flips. Other training parameters are: Adam optimizer [23, 35] with \\(\\beta_{1} = 0.9\\), \\(\\beta_{2} = 0.999\\); mini-batch size of 16; 500k (training of the first two tasks) + 5k (energy-aware fine-tuning) iterations; learning rate initialized as 2e-4 and halved at [100k, 200k, 300k, 400k] mini-batch updates. Hyper-parameters are set to: \\((\\lambda_{1}, \\lambda_{2}, \\lambda_{3}, \\lambda_{4}, \\lambda_{5}, \\lambda_{6}) = (40, 1, 1, 1, 1e10, 1e4)\\) and eight successive invertible blocks are used. Scale and shift coefficients are learned through a five-layer densely connected convolutional block. Each convolutional filter is of size \\(3 \\times 3\\), with padding 1, followed by a leaky ReLU activation layer with negative slope set to 0.2. The intermediate channel number of the convolutional blocks is fixed to 32. 
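A sketch of the energy-aware fine-tuning objective of Eqs. (10)-(12) follows. It is not the paper's implementation: the RGBW power model of [11] is replaced by a crude mean-intensity placeholder and the windowed SSIM by a single global SSIM computation, both assumptions made only to keep the example self-contained; \(\lambda_5\) and \(\lambda_6\) use the values given in the training details.

```python
import numpy as np

def power_rgbw_placeholder(img):
    """Crude placeholder for the RGBW display power model of [11]
    (which sums the power of the R, G, B and W leds); here just mean intensity."""
    return float(img.mean())

def l_pow(img_pred, img_ref, R):
    """Eq. (10): |P(img_pred) - (1 - R) * P(img_ref)|."""
    return abs(power_rgbw_placeholder(img_pred)
               - (1.0 - R) * power_rgbw_placeholder(img_ref))

def global_ssim(x, y, c1=0.01 ** 2, c2=0.03 ** 2):
    """Global (single-window) SSIM, a simplification of the usual windowed index;
    constants assume pixel values in [0, 1]."""
    mx, my = x.mean(), y.mean()
    vx, vy = x.var(), y.var()
    cov = ((x - mx) * (y - my)).mean()
    return ((2 * mx * my + c1) * (2 * cov + c2)) / ((mx**2 + my**2 + c1) * (vx + vy + c2))

def l_ssim(img_pred, img_ref):
    """Eq. (11): 1 - SSIM between the energy-aware LR image and the clean LR image."""
    return 1.0 - global_ssim(img_pred, img_ref)

def finetune_loss(l_total, img_pred, img_ref, R, lam5=1e10, lam6=1e4):
    """Eq. (12): total fine-tuning loss; lambda5, lambda6 follow the paper's setting."""
    return l_total + lam5 * l_pow(img_pred, img_ref, R) + lam6 * l_ssim(img_pred, img_ref)

if __name__ == "__main__":
    rng = np.random.default_rng(3)
    ref = rng.uniform(size=(3, 72, 72))
    pred = 0.8 * ref                      # toy "energy-reduced" image
    print(l_pow(pred, ref, R=0.20))       # close to 0 under this toy power model
    print(l_ssim(pred, ref))
```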
Dimensions of \\(\\tilde{z}_{D}\\) and \\(\\tilde{z}_{G}\\) were set to (8, 1), respectively." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.408, + 0.377, + 0.425 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.429, + 0.788, + 0.581 + ], + "angle": 0, + "content": "The goal of our paper is to reduce the overall energy consumption along the video distribution system using 3R-INN, which primarily supports the display of a grain-free energy-aware LR image, and then offers the possibility to recover the original version, i.e., the grainy HR, as well as a clean HR version as a third option. Thus, we adopt the following evaluation use case: we first assess the energy savings achieved by using 3R-INN along the video chain, and evaluate its energy needs. Then, we assess its performances in terms of quality for the LR grain-free energy-aware images and the reconstructed HR grainy images against state-of-the-art methods. An evaluation of the reconstructed HR clean images and an ablation study are provided in the supplementary materials." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.583, + 0.542, + 0.598 + ], + "angle": 0, + "content": "4.1 Energy consumption performance" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.601, + 0.788, + 0.799 + ], + "angle": 0, + "content": "Evaluation of energy savings To estimate the energy savings realized along the video distribution system (headend, delivery, decoding and display), we tested the full video transmission chain by applying 3R-INN on two JVET sequences RaceHorses (300 frames, \\(832 \\times 480\\), 10s) and BasketBall (500 frames, HD) [8]. The LR clean energy-aware at \\(R = 20\\%\\) is encoded using VTM [3], in full intra mode. Although not reflecting real-world scenarios in which efficient hardware decoders are used, the choice of using a non-optimized software-based VVC decoder (VTM) will nevertheless enable to demonstrate that 3R-INN results in consistent energy savings. Fixed broadband transmission was assumed. We then decoded and displayed the sequences on an OLED screen, with all TV options disabled, including the ambient light setting. Similarly to state-of-the-art methods, no video specific optimization was conducted as 3R-INN works in a frame by frame manner. In particular, film grain is known to be temporally" + }, + { + "type": "page_footnote", + "bbox": [ + 0.218, + 0.811, + 0.825, + 0.84 + ], + "angle": 0, + "content": "3 The additional dataset is proposed at www.interdigital.com/data_sets/filmgrainstyle740k-dataset" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.253, + 0.115, + 0.731, + 0.129 + ], + "angle": 0, + "content": "3R-INN: How to be climate friendly while consuming/delivering videos?" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "image", + "bbox": [ + 0.249, + 0.147, + 0.416, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.418, + 0.147, + 0.584, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.593, + 0.147, + 0.752, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.276, + 0.784, + 0.304 + ], + "angle": 0, + "content": "Fig. 2: Bit-rate, encoding and decoding times with and without using 3R-INN in terms of QP for sequence RaceHorses." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.309, + 0.784, + 0.355 + ], + "angle": 0, + "content": "uncorrelated, hence its frame-based analysis. Resulting videos are provided in the supplementary material, illustrating 3R-INN capability to process temporal content." + }, + { + "type": "image", + "bbox": [ + 0.281, + 0.386, + 0.493, + 0.496 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.386, + 0.727, + 0.496 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.519, + 0.785, + 0.561 + ], + "angle": 0, + "content": "Fig. 3: Measured power consumption when displaying sequence RaceHorses. Left: Comparison between HR and LR versions at \\(\\mathrm{QP} = 22\\). Right: Comparison between LR versions before and after encoding/decoding." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.569, + 0.787, + 0.749 + ], + "angle": 0, + "content": "Figure 2 reports the average encoding/decoding times and bit-rates, for different quantization parameters (QP), for the original HR clean and grainy RaceHorses sequences, and for the resulting LR versions with different \\( R \\in \\{5\\%, 20\\%, 40\\%, 60\\% \\} \\). Up to QP = 27, encoding and decoding the HR grainy video is more time and bit-rate demanding than for the HR clean version. For higher QPs, encoding time is still higher, however, bit-rate and decoding time are similar, because grain was removed during the encoding process. This confirms that compressing a grainy video while preserving film grain requires encoding at low QPs (which is far from the real-world scenario), leading to high and impractical bit-rates. On the contrary, encoding LR grain-free versions, whatever the value of \\( R \\), shows substantially lower times and bit-rates, and consequently reduces the energy at the head-end, transmission and decoding stages." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Figure 3 presents actual measures of energy consumptions on an OLED LG-42C2 screen, for \\( R \\in \\{5\\%, 20\\%, 40\\%, 60\\% \\} \\), for the sequence RaceHorses. On the left plot, we compare the consumption of the encoded/decoded LR and HR clean sequences at \\( \\mathrm{QP} = 22 \\). This proves that displaying an energy-aware video at different reduction rates significantly reduces the display power consumption. The average gains of power are \\( 6.8\\%, 21.5\\%, 33.3\\% \\) and \\( 44.2\\% \\). The right" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.378, + 0.129 + ], + "angle": 0, + "content": "Z. Ameur et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.237 + ], + "angle": 0, + "content": "plot compares the consumption of the LR sequences for different \\( R \\), before and after encoding/decoding \\( (\\mathrm{QP} = 22) \\). For each \\( R \\), we observe a non-significant impact of the compression on the display consumption. This demonstrates that energy-aware images are to some extent robust to compression in terms of power values. Similar results are obtained for the sequence BasketBall (shown in the supplemental material)." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.239, + 0.789, + 0.679 + ], + "angle": 0, + "content": "Table 1 illustrates the end-to-end energy savings at each level of the video chain, for the sequence RaceHorses, at QP22 and \\( R = 20\\% \\), according to the energy model in [17, 29]. Note that a range of power consumption values are considered for both encoding and decoding where the boundary values represent a very optimized vs. a non-optimized power consumption encoder, as well as hardware and software decoders, respectively (the detailed computation is provided in the supplemental material). 3R-INN allows \\( 74\\% \\) and \\( 78\\% \\) of total end-to-end savings with respectively highly power optimized and non-optimized encoders/decoders. This corresponds to savings of \\( 78\\% \\) for head-end, \\( 19\\% \\) for delivery and ca. \\( 77\\% \\) for decoding. From this, we draw several observations: Head-end: As expected, the encoding energy consumption \\( E_{c} \\) significantly depends on the incoming resolution. The HR sequences are the most energy-demanding, particularly when they contain film grain, making encoding at a lower resolution a wise choice. Delivery: The transmission energy consumption \\( E_{t} \\) does not strongly depend on bitrate, but more on the power consumed by the infrastructure. Thus, transmitting HR instead of LR content results in a relatively small energy gain. Decoding: The gain in energy consumption for the decoding operation is significant, even if, in absolute value, it remains quite low. Display: Displaying the energy-aware clean LR video at \\( R = 20\\% \\) results in \\( 11\\% \\) of energy consumption reduction compared to the original grainy HR video. With the removal of the static consumption of the screen, the achieved energy reduction is higher and nearly reaches the target rate of \\( 20\\% \\). In absolute value, the energy consumption of display \\( E_{D} \\) is significant compared to that of the other components of the video chain, except for the encoder. However, since the content is encoded once and displayed several times, the display energy gains are further multiplied by the number of displays. Assuming that the sequence RaceHorses is viewed by \\( 10\\% \\) of Netflix subscribers [2] for one hour, using 3R-INN would save 156 GWh of energy, equivalent to the monthly consumption of 176 American citizens [1]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.68, + 0.79, + 0.741 + ], + "angle": 0, + "content": "Energy cost of using 3R-INN 3R-INN is a single network that replaces three separate NN architectures for the tasks of grain removal /synthesis, rescaling and building energy-aware images. In that sense, Table 2 reports a comparison of its complexity and energy performance against those of the sum of the" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.743, + 0.788, + 0.772 + ], + "angle": 0, + "content": "Table 1: End-to-end energy savings along the video chain for a fixed broadband access. Case study of the sequence RaceHorses (300 frames, \\(832 \\times 480\\), 30fps, 10s)." + }, + { + "type": "table", + "bbox": [ + 0.216, + 0.773, + 0.788, + 0.838 + ], + "angle": 0, + "content": "
|                                        | Headend                 | Delivery                   | Decoding               | Display        | Total                   |
|                                        | Encoding time (s), Ec   | Bitrate (kbps) (QP=22), Et | Decoding time (s), Ed  | Power (W), ED  |                         |
| Original (Grainy HR)                   | 46682, [2.59W, 1167kWh] | 16668, 0.0055Wh            | 23.2, [0.005W, 0.15Wh] | 60.8, 0.168Wh  | [2.7685W, 1167.0003kWh] |
| Original (Clean HR)                    | 37901, [2.10W, 947kWh]  | 14516, 0.0053Wh            | 21.0, [0.004W, 0.14Wh] | 59.7, 0.165Wh  | [2.2743W, 947.0003kWh]  |
| Ours (Energy-aware clean LR), R = 20%  | 10150, [0.56W, 253kWh]  | 4237, 0.0045Wh             | 5.7, [0.001W, 0.039Wh] | 54.4, 0.151Wh  | [0.7165W, 253.0001kWh]  |
| Reduction in %, Grainy HR vs Ours      | [78%, 78%]              | 19%                        | [80%, 74%]             | 11%            | [74%, 78%]              |
| Reduction in energy, Grainy HR vs Ours | [2.03W, 914kWh]         | 0.0010Wh                   | [0.004W, 0.11Wh]       | 0.017Wh        | [2.052W, 914.0002kWh]   |
" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.252, + 0.114, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3R-INN: How to be climate friendly while consuming/delivering videos?" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.254 + ], + "angle": 0, + "content": "best-performing networks for the three tasks, on sequence RaceHorses, in terms of number of parameters, number of Multiply-ACcumulate operations (MACs), power consumption and number of equivalent displays. Power consumption is approximated using the model in [16], which we assume is valid for all the networks used in this study. The number of equivalent displays represents the number of displays/users that are needed to counterbalance the needed energy to run the network(s). Computation details are provided in the supplementary materials." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.257, + 0.788, + 0.394 + ], + "angle": 0, + "content": "While producing a clean, \\(20\\%\\) energy-reduced LR image from an HR grainy one, the cost of running 3R-INN in a forward pass is compared with the successive use of three NN architectures: StyleFG analyzer module to model film grain, IRN to remove it and downscale the image, and InvEAN to reduce its energy consumption. From the first part of Table 2, 3R-INN counts significantly fewer parameters/operations and needs less power than the combination of the three networks. Moreover, solely comparing with the savings at the display side (\\(\\approx 6\\) Watts, see Table 1), the power gain is tangible as soon as the content is displayed 110 times for 3R-INN vs. 161 times for the combination." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.398, + 0.788, + 0.504 + ], + "angle": 0, + "content": "In case recovering an HR grainy image is required, 3R-INN is simply run in an inverse pass, thus, its complexity and power savings remain the same. In contrast, InvEAN and IRN are first run in an inverse pass to recover a clean HR version; then, film grain is synthesized using StyleFG synthesizer module. This time, 3R-INN use represents \\(58\\%\\) less power than the combination (660 vs. 1573W), and requires fewer equivalent displays (110 vs. 262) to offset its power requirements." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.51, + 0.7, + 0.526 + ], + "angle": 0, + "content": "4.2 Quantitative and qualitative evaluation of LR images" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.533, + 0.788, + 0.715 + ], + "angle": 0, + "content": "Film grain removal and downscaling The quantitative and qualitative evaluations of the LR clean image \\(\\tilde{I}_{LR|R=0}\\), i.e., corresponding to an energy reduction rate \\(R = 0\\), are given in Table 3 and Figure 4, respectively. The reference image is the bicubic rescaling of the HR clean image. Although quite similar to the experimental protocol used for IRN in [42], we here assess the ability of the network both to rescale and to remove film grain. Thus, for a fair comparison, we re-train IRN on our dataset to perform both tasks, given grainy HR images as input. The latter is designed and optimized for clean HR image reconstruction only, although it can reconstruct grainy HR images if the original high-frequencies \\(z\\) are provided. However, in the video chain context, this would lead to the transmission of heavy metadata, whereas 3R-INN operates without any transmission of metadata. 
Results show that the proposed method outperforms IRN in terms" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.725, + 0.788, + 0.766 + ], + "angle": 0, + "content": "Table 2: Comparison of complexity and power performance of 3R-INN against the use of three independent neural networks for sequence RaceHorses. A corresponding power equivalent in terms of number of displays is also given." + }, + { + "type": "table", + "bbox": [ + 0.216, + 0.767, + 0.788, + 0.838 + ], + "angle": 0, + "content": "
| Output             | Network(s)                         | Parameters | GMACs | Power (W) | Display equivalent # |
| Clean LR at R=20%  | StyleFG analyzer + IRN + InvEAN    | 23.3M      | 334   | 967       | 161                  |
| Clean LR at R=20%  | 3R-INN (forward)                   | 1.7M       | 230   | 660       | 110                  |
| Grainy HR          | StyleFG synthesizer + IRN + InvEAN | 36.3M      | 616   | 1573      | 262                  |
| Grainy HR          | 3R-INN (inverse)                   | 1.7M       | 230   | 660       | 110                  |
" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.377, + 0.127 + ], + "angle": 0, + "content": "Z. Ameur et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.13, + 0.787, + 0.158 + ], + "angle": 0, + "content": "Table 3: Comparison between generated LR clean images \\(\\tilde{I}_{LR|R=0}\\) and a bicubic rescaling of the HR clean image as ground-truth." + }, + { + "type": "table", + "bbox": [ + 0.328, + 0.16, + 0.672, + 0.221 + ], + "angle": 0, + "content": "
| Method    | DIV2K PSNR ↑ | DIV2K SSIM ↑ | BSDS300 PSNR ↑ | BSDS300 SSIM ↑ | Kodak24 PSNR ↑ | Kodak24 SSIM ↑ |
| IRN [42]  | 39.06        | 0.942        | 38.95          | 0.953          | 38.75          | 0.947          |
| Ours      | 39.63        | 0.951        | 39.79          | 0.964          | 39.71          | 0.957          |
" + }, + { + "type": "image", + "bbox": [ + 0.241, + 0.237, + 0.413, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.287, + 0.34, + 0.367, + 0.35 + ], + "angle": 0, + "content": "Ground-truth" + }, + { + "type": "image_caption", + "bbox": [ + 0.301, + 0.351, + 0.353, + 0.361 + ], + "angle": 0, + "content": "(bicubic)" + }, + { + "type": "image", + "bbox": [ + 0.416, + 0.238, + 0.588, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.49, + 0.341, + 0.515, + 0.35 + ], + "angle": 0, + "content": "IRN" + }, + { + "type": "image_caption", + "bbox": [ + 0.445, + 0.351, + 0.559, + 0.361 + ], + "angle": 0, + "content": "\\(\\mathrm{(PSNR = 43.10dB)}\\)" + }, + { + "type": "image", + "bbox": [ + 0.59, + 0.238, + 0.764, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.663, + 0.341, + 0.692, + 0.35 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.619, + 0.351, + 0.734, + 0.361 + ], + "angle": 0, + "content": "\\(\\mathrm{(PSNR = 43.14dB)}\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.216, + 0.362, + 0.784, + 0.407 + ], + "angle": 0, + "content": "Fig. 4: Comparison between a bicubic downscaling, IRN and the clean LR \\(\\tilde{I}_{LR|R=0}\\). of PSNR and SSIM. They also outline its good generalization, as even better performances are observed on BSDS300 and Kodak24 datasets." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.411, + 0.788, + 0.653 + ], + "angle": 0, + "content": "Energy-aware images For \\( R > 0 \\), we evaluate the visual quality of the LR clean energy-aware image \\( \\tilde{I}_{LR|R} \\) against state-of-the-art energy-aware methods, i.e., a global linear scaling of the luminance (LS), R-ACE [33], DeepPVR [26] and InvEAN [25]. To solely evaluate the energy-aware task, and for a fair comparison, existing methods were evaluated while taking as input the output of our method after the fine tuning step with \\( R = 0 \\). All evaluation metrics in the following were calculated with this image as reference. Table 4 reports PSNR-Y and SSIM metrics at four reduction rates, on three test sets. Two conclusions can be drawn. First, when the power consumption model \\( P_{Y} \\) is used for a fair comparison with state-of-the-art methods, the proposed method outperforms LS and R-ACE methods, while being similar to DeepPVR and slightly below InvEAN. When the power consumption model \\( P_{RGBW} \\) is used, the quality scores of 3R-INN are significantly better, and especially for the PSNR-Y. This can be explained by the fact that our model does not learn to reduce the image luminance, contrary to state-of-art methods. The latter in turn were not trained to optimize \\( P_{RGBW} \\); this may explain their lower performances." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.655, + 0.788, + 0.701 + ], + "angle": 0, + "content": "This trend is confirmed by Figure 6 which plots SSIM scores as function of the actual reduction rate, computed with \\( P_{RGBW} \\). PSNR plots are provided in the supplemental material. Figure 5 shows a qualitative comparison of energy-" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.717, + 0.788, + 0.773 + ], + "angle": 0, + "content": "Table 4: PSNR-Y and SSIM quality scores for the energy-aware task for four reduction rates \\( R \\). 3R-INN results are presented for two power consumption models, i.e. 
\\( P_{Y} \\) (for comparison with state-of-the-art methods) and \\( P_{RGBW} \\), corresponding to RGB and RGBW OLED screens, respectively. InvEAN model is not available at \\( R = 5\\% \\) in [25]." + }, + { + "type": "table", + "bbox": [ + 0.217, + 0.774, + 0.785, + 0.838 + ], + "angle": 0, + "content": "
| Method              | DIV2K R=5%  | DIV2K R=20% | DIV2K R=40% | DIV2K R=60% | BSDS R=5%   | BSDS R=20%  | BSDS R=40%  | BSDS R=60%  | Kodak24 R=5% | Kodak24 R=20% | Kodak24 R=40% | Kodak24 R=60% |
| LS                  | 39.34/0.999 | 27.01/0.991 | 20.33/0.958 | 16.06/0.877 | 39.64/0.999 | 27.31/0.990 | 20.67/0.955 | 16.35/0.867 | 39.38/0.999  | 27.05/0.991   | 20.41/0.957   | 16.09/0.875   |
| R-ACE [33]          | 41.53/0.995 | 26.59/0.967 | 20.05/0.901 | 15.92/0.788 | 40.55/0.997 | 26.90/0.978 | 20.24/0.915 | 16.12/0.806 | 40.70/0.997  | 26.74/0.983   | 20.08/0.930   | 15.98/0.830   |
| DeepPVR [26]        | 39.37/0.996 | 27.12/0.983 | 21.04/0.952 | 15.81/0.890 | 39.63/0.997 | 27.53/0.989 | 21.13/0.959 | 16.36/0.894 | 39.27/0.997  | 27.17/0.989   | 20.61/0.955   | 16.00/0.892   |
| InvEAN [25]         | -           | 27.75/0.994 | 21.17/0.973 | 17.07/0.932 | -           | 28.25/0.993 | 21.74/0.973 | 17.72/0.931 | -            | 27.92/0.993   | 21.42/0.973   | 17.37/0.932   |
| Ours (\(P_Y\))      | 39.55/0.987 | 27.32/0.980 | 20.62/0.949 | 16.43/0.883 | 40.06/0.994 | 27.65/0.986 | 20.94/0.955 | 16.77/0.883 | 40.02/0.992  | 27.43/0.985   | 20.70/0.954   | 16.51/0.886   |
| Ours (\(P_{RGBW}\)) | 47.68/0.998 | 38.02/0.993 | 29.15/0.974 | 23.66/0.945 | 48.33/0.999 | 38.36/0.995 | 30.47/0.983 | 24.96/0.961 | 47.47/0.998  | 37.39/0.994   | 29.63/0.982   | 24.18/0.958   |
" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.253, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3R-INN: How to be climate friendly while consuming/delivering videos?" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "image", + "bbox": [ + 0.234, + 0.145, + 0.321, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.234, + 0.192, + 0.321, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.234, + 0.238, + 0.321, + 0.283 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.254, + 0.285, + 0.302, + 0.296 + ], + "angle": 0, + "content": "Original" + }, + { + "type": "image", + "bbox": [ + 0.323, + 0.145, + 0.411, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.323, + 0.191, + 0.411, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.323, + 0.238, + 0.411, + 0.283 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.359, + 0.285, + 0.376, + 0.295 + ], + "angle": 0, + "content": "LS" + }, + { + "type": "image", + "bbox": [ + 0.412, + 0.145, + 0.501, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.412, + 0.191, + 0.501, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.412, + 0.238, + 0.501, + 0.283 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.438, + 0.285, + 0.477, + 0.296 + ], + "angle": 0, + "content": "RACE" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.145, + 0.591, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.191, + 0.591, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.238, + 0.591, + 0.283 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.517, + 0.285, + 0.576, + 0.297 + ], + "angle": 0, + "content": "DeepPVR" + }, + { + "type": "image", + "bbox": [ + 0.592, + 0.145, + 0.68, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.592, + 0.191, + 0.68, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.592, + 0.238, + 0.68, + 0.283 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.612, + 0.285, + 0.661, + 0.296 + ], + "angle": 0, + "content": "InvEAN" + }, + { + "type": "image", + "bbox": [ + 0.682, + 0.145, + 0.771, + 0.191 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.682, + 0.191, + 0.771, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.682, + 0.238, + 0.771, + 0.283 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.711, + 0.285, + 0.742, + 0.296 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.216, + 0.297, + 0.787, + 0.339 + ], + "angle": 0, + "content": "Fig. 5: Comparison of generated energy-aware images with the state-of-the-art, for \\( R \\in \\{5\\%, 20\\%, 40\\% \\} \\) from first to third lines. Achieved rates computed by the power model in [11] are provided. InvEAN model is not available at \\( R = 5\\% \\) in [25]." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.347, + 0.788, + 0.466 + ], + "angle": 0, + "content": "aware images. 3R-INN and LS respect the reduction rate targets better than other methods. Our method also exhibits a different behavior for high values of \\( R \\), once again keeping the luminance but modifying the colors. The subjective comparison is however difficult since the achieved energy reduction rates vary from one method to another. Although not fully dedicated to the energy-reduction task, 3R-INN performs well compared to existing methods and similarly to InvEAN, it offers the possibility to recover the original image without any side-information." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.471, + 0.761, + 0.487 + ], + "angle": 0, + "content": "4.3 Quantitative and qualitative evaluation of HR grainy images" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.49, + 0.788, + 0.642 + ], + "angle": 0, + "content": "The reversibility property of 3R-INN is an important feature. To evaluate this property, we evaluated the HR grainy reconstruction with state-of-the-art film grain synthesis methods: VVC (Versatile Video Coding) implementation [34], Deep-FG [6] and Style-FG [6]. Table 5 summarizes the quantitative results for \\( R = 0 \\), in terms of fidelity of the synthesized grain using learned perceptual image patch similarity (LPIPS), JSDNSS and the KL divergence (KLD) [45], these last two being computed between the histograms of ground-truth and HR grainy images. All methods perform analysis and synthesis except Deep-FG for which we generated 5 versions of grain, one per available intensity level, and kept only the best performing image for each metric in the comparison." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.643, + 0.787, + 0.674 + ], + "angle": 0, + "content": "Results show that the proposed method outperforms quantitatively VVC [34] and Deep-FG [6]. It also performs better than Style-FG [6] for LPIPS and KLD" + }, + { + "type": "image", + "bbox": [ + 0.277, + 0.705, + 0.728, + 0.82 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.821, + 0.787, + 0.848 + ], + "angle": 0, + "content": "Fig. 6: SSIM scores as function of the target power reduction, for the different energy-aware methods." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.377, + 0.127 + ], + "angle": 0, + "content": "Z. Ameur et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.13, + 0.784, + 0.157 + ], + "angle": 0, + "content": "Table 5: Comparison between reconstructed HR grainy images and ground-truth for different methods on DIV2K validation set." + }, + { + "type": "table", + "bbox": [ + 0.33, + 0.159, + 0.67, + 0.226 + ], + "angle": 0, + "content": "
| Method       | Analysis | Auxiliary data | JSD-NSS ↓ | LPIPS ↓ | KLD ↓  |
| VVC [34]     | ✓        | set of params  | 0.0148    | 0.2981  | 0.0327 |
| Deep-FG [6]  | x        | x              | 0.0134    | 0.3722  | 0.0260 |
| Style-FG [5] | ✓        | style vector   | 0.0024    | 0.1592  | 0.0232 |
| Ours         | ✓        | none           | 0.0088    | 0.0445  | 0.0177 |
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.26, + 0.788, + 0.381 + ], + "angle": 0, + "content": "metrics which are representative of the quality of generated grain. The lower JSD-NSS value for Style-FG [6] could be explained by the fact that it is a GAN-based network which models the data distribution at the expense of the output quality. The qualitative comparison in Figure 7 confirms these observations (additional results in the supplemental material). Another advantage of 3R-INN is that no auxiliary data is required for grain synthesis, unlike VVC and Style-FG, which transmit a set of parameters and a style vector respectively. Similar results are obtained for \\( R > 0 \\) and are presented as supplemental material." + }, + { + "type": "image", + "bbox": [ + 0.222, + 0.413, + 0.331, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.238, + 0.493, + 0.317, + 0.503 + ], + "angle": 0, + "content": "Ground-truth" + }, + { + "type": "image", + "bbox": [ + 0.335, + 0.413, + 0.444, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.35, + 0.493, + 0.429, + 0.503 + ], + "angle": 0, + "content": "VVC (0.3343)" + }, + { + "type": "image", + "bbox": [ + 0.447, + 0.413, + 0.556, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.453, + 0.493, + 0.55, + 0.503 + ], + "angle": 0, + "content": "DeepFG (0.3533)" + }, + { + "type": "image", + "bbox": [ + 0.559, + 0.413, + 0.668, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.564, + 0.493, + 0.664, + 0.503 + ], + "angle": 0, + "content": "StyleFG (0.1693)" + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.413, + 0.781, + 0.492 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.687, + 0.493, + 0.765, + 0.503 + ], + "angle": 0, + "content": "Ours (0.0508)" + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.504, + 0.784, + 0.532 + ], + "angle": 0, + "content": "Fig. 7: Qualitative evaluation of HR synthesized grainy images for different methods, with LPIPS values between parenthesis." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.545, + 0.36, + 0.561 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.568, + 0.789, + 0.842 + ], + "angle": 0, + "content": "This paper presents 3R-INN, the first network that enables to reduce the overall energy consumption in the video transmission chain. Given an HR grainy image, 3R-INN delivers a minimum viable quality, low-resolution, grain-free and energy-aware image, thus reducing the energy required for encoding, transmission, decoding and display. With multiple views of the same content, 3R-INN achieves a positive energy balance, far more efficient than current state-of-the-art systems. Furthermore it does not need to transmit auxiliary information to reconstruct the original grainy content, since all the lost information including details, film grain and brightness was encoded and disentangled in a standard Gaussian distribution, through a latent encoding block conditioned on the LR image. Experimental results demonstrate that 3R-INN outperforms the existing methods by a large margin for film grain synthesis, and achieves state-of-the-art performance in the rescaling and energy-aware tasks. For the latter, a fine-tuning for each value of energy reduction rate target \\( R \\) was conducted. 
Conditioning the network on \\( R \\) to avoid fine-tuning different networks for each value of \\( R \\), will therefore be investigated in the future, as an extension of current work. Some subjective test will also be conducted to assess the acceptability by end users of the provided LR energy-aware images." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.253, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3R-INN: How to be climate friendly while consuming/delivering videos?" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.145, + 0.323, + 0.16 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.165, + 0.784, + 0.191 + ], + "angle": 0, + "content": "1. Energy consumption household. https://www.energybot.com/blog/average-energy-consumption.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.192, + 0.785, + 0.233 + ], + "angle": 0, + "content": "2. Netflix subscribers. https://www.usnews.com/news/business/articles/2024-01-23/netflixs-subscriber-growth-surges-as-streaming-service-unwraps-best-ever-holiday-season-results." + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.234, + 0.785, + 0.26 + ], + "angle": 0, + "content": "3. Vtm-19.0. https://vctgit.hhi.fraunhofer.de/jvet/VVCSoftware_VTM/~/tags/VTM-19.0" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.261, + 0.785, + 0.302 + ], + "angle": 0, + "content": "4. Agustsson, E., Timofte, R.: Ntire 2017 challenge on single image super-resolution: Dataset and study. In: Proceedings of the IEEE conference on computer vision and pattern recognition workshops. pp. 126-135 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.303, + 0.785, + 0.344 + ], + "angle": 0, + "content": "5. Ameur, Z., Demarty, C.H., Le Meur, O., Menard, D., François, E.: Style-based film grain analysis and synthesis. In: Proceedings of the 14th Conference on ACM Multimedia Systems. pp. 229-238 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.344, + 0.785, + 0.385 + ], + "angle": 0, + "content": "6. Ameur, Z., Hamidouche, W., François, E., Radosavljevic, M., Menard, D., Demarty, C.H.: Deep-based film grain removal and synthesis. IEEE Transactions on Image Processing (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.386, + 0.785, + 0.454 + ], + "angle": 0, + "content": "7. Bonniveau, C., Hamidouche, W., Travers, J.F., Déforges, O.: Versatile video coding and super-resolution for efficient delivery of 8k video with 4k backward-compatibility. In: ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). pp. 2048-2052 (2020). https://doi.org/10.1109/ICASSP40776.2020.9054716" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.455, + 0.785, + 0.495 + ], + "angle": 0, + "content": "8. Boyce, J., Suehring, K., Li, X., Seregin, V.: Jvet-j1010: Jvet common test conditions and software reference configurations. In: 10th Meeting of the Joint Video Experts Team. pp. JVET-J1010 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.496, + 0.785, + 0.537 + ], + "angle": 0, + "content": "9. Chen, Z., Liu, T., Huang, J.J., Zhao, W., Bi, X., Wang, M.: Invertible mosaic image hiding network for very large capacity image steganography. arXiv preprint arXiv:2309.08987 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.538, + 0.785, + 0.579 + ], + "angle": 0, + "content": "0. 
Dai, J., Au, O.C., Pang, C., Yang, W., Zou, F.: Film grain noise removal and synthesis in video coding. In: 2010 IEEE International Conference on Acoustics, Speech and Signal Processing. pp. 890-893. IEEE (2010)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.58, + 0.785, + 0.62 + ], + "angle": 0, + "content": "1. Demarty, C.H., Blondé, L., Le Meur, O.: Display power modeling for energy consumption control. In: 2023 IEEE International Conference on Image Processing (ICIP). IEEE (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.621, + 0.785, + 0.647 + ], + "angle": 0, + "content": "2. Dinh, L., Sohl-Dickstein, J., Bengio, S.: Density estimation using real nvp. arXiv preprint arXiv:1605.08803 (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.648, + 0.785, + 0.675 + ], + "angle": 0, + "content": "3. Du, W., Chen, H., Zhang, Y., Yang, H.: Hierarchical disentangled representation for invertible image denoising and beyond. arXiv preprint arXiv:2301.13358 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.676, + 0.785, + 0.702 + ], + "angle": 0, + "content": "4. Franzen, R.: Kodak lossless true color image suite. source: http://r0k.us/graphics/kodak 4(2), 9 (1999)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.703, + 0.785, + 0.716 + ], + "angle": 0, + "content": "5. Gomila, C.: Sei message for film grain encoding. JVT document, May 2003 (2003)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.717, + 0.785, + 0.757 + ], + "angle": 0, + "content": "6. Herglotz, C., Brand, F., Regensky, A., Rievel, F., Kaup, A.: Processing energy modeling for neural network based image compression. In: 2023 IEEE International Conference on Image Processing (ICIP). pp. 2390-2394. IEEE (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.758, + 0.785, + 0.799 + ], + "angle": 0, + "content": "7. Herglotz, C., Kränzler, M., Schober, R., Kaup, A.: Sweet streams are made of this: The system engineer's view on energy efficiency in video communications [feature]. IEEE Circuits and Systems Magazine 23(1), 57-77 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.799, + 0.785, + 0.84 + ], + "angle": 0, + "content": "8. Huang, G., Liu, Z., Van Der Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 4700-4708 (2017)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.165, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.377, + 0.128 + ], + "angle": 0, + "content": "Z. Ameur et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.148, + 0.785, + 0.189 + ], + "angle": 0, + "content": "19. Hwang, I., Jeong, J., Choi, J., Choe, Y.: Enhanced film grain noise removal for high fidelity video coding. In: 2013 International Conference on Information Science and Cloud Computing Companion. pp. 668-674. IEEE (2013)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.19, + 0.785, + 0.218 + ], + "angle": 0, + "content": "20. Kang, S.J.: Image-quality-based power control technique for organic light emitting diode displays. Journal of Display Technology 11(1), 104-109 (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.218, + 0.785, + 0.259 + ], + "angle": 0, + "content": "21. 
Kang, S.j., Kim, Y.H.: Image integrity-based gray-level error control for low power liquid crystal displays. IEEE Transactions on Consumer Electronics 55(4), 2401-2406 (2009). https://doi.org/10.1109/TCE.2009.5373816" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.259, + 0.785, + 0.3 + ], + "angle": 0, + "content": "22. Kim, H., Choi, M., Lim, B., Lee, K.M.: Task-aware image downscaling. In: Proceedings of the European conference on computer vision (ECCV). pp. 399-414 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.3, + 0.785, + 0.328 + ], + "angle": 0, + "content": "23. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.328, + 0.785, + 0.357 + ], + "angle": 0, + "content": "24. Kingma, D.P., Dhariwal, P.: Glow: Generative flow with invertible 1x1 convolutions. Advances in neural information processing systems 31 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.357, + 0.785, + 0.384 + ], + "angle": 0, + "content": "25. Le Meur, O., Demarty, C.H.: Invertible energy-aware images. IEEE Signal Processing Letters (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.384, + 0.785, + 0.425 + ], + "angle": 0, + "content": "26. Le Meur, O., Demarty, C.H., Blondé, L.: Deep-learning-based energy aware images. In: 2023 IEEE International Conference on Image Processing (ICIP). pp. 590-594. IEEE (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.425, + 0.785, + 0.481 + ], + "angle": 0, + "content": "27. Liu, Y., Qin, Z., Anwar, S., Ji, P., Kim, D., Caldwell, S., Gedeon, T.: Invertible denoising network: A light solution for real noise removal. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 13365-13374 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.481, + 0.785, + 0.522 + ], + "angle": 0, + "content": "28. Lu, S.P., Wang, R., Zhong, T., Rosin, P.L.: Large-capacity image steganography based on invertible neural networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 10816-10825 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.522, + 0.785, + 0.563 + ], + "angle": 0, + "content": "29. Malmodin, J.: The power consumption of mobile and fixed network data services—the case of streaming video and downloading large files. In: Electronics Goes Green. vol. 2020 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.564, + 0.785, + 0.619 + ], + "angle": 0, + "content": "30. Martin, D., Fowlkes, C., Tal, D., Malik, J.: A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In: Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001. vol. 2, pp. 416-423. IEEE (2001)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.619, + 0.785, + 0.66 + ], + "angle": 0, + "content": "31. Newson, A., Delon, J., Galerne, B.: A stochastic film grain model for resolution-independent rendering. In: Computer Graphics Forum. vol. 36, pp. 684-699. Wiley Online Library (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.66, + 0.785, + 0.688 + ], + "angle": 0, + "content": "32. Norkin, A., Birkbeck, N.: Film grain synthesis for av1 video codec. In: 2018 Data Compression Conference. pp. 3-12. IEEE (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.688, + 0.785, + 0.73 + ], + "angle": 0, + "content": "33. 
Nugroho, K.A., Ruan, S.J.: R-ace network for oled image power saving. In: 2022 IEEE 4th Global Conference on Life Sciences and Technologies (LifeTech). pp. 284-285. IEEE (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.73, + 0.785, + 0.771 + ], + "angle": 0, + "content": "34. Radosavljevic, M., François, E., Reinhard, E., Hamidouche, W., Amestoy, T.: Implementation of film-grain technology within vvc. In: Applications of Digital Image Processing XLIV. vol. 11842, pp. 85-95. SPIE (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.771, + 0.785, + 0.799 + ], + "angle": 0, + "content": "35. Reddi, S.J., Kale, S., Kumar, S.: On the convergence of adam and beyond. arXiv preprint arXiv:1904.09237 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.799, + 0.785, + 0.841 + ], + "angle": 0, + "content": "36. Reinhard, E., Demarty, C.H., Blondé, L.: Pixel value adjustment to reduce the energy requirements of display devices. SMPTE Motion Imaging Journal 132(7), 10-19 (2023)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.148, + 0.785, + 0.841 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.253, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3R-INN: How to be climate friendly while consuming/delivering videos?" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.148, + 0.785, + 0.189 + ], + "angle": 0, + "content": "37. Robinson, D.: Greening of streaming: The less accord: Low energy sustainable streaming. In: Proceedings of the 2nd Mile-High Video Conference (MHV'23). p. 115 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.19, + 0.785, + 0.217 + ], + "angle": 0, + "content": "38. Shin, Y.G., Park, S., Yoo, M.J., Ko, S.J.: Unsupervised deep power saving and contrast enhancement for oled displays. arXiv preprint arXiv:1905.05916 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.218, + 0.785, + 0.244 + ], + "angle": 0, + "content": "39. Stoyan, D., Kendall, W.S., Chiu, S.N., Mecke, J.: Stochastic geometry and its applications. John Wiley & Sons (2013)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.245, + 0.785, + 0.273 + ], + "angle": 0, + "content": "40. Sun, W., Chen, Z.: Learned image downscaling for upscaling using content adaptive resampler. IEEE Transactions on Image Processing 29, 4027-4040 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.273, + 0.785, + 0.3 + ], + "angle": 0, + "content": "41. Trust, T.C.: Carbon impact of video streaming. https://www.carbontrust.com/eneu/node/1537 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.3, + 0.785, + 0.356 + ], + "angle": 0, + "content": "42. Xiao, M., Zheng, S., Liu, C., Wang, Y., He, D., Ke, G., Bian, J., Lin, Z., Liu, T.Y.: Invertible image rescaling. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part I 16. pp. 126-144. Springer (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.356, + 0.785, + 0.397 + ], + "angle": 0, + "content": "43. Yin, J.L., Chen, B.H., Peng, Y.T., Tsai, C.C.: Deep battery saver: End-to-end learning for power constrained contrast enhancement. IEEE Transactions on Multimedia 23, 1049-1059 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.397, + 0.785, + 0.425 + ], + "angle": 0, + "content": "44. 
Zhao, R., Liu, T., Xiao, J., Lun, D.P., Lam, K.M.: Invertible image decolorization. IEEE Transactions on Image Processing 30, 6081-6095 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.425, + 0.785, + 0.467 + ], + "angle": 0, + "content": "45. Zhu, F., Chen, G., Hao, J., Heng, P.A.: Blind image denoising via dependent dirichlet process tree. IEEE transactions on pattern analysis and machine intelligence 39(8), 1518-1531 (2016)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.148, + 0.785, + 0.467 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/f281395b-e7ef-449c-8738-d5a976fca3fe_origin.pdf b/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/f281395b-e7ef-449c-8738-d5a976fca3fe_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..fdd17e791b372e2c01484aa18d2aeef83d9040e0 --- /dev/null +++ b/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/f281395b-e7ef-449c-8738-d5a976fca3fe_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86fa5e512de5166a0e4e18a039d322d6514a67a40520d287ed6c02b5e850e4cb +size 4718609 diff --git a/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/full.md b/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/full.md new file mode 100644 index 0000000000000000000000000000000000000000..c0b804d5d38899a2358c5c5d3b8dbcf6d8134735 --- /dev/null +++ b/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/full.md @@ -0,0 +1,347 @@ +# 3R- INN: How to be climate friendly while consuming/delivering videos? + +Zoubida Ameur1, Claire-Hélène Demarty1, Daniel Ménard2, and Olivier Le Meur1 + +1 InterDigital, France, firstname.lastname@interdigital.com +2 IETR, daniel.menard@insa-rennes.fr + +![](images/65a53aa72741b3542eed667bb83f0b09595a3293d57832a896ad46c7ad59b915.jpg) +Fig. 1: 3R-INN: End-to-end energy-aware video distribution chain by Removing grain, Rescaling and Reducing display energy. + +Abstract. The consumption of a video requires a considerable amount of energy during the various stages of its life-cycle. With a billion hours of video consumed daily, this contributes significantly to the greenhouse gas (GHG) emission. Therefore, reducing the end-to-end carbon footprint of the video chain, while preserving the quality of experience at the user side, is of high importance. To contribute in an impactful manner, we propose 3R-INN, a single invertible network that does three tasks at once: given a high-resolution (HR) grainy image, it Rescales it to a lower resolution, Removes film grain and Reduces its power consumption when displayed. Providing such a minimum viable quality content contributes to reducing the energy consumption during encoding, transmission, decoding and display. 3R-INN also offers the possibility to restore either the HR grainy original image or a grain-free version, thanks to its invertibility and the disentanglement of the high frequency, and without transmitting auxiliary data. Experiments show that, 3R-INN enables significant energy savings for encoding (78%), decoding (77%) and rendering (5% to 20%), while outperforming state-of-the-art film grain removal and synthesis, energy-aware and downscaling methods on different test-sets. 
+ +Keywords: Energy saving $\cdot$ Invertible network $\cdot$ Video distribution + +# 1 Introduction + +Over $75\%$ of the world's global GHG emissions comes from energy production, particularly from fossil fuels. The growing energy consumption of the media and entertainment industry, in particular streaming, strongly contributes to climate change, with more than $1.3\%$ of GHG in 2020 [41]. Therefore, this industry has to move towards decarbonisation, energy efficiency and sustainability in + +all its stages, e.g., head-end (encoding), delivery (transmission) and end-user device (decoding and display). Taking apart the energy consumed while building the different necessary equipment, reduced energy consumption at the head-end translates into shorter encoding times and lower computing loads, while at the distribution level it translates into lower bit-rates. At the end-device level, significant gains can be achieved, as displays constitute the most power-hungry part of the whole chain [41]. In the specific case of emissive displays, e.g., organic light-emitting diodes (OLEDs), the power consumption is pixelwise and therefore directly dependent on the displayed content. Consequently, less energy-intensive images at display and shorter decoding times will also lead to lower energy consumption. + +The encoding and decoding times are related to the content resolution and complexity. Downscaling the content before encoding and upscaling it after decoding while preserving the same quality of experience [7] is one straightforward solution to reduce the computational burden. Additionally, removing and modeling artistic noise, such as film grain, before encoding and synthesizing it after decoding, not only reduces encoding and decoding times, but also significantly reduces the bit-rate [32], while still preserving the artistic intent at the user side. Finally, as displays consume the largest proportion of the energy, providing energy-aware content, i.e., that will consume less when displayed, is of significant importance, at least for OLED displays. Several studies addressed this issue by investigating how to reduce the content brightness [25, 26, 36]. + +Because climate change is a pressing issue, we believe that having a global vision on the overall energy consumption in the video chain and an holistic approach on how to reduce it is of the utmost importance. Therefore, in this paper, we propose an end-to-end energy reduction of the video distribution chain, while preserving a good quality of experience at the user side, by leveraging a deep learning invertible neural network (INN)-based model, called 3R-INN. Prior to encoding a HR grainy image, our 3R-INN multi-task network Rescales it to a lower resolution, Removes film grain and Reduces its power consumption when displayed, by some reduction rate $R$ . While saving energy along the video chain, 3R-INN also provides a visually-pleasant content intended to be displayed, following the new paradigm proposed in [37], which promotes to target a minimum viable video quality for transported videos. Within this same paradigm, the possibility to recover the original content is encouraged, with the counter part that it will consume more. This is feasible thanks to the invertibility of 3R-INN which allows to retrieve the original HR image from the clean energy-aware LR one by running inversely the framework. 
Furthermore, thanks to the modeling and disentanglement of the lost information in the forward pass, two versions, grainy and clean, of the original HR image can be restored, without transmitting any auxiliary information. With the idea that the energy consumed when applying an energy reduction processing should not exceed the amount of energy saved, we designed 3R-INN to be a single network, that replaces three separate and potentially heavier processings, showing that + +the use of 3R-INN results in a positive energy balance, as soon as a content is displayed multiple times. In summary, our main contributions are four-folds: + +- a first end-to-end solution for reducing the energy consumption of the video chain that exhibits a better energy balance compared to the sum of the corresponding three tasks in the state-of-the-art; +- a single network for the three tasks of rescaling, removing/synthesizing grain and reducing the energy at display, dedicated towards saving energy in the whole video chain; +- the provision of a visually pleasant, energy reduced version of the original image, and the capability to go back to the original HR grainy image with no transmission of additional metadata along the video chain; +- the best method so far for high-fidelity film grain synthesis, with no need of auxiliary data and the best method so far for downscaling and building energy-aware images. + +In the following, we first review the state-of-the-art for rescaling, film grain removal/synthesis and energy-aware images (Section 2), before detailing our proposed solution (Section 3). In Section 4, we provide a compared analysis of the quantitative, qualitative and energy performances of the use of 3R-INN, against state-of-the-art solutions. In Section 5, we draw conclusions and perspectives. + +# 2 Related work + +Rescaling The rescaling task helps saving resources, through the storage and transfer of downscaled versions of an original HR image/video. Recovering the original resolution while having pleasant LR content can be very challenging. For these purposes, to maximize the restoration performance while producing visually pleasant low-resolution (LR) content, several works learn jointly the two tasks, i.e., downscaling and upscaling. In [22], an auto-encoder-based framework learns the optimal LR image that maximizes the reconstruction performance of the HR image. In [40], an unsupervised downscaling method with consideration on the upscaling process but no assumption on how the HR image is downscaled, allows to learn the essential information for upscaling in an optimal way. Following a different paradigm, the method called IRN [42] models the down- and up-scaling processes using an invertible bijective transformation. In a forward pass, IRN performs the downscaling process by producing visually pleasing LR images while capturing the distribution of the lost information using a latent variable that follows a specified distribution. Meanwhile, the upscaling process is made tractable such that the HR image is reconstructed by inversely passing a randomly drawn latent variable with the LR image through the network. However, the reconstruction is not image-adaptive due to the case-agnostic latent variable. + +Film grain removal and synthesis To better preserve film grain while compressing video content efficiently, it is classically removed and modeled before encoding and restored after decoding [15, 32]. 
Hence, dedicated methods for film grain removal are proposed, based on either temporal filtering [10], spatiotemporal inter-color correlation filtering [19] or deep-learning encoder-decoder models [6]. On the other hand, several studies addressed the film grain synthesis + +task. In [31], a Boolean in-homogeneous model [39] is used to model the grain, which corresponds to uniformly distributed disks. In AV1 codec [32], film grain is modeled by an autoregressive (AR) method as well as by an intensity-based function to adjust its strength. In VVC [34], a method based on frequency filtering is used. The grain pattern is first modeled thanks to a discrete cosine transform (DCT) applied to the grain blocks corresponding to smooth regions, and further scaled to the appropriate level, by using a step-wise scaling function. In [6], a conditional generative adversarial network (cGAN) that generates grain at different intensities is proposed. Yet, it does not perform any analysis on the original grain for a reliable synthesis. In [5], a deep-learning framework is proposed which consists of a style encoder for film grain style analysis, a mapping network for film grain style generation, and a synthesis network that generates and blends a specific grain style to a content in a content-adaptive manner. + +Energy-aware images Many works target the reduction of the energy consumption of images while displayed on screens, especially for OLED displays. A first set of methods reduces the luminance through clipping or equalizing histograms [20, 21]. Other works directly scale the pixel luminance [26, 36, 38]. The most promising methods leverage deep learning models, trained with a combination of loss functions that minimize the energy consumption while maintaining an acceptable perceptual quality. In [43], a deep learning model trained with a variational loss for simultaneously enhancing the visual quality and reducing the power consumption is proposed. Authors in [38] describe an adaptive contrast enhancement (ACE) convolutional neural network, that performs contrast enhancement of luminance scaled images. In [33], an improved version of ACE, called Residual-ACE (R-ACE), is proposed to infer an attenuation map instead of a reduced image. In [26], authors revisit R-ACE to significantly reduce the complexity without compromising the performance. Different from the above methods, an invertible energy-aware network (InvEAN) [25] produces invertible energy-aware images and allows to recover the original images if required. + +Invertible neural networks INNs learn the mapping $x = f(z)$ , which is fully invertible as $z = f^{-1}(x)$ , through a sequence of differentiable invertible mappings such as affine coupling layers [12] and invertible $1 \times 1$ convolutional layers [24]. INNs have direct applications in ambiguous inverse problems by learning information-lossless mappings [13, 27, 44]. The lost information in the forward process is captured by additional latent output variables. Thus, the inverse process is learned implicitly. A first application is the steganography, i.e., concealing images into other images [9, 28]. In [44], an INN is used to produce invertible grayscale images, where the lost color information is encoded into a set of Gaussian distributed latent variables. The original color version can be recovered by using a new set of randomly sampled Gaussian distributed variables as input, together with the synthetic grayscale, through the reverse mapping. 
Similarly, an invertible denoising network (InvDN) transforms a noisy input into a LR clean image and a latent representation containing noise in [27]. To discard noise and restore the clean image, InvDN replaces the noisy latent representation with + +another one sampled from a prior distribution during reversion. In [13], another INN further disentangles noise from the high frequency image information. + +# 3 Proposed approach + +With the target of reducing the overall energy consumption of the video chain, our 3R-INN framework, run in a forward pass at the encoder side, performs three invertible tasks simultaneously: 1) film grain removal, 2) downscaling and 3) display energy reduction, as illustrated in Figure 1. From a HR grainy image $I_G \in \mathbb{R}^{H \times W \times 3}$ , 3R-INN outputs a visually pleasant grain-free LR energy-aware image $\tilde{I}_{LR|R} \in \mathbb{R}^{\frac{1}{2} H \times \frac{1}{2} W \times 3}$ with $R \in [0,1]$ being the energy reduction rate, and 2 being the scaling factor corresponding to the best compromise between quality of the LR images, framework complexity and energy savings in the video chain. To ensure the process invertibility and the bijective mapping, the lost information mainly due to grain removal and downscaling is captured in a latent variable $z$ distributed according to a standard Gaussian distribution $\mathcal{N}(0,1)$ . This can be formulated as: $[\tilde{I}_{LR|R}, z] = f_{\theta}(I_G)$ where $\theta$ is the set of trainable parameters of the 3R-INN network $f$ . $\tilde{I}_{LR|R}$ is intended to be encoded, transmitted and displayed at the end-user device for an optimal energy consumption and quality of experience trade-off. The lost information $z$ is further disentangled into two parts inside 3R-INN, by setting $\tilde{z}$ its internal representation as $\tilde{z} = [\tilde{z}_D, \tilde{z}_G]$ with $\tilde{z}_D$ and $\tilde{z}_G$ representing losses due to downscaling and grain removal, respectively. + +In case the original content should be recovered, 3R-INN is run in an inverse pass at the decoder side (see Figure 1), as follows: $\tilde{I}_G = f_\theta^{-1}([\tilde{I}_{LR|R},z])$ . The original HR grainy content is then reconstructed with no need to transmit any auxiliary information in the video chain thanks to the modeling of the lost information. Moreover, thanks to the film grain and high frequency loss disentanglement, $\tilde{z} = [\tilde{z}_D,\tilde{z}_G]$ , 3R-INN is also able to generate a clean HR version $\tilde{I}_C$ of the original content by setting $\tilde{z}_G = 0$ . The overall architecture of the proposed framework is composed of three block types: one Haar Transformation block, several invertible blocks and a conditional latent encoding block, as illustrated in Figure 1. + +Haar transform As removing film grain and downscaling an image significantly impacts high frequencies, it seems natural to first decompose the input HR image into low and high-frequency components. For that purpose, we chose the dyadic Haar wavelet transformation, similarly to [42, 44], because of its simplicity, efficiency and invertibility. Specifically, the Haar transform decomposes an input feature $f_{in} \in \mathbb{R}^{H \times W \times C}$ into one low-frequency $f_{low} \in \mathbb{R}^{\frac{1}{2} H \times \frac{1}{2} W \times C}$ and three high-frequency $f_{high} \in \mathbb{R}^{\frac{1}{2} H \times \frac{1}{2} W \times 3C}$ sub-bands. 
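To make this step concrete, here is a minimal PyTorch-style sketch of one invertible dyadic Haar decomposition (our own illustration, assuming orthonormal scaling; the paper does not provide code):

```python
import torch

def haar_forward(x: torch.Tensor):
    """Split (B, C, H, W) into f_low (B, C, H/2, W/2) and f_high (B, 3C, H/2, W/2)."""
    a = x[:, :, 0::2, 0::2]  # top-left pixel of each 2x2 block
    b = x[:, :, 0::2, 1::2]  # top-right
    c = x[:, :, 1::2, 0::2]  # bottom-left
    d = x[:, :, 1::2, 1::2]  # bottom-right
    low = (a + b + c + d) / 2                  # LL band: proportional to a 2x2 average
    high = torch.cat([(a - b + c - d) / 2,     # detail: horizontal differences
                      (a + b - c - d) / 2,     # detail: vertical differences
                      (a - b - c + d) / 2],    # detail: diagonal differences
                     dim=1)
    return low, high

def haar_inverse(low: torch.Tensor, high: torch.Tensor) -> torch.Tensor:
    """Losslessly undo haar_forward."""
    d1, d2, d3 = torch.chunk(high, 3, dim=1)
    x = low.new_zeros(low.shape[0], low.shape[1], low.shape[2] * 2, low.shape[3] * 2)
    x[:, :, 0::2, 0::2] = (low + d1 + d2 + d3) / 2
    x[:, :, 0::2, 1::2] = (low - d1 + d2 - d3) / 2
    x[:, :, 1::2, 0::2] = (low + d1 - d2 - d3) / 2
    x[:, :, 1::2, 1::2] = (low - d1 - d2 + d3) / 2
    return x
```

Round-tripping `haar_inverse(*haar_forward(x))` recovers `x` up to floating-point error, which is what makes this decomposition a safe first stage of an invertible network.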
$f_{low}$ , produced by an average pooling, represents the overall structure and coarse features of the image, while $f_{high}$ contains the finer details in the vertical, horizontal and diagonal directions, corresponding to film grain and edges. This splitting strategy separates, very early in the process, the low-frequency components from the information we aim to suppress. $f_{low}$ and $f_{high}$ serve as input to the following invertible blocks.

Invertible block As invertible blocks, we selected the coupling layer architecture proposed in [24]. A given input $h^i$ is composed of two parts $h_1^i$ and $h_2^i$ , representing the three low-frequency and the nine high-frequency sub-bands of the RGB input channels, respectively. These sub-bands are processed by the $i^{th}$ invertible block as follows:

$$
h_1^{i+1} = h_1^i + \phi\left(h_2^i\right) \tag{1}
$$

$$
h_2^{i+1} = h_2^i \odot \exp\left(\psi\left(h_1^{i+1}\right)\right) + \eta\left(h_1^{i+1}\right)
$$

where $\phi$ , $\psi$ and $\eta$ are dense blocks [18]. Given $[h_1^{i+1}, h_2^{i+1}]$ , the inverse transformation is easily computed by:

$$
h_2^i = \left(h_2^{i+1} - \eta\left(h_1^{i+1}\right)\right) / \exp\left(\psi\left(h_1^{i+1}\right)\right) \tag{2}
$$

$$
h_1^i = h_1^{i+1} - \phi\left(h_2^i\right)
$$

Conditioned latent encoding block Invertible networks learn a bijective mapping between an input and an output distribution. In case of information loss, a latent variable $\tilde{z}$ is added to preserve invertibility. This latent variable is assumed to follow a known distribution, i.e., a standard Gaussian distribution, to avoid transmitting additional information for the reconstruction and to make the process case-agnostic. In our context, this would mean that the reconstruction of the HR grainy $(\tilde{I}_G)$ or clean $(\tilde{I}_C)$ images would not rely on a priori knowledge of the LR image $\tilde{I}_{LR|R}$ . To overcome this limitation and to enable an image-adaptive reconstruction during the inverse pass, the lost information $\tilde{z}$ is transformed into a Gaussian distributed latent variable $z$ whose mean and variance are conditioned on $\tilde{I}_{LR|R}$ . This is done through a latent encoding block inspired by [44], whose structure is a one-sided affine coupling layer that normalizes $\tilde{z}$ into a standard Gaussian distributed variable $z$ as follows, with $\phi_g$ and $\theta_g$ being dense blocks:

$$
z = \left(\tilde{z} - \phi_g\left(\tilde{I}_{LR|R}\right)\right) / \exp\left(\theta_g\left(\tilde{I}_{LR|R}\right)\right) \tag{3}
$$

The reverse mapping can be formulated as:

$$
\tilde{z} = z \odot \exp\left(\theta_g\left(\tilde{I}_{LR|R}\right)\right) + \phi_g\left(\tilde{I}_{LR|R}\right) \tag{4}
$$

# Training objectives

3R-INN is first trained on the rescaling and film grain removal/synthesis tasks, before being fine-tuned on the energy reduction task.

Rescaling and film grain removal/synthesis tasks The forward-pass optimization is driven by a fidelity loss $\mathcal{L}_{forw}$ , which guarantees a visually pleasant clean LR image $\tilde{I}_{LR}$ , and a regularization loss $\mathcal{L}_{reg}$ , which constrains the latent variable $z$ to follow a standard Gaussian distribution.
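Before turning to these losses, here is a minimal PyTorch-style sketch (our illustration, not the authors' code) of the coupling step of Eqs. (1)-(2) and the conditioned latent encoding of Eqs. (3)-(4); `DenseBlock` is a simplified placeholder for the dense blocks of [18]:

```python
import torch
import torch.nn as nn

class DenseBlock(nn.Module):
    """Simplified stand-in for the densely connected convolutional blocks of [18]."""
    def __init__(self, c_in: int, c_out: int):
        super().__init__()
        self.f = nn.Sequential(nn.Conv2d(c_in, 32, 3, padding=1),
                               nn.LeakyReLU(0.2),
                               nn.Conv2d(32, c_out, 3, padding=1))
    def forward(self, x):
        return self.f(x)

class InvBlock(nn.Module):
    """One coupling step over (h1, h2): 3 low-frequency and 9 high-frequency sub-bands."""
    def __init__(self, c1: int = 3, c2: int = 9):
        super().__init__()
        self.phi = DenseBlock(c2, c1)
        self.psi = DenseBlock(c1, c2)
        self.eta = DenseBlock(c1, c2)

    def forward(self, h1, h2):                      # Eq. (1)
        h1 = h1 + self.phi(h2)
        h2 = h2 * torch.exp(self.psi(h1)) + self.eta(h1)
        return h1, h2

    def inverse(self, h1, h2):                      # Eq. (2)
        h2 = (h2 - self.eta(h1)) / torch.exp(self.psi(h1))
        h1 = h1 - self.phi(h2)
        return h1, h2

class LatentEncoding(nn.Module):
    """One-sided coupling that conditions the latent on the LR image (Eqs. (3)-(4))."""
    def __init__(self, c_lr: int = 3, c_z: int = 9):
        super().__init__()
        self.phi_g = DenseBlock(c_lr, c_z)
        self.theta_g = DenseBlock(c_lr, c_z)

    def forward(self, z_tilde, i_lr):               # normalize towards N(0, 1), Eq. (3)
        return (z_tilde - self.phi_g(i_lr)) / torch.exp(self.theta_g(i_lr))

    def inverse(self, z, i_lr):                     # denormalize, Eq. (4)
        return z * torch.exp(self.theta_g(i_lr)) + self.phi_g(i_lr)
```

Because each step is invertible in closed form, running the whole network backwards costs about the same as its forward pass.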
To guide $f_{\theta}$ to generate $\tilde{I}_{LR}$ , a downsampled version $I_{LR}$ of the HR clean image $I_C$ is computed with a bicubic filter and used as ground truth when minimizing $\mathcal{L}_{forw}$ :

$$
\mathcal{L}_{\mathrm{forw}}\left(\tilde{I}_{LR}, I_{LR}\right) = \frac{1}{N} \sum_{i=1}^{N} \left\| \tilde{I}_{LR} - I_{LR} \right\|_2 \tag{5}
$$

with $N$ the batch size. Second, the log-likelihood of the probability density function $p(z)$ of the standard Gaussian distribution is maximized, with $D = \dim(z)$ :

$$
\mathcal{L}_{reg} = -\log(p(z)) = -\log\left(\frac{1}{(2\pi)^{D/2}} \exp\left(-\frac{1}{2}\|z\|^2\right)\right) \tag{6}
$$

The inverse-pass optimization consists of two fidelity losses, $\mathcal{L}_{\text{back}_G}$ and $\mathcal{L}_{\text{back}_C}$ , used to restore $\tilde{I}_G$ and $\tilde{I}_C$ , respectively. To this end, $z$ is first decoded into $\tilde{z}$ by the latent encoding block conditioned on $\tilde{I}_{LR}$ . Then the disentanglement of film grain $(G)$ and fine details $(D)$ is performed with $\tilde{z} = [\tilde{z}_D, \tilde{z}_G]$ .

$\tilde{I}_G$ is reconstructed by considering all the information contained in $\tilde{z}$ , i.e., both film grain and fine details:

$$
\mathcal{L}_{\text{back}_G}\left(\tilde{I}_G, I_G\right) = \frac{1}{N} \sum_{i=1}^{N} \left\| f_{\theta}^{-1}\left(\tilde{I}_{LR}, z\right)_{\big|\,[\tilde{z}_D, \tilde{z}_G]} - I_G \right\|_1 \tag{7}
$$

$\tilde{I}_C$ is restored by considering only the subset $\tilde{z}_D$ of $\tilde{z}$ , i.e., by using $\tilde{z} = [\tilde{z}_D, \tilde{z}_G = 0]$ :

$$
\mathcal{L}_{\text{back}_C}\left(\tilde{I}_C, I_C\right) = \frac{1}{N} \sum_{i=1}^{N} \left\| f_{\theta}^{-1}\left(\tilde{I}_{LR}, z\right)_{\big|\,[\tilde{z}_D, 0]} - I_C \right\|_1 \tag{8}
$$

For both fidelity losses, the $\ell_1$ norm is classically used, as in [27, 42]. Finally, for the first two tasks, the following weighted sum is minimized:

$$
\mathcal{L}_{\text{total}} = \lambda_1 \mathcal{L}_{\text{forw}} + \lambda_2 \mathcal{L}_{\text{reg}} + \lambda_3 \mathcal{L}_{\text{back}_C} + \lambda_4 \mathcal{L}_{\text{back}_G} \tag{9}
$$

Energy-aware task After 3R-INN has learned the film grain removal/synthesis and rescaling tasks, it is fine-tuned on the forward pass with two additional power and fidelity losses, $\mathcal{L}_{pow}$ and $\mathcal{L}_{SSIM}$ , so that it outputs an energy-aware grain-free LR image $\tilde{I}_{LR|R}$ , i.e., one whose power consumption is reduced by $R$ compared to that of $I_{LR}$ . Contrary to most works on energy-aware images, which assume a linear relationship between the power consumption $P_Y$ of an image and its linearized luminance [33], we follow the model in [11] dedicated to RGBW OLED screens and compute $P_{RGBW}$ as the sum of the powers consumed by the four R, G, B and W LEDs. As in [25], the following power loss is then minimized:

$$
\mathcal{L}_{pow} = \left\| \tilde{P}_{RGBW} - (1 - R) \times P_{RGBW} \right\|_1 \tag{10}
$$

with $(1 - R) \times P_{RGBW}$ the desired target power and $\tilde{P}_{RGBW}$ the power of $\tilde{I}_{LR|R}$ .
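As a hedged illustration of Eq. (10), the power term can be implemented with any differentiable display-power model; `display_power` below is a deliberately simplistic placeholder, not the RGBW model of [11]:

```python
import torch

def display_power(img: torch.Tensor) -> torch.Tensor:
    # Hypothetical placeholder: linear in the mean pixel value, NOT the model of [11].
    return img.mean(dim=(1, 2, 3))  # one scalar per image in the (B, C, H, W) batch

def power_loss(i_lr_r: torch.Tensor, i_lr: torch.Tensor, R: float) -> torch.Tensor:
    """l1 distance between actual and target power, in the spirit of Eq. (10)."""
    p_target = (1.0 - R) * display_power(i_lr)   # desired power of the output
    p_actual = display_power(i_lr_r)             # power of the energy-aware image
    return (p_actual - p_target).abs().mean()
```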
To ensure a better visual quality of the energy-aware images, a structural similarity index measure (SSIM) loss is added and minimized:

$$
\mathcal{L}_{SSIM} = 1 - SSIM\left(\tilde{I}_{LR|R}, I_{LR}\right) \tag{11}
$$

As the inverse-pass objectives remain exactly the same, the total loss minimized in the fine-tuning stage is:

$$
\mathcal{L}_{\text{finetuned}} = \mathcal{L}_{\text{total}} + \lambda_5 \mathcal{L}_{\text{pow}} + \lambda_6 \mathcal{L}_{\text{SSIM}} \tag{12}
$$

Training details During training, we use the DIV2K training set [4] from the FilmGrainStyle740K dataset [5], which contains pairs of corresponding images with and without grain. To complement the DIV2K validation set, we evaluate 3R-INN on the BSDS300 test set [30] and the Kodak24 dataset [14], which were augmented with grainy versions of their images by following the same process as in the FilmGrainStyle740K dataset. Input images were randomly cropped to $144 \times 144$ patches and augmented with random horizontal and vertical flips. The other training parameters are: Adam optimizer [23, 35] with $\beta_1 = 0.9$ , $\beta_2 = 0.999$ ; mini-batch size of 16; 500k iterations for the first two tasks plus 5k iterations of energy-aware fine-tuning; learning rate initialized to 2e-4 and halved at [100k, 200k, 300k, 400k] mini-batch updates. Hyper-parameters are set to $(\lambda_1, \lambda_2, \lambda_3, \lambda_4, \lambda_5, \lambda_6) = (40, 1, 1, 1, 1e10, 1e4)$ and eight successive invertible blocks are used. Scale and shift coefficients are learned through a five-layer densely connected convolutional block. Each convolutional filter is of size $3 \times 3$ with padding 1, followed by a leaky ReLU activation with negative slope 0.2. The intermediate channel number of the convolutional blocks is fixed to 32. The dimensions of $\tilde{z}_D$ and $\tilde{z}_G$ were set to 8 and 1, respectively.

# 4 Experiments

The goal of our paper is to reduce the overall energy consumption along the video distribution system using 3R-INN, which primarily supports the display of a grain-free energy-aware LR image, and additionally offers the possibility to recover the original version, i.e., the grainy HR image, as well as a clean HR version as a third option. Thus, we adopt the following evaluation use case: we first assess the energy savings achieved by using 3R-INN along the video chain and evaluate its own energy needs. Then, we assess its quality performance, for both the LR grain-free energy-aware images and the reconstructed HR grainy images, against state-of-the-art methods. An evaluation of the reconstructed HR clean images and an ablation study are provided in the supplementary materials.

# 4.1 Energy consumption performance

Evaluation of energy savings To estimate the energy savings realized along the video distribution system (headend, delivery, decoding and display), we tested the full video transmission chain by applying 3R-INN to two JVET sequences, RaceHorses (300 frames, $832 \times 480$ , 10s) and BasketBall (500 frames, HD) [8]. The LR clean energy-aware version at $R = 20\%$ is encoded using VTM [3] in full intra mode. Although this does not reflect real-world scenarios, in which efficient hardware decoders are used, the choice of a non-optimized software-based VVC decoder (VTM) nevertheless makes it possible to demonstrate that 3R-INN yields consistent energy savings. Fixed broadband transmission was assumed.
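Schematically, the test chain can be outlined as below; the callables are hypothetical placeholders (this is not the authors' tooling, nor the actual VTM interface):

```python
from typing import Callable, Iterable, List, Tuple

def evaluate_chain(frames: Iterable, forward_3r_inn: Callable,
                   encode: Callable, decode: Callable,
                   reduction_rate: float = 0.20) -> Tuple[int, List]:
    """Push every frame through 3R-INN and the codec, tallying the bitstream size."""
    total_bits, decoded = 0, []
    for frame in frames:
        # Forward pass: Rescale + Remove grain + Reduce display power.
        lr_energy_aware, _latent = forward_3r_inn(frame, reduction_rate)
        bitstream = encode(lr_energy_aware)   # e.g., a VTM full-intra encode at QP 22
        total_bits += 8 * len(bitstream)
        decoded.append(decode(bitstream))     # what is ultimately sent to the display
    return total_bits, decoded
```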
We then decoded and displayed the sequences on an OLED screen, with all TV options disabled, including the ambient light setting. Similarly to state-of-the-art methods, no video specific optimization was conducted as 3R-INN works in a frame by frame manner. In particular, film grain is known to be temporally + +![](images/9b0ced8cab9cdd2963178d402629ae8cba68606eead14ef36b13f6cbde313217.jpg) +Fig. 2: Bit-rate, encoding and decoding times with and without using 3R-INN in terms of QP for sequence RaceHorses. + +![](images/2c337df963ffeacf269b9aa389832552d64ea35e4a17209bf1c4d78ac3dd1308.jpg) + +![](images/fe788dff4bd7434936a373e0c04d36bf4d40e05c294f17799ff96f9a635c8d2f.jpg) + +uncorrelated, hence its frame-based analysis. Resulting videos are provided in the supplementary material, illustrating 3R-INN capability to process temporal content. + +![](images/e7eff9f174d0779a35da9aac4cd0015a979d735f4174c3b1247d6a70d7b1bfe2.jpg) +Fig. 3: Measured power consumption when displaying sequence RaceHorses. Left: Comparison between HR and LR versions at $\mathrm{QP} = 22$ . Right: Comparison between LR versions before and after encoding/decoding. + +![](images/cf36dc1fce3dba8e53b21e099df879a4ffa1f653ef2a957ec567b3c6bd48910f.jpg) + +Figure 2 reports the average encoding/decoding times and bit-rates, for different quantization parameters (QP), for the original HR clean and grainy RaceHorses sequences, and for the resulting LR versions with different $R \in \{5\%, 20\%, 40\%, 60\% \}$ . Up to QP = 27, encoding and decoding the HR grainy video is more time and bit-rate demanding than for the HR clean version. For higher QPs, encoding time is still higher, however, bit-rate and decoding time are similar, because grain was removed during the encoding process. This confirms that compressing a grainy video while preserving film grain requires encoding at low QPs (which is far from the real-world scenario), leading to high and impractical bit-rates. On the contrary, encoding LR grain-free versions, whatever the value of $R$ , shows substantially lower times and bit-rates, and consequently reduces the energy at the head-end, transmission and decoding stages. + +Figure 3 presents actual measures of energy consumptions on an OLED LG-42C2 screen, for $R \in \{5\%, 20\%, 40\%, 60\% \}$ , for the sequence RaceHorses. On the left plot, we compare the consumption of the encoded/decoded LR and HR clean sequences at $\mathrm{QP} = 22$ . This proves that displaying an energy-aware video at different reduction rates significantly reduces the display power consumption. The average gains of power are $6.8\%, 21.5\%, 33.3\%$ and $44.2\%$ . The right + +plot compares the consumption of the LR sequences for different $R$ , before and after encoding/decoding $(\mathrm{QP} = 22)$ . For each $R$ , we observe a non-significant impact of the compression on the display consumption. This demonstrates that energy-aware images are to some extent robust to compression in terms of power values. Similar results are obtained for the sequence BasketBall (shown in the supplemental material). + +Table 1 illustrates the end-to-end energy savings at each level of the video chain, for the sequence RaceHorses, at QP22 and $R = 20\%$ , according to the energy model in [17, 29]. Note that a range of power consumption values are considered for both encoding and decoding where the boundary values represent a very optimized vs. 
a non-optimized power consumption encoder, as well as hardware and software decoders, respectively (the detailed computation is provided in the supplemental material). 3R-INN allows $74\%$ and $78\%$ of total end-to-end savings with respectively highly power optimized and non-optimized encoders/decoders. This corresponds to savings of $78\%$ for head-end, $19\%$ for delivery and ca. $77\%$ for decoding. From this, we draw several observations: Head-end: As expected, the encoding energy consumption $E_{c}$ significantly depends on the incoming resolution. The HR sequences are the most energy-demanding, particularly when they contain film grain, making encoding at a lower resolution a wise choice. Delivery: The transmission energy consumption $E_{t}$ does not strongly depend on bitrate, but more on the power consumed by the infrastructure. Thus, transmitting HR instead of LR content results in a relatively small energy gain. Decoding: The gain in energy consumption for the decoding operation is significant, even if, in absolute value, it remains quite low. Display: Displaying the energy-aware clean LR video at $R = 20\%$ results in $11\%$ of energy consumption reduction compared to the original grainy HR video. With the removal of the static consumption of the screen, the achieved energy reduction is higher and nearly reaches the target rate of $20\%$ . In absolute value, the energy consumption of display $E_{D}$ is significant compared to that of the other components of the video chain, except for the encoder. However, since the content is encoded once and displayed several times, the display energy gains are further multiplied by the number of displays. Assuming that the sequence RaceHorses is viewed by $10\%$ of Netflix subscribers [2] for one hour, using 3R-INN would save 156 GWh of energy, equivalent to the monthly consumption of 176 American citizens [1]. + +Energy cost of using 3R-INN 3R-INN is a single network that replaces three separate NN architectures for the tasks of grain removal /synthesis, rescaling and building energy-aware images. In that sense, Table 2 reports a comparison of its complexity and energy performance against those of the sum of the + +Table 1: End-to-end energy savings along the video chain for a fixed broadband access. Case study of the sequence RaceHorses (300 frames, $832 \times 480$ , 30fps, 10s). + +
| | Headend<br>Encoding time (s), $E_c$ | Delivery<br>Bitrate (kbps) (QP=22), $E_t$ | Decoding<br>Decoding time (s), $E_d$ | Display<br>Power (W), $E_D$ | Total |
|---|---|---|---|---|---|
| Original (Grainy HR) | 46682, [2.59W, 1167kWh] | 16668, 0.0055Wh | 23.2, [0.005W, 0.15Wh] | 60.8, 0.168Wh | [2.7685W, 1167.0003kWh] |
| Original (Clean HR) | 37901, [2.10W, 947kWh] | 14516, 0.0053Wh | 21.0, [0.004W, 0.14Wh] | 59.7, 0.165Wh | [2.2743W, 947.0003kWh] |
| Ours (Energy-aware clean LR, R = 20%) | 10150, [0.56W, 253kWh] | 4237, 0.0045Wh | 5.7, [0.001W, 0.039Wh] | 54.4, 0.151Wh | [0.7165W, 253.0001kWh] |
| Reduction in %, Grainy HR vs. Ours | [78%, 78%] | 19% | [80%, 74%] | 11% | [74%, 78%] |
| Reduction in energy, Grainy HR vs. Ours | [2.03W, 914kWh] | 0.0010Wh | [0.004W, 0.11Wh] | 0.017Wh | [2.052W, 914.0002kWh] |
+ +best-performing networks for the three tasks, on sequence RaceHorses, in terms of number of parameters, number of Multiply-ACcumulate operations (MACs), power consumption and number of equivalent displays. Power consumption is approximated using the model in [16], which we assume is valid for all the networks used in this study. The number of equivalent displays represents the number of displays/users that are needed to counterbalance the needed energy to run the network(s). Computation details are provided in the supplementary materials. + +While producing a clean, $20\%$ energy-reduced LR image from an HR grainy one, the cost of running 3R-INN in a forward pass is compared with the successive use of three NN architectures: StyleFG analyzer module to model film grain, IRN to remove it and downscale the image, and InvEAN to reduce its energy consumption. From the first part of Table 2, 3R-INN counts significantly fewer parameters/operations and needs less power than the combination of the three networks. Moreover, solely comparing with the savings at the display side ( $\approx 6$ Watts, see Table 1), the power gain is tangible as soon as the content is displayed 110 times for 3R-INN vs. 161 times for the combination. + +In case recovering an HR grainy image is required, 3R-INN is simply run in an inverse pass, thus, its complexity and power savings remain the same. In contrast, InvEAN and IRN are first run in an inverse pass to recover a clean HR version; then, film grain is synthesized using StyleFG synthesizer module. This time, 3R-INN use represents $58\%$ less power than the combination (660 vs. 1573W), and requires fewer equivalent displays (110 vs. 262) to offset its power requirements. + +# 4.2 Quantitative and qualitative evaluation of LR images + +Film grain removal and downscaling The quantitative and qualitative evaluations of the LR clean image $\tilde{I}_{LR|R=0}$ , i.e., corresponding to an energy reduction rate $R = 0$ , are given in Table 3 and Figure 4, respectively. The reference image is the bicubic rescaling of the HR clean image. Although quite similar to the experimental protocol used for IRN in [42], we here assess the ability of the network both to rescale and to remove film grain. Thus, for a fair comparison, we re-train IRN on our dataset to perform both tasks, given grainy HR images as input. The latter is designed and optimized for clean HR image reconstruction only, although it can reconstruct grainy HR images if the original high-frequencies $z$ are provided. However, in the video chain context, this would lead to the transmission of heavy metadata, whereas 3R-INN operates without any transmission of metadata. Results show that the proposed method outperforms IRN in terms + +Table 2: Comparison of complexity and power performance of 3R-INN against the use of three independent neural networks for sequence RaceHorses. A corresponding power equivalent in terms of number of displays is also given. + +
| Output | Network(s) | Parameters | GMACs | Power (W) | Display equivalent # |
|---|---|---|---|---|---|
| Clean LR at R=20% | StyleFG analyzer + IRN + InvEAN | 23.3M | 334 | 967 | 161 |
| | 3R-INN (forward) | 1.7M | 230 | 660 | 110 |
| Grainy HR | StyleFG synthesizer + IRN + InvEAN | 36.3M | 616 | 1573 | 262 |
| | 3R-INN (inverse) | 1.7M | 230 | 660 | 110 |
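As a rough sanity check of the last column, the display-equivalent counts are consistent with the roughly 6 W display-side saving reported in Section 4.1 (our back-of-the-envelope reading, not a computation from the paper):

```python
# Number of displays whose savings offset the network's power draw.
display_saving_w = 6.0  # approximate per-display saving at R = 20%
for name, power_w in [("StyleFG analyzer + IRN + InvEAN", 967),
                      ("3R-INN (forward or inverse)", 660),
                      ("StyleFG synthesizer + IRN + InvEAN", 1573)]:
    print(f"{name}: {round(power_w / display_saving_w)} displays")  # 161, 110, 262
```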
+ +Table 3: Comparison between generated LR clean images $\tilde{I}_{LR|R=0}$ and a bicubic rescaling of the HR clean image as ground-truth. + +
| Method | DIV2K<br>PSNR ↑ / SSIM ↑ | BSDS300<br>PSNR ↑ / SSIM ↑ | Kodak24<br>PSNR ↑ / SSIM ↑ |
|---|---|---|---|
| IRN [42] | 39.06 / 0.942 | 38.95 / 0.953 | 38.75 / 0.947 |
| Ours | 39.63 / 0.951 | 39.79 / 0.964 | 39.71 / 0.957 |
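The Table 3 protocol compares each generated LR image against the bicubic downscale of the clean HR image; a minimal PSNR helper (SSIM is computed analogously, e.g., with an off-the-shelf implementation) might read:

```python
import numpy as np

def psnr(reference: np.ndarray, test: np.ndarray, peak: float = 255.0) -> float:
    """PSNR in dB between a reference image and a test image of the same shape."""
    mse = np.mean((reference.astype(np.float64) - test.astype(np.float64)) ** 2)
    return float("inf") if mse == 0.0 else 10.0 * np.log10(peak ** 2 / mse)
```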
+ +![](images/afc44e58838d201eeec2a5a84e8eab8dda1352470b68e3613e635ceaf51711bc.jpg) +Ground-truth +(bicubic) +Fig. 4: Comparison between a bicubic downscaling, IRN and the clean LR $\tilde{I}_{LR|R=0}$ . of PSNR and SSIM. They also outline its good generalization, as even better performances are observed on BSDS300 and Kodak24 datasets. + +![](images/590af96f3e142ffec186d9df5b768322342eef5dd61c4deee9c0988207e255d1.jpg) +IRN +$\mathrm{(PSNR = 43.10dB)}$ + +![](images/141995e98c7a0f2b5a7a1ee961024f37a354b86f3d4f930898eea5e84a9a7e31.jpg) +Ours +$\mathrm{(PSNR = 43.14dB)}$ + +Energy-aware images For $R > 0$ , we evaluate the visual quality of the LR clean energy-aware image $\tilde{I}_{LR|R}$ against state-of-the-art energy-aware methods, i.e., a global linear scaling of the luminance (LS), R-ACE [33], DeepPVR [26] and InvEAN [25]. To solely evaluate the energy-aware task, and for a fair comparison, existing methods were evaluated while taking as input the output of our method after the fine tuning step with $R = 0$ . All evaluation metrics in the following were calculated with this image as reference. Table 4 reports PSNR-Y and SSIM metrics at four reduction rates, on three test sets. Two conclusions can be drawn. First, when the power consumption model $P_{Y}$ is used for a fair comparison with state-of-the-art methods, the proposed method outperforms LS and R-ACE methods, while being similar to DeepPVR and slightly below InvEAN. When the power consumption model $P_{RGBW}$ is used, the quality scores of 3R-INN are significantly better, and especially for the PSNR-Y. This can be explained by the fact that our model does not learn to reduce the image luminance, contrary to state-of-art methods. The latter in turn were not trained to optimize $P_{RGBW}$ ; this may explain their lower performances. + +This trend is confirmed by Figure 6 which plots SSIM scores as function of the actual reduction rate, computed with $P_{RGBW}$ . PSNR plots are provided in the supplemental material. Figure 5 shows a qualitative comparison of energy- + +Table 4: PSNR-Y and SSIM quality scores for the energy-aware task for four reduction rates $R$ . 3R-INN results are presented for two power consumption models, i.e. $P_{Y}$ (for comparison with state-of-the-art methods) and $P_{RGBW}$ , corresponding to RGB and RGBW OLED screens, respectively. InvEAN model is not available at $R = 5\%$ in [25]. + +
| Method | DIV2K<br>R=5% | DIV2K<br>R=20% | DIV2K<br>R=40% | DIV2K<br>R=60% | BSDS300<br>R=5% | BSDS300<br>R=20% | BSDS300<br>R=40% | BSDS300<br>R=60% | Kodak24<br>R=5% | Kodak24<br>R=20% | Kodak24<br>R=40% | Kodak24<br>R=60% |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| LS | 39.34/0.999 | 27.01/0.991 | 20.33/0.958 | 16.06/0.877 | 39.64/0.999 | 27.31/0.990 | 20.67/0.955 | 16.35/0.867 | 39.38/0.999 | 27.05/0.991 | 20.41/0.957 | 16.09/0.875 |
| R-ACE [33] | 41.53/0.995 | 26.59/0.967 | 20.05/0.901 | 15.92/0.788 | 40.55/0.997 | 26.90/0.978 | 20.24/0.915 | 16.12/0.806 | 40.70/0.997 | 26.74/0.983 | 20.08/0.930 | 15.98/0.830 |
| DeepPVR [26] | 39.37/0.996 | 27.12/0.983 | 21.04/0.952 | 15.81/0.890 | 39.63/0.997 | 27.53/0.989 | 21.13/0.959 | 16.36/0.894 | 39.27/0.997 | 27.17/0.989 | 20.61/0.955 | 16.00/0.892 |
| InvEAN [25] | - | 27.75/0.994 | 21.17/0.973 | 17.07/0.932 | - | 28.25/0.993 | 21.74/0.973 | 17.72/0.931 | - | 27.92/0.993 | 21.42/0.973 | 17.37/0.932 |
| Ours ($P_Y$) | 39.55/0.987 | 27.32/0.980 | 20.62/0.949 | 16.43/0.883 | 40.06/0.994 | 27.65/0.986 | 20.94/0.955 | 16.77/0.883 | 40.02/0.992 | 27.43/0.985 | 20.70/0.954 | 16.51/0.886 |
| Ours ($P_{RGBW}$) | 47.68/0.998 | 38.02/0.993 | 29.15/0.974 | 23.66/0.945 | 48.33/0.999 | 38.36/0.995 | 30.47/0.983 | 24.96/0.961 | 47.47/0.998 | 37.39/0.994 | 29.63/0.982 | 24.18/0.958 |
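Since the achieved reduction rates reported with Figs. 5 and 6 are derived from a display-power model, the computation reduces to the following sketch, with a hypothetical linear-luminance stand-in for the RGBW model of [11]:

```python
import numpy as np

def p_linear_luma(img: np.ndarray) -> float:
    # Hypothetical stand-in power model, proportional to mean Rec.709 luminance;
    # the paper's measurements use the RGBW OLED model of [11] instead.
    r, g, b = img[..., 0], img[..., 1], img[..., 2]
    return float(np.mean(0.2126 * r + 0.7152 * g + 0.0722 * b))

def achieved_rate(img_energy_aware: np.ndarray, img_reference: np.ndarray,
                  p_model=p_linear_luma) -> float:
    """Fraction of display power actually saved, relative to the reference image."""
    return 1.0 - p_model(img_energy_aware) / p_model(img_reference)
```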
+ +![](images/0e836ad70e4fe81b8e75b9db052a23a3c859f8aff9ce4301c94af8dde956a27b.jpg) + +![](images/12de44a2c6babce9606368f13791d14d7bd329ee3fb2ef7ac68d570d882abf7f.jpg) + +![](images/9228acd3f65cb119f5880aabed0d050481134619268fb2b0adc277b5c19f9ca9.jpg) +Original + +![](images/8499ddd0eea85411321c447b70d152899b244e827b196272cb80e5ab1ca5fd28.jpg) + +![](images/9a55a29ec925a75151424a0050dcb22baf8510f0358c09db8ace0a2bb67fa878.jpg) + +![](images/c8dc217e3958352259ec21c415b1e07adba4ada3317669afe13f4f1a34426048.jpg) +LS + +![](images/298024752f2c570b5a2304bfb2668bd421157d22d537fb839a68d4dea8fb4897.jpg) + +![](images/abb7c855d56abda193c739564f63ab95a089ba580ace4a06bcac1341289eeb78.jpg) + +![](images/9d5b522312fe765cbb58901aa208da8d425dc7b9b410a776ab0340de5f807a94.jpg) +RACE +Fig. 5: Comparison of generated energy-aware images with the state-of-the-art, for $R \in \{5\%, 20\%, 40\% \}$ from first to third lines. Achieved rates computed by the power model in [11] are provided. InvEAN model is not available at $R = 5\%$ in [25]. + +![](images/1ac6223cf4493bc5fbf77d2e3e898494b9a131c2975868da3cff4efa0839d86a.jpg) + +![](images/709d471a3b2b8a99b3631a0e64ab66f830ecf12b85fcb9b6767412bb7f1867db.jpg) + +![](images/51376c1d436f64bb81967422f403c08cd985a116db5c3b990a413f3185134b9c.jpg) +DeepPVR + +![](images/cfe1d9e2a331c333ef646c748f89dbbd8388af6bb037572ab239a62c4b5e925b.jpg) + +![](images/256ece244dc934c8e741968f8cf8119c4166d7048c4160be598295379e0a04ca.jpg) + +![](images/12f53dabffcff583a69d06a970d47b5ebd5f0adc2b0e9549551241ba289c7185.jpg) +InvEAN + +![](images/2dfb3e5052e2321183f2def5c822ec13f542113eb7992ee4f4099313b04250d7.jpg) + +![](images/d2d7988097960325aa2e3d9b963cb0740416b4fcdc61fcf55a5b8afae13ffbde.jpg) + +![](images/c9067f867d96f7659e4a5d4706e2481b7db6ea3a6f727168a4f6f16262781955.jpg) +Ours + +aware images. 3R-INN and LS respect the reduction rate targets better than other methods. Our method also exhibits a different behavior for high values of $R$ , once again keeping the luminance but modifying the colors. The subjective comparison is however difficult since the achieved energy reduction rates vary from one method to another. Although not fully dedicated to the energy-reduction task, 3R-INN performs well compared to existing methods and similarly to InvEAN, it offers the possibility to recover the original image without any side-information. + +# 4.3 Quantitative and qualitative evaluation of HR grainy images + +The reversibility property of 3R-INN is an important feature. To evaluate this property, we evaluated the HR grainy reconstruction with state-of-the-art film grain synthesis methods: VVC (Versatile Video Coding) implementation [34], Deep-FG [6] and Style-FG [6]. Table 5 summarizes the quantitative results for $R = 0$ , in terms of fidelity of the synthesized grain using learned perceptual image patch similarity (LPIPS), JSDNSS and the KL divergence (KLD) [45], these last two being computed between the histograms of ground-truth and HR grainy images. All methods perform analysis and synthesis except Deep-FG for which we generated 5 versions of grain, one per available intensity level, and kept only the best performing image for each metric in the comparison. + +Results show that the proposed method outperforms quantitatively VVC [34] and Deep-FG [6]. It also performs better than Style-FG [6] for LPIPS and KLD + +![](images/932e2ce72c3513e3263ff1db0347547ee8771b1a57148bce63b3af12b1a10b8b.jpg) +Fig. 
6: SSIM scores as a function of the target power reduction, for the different energy-aware methods.

Table 5: Comparison between reconstructed HR grainy images and the ground truth, for different methods, on the DIV2K validation set.

| Method | Analysis | Auxiliary data | JSD-NSS ↓ | LPIPS ↓ | KLD ↓ |
|---|---|---|---|---|---|
| VVC [34] | ✓ | set of params | 0.0148 | 0.2981 | 0.0327 |
| Deep-FG [6] | ✗ | ✗ | 0.0134 | 0.3722 | 0.0260 |
| Style-FG [5] | ✓ | style vector | 0.0024 | 0.1592 | 0.0232 |
| Ours | ✓ | none | 0.0088 | 0.0445 | 0.0177 |
+ +metrics which are representative of the quality of generated grain. The lower JSD-NSS value for Style-FG [6] could be explained by the fact that it is a GAN-based network which models the data distribution at the expense of the output quality. The qualitative comparison in Figure 7 confirms these observations (additional results in the supplemental material). Another advantage of 3R-INN is that no auxiliary data is required for grain synthesis, unlike VVC and Style-FG, which transmit a set of parameters and a style vector respectively. Similar results are obtained for $R > 0$ and are presented as supplemental material. + +![](images/94acb4fef121f6a6576c367eb884d717e2a55ff601e2ec677564155f3c266479.jpg) +Ground-truth + +![](images/d1d4065c6f2443ba0c46eb445fd13c2ca22fdb9bf5295614a7029a5a427a4fb9.jpg) +VVC (0.3343) +Fig. 7: Qualitative evaluation of HR synthesized grainy images for different methods, with LPIPS values between parenthesis. + +![](images/d33342e3aa7e2cd67444a8367880bc08fb19ce30ba13240ed9da328eb11f911b.jpg) +DeepFG (0.3533) + +![](images/9199feb4d23090f8fa7b3c44a50fef6553598571e992eb083203eb50a08b87a6.jpg) +StyleFG (0.1693) + +![](images/3d217dfcfe541c0fdae42ceb7d563e2c108b6a263b16955e7b2a5216369f26f7.jpg) +Ours (0.0508) + +# 5 Conclusion + +This paper presents 3R-INN, the first network that enables to reduce the overall energy consumption in the video transmission chain. Given an HR grainy image, 3R-INN delivers a minimum viable quality, low-resolution, grain-free and energy-aware image, thus reducing the energy required for encoding, transmission, decoding and display. With multiple views of the same content, 3R-INN achieves a positive energy balance, far more efficient than current state-of-the-art systems. Furthermore it does not need to transmit auxiliary information to reconstruct the original grainy content, since all the lost information including details, film grain and brightness was encoded and disentangled in a standard Gaussian distribution, through a latent encoding block conditioned on the LR image. Experimental results demonstrate that 3R-INN outperforms the existing methods by a large margin for film grain synthesis, and achieves state-of-the-art performance in the rescaling and energy-aware tasks. For the latter, a fine-tuning for each value of energy reduction rate target $R$ was conducted. Conditioning the network on $R$ to avoid fine-tuning different networks for each value of $R$ , will therefore be investigated in the future, as an extension of current work. Some subjective test will also be conducted to assess the acceptability by end users of the provided LR energy-aware images. + +# References + +1. Energy consumption household. https://www.energybot.com/blog/average-energy-consumption.html. +2. Netflix subscribers. https://www.usnews.com/news/business/articles/2024-01-23/netflixs-subscriber-growth-surges-as-streaming-service-unwraps-best-ever-holiday-season-results. +3. Vtm-19.0. https://vctgit.hhi.fraunhofer.de/jvet/VVCSoftware_VTM/~/tags/VTM-19.0 +4. Agustsson, E., Timofte, R.: Ntire 2017 challenge on single image super-resolution: Dataset and study. In: Proceedings of the IEEE conference on computer vision and pattern recognition workshops. pp. 126-135 (2017) +5. Ameur, Z., Demarty, C.H., Le Meur, O., Menard, D., François, E.: Style-based film grain analysis and synthesis. In: Proceedings of the 14th Conference on ACM Multimedia Systems. pp. 229-238 (2023) +6. 
Ameur, Z., Hamidouche, W., François, E., Radosavljevic, M., Menard, D., Demarty, C.H.: Deep-based film grain removal and synthesis. IEEE Transactions on Image Processing (2023)
+7. Bonniveau, C., Hamidouche, W., Travers, J.F., Déforges, O.: Versatile video coding and super-resolution for efficient delivery of 8k video with 4k backward-compatibility. In: ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). pp. 2048-2052 (2020). https://doi.org/10.1109/ICASSP40776.2020.9054716
+8. Boyce, J., Suehring, K., Li, X., Seregin, V.: Jvet-j1010: Jvet common test conditions and software reference configurations. In: 10th Meeting of the Joint Video Experts Team. pp. JVET-J1010 (2018)
+9. Chen, Z., Liu, T., Huang, J.J., Zhao, W., Bi, X., Wang, M.: Invertible mosaic image hiding network for very large capacity image steganography. arXiv preprint arXiv:2309.08987 (2023)
+10. Dai, J., Au, O.C., Pang, C., Yang, W., Zou, F.: Film grain noise removal and synthesis in video coding. In: 2010 IEEE International Conference on Acoustics, Speech and Signal Processing. pp. 890-893. IEEE (2010)
+11. Demarty, C.H., Blondé, L., Le Meur, O.: Display power modeling for energy consumption control. In: 2023 IEEE International Conference on Image Processing (ICIP). IEEE (2023)
+12. Dinh, L., Sohl-Dickstein, J., Bengio, S.: Density estimation using real nvp. arXiv preprint arXiv:1605.08803 (2016)
+13. Du, W., Chen, H., Zhang, Y., Yang, H.: Hierarchical disentangled representation for invertible image denoising and beyond. arXiv preprint arXiv:2301.13358 (2023)
+14. Franzen, R.: Kodak lossless true color image suite. source: http://r0k.us/graphics/kodak 4(2), 9 (1999)
+15. Gomila, C.: Sei message for film grain encoding. JVT document, May 2003 (2003)
+16. Herglotz, C., Brand, F., Regensky, A., Rievel, F., Kaup, A.: Processing energy modeling for neural network based image compression. In: 2023 IEEE International Conference on Image Processing (ICIP). pp. 2390-2394. IEEE (2023)
+17. Herglotz, C., Kränzler, M., Schober, R., Kaup, A.: Sweet streams are made of this: The system engineer's view on energy efficiency in video communications [feature]. IEEE Circuits and Systems Magazine 23(1), 57-77 (2023)
+18. Huang, G., Liu, Z., Van Der Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 4700-4708 (2017)
+19. Hwang, I., Jeong, J., Choi, J., Choe, Y.: Enhanced film grain noise removal for high fidelity video coding. In: 2013 International Conference on Information Science and Cloud Computing Companion. pp. 668-674. IEEE (2013)
+20. Kang, S.J.: Image-quality-based power control technique for organic light emitting diode displays. Journal of Display Technology 11(1), 104-109 (2015)
+21. Kang, S.j., Kim, Y.H.: Image integrity-based gray-level error control for low power liquid crystal displays. IEEE Transactions on Consumer Electronics 55(4), 2401-2406 (2009). https://doi.org/10.1109/TCE.2009.5373816
+22. Kim, H., Choi, M., Lim, B., Lee, K.M.: Task-aware image downscaling. In: Proceedings of the European conference on computer vision (ECCV). pp. 399-414 (2018)
+23. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)
+24. Kingma, D.P., Dhariwal, P.: Glow: Generative flow with invertible 1x1 convolutions. Advances in neural information processing systems 31 (2018)
+25. Le Meur, O., Demarty, C.H.: Invertible energy-aware images.
IEEE Signal Processing Letters (2023) +26. Le Meur, O., Demarty, C.H., Blondé, L.: Deep-learning-based energy aware images. In: 2023 IEEE International Conference on Image Processing (ICIP). pp. 590-594. IEEE (2023) +27. Liu, Y., Qin, Z., Anwar, S., Ji, P., Kim, D., Caldwell, S., Gedeon, T.: Invertible denoising network: A light solution for real noise removal. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 13365-13374 (2021) +28. Lu, S.P., Wang, R., Zhong, T., Rosin, P.L.: Large-capacity image steganography based on invertible neural networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 10816-10825 (2021) +29. Malmodin, J.: The power consumption of mobile and fixed network data services—the case of streaming video and downloading large files. In: Electronics Goes Green. vol. 2020 (2020) +30. Martin, D., Fowlkes, C., Tal, D., Malik, J.: A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In: Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001. vol. 2, pp. 416-423. IEEE (2001) +31. Newson, A., Delon, J., Galerne, B.: A stochastic film grain model for resolution-independent rendering. In: Computer Graphics Forum. vol. 36, pp. 684-699. Wiley Online Library (2017) +32. Norkin, A., Birkbeck, N.: Film grain synthesis for av1 video codec. In: 2018 Data Compression Conference. pp. 3-12. IEEE (2018) +33. Nugroho, K.A., Ruan, S.J.: R-ace network for oled image power saving. In: 2022 IEEE 4th Global Conference on Life Sciences and Technologies (LifeTech). pp. 284-285. IEEE (2022) +34. Radosavljevic, M., François, E., Reinhard, E., Hamidouche, W., Amestoy, T.: Implementation of film-grain technology within vvc. In: Applications of Digital Image Processing XLIV. vol. 11842, pp. 85-95. SPIE (2021) +35. Reddi, S.J., Kale, S., Kumar, S.: On the convergence of adam and beyond. arXiv preprint arXiv:1904.09237 (2019) +36. Reinhard, E., Demarty, C.H., Blondé, L.: Pixel value adjustment to reduce the energy requirements of display devices. SMPTE Motion Imaging Journal 132(7), 10-19 (2023) + +37. Robinson, D.: Greening of streaming: The less accord: Low energy sustainable streaming. In: Proceedings of the 2nd Mile-High Video Conference (MHV'23). p. 115 (2023) +38. Shin, Y.G., Park, S., Yoo, M.J., Ko, S.J.: Unsupervised deep power saving and contrast enhancement for oled displays. arXiv preprint arXiv:1905.05916 (2019) +39. Stoyan, D., Kendall, W.S., Chiu, S.N., Mecke, J.: Stochastic geometry and its applications. John Wiley & Sons (2013) +40. Sun, W., Chen, Z.: Learned image downscaling for upscaling using content adaptive resampler. IEEE Transactions on Image Processing 29, 4027-4040 (2020) +41. Trust, T.C.: Carbon impact of video streaming. https://www.carbontrust.com/eneu/node/1537 (2021) +42. Xiao, M., Zheng, S., Liu, C., Wang, Y., He, D., Ke, G., Bian, J., Lin, Z., Liu, T.Y.: Invertible image rescaling. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part I 16. pp. 126-144. Springer (2020) +43. Yin, J.L., Chen, B.H., Peng, Y.T., Tsai, C.C.: Deep battery saver: End-to-end learning for power constrained contrast enhancement. IEEE Transactions on Multimedia 23, 1049-1059 (2020) +44. Zhao, R., Liu, T., Xiao, J., Lun, D.P., Lam, K.M.: Invertible image decolorization. IEEE Transactions on Image Processing 30, 6081-6095 (2021) +45. 
Zhu, F., Chen, G., Hao, J., Heng, P.A.: Blind image denoising via dependent dirichlet process tree. IEEE transactions on pattern analysis and machine intelligence 39(8), 1518-1531 (2016) \ No newline at end of file diff --git a/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/images.zip b/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..dee0182a1942e5494ffed91a4f83fe27f8241552 --- /dev/null +++ b/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73e8ea2ebe24eb6c223b04ee508d9452bf2d5b7f54d3b7921eca72cab304302a +size 492717 diff --git a/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/layout.json b/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..aa23035a491e028cda2139ce8f18260d3a6d2f77 --- /dev/null +++ b/2024/3R-INN_ How to be climate friendly while consuming_delivering videos_/layout.json @@ -0,0 +1,11214 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 159, + 112, + 455, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 112, + 455, + 148 + ], + "spans": [ + { + "bbox": [ + 159, + 112, + 455, + 148 + ], + "type": "text", + "content": "3R- INN: How to be climate friendly while consuming/delivering videos?" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 138, + 168, + 476, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 168, + 476, + 192 + ], + "spans": [ + { + "bbox": [ + 138, + 168, + 476, + 192 + ], + "type": "text", + "content": "Zoubida Ameur1, Claire-Hélène Demarty1, Daniel Ménard2, and Olivier Le Meur1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 174, + 201, + 436, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 174, + 201, + 436, + 224 + ], + "spans": [ + { + "bbox": [ + 174, + 201, + 436, + 224 + ], + "type": "text", + "content": "1 InterDigital, France, firstname.lastname@interdigital.com \n2 IETR, daniel.menard@insa-rennes.fr" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 133, + 246, + 481, + 305 + ], + "blocks": [ + { + "bbox": [ + 133, + 246, + 481, + 305 + ], + "lines": [ + { + "bbox": [ + 133, + 246, + 481, + 305 + ], + "spans": [ + { + "bbox": [ + 133, + 246, + 481, + 305 + ], + "type": "image", + "image_path": "65a53aa72741b3542eed667bb83f0b09595a3293d57832a896ad46c7ad59b915.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 306, + 481, + 327 + ], + "lines": [ + { + "bbox": [ + 132, + 306, + 481, + 327 + ], + "spans": [ + { + "bbox": [ + 132, + 306, + 481, + 327 + ], + "type": "text", + "content": "Fig. 1: 3R-INN: End-to-end energy-aware video distribution chain by Removing grain, Rescaling and Reducing display energy." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 159, + 338, + 453, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 338, + 453, + 546 + ], + "spans": [ + { + "bbox": [ + 159, + 338, + 453, + 546 + ], + "type": "text", + "content": "Abstract. The consumption of a video requires a considerable amount of energy during the various stages of its life-cycle. 
With a billion hours of video consumed daily, this contributes significantly to the greenhouse gas (GHG) emission. Therefore, reducing the end-to-end carbon footprint of the video chain, while preserving the quality of experience at the user side, is of high importance. To contribute in an impactful manner, we propose 3R-INN, a single invertible network that does three tasks at once: given a high-resolution (HR) grainy image, it Rescales it to a lower resolution, Removes film grain and Reduces its power consumption when displayed. Providing such a minimum viable quality content contributes to reducing the energy consumption during encoding, transmission, decoding and display. 3R-INN also offers the possibility to restore either the HR grainy original image or a grain-free version, thanks to its invertibility and the disentanglement of the high frequency, and without transmitting auxiliary data. Experiments show that, 3R-INN enables significant energy savings for encoding (78%), decoding (77%) and rendering (5% to 20%), while outperforming state-of-the-art film grain removal and synthesis, energy-aware and downscaling methods on different test-sets." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 160, + 557, + 438, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 557, + 438, + 568 + ], + "spans": [ + { + "bbox": [ + 160, + 557, + 438, + 568 + ], + "type": "text", + "content": "Keywords: Energy saving " + }, + { + "bbox": [ + 160, + 557, + 438, + 568 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 557, + 438, + 568 + ], + "type": "text", + "content": " Invertible network " + }, + { + "bbox": [ + 160, + 557, + 438, + 568 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 557, + 438, + 568 + ], + "type": "text", + "content": " Video distribution" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 590, + 230, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 590, + 230, + 601 + ], + "spans": [ + { + "bbox": [ + 132, + 590, + 230, + 601 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "content": "Over " + }, + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "content": " of the world's global GHG emissions comes from energy production, particularly from fossil fuels. The growing energy consumption of the media and entertainment industry, in particular streaming, strongly contributes to climate change, with more than " + }, + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "inline_equation", + "content": "1.3\\%" + }, + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "content": " of GHG in 2020 [41]. 
Therefore, this industry has to move towards decarbonisation, energy efficiency and sustainability in" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 248 + ], + "type": "text", + "content": "all its stages, e.g., head-end (encoding), delivery (transmission) and end-user device (decoding and display). Taking apart the energy consumed while building the different necessary equipment, reduced energy consumption at the head-end translates into shorter encoding times and lower computing loads, while at the distribution level it translates into lower bit-rates. At the end-device level, significant gains can be achieved, as displays constitute the most power-hungry part of the whole chain [41]. In the specific case of emissive displays, e.g., organic light-emitting diodes (OLEDs), the power consumption is pixelwise and therefore directly dependent on the displayed content. Consequently, less energy-intensive images at display and shorter decoding times will also lead to lower energy consumption." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 259, + 482, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 259, + 482, + 392 + ], + "spans": [ + { + "bbox": [ + 130, + 259, + 482, + 392 + ], + "type": "text", + "content": "The encoding and decoding times are related to the content resolution and complexity. Downscaling the content before encoding and upscaling it after decoding while preserving the same quality of experience [7] is one straightforward solution to reduce the computational burden. Additionally, removing and modeling artistic noise, such as film grain, before encoding and synthesizing it after decoding, not only reduces encoding and decoding times, but also significantly reduces the bit-rate [32], while still preserving the artistic intent at the user side. Finally, as displays consume the largest proportion of the energy, providing energy-aware content, i.e., that will consume less when displayed, is of significant importance, at least for OLED displays. Several studies addressed this issue by investigating how to reduce the content brightness [25, 26, 36]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 402, + 482, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 402, + 482, + 668 + ], + "spans": [ + { + "bbox": [ + 130, + 402, + 482, + 668 + ], + "type": "text", + "content": "Because climate change is a pressing issue, we believe that having a global vision on the overall energy consumption in the video chain and an holistic approach on how to reduce it is of the utmost importance. Therefore, in this paper, we propose an end-to-end energy reduction of the video distribution chain, while preserving a good quality of experience at the user side, by leveraging a deep learning invertible neural network (INN)-based model, called 3R-INN. Prior to encoding a HR grainy image, our 3R-INN multi-task network Rescales it to a lower resolution, Removes film grain and Reduces its power consumption when displayed, by some reduction rate " + }, + { + "bbox": [ + 130, + 402, + 482, + 668 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 130, + 402, + 482, + 668 + ], + "type": "text", + "content": ". 
While saving energy along the video chain, 3R-INN also provides a visually-pleasant content intended to be displayed, following the new paradigm proposed in [37], which promotes to target a minimum viable video quality for transported videos. Within this same paradigm, the possibility to recover the original content is encouraged, with the counter part that it will consume more. This is feasible thanks to the invertibility of 3R-INN which allows to retrieve the original HR image from the clean energy-aware LR one by running inversely the framework. Furthermore, thanks to the modeling and disentanglement of the lost information in the forward pass, two versions, grainy and clean, of the original HR image can be restored, without transmitting any auxiliary information. With the idea that the energy consumed when applying an energy reduction processing should not exceed the amount of energy saved, we designed 3R-INN to be a single network, that replaces three separate and potentially heavier processings, showing that" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 231, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 231, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 231, + 102 + ], + "type": "text", + "content": "Z. Ameur et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": "the use of 3R-INN results in a positive energy balance, as soon as a content is displayed multiple times. 
In summary, our main contributions are four-folds:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 131, + 141, + 479, + 284 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 131, + 141, + 479, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 141, + 479, + 176 + ], + "spans": [ + { + "bbox": [ + 131, + 141, + 479, + 176 + ], + "type": "text", + "content": "- a first end-to-end solution for reducing the energy consumption of the video chain that exhibits a better energy balance compared to the sum of the corresponding three tasks in the state-of-the-art;" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 131, + 178, + 479, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 178, + 479, + 212 + ], + "spans": [ + { + "bbox": [ + 131, + 178, + 479, + 212 + ], + "type": "text", + "content": "- a single network for the three tasks of rescaling, removing/synthesizing grain and reducing the energy at display, dedicated towards saving energy in the whole video chain;" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 213, + 479, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 213, + 479, + 248 + ], + "spans": [ + { + "bbox": [ + 131, + 213, + 479, + 248 + ], + "type": "text", + "content": "- the provision of a visually pleasant, energy reduced version of the original image, and the capability to go back to the original HR grainy image with no transmission of additional metadata along the video chain;" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 131, + 249, + 479, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 249, + 479, + 284 + ], + "spans": [ + { + "bbox": [ + 131, + 249, + 479, + 284 + ], + "type": "text", + "content": "- the best method so far for high-fidelity film grain synthesis, with no need of auxiliary data and the best method so far for downscaling and building energy-aware images." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 130, + 286, + 480, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 286, + 480, + 346 + ], + "spans": [ + { + "bbox": [ + 130, + 286, + 480, + 346 + ], + "type": "text", + "content": "In the following, we first review the state-of-the-art for rescaling, film grain removal/synthesis and energy-aware images (Section 2), before detailing our proposed solution (Section 3). In Section 4, we provide a compared analysis of the quantitative, qualitative and energy performances of the use of 3R-INN, against state-of-the-art solutions. In Section 5, we draw conclusions and perspectives." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 131, + 349, + 234, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 349, + 234, + 361 + ], + "spans": [ + { + "bbox": [ + 131, + 349, + 234, + 361 + ], + "type": "text", + "content": "2 Related work" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 365, + 481, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 365, + 481, + 591 + ], + "spans": [ + { + "bbox": [ + 130, + 365, + 481, + 591 + ], + "type": "text", + "content": "Rescaling The rescaling task helps saving resources, through the storage and transfer of downscaled versions of an original HR image/video. Recovering the original resolution while having pleasant LR content can be very challenging. 
For these purposes, to maximize the restoration performance while producing visually pleasant low-resolution (LR) content, several works learn jointly the two tasks, i.e., downscaling and upscaling. In [22], an auto-encoder-based framework learns the optimal LR image that maximizes the reconstruction performance of the HR image. In [40], an unsupervised downscaling method with consideration on the upscaling process but no assumption on how the HR image is downscaled, allows to learn the essential information for upscaling in an optimal way. Following a different paradigm, the method called IRN [42] models the down- and up-scaling processes using an invertible bijective transformation. In a forward pass, IRN performs the downscaling process by producing visually pleasing LR images while capturing the distribution of the lost information using a latent variable that follows a specified distribution. Meanwhile, the upscaling process is made tractable such that the HR image is reconstructed by inversely passing a randomly drawn latent variable with the LR image through the network. However, the reconstruction is not image-adaptive due to the case-agnostic latent variable." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 594, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 481, + 666 + ], + "type": "text", + "content": "Film grain removal and synthesis To better preserve film grain while compressing video content efficiently, it is classically removed and modeled before encoding and restored after decoding [15, 32]. Hence, dedicated methods for film grain removal are proposed, based on either temporal filtering [10], spatiotemporal inter-color correlation filtering [19] or deep-learning encoder-decoder models [6]. On the other hand, several studies addressed the film grain synthesis" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 154, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 154, + 91, + 447, + 102 + ], + "type": "text", + "content": "3R-INN: How to be climate friendly while consuming/delivering videos?" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 273 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 273 + ], + "type": "text", + "content": "task. In [31], a Boolean in-homogeneous model [39] is used to model the grain, which corresponds to uniformly distributed disks. In AV1 codec [32], film grain is modeled by an autoregressive (AR) method as well as by an intensity-based function to adjust its strength. In VVC [34], a method based on frequency filtering is used. The grain pattern is first modeled thanks to a discrete cosine transform (DCT) applied to the grain blocks corresponding to smooth regions, and further scaled to the appropriate level, by using a step-wise scaling function. 
In [6], a conditional generative adversarial network (cGAN) that generates grain at different intensities is proposed. Yet, it does not perform any analysis on the original grain for a reliable synthesis. In [5], a deep-learning framework is proposed which consists of a style encoder for film grain style analysis, a mapping network for film grain style generation, and a synthesis network that generates and blends a specific grain style to a content in a content-adaptive manner." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 283, + 482, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 283, + 482, + 475 + ], + "spans": [ + { + "bbox": [ + 130, + 283, + 482, + 475 + ], + "type": "text", + "content": "Energy-aware images Many works target the reduction of the energy consumption of images while displayed on screens, especially for OLED displays. A first set of methods reduces the luminance through clipping or equalizing histograms [20, 21]. Other works directly scale the pixel luminance [26, 36, 38]. The most promising methods leverage deep learning models, trained with a combination of loss functions that minimize the energy consumption while maintaining an acceptable perceptual quality. In [43], a deep learning model trained with a variational loss for simultaneously enhancing the visual quality and reducing the power consumption is proposed. Authors in [38] describe an adaptive contrast enhancement (ACE) convolutional neural network, that performs contrast enhancement of luminance scaled images. In [33], an improved version of ACE, called Residual-ACE (R-ACE), is proposed to infer an attenuation map instead of a reduced image. In [26], authors revisit R-ACE to significantly reduce the complexity without compromising the performance. Different from the above methods, an invertible energy-aware network (InvEAN) [25] produces invertible energy-aware images and allows to recover the original images if required." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "text", + "content": "Invertible neural networks INNs learn the mapping " + }, + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "inline_equation", + "content": "x = f(z)" + }, + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "text", + "content": ", which is fully invertible as " + }, + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "inline_equation", + "content": "z = f^{-1}(x)" + }, + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "text", + "content": ", through a sequence of differentiable invertible mappings such as affine coupling layers [12] and invertible " + }, + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 130, + 486, + 482, + 666 + ], + "type": "text", + "content": " convolutional layers [24]. INNs have direct applications in ambiguous inverse problems by learning information-lossless mappings [13, 27, 44]. The lost information in the forward process is captured by additional latent output variables. Thus, the inverse process is learned implicitly. A first application is the steganography, i.e., concealing images into other images [9, 28]. 
In [44], an INN is used to produce invertible grayscale images, where the lost color information is encoded into a set of Gaussian distributed latent variables. The original color version can be recovered by using a new set of randomly sampled Gaussian distributed variables as input, together with the synthetic grayscale, through the reverse mapping. Similarly, an invertible denoising network (InvDN) transforms a noisy input into a LR clean image and a latent representation containing noise in [27]. To discard noise and restore the clean image, InvDN replaces the noisy latent representation with" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 231, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 231, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 231, + 102 + ], + "type": "text", + "content": "Z. Ameur et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": "another one sampled from a prior distribution during reversion. In [13], another INN further disentangles noise from the high frequency image information." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 131, + 146, + 269, + 160 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 146, + 269, + 160 + ], + "spans": [ + { + "bbox": [ + 131, + 146, + 269, + 160 + ], + "type": "text", + "content": "3 Proposed approach" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "spans": [ + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "text", + "content": "With the target of reducing the overall energy consumption of the video chain, our 3R-INN framework, run in a forward pass at the encoder side, performs three invertible tasks simultaneously: 1) film grain removal, 2) downscaling and 3) display energy reduction, as illustrated in Figure 1. From a HR grainy image " + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "inline_equation", + "content": "I_G \\in \\mathbb{R}^{H \\times W \\times 3}" + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "text", + "content": ", 3R-INN outputs a visually pleasant grain-free LR energy-aware image " + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{LR|R} \\in \\mathbb{R}^{\\frac{1}{2} H \\times \\frac{1}{2} W \\times 3}" + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "inline_equation", + "content": "R \\in [0,1]" + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "text", + "content": " being the energy reduction rate, and 2 being the scaling factor corresponding to the best compromise between quality of the LR images, framework complexity and energy savings in the video chain. 
To ensure the process invertibility and the bijective mapping, the lost information mainly due to grain removal and downscaling is captured in a latent variable " + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "text", + "content": " distributed according to a standard Gaussian distribution " + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(0,1)" + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "text", + "content": ". This can be formulated as: " + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "inline_equation", + "content": "[\\tilde{I}_{LR|R}, z] = f_{\\theta}(I_G)" + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "text", + "content": " is the set of trainable parameters of the 3R-INN network " + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{LR|R}" + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "text", + "content": " is intended to be encoded, transmitted and displayed at the end-user device for an optimal energy consumption and quality of experience trade-off. The lost information " + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "text", + "content": " is further disentangled into two parts inside 3R-INN, by setting " + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "inline_equation", + "content": "\\tilde{z}" + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "text", + "content": " its internal representation as " + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "inline_equation", + "content": "\\tilde{z} = [\\tilde{z}_D, \\tilde{z}_G]" + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "inline_equation", + "content": "\\tilde{z}_D" + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "inline_equation", + "content": "\\tilde{z}_G" + }, + { + "bbox": [ + 130, + 165, + 482, + 371 + ], + "type": "text", + "content": " representing losses due to downscaling and grain removal, respectively." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 374, + 482, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 374, + 482, + 495 + ], + "spans": [ + { + "bbox": [ + 130, + 374, + 482, + 495 + ], + "type": "text", + "content": "In case the original content should be recovered, 3R-INN is run in an inverse pass at the decoder side (see Figure 1), as follows: " + }, + { + "bbox": [ + 130, + 374, + 482, + 495 + ], + "type": "inline_equation", + "content": "\\tilde{I}_G = f_\\theta^{-1}([\\tilde{I}_{LR|R},z])" + }, + { + "bbox": [ + 130, + 374, + 482, + 495 + ], + "type": "text", + "content": ". 
The original HR grainy content is then reconstructed with no need to transmit any auxiliary information in the video chain thanks to the modeling of the lost information. Moreover, thanks to the film grain and high frequency loss disentanglement, " + }, + { + "bbox": [ + 130, + 374, + 482, + 495 + ], + "type": "inline_equation", + "content": "\\tilde{z} = [\\tilde{z}_D,\\tilde{z}_G]" + }, + { + "bbox": [ + 130, + 374, + 482, + 495 + ], + "type": "text", + "content": ", 3R-INN is also able to generate a clean HR version " + }, + { + "bbox": [ + 130, + 374, + 482, + 495 + ], + "type": "inline_equation", + "content": "\\tilde{I}_C" + }, + { + "bbox": [ + 130, + 374, + 482, + 495 + ], + "type": "text", + "content": " of the original content by setting " + }, + { + "bbox": [ + 130, + 374, + 482, + 495 + ], + "type": "inline_equation", + "content": "\\tilde{z}_G = 0" + }, + { + "bbox": [ + 130, + 374, + 482, + 495 + ], + "type": "text", + "content": ". The overall architecture of the proposed framework is composed of three block types: one Haar Transformation block, several invertible blocks and a conditional latent encoding block, as illustrated in Figure 1." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 510, + 482, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 482, + 654 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 482, + 654 + ], + "type": "text", + "content": "Haar transform As removing film grain and downscaling an image significantly impacts high frequencies, it seems natural to first decompose the input HR image into low and high-frequency components. For that purpose, we chose the dyadic Haar wavelet transformation, similarly to [42, 44], because of its simplicity, efficiency and invertibility. Specifically, the Haar transform decomposes an input feature " + }, + { + "bbox": [ + 130, + 510, + 482, + 654 + ], + "type": "inline_equation", + "content": "f_{in} \\in \\mathbb{R}^{H \\times W \\times C}" + }, + { + "bbox": [ + 130, + 510, + 482, + 654 + ], + "type": "text", + "content": " into one low-frequency " + }, + { + "bbox": [ + 130, + 510, + 482, + 654 + ], + "type": "inline_equation", + "content": "f_{low} \\in \\mathbb{R}^{\\frac{1}{2} H \\times \\frac{1}{2} W \\times C}" + }, + { + "bbox": [ + 130, + 510, + 482, + 654 + ], + "type": "text", + "content": " and three high-frequency " + }, + { + "bbox": [ + 130, + 510, + 482, + 654 + ], + "type": "inline_equation", + "content": "f_{high} \\in \\mathbb{R}^{\\frac{1}{2} H \\times \\frac{1}{2} W \\times 3C}" + }, + { + "bbox": [ + 130, + 510, + 482, + 654 + ], + "type": "text", + "content": " sub-bands. " + }, + { + "bbox": [ + 130, + 510, + 482, + 654 + ], + "type": "inline_equation", + "content": "f_{low}" + }, + { + "bbox": [ + 130, + 510, + 482, + 654 + ], + "type": "text", + "content": ", produced by an average pooling, represents the overall structure and coarse features of the image, while " + }, + { + "bbox": [ + 130, + 510, + 482, + 654 + ], + "type": "inline_equation", + "content": "f_{high}" + }, + { + "bbox": [ + 130, + 510, + 482, + 654 + ], + "type": "text", + "content": " contains finer details in the vertical, horizontal and diagonal directions, corresponding to film grain and edges. This splitting strategy allows to separate very early in the process the low frequency components from the information we aim to suppress. 
" + }, + { + "bbox": [ + 130, + 510, + 482, + 654 + ], + "type": "inline_equation", + "content": "f_{low}" + }, + { + "bbox": [ + 130, + 510, + 482, + 654 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 510, + 482, + 654 + ], + "type": "inline_equation", + "content": "f_{high}" + }, + { + "bbox": [ + 130, + 510, + 482, + 654 + ], + "type": "text", + "content": " serve as input to the following invertible blocks." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 154, + 90, + 447, + 103 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 90, + 447, + 103 + ], + "spans": [ + { + "bbox": [ + 154, + 90, + 447, + 103 + ], + "type": "text", + "content": "3R-INN: How to be climate friendly while consuming/delivering videos?" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 481, + 101 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 481, + 101 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 481, + 101 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "text", + "content": "Invertible block As invertible blocks, we selected the coupling layer architecture proposed in [24]. A given input " + }, + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "inline_equation", + "content": "h^i" + }, + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "text", + "content": " is composed of two parts " + }, + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "inline_equation", + "content": "h_1^i" + }, + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "inline_equation", + "content": "h_2^i" + }, + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "text", + "content": ", representing the three low-frequency and the nine high-frequency sub-bands of the color input channels RGB, respectively. 
These sub-bands are then processed by the " + }, + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "inline_equation", + "content": "i^{th}" + }, + { + "bbox": [ + 130, + 116, + 482, + 175 + ], + "type": "text", + "content": " invertible block as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 268, + 182, + 481, + 196 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 268, + 182, + 481, + 196 + ], + "spans": [ + { + "bbox": [ + 268, + 182, + 481, + 196 + ], + "type": "interline_equation", + "content": "h _ {1} ^ {i + 1} = h _ {1} ^ {i} + \\phi \\left(h _ {2} ^ {i}\\right) \\tag {1}", + "image_path": "439f2dd9f0ad73be5bc11c6fcf117ec4d129c3311c1b682b705dd712ab1b6cbd.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 228, + 198, + 386, + 213 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 198, + 386, + 213 + ], + "spans": [ + { + "bbox": [ + 228, + 198, + 386, + 213 + ], + "type": "interline_equation", + "content": "h _ {2} ^ {i + 1} = h _ {2} ^ {i} \\odot \\exp (\\psi (h _ {1} ^ {i + 1})) + \\eta (h _ {1} ^ {i + 1})", + "image_path": "238a12e1bbab82e80bffde6de8a8386905d98524e4b23f1ca95afed24fecd962.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 220, + 481, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 220, + 481, + 245 + ], + "spans": [ + { + "bbox": [ + 131, + 220, + 481, + 245 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 220, + 481, + 245 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 131, + 220, + 481, + 245 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 131, + 220, + 481, + 245 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 131, + 220, + 481, + 245 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 220, + 481, + 245 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 131, + 220, + 481, + 245 + ], + "type": "text", + "content": " are dense blocks [18]. 
Given " + }, + { + "bbox": [ + 131, + 220, + 481, + 245 + ], + "type": "inline_equation", + "content": "[h_1^{i + 1}, h_2^{i + 1}]" + }, + { + "bbox": [ + 131, + 220, + 481, + 245 + ], + "type": "text", + "content": ", the inverse transformation can be easily computed by:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 228, + 251, + 481, + 265 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 251, + 481, + 265 + ], + "spans": [ + { + "bbox": [ + 228, + 251, + 481, + 265 + ], + "type": "interline_equation", + "content": "h _ {2} ^ {i} = \\left(h _ {2} ^ {i + 1} - \\eta \\left(h _ {1} ^ {i + 1}\\right)\\right) / \\exp \\left(\\psi \\left(h _ {1} ^ {i + 1}\\right)\\right) \\tag {2}", + "image_path": "39b1b118b44786610170bde61c032d6430036a7cf36b622b7bf14c84ef5d9cc1.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 268, + 267, + 347, + 281 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 268, + 267, + 347, + 281 + ], + "spans": [ + { + "bbox": [ + 268, + 267, + 347, + 281 + ], + "type": "interline_equation", + "content": "h _ {1} ^ {i} = h _ {1} ^ {i + 1} - \\phi \\left(h _ {2} ^ {i}\\right)", + "image_path": "5b581db85db1e57d098513bd66bb630a68143ffe26af3fc93f1653216a0c9a62.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "spans": [ + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "text", + "content": "Conditioned latent encoding block Invertible networks learn a bijective mapping between an input and an output distribution. In case of information loss, a latent variable " + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "inline_equation", + "content": "\\tilde{z}" + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "text", + "content": " is added to ensure the invertible property. This latent variable is assumed to follow a known distribution i.e., a standard Gaussian distribution, to avoid transmitting additional information for the reconstruction, and to make the process case-agnostic. In our context, this would mean that the reconstruction of the HR grainy " + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "inline_equation", + "content": "(\\tilde{I}_G)" + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "text", + "content": " or clean " + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "inline_equation", + "content": "(\\tilde{I}_C)" + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "text", + "content": " images would not rely on the a priori knowledge of the LR image " + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{LR|R}" + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "text", + "content": ". 
To overcome this limitation and to enable an image-adaptive reconstruction during the inverse pass, the lost information " + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "inline_equation", + "content": "\\tilde{z}" + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "text", + "content": " is transformed into a Gaussian distributed latent variable " + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "text", + "content": " whose mean and variance are conditioned on " + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{LR|R}" + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "text", + "content": ". This is done through the use of a latent encoding block inspired from [44], whose structure is a one-side affine coupling layer that normalizes " + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "inline_equation", + "content": "\\tilde{z}" + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "text", + "content": " into a standard Gaussian distributed variable " + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "text", + "content": " as follows, with " + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "inline_equation", + "content": "\\phi_g" + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "inline_equation", + "content": "\\theta_g" + }, + { + "bbox": [ + 131, + 288, + 482, + 456 + ], + "type": "text", + "content": " being dense blocks:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 228, + 463, + 481, + 477 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 463, + 481, + 477 + ], + "spans": [ + { + "bbox": [ + 228, + 463, + 481, + 477 + ], + "type": "interline_equation", + "content": "z = (\\tilde {z} - \\phi_ {g} (\\tilde {I} _ {L R | R})) / \\exp \\left(\\theta_ {g} (\\tilde {I} _ {L R | R})\\right) \\tag {3}", + "image_path": "f77fdecb381e0b492214ca5c3e6165b8ec2a708e2fb00340b6a6bb12ea1b0af4.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 147, + 483, + 339, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 483, + 339, + 495 + ], + "spans": [ + { + "bbox": [ + 147, + 483, + 339, + 495 + ], + "type": "text", + "content": "The reverse mapping can be formulated as:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 227, + 502, + 481, + 515 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 502, + 481, + 515 + ], + "spans": [ + { + "bbox": [ + 227, + 502, + 481, + 515 + ], + "type": "interline_equation", + "content": "\\tilde {z} = z \\odot \\exp \\left(\\theta_ {g} \\left(\\tilde {I} _ {L R | R}\\right)\\right) + \\phi_ {g} \\left(\\tilde {I} _ {L R | R}\\right)) \\tag {4}", + "image_path": "a190064019f8b16ffd65eb0c704c862636869bd1418c99fc5cf6f5ae716b5a72.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 521, + 231, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 521, + 231, + 533 + ], + "spans": [ + { + "bbox": [ + 132, + 521, + 231, + 533 + ], + "type": "text", + "content": "Training objectives" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 533, + 481, + 
556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 481, + 556 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 481, + 556 + ], + "type": "text", + "content": "3R-INN is first trained on the rescaling and film grain removal/synthesis tasks, before being fine-tuned on the energy reduction task." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "spans": [ + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "text", + "content": "Rescaling and film grain removal/synthesis tasks The Forward Pass optimization is driven by a fidelity loss " + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{forw}" + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "text", + "content": " to guarantee a visually pleasant clean LR image " + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{LR}" + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "text", + "content": ", and a regularization loss " + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{reg}" + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "text", + "content": " to guarantee that the latent variable " + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "text", + "content": " follows a standard Gaussian distribution. To guide " + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "inline_equation", + "content": "f_{\\theta}" + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "text", + "content": " to generate " + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{LR}" + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "text", + "content": ", a downsampled image " + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "inline_equation", + "content": "I_{LR}" + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "text", + "content": " of the HR clean image " + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "inline_equation", + "content": "I_C" + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "text", + "content": " is computed by a bicubic filter, and used as ground-truth to minimize " + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{forw}" + }, + { + "bbox": [ + 130, + 559, + 482, + 633 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 227, + 639, + 481, + 667 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 639, + 481, + 667 + ], + "spans": [ + { + "bbox": [ + 227, + 639, + 481, + 667 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {f o r w}} \\left(\\tilde {I} _ {L R}, I _ {L R}\\right) = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left\\| \\tilde {I} _ {L R} - I _ {L R} \\right\\| _ {2} \\tag {5}", + "image_path": "104d4208b8e3e2409ff3e51c51c3278f30191a63e759f33f0b2fc376ea97d519.jpg" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, 
+ 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 231, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 231, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 231, + 101 + ], + "type": "text", + "content": "Z. Ameur et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 131, + 116, + 482, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 116, + 482, + 142 + ], + "spans": [ + { + "bbox": [ + 131, + 116, + 482, + 142 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 131, + 116, + 482, + 142 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 131, + 116, + 482, + 142 + ], + "type": "text", + "content": " the batch size. Second, the log-likelihood of the probability density function " + }, + { + "bbox": [ + 131, + 116, + 482, + 142 + ], + "type": "inline_equation", + "content": "p(z)" + }, + { + "bbox": [ + 131, + 116, + 482, + 142 + ], + "type": "text", + "content": " of the standard Gaussian distribution is maximized, with " + }, + { + "bbox": [ + 131, + 116, + 482, + 142 + ], + "type": "inline_equation", + "content": "D = \\dim (z)" + }, + { + "bbox": [ + 131, + 116, + 482, + 142 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 200, + 150, + 481, + 173 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 150, + 481, + 173 + ], + "spans": [ + { + "bbox": [ + 200, + 150, + 481, + 173 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {r e g} = - \\log (p (z)) = - \\log \\left(\\frac {1}{(2 \\pi) ^ {D / 2}} \\exp (- \\frac {1}{2} | | z | | ^ {2})\\right) \\tag {6}", + "image_path": "3f90a6da132ccf772d0234754a44ca7b47158182bedd4054621f15e3bacf3e8f.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "spans": [ + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "text", + "content": "The Inverse Pass optimization consists of two fidelity losses " + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text{back}_G}" + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text{back}_C}" + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "text", + "content": ", to restore " + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "inline_equation", + "content": "\\tilde{I}_G" + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "inline_equation", + "content": "\\tilde{I}_C" + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "text", + "content": ", respectively. 
To this end, " + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "text", + "content": " is first decoded into " + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "inline_equation", + "content": "\\tilde{z}" + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "text", + "content": " by the latent encoding block conditioned by " + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{LR}" + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "text", + "content": ". Then the disentanglement of film grain " + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "inline_equation", + "content": "(G)" + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "text", + "content": " and fine details " + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "inline_equation", + "content": "(D)" + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "text", + "content": " is performed with " + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "inline_equation", + "content": "\\tilde{z} = [\\tilde{z}_D, \\tilde{z}_G]" + }, + { + "bbox": [ + 131, + 175, + 479, + 222 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 223, + 479, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 223, + 479, + 246 + ], + "spans": [ + { + "bbox": [ + 131, + 223, + 479, + 246 + ], + "type": "inline_equation", + "content": "\\tilde{I}_G" + }, + { + "bbox": [ + 131, + 223, + 479, + 246 + ], + "type": "text", + "content": " is reconstructed by considering all the information contained in " + }, + { + "bbox": [ + 131, + 223, + 479, + 246 + ], + "type": "inline_equation", + "content": "\\tilde{z}" + }, + { + "bbox": [ + 131, + 223, + 479, + 246 + ], + "type": "text", + "content": ", i.e., related to film grain and fine details:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 201, + 254, + 481, + 282 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 254, + 481, + 282 + ], + "spans": [ + { + "bbox": [ + 201, + 254, + 481, + 282 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {b a c k} _ {G}} \\left(\\tilde {I} _ {G}, I _ {G}\\right) = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left| \\left| f _ {\\theta} ^ {- 1} \\left(\\tilde {I} _ {L R}, z \\right] \\right| _ {\\left| \\tilde {z} _ {D}, \\tilde {z} _ {G} \\right]} - I _ {G} \\right| | _ {1} \\tag {7}", + "image_path": "05b502423c0ae85870a5a091a3250d0ad4a5a488b5e7096ec739da59cc57a7b7.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 290, + 479, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 290, + 479, + 316 + ], + "spans": [ + { + "bbox": [ + 132, + 290, + 479, + 316 + ], + "type": "inline_equation", + "content": "\\tilde{I}_C" + }, + { + "bbox": [ + 132, + 290, + 479, + 316 + ], + "type": "text", + "content": " is restored by considering only the subset " + }, + { + "bbox": [ + 132, + 290, + 479, + 316 + ], + "type": "inline_equation", + "content": "\\tilde{z}_D" + }, + { + "bbox": [ + 132, + 290, + 479, + 316 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 132, + 290, + 479, + 316 + ], + "type": "inline_equation", + "content": "\\tilde{z}" + }, + { + "bbox": [ + 132, + 290, + 479, + 316 + ], + "type": "text", + "content": ", i.e., by 
using " + }, + { + "bbox": [ + 132, + 290, + 479, + 316 + ], + "type": "inline_equation", + "content": "\\tilde{z} = [\\tilde{z}_D, \\tilde{z}_G = 0]" + }, + { + "bbox": [ + 132, + 290, + 479, + 316 + ], + "type": "text", + "content": " as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 204, + 323, + 481, + 352 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 204, + 323, + 481, + 352 + ], + "spans": [ + { + "bbox": [ + 204, + 323, + 481, + 352 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {b a c k} _ {C}} \\left(\\tilde {I} _ {C}, I _ {C}\\right) = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\left\\| f _ {\\theta} ^ {- 1} \\left(\\tilde {I} _ {L R}, z\\right) _ {\\left| \\left[ \\tilde {z} _ {D}, 0 \\right] \\right.} - I _ {C} \\right\\| _ {1}, \\tag {8}", + "image_path": "d6372e13d990c6c8d59dd4124002d818c88c0c62acec7cb01b6fba92b5f27246.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 131, + 358, + 479, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 358, + 479, + 382 + ], + "spans": [ + { + "bbox": [ + 131, + 358, + 479, + 382 + ], + "type": "text", + "content": "For both fidelity losses, the " + }, + { + "bbox": [ + 131, + 358, + 479, + 382 + ], + "type": "inline_equation", + "content": "\\ell_1" + }, + { + "bbox": [ + 131, + 358, + 479, + 382 + ], + "type": "text", + "content": " norm is classically used as in [27, 42]. Finally, for the first two tasks, the following weighted sum is minimized:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 198, + 392, + 481, + 405 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 392, + 481, + 405 + ], + "spans": [ + { + "bbox": [ + 198, + 392, + 481, + 405 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {t o t a l}} = \\lambda_ {1} \\mathcal {L} _ {\\text {f o r w}} + \\lambda_ {2} \\mathcal {L} _ {\\text {r e g}} + \\lambda_ {3} \\mathcal {L} _ {\\text {b a c k} _ {C}} + \\lambda_ {4} \\mathcal {L} _ {\\text {b a c k} _ {G}} \\tag {9}", + "image_path": "10c83fd5caf8801c263df54726a98225fa126c8c1723550110594e187bde29d4.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 131, + 413, + 482, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 413, + 482, + 522 + ], + "spans": [ + { + "bbox": [ + 131, + 413, + 482, + 522 + ], + "type": "text", + "content": "Energy-aware task After 3R-INN learns the film grain removal/synthesis and rescaling tasks, it is fine-tuned during the forward pass with additional power and fidelity losses, " + }, + { + "bbox": [ + 131, + 413, + 482, + 522 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{pow}" + }, + { + "bbox": [ + 131, + 413, + 482, + 522 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 413, + 482, + 522 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{SSIM}" + }, + { + "bbox": [ + 131, + 413, + 482, + 522 + ], + "type": "text", + "content": ", to output an energy-aware grain-free LR image " + }, + { + "bbox": [ + 131, + 413, + 482, + 522 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{LR|R}" + }, + { + "bbox": [ + 131, + 413, + 482, + 522 + ], + "type": "text", + "content": ", i.e., its power consumption is reduced by " + }, + { + "bbox": [ + 131, + 413, + 482, + 522 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 131, + 413, + 482, + 522 + ], + "type": "text", + "content": " compared to the power consumption of 
" + }, + { + "bbox": [ + 131, + 413, + 482, + 522 + ], + "type": "inline_equation", + "content": "I_{LR}" + }, + { + "bbox": [ + 131, + 413, + 482, + 522 + ], + "type": "text", + "content": ". Contrary to most works computing energy aware images, assuming a linear relationship between the power consumption " + }, + { + "bbox": [ + 131, + 413, + 482, + 522 + ], + "type": "inline_equation", + "content": "P_Y" + }, + { + "bbox": [ + 131, + 413, + 482, + 522 + ], + "type": "text", + "content": " of an image and its linearized luminance [33], we follow the model in [11] dedicated to RGBW OLED screens, and compute " + }, + { + "bbox": [ + 131, + 413, + 482, + 522 + ], + "type": "inline_equation", + "content": "P_{RGBW}" + }, + { + "bbox": [ + 131, + 413, + 482, + 522 + ], + "type": "text", + "content": " as the sum of the powers consumed by the four R, G, B, W leds. As in [25], the following power loss is then minimized:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 219, + 531, + 481, + 544 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 531, + 481, + 544 + ], + "spans": [ + { + "bbox": [ + 219, + 531, + 481, + 544 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {p o w} = \\left| \\left| \\tilde {P} _ {R G B W} - (1 - R) \\times P _ {R G B W} \\right| \\right| _ {1} \\tag {10}", + "image_path": "ed32fddf3fe94ea134eaee5e534b6c631944a8fa08f423ca5264ea60dec8fefc.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 131, + 553, + 482, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 553, + 482, + 590 + ], + "spans": [ + { + "bbox": [ + 131, + 553, + 482, + 590 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 131, + 553, + 482, + 590 + ], + "type": "inline_equation", + "content": "(1 - R) \\times P_{RGBW}" + }, + { + "bbox": [ + 131, + 553, + 482, + 590 + ], + "type": "text", + "content": " the desired target power and " + }, + { + "bbox": [ + 131, + 553, + 482, + 590 + ], + "type": "inline_equation", + "content": "\\tilde{P}_{RGBW}" + }, + { + "bbox": [ + 131, + 553, + 482, + 590 + ], + "type": "text", + "content": " the power of " + }, + { + "bbox": [ + 131, + 553, + 482, + 590 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{LR|R}" + }, + { + "bbox": [ + 131, + 553, + 482, + 590 + ], + "type": "text", + "content": ". 
To ensure a better visual quality of the energy-aware images, a structural similarity index measure (SSIM) loss is added and minimized as follows:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 235, + 598, + 481, + 612 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 598, + 481, + 612 + ], + "spans": [ + { + "bbox": [ + 235, + 598, + 481, + 612 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {S S I M} = 1 - S S I M \\left(\\tilde {I} _ {L R | R}, I _ {L R}\\right) \\tag {11}", + "image_path": "128adf63dd1ad51e9179988e6a436182fa37cd5c1348f8c8cce1cdbba15a7df3.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 131, + 620, + 479, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 620, + 479, + 644 + ], + "spans": [ + { + "bbox": [ + 131, + 620, + 479, + 644 + ], + "type": "text", + "content": "As the inverse pass objectives remain exactly the same, the total loss minimized in the fine-tuning stage is:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 217, + 654, + 481, + 666 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 654, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 217, + 654, + 481, + 666 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {f i n e t u n e d}} = \\mathcal {L} _ {\\text {t o t a l}} + \\lambda_ {5} \\mathcal {L} _ {\\text {p o w}} + \\lambda_ {6} \\mathcal {L} _ {\\text {S S I M}} \\tag {12}", + "image_path": "cf880c65fdc429c992b1ba7b92a91f16d9d9bcc4c9c57a9cb790dc68314c49e7.jpg" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 154, + 91, + 448, + 103 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 91, + 448, + 103 + ], + "spans": [ + { + "bbox": [ + 154, + 91, + 448, + 103 + ], + "type": "text", + "content": "3R-INN: How to be climate friendly while consuming/delivering videos?" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 115, + 482, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 115, + 482, + 320 + ], + "spans": [ + { + "bbox": [ + 130, + 115, + 482, + 320 + ], + "type": "text", + "content": "Training details During training, we use the DIV2K training set [4] from the FilmGrainStyle740K dataset [5], which contains pairs of corresponding images with and without grain. To complement the DIV2K validation set, we evaluate 3R-INN on the BSDS300 test set [30] and Kodak24 dataset [14], which were augmented to add grainy versions of the images, by following the same process as in the FilmGrainStyle740K dataset3. Input images were randomly cropped into " + }, + { + "bbox": [ + 130, + 115, + 482, + 320 + ], + "type": "inline_equation", + "content": "144 \\times 144" + }, + { + "bbox": [ + 130, + 115, + 482, + 320 + ], + "type": "text", + "content": " and augmented by applying random horizontal and vertical flips. 
Other training parameters are: Adam optimizer [23, 35] with " + }, + { + "bbox": [ + 130, + 115, + 482, + 320 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.9" + }, + { + "bbox": [ + 130, + 115, + 482, + 320 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 115, + 482, + 320 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 0.999" + }, + { + "bbox": [ + 130, + 115, + 482, + 320 + ], + "type": "text", + "content": "; mini-batch size of 16; 500k (training of the first two tasks) + 5k (energy-aware fine-tuning) iterations; learning rate initialized as 2e-4 and halved at [100k, 200k, 300k, 400k] mini-batch updates. Hyper-parameters are set to: " + }, + { + "bbox": [ + 130, + 115, + 482, + 320 + ], + "type": "inline_equation", + "content": "(\\lambda_{1}, \\lambda_{2}, \\lambda_{3}, \\lambda_{4}, \\lambda_{5}, \\lambda_{6}) = (40, 1, 1, 1, 1e10, 1e4)" + }, + { + "bbox": [ + 130, + 115, + 482, + 320 + ], + "type": "text", + "content": " and eight successive invertible blocks are used. Scale and shift coefficients are learned through a five-layer densely connected convolutional block. Each convolutional filter is of size " + }, + { + "bbox": [ + 130, + 115, + 482, + 320 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 130, + 115, + 482, + 320 + ], + "type": "text", + "content": ", with padding 1, followed by a leaky ReLU activation layer with negative slope set to 0.2. The intermediate channel number of the convolutional blocks is fixed to 32. Dimensions of " + }, + { + "bbox": [ + 130, + 115, + 482, + 320 + ], + "type": "inline_equation", + "content": "\\tilde{z}_{D}" + }, + { + "bbox": [ + 130, + 115, + 482, + 320 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 115, + 482, + 320 + ], + "type": "inline_equation", + "content": "\\tilde{z}_{G}" + }, + { + "bbox": [ + 130, + 115, + 482, + 320 + ], + "type": "text", + "content": " were set to (8, 1), respectively." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 323, + 230, + 336 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 323, + 230, + 336 + ], + "spans": [ + { + "bbox": [ + 132, + 323, + 230, + 336 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 339, + 482, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 339, + 482, + 460 + ], + "spans": [ + { + "bbox": [ + 130, + 339, + 482, + 460 + ], + "type": "text", + "content": "The goal of our paper is to reduce the overall energy consumption along the video distribution system using 3R-INN, which primarily supports the display of a grain-free energy-aware LR image, and then offers the possibility to recover the original version, i.e., the grainy HR, as well as a clean HR version as a third option. Thus, we adopt the following evaluation use case: we first assess the energy savings achieved by using 3R-INN along the video chain, and evaluate its energy needs. Then, we assess its performances in terms of quality for the LR grain-free energy-aware images and the reconstructed HR grainy images against state-of-the-art methods. An evaluation of the reconstructed HR clean images and an ablation study are provided in the supplementary materials." 
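To make the training recipe above concrete, here is a hedged sketch of the optimizer and schedule as stated (Adam with beta1 = 0.9, beta2 = 0.999, learning rate 2e-4 halved at the listed milestones) together with the fine-tuning objective of equation (12). The stand-in one-layer model and the pytorch_msssim dependency are assumptions; the paper does not name its SSIM implementation or expose its code.

```python
import torch
from pytorch_msssim import ssim  # assumed differentiable SSIM implementation

# Stand-in for the eight-block invertible network (architecture not sketched here).
model = torch.nn.Conv2d(3, 3, kernel_size=3, padding=1)
optimizer = torch.optim.Adam(model.parameters(), lr=2e-4, betas=(0.9, 0.999))
# Learning rate halved at [100k, 200k, 300k, 400k] mini-batch updates.
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=[100_000, 200_000, 300_000, 400_000], gamma=0.5)

def loss_finetune(l_total: torch.Tensor, l_pow: torch.Tensor,
                  i_lr_r: torch.Tensor, i_lr: torch.Tensor,
                  lambda5: float = 1e10, lambda6: float = 1e4) -> torch.Tensor:
    """Eq. (12): L_total + lambda5 * L_pow + lambda6 * L_SSIM,
    with L_SSIM = 1 - SSIM(I_LR|R, I_LR) from eq. (11)."""
    l_ssim = 1.0 - ssim(i_lr_r, i_lr, data_range=1.0)
    return l_total + lambda5 * l_pow + lambda6 * l_ssim
```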
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 461, + 331, + 473 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 461, + 331, + 473 + ], + "spans": [ + { + "bbox": [ + 132, + 461, + 331, + 473 + ], + "type": "text", + "content": "4.1 Energy consumption performance" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 475, + 482, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 475, + 482, + 632 + ], + "spans": [ + { + "bbox": [ + 130, + 475, + 482, + 632 + ], + "type": "text", + "content": "Evaluation of energy savings To estimate the energy savings realized along the video distribution system (headend, delivery, decoding and display), we tested the full video transmission chain by applying 3R-INN on two JVET sequences RaceHorses (300 frames, " + }, + { + "bbox": [ + 130, + 475, + 482, + 632 + ], + "type": "inline_equation", + "content": "832 \\times 480" + }, + { + "bbox": [ + 130, + 475, + 482, + 632 + ], + "type": "text", + "content": ", 10s) and BasketBall (500 frames, HD) [8]. The LR clean energy-aware sequence at " + }, + { + "bbox": [ + 130, + 475, + 482, + 632 + ], + "type": "inline_equation", + "content": "R = 20\\%" + }, + { + "bbox": [ + 130, + 475, + 482, + 632 + ], + "type": "text", + "content": " is encoded using VTM [3], in full intra mode. Although not reflecting real-world scenarios in which efficient hardware decoders are used, the choice of using a non-optimized software-based VVC decoder (VTM) will nevertheless enable us to demonstrate that 3R-INN results in consistent energy savings. Fixed broadband transmission was assumed. We then decoded and displayed the sequences on an OLED screen, with all TV options disabled, including the ambient light setting. Similarly to state-of-the-art methods, no video-specific optimization was conducted, as 3R-INN works in a frame-by-frame manner. In particular, film grain is known to be temporally" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 231, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 231, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 231, + 102 + ], + "type": "text", + "content": "Z. Ameur et al."
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 133, + 642, + 504, + 665 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 642, + 504, + 665 + ], + "spans": [ + { + "bbox": [ + 133, + 642, + 504, + 665 + ], + "type": "text", + "content": "3 The additional dataset is proposed at www.interdigital.com/data_sets/filmgrainstyle740k-dataset" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 152, + 116, + 254, + 217 + ], + "blocks": [ + { + "bbox": [ + 152, + 116, + 254, + 217 + ], + "lines": [ + { + "bbox": [ + 152, + 116, + 254, + 217 + ], + "spans": [ + { + "bbox": [ + 152, + 116, + 254, + 217 + ], + "type": "image", + "image_path": "9b0ced8cab9cdd2963178d402629ae8cba68606eead14ef36b13f6cbde313217.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 218, + 479, + 240 + ], + "lines": [ + { + "bbox": [ + 130, + 218, + 479, + 240 + ], + "spans": [ + { + "bbox": [ + 130, + 218, + 479, + 240 + ], + "type": "text", + "content": "Fig. 2: Bit-rate, encoding and decoding times with and without using 3R-INN in terms of QP for sequence RaceHorses." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 255, + 116, + 357, + 216 + ], + "blocks": [ + { + "bbox": [ + 255, + 116, + 357, + 216 + ], + "lines": [ + { + "bbox": [ + 255, + 116, + 357, + 216 + ], + "spans": [ + { + "bbox": [ + 255, + 116, + 357, + 216 + ], + "type": "image", + "image_path": "2c337df963ffeacf269b9aa389832552d64ea35e4a17209bf1c4d78ac3dd1308.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 362, + 116, + 460, + 217 + ], + "blocks": [ + { + "bbox": [ + 362, + 116, + 460, + 217 + ], + "lines": [ + { + "bbox": [ + 362, + 116, + 460, + 217 + ], + "spans": [ + { + "bbox": [ + 362, + 116, + 460, + 217 + ], + "type": "image", + "image_path": "fe788dff4bd7434936a373e0c04d36bf4d40e05c294f17799ff96f9a635c8d2f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 244, + 479, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 244, + 479, + 281 + ], + "spans": [ + { + "bbox": [ + 130, + 244, + 479, + 281 + ], + "type": "text", + "content": "uncorrelated, hence its frame-based analysis. Resulting videos are provided in the supplementary material, illustrating 3R-INN capability to process temporal content." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 171, + 305, + 301, + 392 + ], + "blocks": [ + { + "bbox": [ + 171, + 305, + 301, + 392 + ], + "lines": [ + { + "bbox": [ + 171, + 305, + 301, + 392 + ], + "spans": [ + { + "bbox": [ + 171, + 305, + 301, + 392 + ], + "type": "image", + "image_path": "e7eff9f174d0779a35da9aac4cd0015a979d735f4174c3b1247d6a70d7b1bfe2.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 411, + 480, + 444 + ], + "lines": [ + { + "bbox": [ + 130, + 411, + 480, + 444 + ], + "spans": [ + { + "bbox": [ + 130, + 411, + 480, + 444 + ], + "type": "text", + "content": "Fig. 3: Measured power consumption when displaying sequence RaceHorses. 
Left: Comparison between HR and LR versions at " + }, + { + "bbox": [ + 130, + 411, + 480, + 444 + ], + "type": "inline_equation", + "content": "\\mathrm{QP} = 22" + }, + { + "bbox": [ + 130, + 411, + 480, + 444 + ], + "type": "text", + "content": ". Right: Comparison between LR versions before and after encoding/decoding." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 312, + 305, + 444, + 392 + ], + "blocks": [ + { + "bbox": [ + 312, + 305, + 444, + 392 + ], + "lines": [ + { + "bbox": [ + 312, + 305, + 444, + 392 + ], + "spans": [ + { + "bbox": [ + 312, + 305, + 444, + 392 + ], + "type": "image", + "image_path": "cf36dc1fce3dba8e53b21e099df879a4ffa1f653ef2a957ec567b3c6bd48910f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 450, + 481, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 450, + 481, + 593 + ], + "spans": [ + { + "bbox": [ + 130, + 450, + 481, + 593 + ], + "type": "text", + "content": "Figure 2 reports the average encoding/decoding times and bit-rates, for different quantization parameters (QP), for the original HR clean and grainy RaceHorses sequences, and for the resulting LR versions with different " + }, + { + "bbox": [ + 130, + 450, + 481, + 593 + ], + "type": "inline_equation", + "content": "R \\in \\{5\\%, 20\\%, 40\\%, 60\\% \\}" + }, + { + "bbox": [ + 130, + 450, + 481, + 593 + ], + "type": "text", + "content": ". Up to QP = 27, encoding and decoding the HR grainy video is more time and bit-rate demanding than for the HR clean version. For higher QPs, encoding time is still higher, however, bit-rate and decoding time are similar, because grain was removed during the encoding process. This confirms that compressing a grainy video while preserving film grain requires encoding at low QPs (which is far from the real-world scenario), leading to high and impractical bit-rates. On the contrary, encoding LR grain-free versions, whatever the value of " + }, + { + "bbox": [ + 130, + 450, + 481, + 593 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 130, + 450, + 481, + 593 + ], + "type": "text", + "content": ", shows substantially lower times and bit-rates, and consequently reduces the energy at the head-end, transmission and decoding stages." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": "Figure 3 presents actual measures of energy consumptions on an OLED LG-42C2 screen, for " + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "inline_equation", + "content": "R \\in \\{5\\%, 20\\%, 40\\%, 60\\% \\}" + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": ", for the sequence RaceHorses. On the left plot, we compare the consumption of the encoded/decoded LR and HR clean sequences at " + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\mathrm{QP} = 22" + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": ". This proves that displaying an energy-aware video at different reduction rates significantly reduces the display power consumption. 
The average gains of power are " + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "inline_equation", + "content": "6.8\\%, 21.5\\%, 33.3\\%" + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "inline_equation", + "content": "44.2\\%" + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": ". The right" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 154, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 154, + 91, + 447, + 102 + ], + "type": "text", + "content": "3R-INN: How to be climate friendly while consuming/delivering videos?" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 187 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 187 + ], + "type": "text", + "content": "plot compares the consumption of the LR sequences for different " + }, + { + "bbox": [ + 130, + 116, + 482, + 187 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 130, + 116, + 482, + 187 + ], + "type": "text", + "content": ", before and after encoding/decoding " + }, + { + "bbox": [ + 130, + 116, + 482, + 187 + ], + "type": "inline_equation", + "content": "(\\mathrm{QP} = 22)" + }, + { + "bbox": [ + 130, + 116, + 482, + 187 + ], + "type": "text", + "content": ". For each " + }, + { + "bbox": [ + 130, + 116, + 482, + 187 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 130, + 116, + 482, + 187 + ], + "type": "text", + "content": ", we observe a non-significant impact of the compression on the display consumption. This demonstrates that energy-aware images are to some extent robust to compression in terms of power values. Similar results are obtained for the sequence BasketBall (shown in the supplemental material)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "spans": [ + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "text", + "content": "Table 1 illustrates the end-to-end energy savings at each level of the video chain, for the sequence RaceHorses, at QP22 and " + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "inline_equation", + "content": "R = 20\\%" + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "text", + "content": ", according to the energy model in [17, 29]. Note that a range of power consumption values are considered for both encoding and decoding where the boundary values represent a very optimized vs. a non-optimized power consumption encoder, as well as hardware and software decoders, respectively (the detailed computation is provided in the supplemental material). 
3R-INN allows " + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "inline_equation", + "content": "74\\%" + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "inline_equation", + "content": "78\\%" + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "text", + "content": " of total end-to-end savings with respectively highly power optimized and non-optimized encoders/decoders. This corresponds to savings of " + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "inline_equation", + "content": "78\\%" + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "text", + "content": " for head-end, " + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "inline_equation", + "content": "19\\%" + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "text", + "content": " for delivery and ca. " + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "inline_equation", + "content": "77\\%" + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "text", + "content": " for decoding. From this, we draw several observations: Head-end: As expected, the encoding energy consumption " + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "inline_equation", + "content": "E_{c}" + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "text", + "content": " significantly depends on the incoming resolution. The HR sequences are the most energy-demanding, particularly when they contain film grain, making encoding at a lower resolution a wise choice. Delivery: The transmission energy consumption " + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "inline_equation", + "content": "E_{t}" + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "text", + "content": " does not strongly depend on bitrate, but more on the power consumed by the infrastructure. Thus, transmitting HR instead of LR content results in a relatively small energy gain. Decoding: The gain in energy consumption for the decoding operation is significant, even if, in absolute value, it remains quite low. Display: Displaying the energy-aware clean LR video at " + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "inline_equation", + "content": "R = 20\\%" + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "text", + "content": " results in " + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "inline_equation", + "content": "11\\%" + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "text", + "content": " of energy consumption reduction compared to the original grainy HR video. With the removal of the static consumption of the screen, the achieved energy reduction is higher and nearly reaches the target rate of " + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "text", + "content": ". In absolute value, the energy consumption of display " + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "inline_equation", + "content": "E_{D}" + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "text", + "content": " is significant compared to that of the other components of the video chain, except for the encoder. However, since the content is encoded once and displayed several times, the display energy gains are further multiplied by the number of displays. 
Assuming that the sequence RaceHorses is viewed by " + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 130, + 189, + 482, + 537 + ], + "type": "text", + "content": " of Netflix subscribers [2] for one hour, using 3R-INN would save 156 GWh of energy, equivalent to the monthly consumption of 176 American citizens [1]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 538, + 483, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 538, + 483, + 586 + ], + "spans": [ + { + "bbox": [ + 130, + 538, + 483, + 586 + ], + "type": "text", + "content": "Energy cost of using 3R-INN 3R-INN is a single network that replaces three separate NN architectures for the tasks of grain removal /synthesis, rescaling and building energy-aware images. In that sense, Table 2 reports a comparison of its complexity and energy performance against those of the sum of the" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 132, + 612, + 482, + 663 + ], + "blocks": [ + { + "bbox": [ + 130, + 588, + 482, + 611 + ], + "lines": [ + { + "bbox": [ + 130, + 588, + 482, + 611 + ], + "spans": [ + { + "bbox": [ + 130, + 588, + 482, + 611 + ], + "type": "text", + "content": "Table 1: End-to-end energy savings along the video chain for a fixed broadband access. Case study of the sequence RaceHorses (300 frames, " + }, + { + "bbox": [ + 130, + 588, + 482, + 611 + ], + "type": "inline_equation", + "content": "832 \\times 480" + }, + { + "bbox": [ + 130, + 588, + 482, + 611 + ], + "type": "text", + "content": ", 30fps, 10s)." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 132, + 612, + 482, + 663 + ], + "lines": [ + { + "bbox": [ + 132, + 612, + 482, + 663 + ], + "spans": [ + { + "bbox": [ + 132, + 612, + 482, + 663 + ], + "type": "table", + "html": "
<table><tr><th></th><th>Headend</th><th>Delivery</th><th>Decoding</th><th>Display</th><th>Total</th></tr>
<tr><th></th><th>Encoding time (s), E_c</th><th>Bitrate (kbps) (QP=22), E_t</th><th>Decoding time (s), E_d</th><th>Power (W), E_D</th><th></th></tr>
<tr><td>Original (Grainy HR)</td><td>46682, [2.59W, 1167kWh]</td><td>16668, 0.0055Wh</td><td>23.2, [0.005W, 0.15Wh]</td><td>60.8, 0.168Wh</td><td>[2.7685W, 1167.0003kWh]</td></tr>
<tr><td>Original (Clean HR)</td><td>37901, [2.10W, 947kWh]</td><td>14516, 0.0053Wh</td><td>21.0, [0.004W, 0.14Wh]</td><td>59.7, 0.165Wh</td><td>[2.2743W, 947.0003kWh]</td></tr>
<tr><td>Ours (Energy-aware clean LR) R = 20%</td><td>10150, [0.56W, 253kWh]</td><td>4237, 0.0045Wh</td><td>5.7, [0.001W, 0.039Wh]</td><td>54.4, 0.151Wh</td><td>[0.7165W, 253.0001kWh]</td></tr>
<tr><td>Reduction in % Grainy HR vs Ours</td><td>[78%, 78%]</td><td>19%</td><td>[80%, 74%]</td><td>11%</td><td>[74%, 78%]</td></tr>
<tr><td>Reduction in energy Grainy HR vs Ours</td><td>[2.03W, 914kWh]</td><td>0.0010Wh</td><td>[0.004W, 0.11Wh]</td><td>0.017Wh</td><td>[2.052W, 914.0002kWh]</td></tr></table>
", + "image_path": "2e06f7703642759ca04914164c77b441f2880c452a70446b2d5ca353951b3675.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 231, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 231, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 231, + 102 + ], + "type": "text", + "content": "Z. Ameur et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 201 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 201 + ], + "type": "text", + "content": "best-performing networks for the three tasks, on sequence RaceHorses, in terms of number of parameters, number of Multiply-ACcumulate operations (MACs), power consumption and number of equivalent displays. Power consumption is approximated using the model in [16], which we assume is valid for all the networks used in this study. The number of equivalent displays represents the number of displays/users that are needed to counterbalance the needed energy to run the network(s). Computation details are provided in the supplementary materials." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 203, + 482, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 203, + 482, + 312 + ], + "spans": [ + { + "bbox": [ + 130, + 203, + 482, + 312 + ], + "type": "text", + "content": "While producing a clean, " + }, + { + "bbox": [ + 130, + 203, + 482, + 312 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 130, + 203, + 482, + 312 + ], + "type": "text", + "content": " energy-reduced LR image from an HR grainy one, the cost of running 3R-INN in a forward pass is compared with the successive use of three NN architectures: StyleFG analyzer module to model film grain, IRN to remove it and downscale the image, and InvEAN to reduce its energy consumption. From the first part of Table 2, 3R-INN counts significantly fewer parameters/operations and needs less power than the combination of the three networks. Moreover, solely comparing with the savings at the display side (" + }, + { + "bbox": [ + 130, + 203, + 482, + 312 + ], + "type": "inline_equation", + "content": "\\approx 6" + }, + { + "bbox": [ + 130, + 203, + 482, + 312 + ], + "type": "text", + "content": " Watts, see Table 1), the power gain is tangible as soon as the content is displayed 110 times for 3R-INN vs. 161 times for the combination." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 315, + 482, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 315, + 482, + 399 + ], + "spans": [ + { + "bbox": [ + 130, + 315, + 482, + 399 + ], + "type": "text", + "content": "In case recovering an HR grainy image is required, 3R-INN is simply run in an inverse pass, thus, its complexity and power savings remain the same. In contrast, InvEAN and IRN are first run in an inverse pass to recover a clean HR version; then, film grain is synthesized using StyleFG synthesizer module. 
This time, 3R-INN use represents " + }, + { + "bbox": [ + 130, + 315, + 482, + 399 + ], + "type": "inline_equation", + "content": "58\\%" + }, + { + "bbox": [ + 130, + 315, + 482, + 399 + ], + "type": "text", + "content": " less power than the combination (660 vs. 1573W), and requires fewer equivalent displays (110 vs. 262) to offset its power requirements." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 403, + 428, + 416 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 403, + 428, + 416 + ], + "spans": [ + { + "bbox": [ + 131, + 403, + 428, + 416 + ], + "type": "text", + "content": "4.2 Quantitative and qualitative evaluation of LR images" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 422, + 482, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 422, + 482, + 566 + ], + "spans": [ + { + "bbox": [ + 130, + 422, + 482, + 566 + ], + "type": "text", + "content": "Film grain removal and downscaling The quantitative and qualitative evaluations of the LR clean image " + }, + { + "bbox": [ + 130, + 422, + 482, + 566 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{LR|R=0}" + }, + { + "bbox": [ + 130, + 422, + 482, + 566 + ], + "type": "text", + "content": ", i.e., corresponding to an energy reduction rate " + }, + { + "bbox": [ + 130, + 422, + 482, + 566 + ], + "type": "inline_equation", + "content": "R = 0" + }, + { + "bbox": [ + 130, + 422, + 482, + 566 + ], + "type": "text", + "content": ", are given in Table 3 and Figure 4, respectively. The reference image is the bicubic rescaling of the HR clean image. Although quite similar to the experimental protocol used for IRN in [42], we here assess the ability of the network both to rescale and to remove film grain. Thus, for a fair comparison, we re-train IRN on our dataset to perform both tasks, given grainy HR images as input. The latter is designed and optimized for clean HR image reconstruction only, although it can reconstruct grainy HR images if the original high-frequencies " + }, + { + "bbox": [ + 130, + 422, + 482, + 566 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 130, + 422, + 482, + 566 + ], + "type": "text", + "content": " are provided. However, in the video chain context, this would lead to the transmission of heavy metadata, whereas 3R-INN operates without any transmission of metadata. Results show that the proposed method outperforms IRN in terms" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 132, + 607, + 482, + 663 + ], + "blocks": [ + { + "bbox": [ + 130, + 574, + 482, + 606 + ], + "lines": [ + { + "bbox": [ + 130, + 574, + 482, + 606 + ], + "spans": [ + { + "bbox": [ + 130, + 574, + 482, + 606 + ], + "type": "text", + "content": "Table 2: Comparison of complexity and power performance of 3R-INN against the use of three independent neural networks for sequence RaceHorses. A corresponding power equivalent in terms of number of displays is also given." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 132, + 607, + 482, + 663 + ], + "lines": [ + { + "bbox": [ + 132, + 607, + 482, + 663 + ], + "spans": [ + { + "bbox": [ + 132, + 607, + 482, + 663 + ], + "type": "table", + "html": "
<table><tr><th>Output</th><th>Network(s)</th><th>Parameters</th><th>GMACs</th><th>Power (W)</th><th>Display equivalent #</th></tr>
<tr><td rowspan="2">Clean LR at R=20%</td><td>StyleFG analyzer + IRN + InvEAN</td><td>23.3M</td><td>334</td><td>967</td><td>161</td></tr>
<tr><td>3R-INN (forward)</td><td>1.7M</td><td>230</td><td>660</td><td>110</td></tr>
<tr><td rowspan="2">Grainy HR</td><td>StyleFG synthesizer + IRN + InvEAN</td><td>36.3M</td><td>616</td><td>1573</td><td>262</td></tr>
<tr><td>3R-INN (inverse)</td><td>1.7M</td><td>230</td><td>660</td><td>110</td></tr></table>
", + "image_path": "33dded2a8709c1b0b40c06823080df68e47e2da8efe0e3febf6fefaf53b30abe.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 154, + 90, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 90, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 154, + 90, + 447, + 102 + ], + "type": "text", + "content": "3R-INN: How to be climate friendly while consuming/delivering videos?" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 200, + 126, + 411, + 175 + ], + "blocks": [ + { + "bbox": [ + 132, + 102, + 481, + 125 + ], + "lines": [ + { + "bbox": [ + 132, + 102, + 481, + 125 + ], + "spans": [ + { + "bbox": [ + 132, + 102, + 481, + 125 + ], + "type": "text", + "content": "Table 3: Comparison between generated LR clean images " + }, + { + "bbox": [ + 132, + 102, + 481, + 125 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{LR|R=0}" + }, + { + "bbox": [ + 132, + 102, + 481, + 125 + ], + "type": "text", + "content": " and a bicubic rescaling of the HR clean image as ground-truth." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 200, + 126, + 411, + 175 + ], + "lines": [ + { + "bbox": [ + 200, + 126, + 411, + 175 + ], + "spans": [ + { + "bbox": [ + 200, + 126, + 411, + 175 + ], + "type": "table", + "html": "
<table><tr><th rowspan="2">Method</th><th colspan="2">DIV2K</th><th colspan="2">BSDS300</th><th colspan="2">Kodak24</th></tr>
<tr><th>PSNR ↑</th><th>SSIM ↑</th><th>PSNR ↑</th><th>SSIM ↑</th><th>PSNR ↑</th><th>SSIM ↑</th></tr>
<tr><td>IRN [42]</td><td>39.06</td><td>0.942</td><td>38.95</td><td>0.953</td><td>38.75</td><td>0.947</td></tr>
<tr><td>Ours</td><td>39.63</td><td>0.951</td><td>39.79</td><td>0.964</td><td>39.71</td><td>0.957</td></tr></table>
", + "image_path": "d4fea4b8981bf8f449912860edb64a3c16fe3d35a35f539dc9ef55eb9666eee2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 147, + 187, + 252, + 269 + ], + "blocks": [ + { + "bbox": [ + 147, + 187, + 252, + 269 + ], + "lines": [ + { + "bbox": [ + 147, + 187, + 252, + 269 + ], + "spans": [ + { + "bbox": [ + 147, + 187, + 252, + 269 + ], + "type": "image", + "image_path": "afc44e58838d201eeec2a5a84e8eab8dda1352470b68e3613e635ceaf51711bc.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 175, + 269, + 224, + 277 + ], + "lines": [ + { + "bbox": [ + 175, + 269, + 224, + 277 + ], + "spans": [ + { + "bbox": [ + 175, + 269, + 224, + 277 + ], + "type": "text", + "content": "Ground-truth" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 184, + 277, + 216, + 285 + ], + "lines": [ + { + "bbox": [ + 184, + 277, + 216, + 285 + ], + "spans": [ + { + "bbox": [ + 184, + 277, + 216, + 285 + ], + "type": "text", + "content": "(bicubic)" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 132, + 286, + 479, + 322 + ], + "lines": [ + { + "bbox": [ + 132, + 286, + 479, + 322 + ], + "spans": [ + { + "bbox": [ + 132, + 286, + 479, + 322 + ], + "type": "text", + "content": "Fig. 4: Comparison between a bicubic downscaling, IRN and the clean LR " + }, + { + "bbox": [ + 132, + 286, + 479, + 322 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{LR|R=0}" + }, + { + "bbox": [ + 132, + 286, + 479, + 322 + ], + "type": "text", + "content": ". of PSNR and SSIM. They also outline its good generalization, as even better performances are observed on BSDS300 and Kodak24 datasets." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 254, + 188, + 359, + 269 + ], + "blocks": [ + { + "bbox": [ + 254, + 188, + 359, + 269 + ], + "lines": [ + { + "bbox": [ + 254, + 188, + 359, + 269 + ], + "spans": [ + { + "bbox": [ + 254, + 188, + 359, + 269 + ], + "type": "image", + "image_path": "590af96f3e142ffec186d9df5b768322342eef5dd61c4deee9c0988207e255d1.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 299, + 270, + 315, + 277 + ], + "lines": [ + { + "bbox": [ + 299, + 270, + 315, + 277 + ], + "spans": [ + { + "bbox": [ + 299, + 270, + 315, + 277 + ], + "type": "text", + "content": "IRN" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 272, + 277, + 342, + 285 + ], + "lines": [ + { + "bbox": [ + 272, + 277, + 342, + 285 + ], + "spans": [ + { + "bbox": [ + 272, + 277, + 342, + 285 + ], + "type": "inline_equation", + "content": "\\mathrm{(PSNR = 43.10dB)}" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 361, + 188, + 467, + 269 + ], + "blocks": [ + { + "bbox": [ + 361, + 188, + 467, + 269 + ], + "lines": [ + { + "bbox": [ + 361, + 188, + 467, + 269 + ], + "spans": [ + { + "bbox": [ + 361, + 188, + 467, + 269 + ], + "type": "image", + "image_path": "141995e98c7a0f2b5a7a1ee961024f37a354b86f3d4f930898eea5e84a9a7e31.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 405, + 270, + 423, + 277 + ], + "lines": [ + { + "bbox": [ + 405, + 270, + 423, + 277 + ], + "spans": [ + { + "bbox": [ + 405, + 270, + 423, + 277 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 378, + 277, + 449, + 285 + ], + "lines": [ + { + "bbox": [ + 378, + 277, + 449, + 285 + ], + "spans": [ + { + "bbox": [ + 378, + 277, + 449, + 285 + ], + "type": "inline_equation", + "content": "\\mathrm{(PSNR = 43.14dB)}" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 325, + 482, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 325, + 482, + 517 + ], + "spans": [ + { + "bbox": [ + 130, + 325, + 482, + 517 + ], + "type": "text", + "content": "Energy-aware images For " + }, + { + "bbox": [ + 130, + 325, + 482, + 517 + ], + "type": "inline_equation", + "content": "R > 0" + }, + { + "bbox": [ + 130, + 325, + 482, + 517 + ], + "type": "text", + "content": ", we evaluate the visual quality of the LR clean energy-aware image " + }, + { + "bbox": [ + 130, + 325, + 482, + 517 + ], + "type": "inline_equation", + "content": "\\tilde{I}_{LR|R}" + }, + { + "bbox": [ + 130, + 325, + 482, + 517 + ], + "type": "text", + "content": " against state-of-the-art energy-aware methods, i.e., a global linear scaling of the luminance (LS), R-ACE [33], DeepPVR [26] and InvEAN [25]. To solely evaluate the energy-aware task, and for a fair comparison, existing methods were evaluated while taking as input the output of our method after the fine tuning step with " + }, + { + "bbox": [ + 130, + 325, + 482, + 517 + ], + "type": "inline_equation", + "content": "R = 0" + }, + { + "bbox": [ + 130, + 325, + 482, + 517 + ], + "type": "text", + "content": ". All evaluation metrics in the following were calculated with this image as reference. 
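The PSNR-Y and SSIM scores discussed next can be computed along the following lines; skimage is an assumed tooling choice, and the BT.709 luma weights are an assumption, since this excerpt does not specify the exact luma transform used for PSNR-Y.

```python
import numpy as np
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

LUMA_BT709 = np.array([0.2126, 0.7152, 0.0722])  # assumed luma weights

def psnr_y(reference: np.ndarray, test: np.ndarray) -> float:
    """PSNR computed on the luma channel of float RGB images in [0, 1]."""
    return peak_signal_noise_ratio(reference @ LUMA_BT709,
                                   test @ LUMA_BT709, data_range=1.0)

def ssim_score(reference: np.ndarray, test: np.ndarray) -> float:
    """SSIM on float RGB images in [0, 1]."""
    return structural_similarity(reference, test,
                                 channel_axis=-1, data_range=1.0)
```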
Table 4 reports PSNR-Y and SSIM metrics at four reduction rates, on three test sets. Two conclusions can be drawn. First, when the power consumption model " + }, + { + "bbox": [ + 130, + 325, + 482, + 517 + ], + "type": "inline_equation", + "content": "P_{Y}" + }, + { + "bbox": [ + 130, + 325, + 482, + 517 + ], + "type": "text", + "content": " is used for a fair comparison with state-of-the-art methods, the proposed method outperforms LS and R-ACE methods, while being similar to DeepPVR and slightly below InvEAN. When the power consumption model " + }, + { + "bbox": [ + 130, + 325, + 482, + 517 + ], + "type": "inline_equation", + "content": "P_{RGBW}" + }, + { + "bbox": [ + 130, + 325, + 482, + 517 + ], + "type": "text", + "content": " is used, the quality scores of 3R-INN are significantly better, and especially for the PSNR-Y. This can be explained by the fact that our model does not learn to reduce the image luminance, contrary to state-of-art methods. The latter in turn were not trained to optimize " + }, + { + "bbox": [ + 130, + 325, + 482, + 517 + ], + "type": "inline_equation", + "content": "P_{RGBW}" + }, + { + "bbox": [ + 130, + 325, + 482, + 517 + ], + "type": "text", + "content": "; this may explain their lower performances." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 518, + 482, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 518, + 482, + 555 + ], + "spans": [ + { + "bbox": [ + 130, + 518, + 482, + 555 + ], + "type": "text", + "content": "This trend is confirmed by Figure 6 which plots SSIM scores as function of the actual reduction rate, computed with " + }, + { + "bbox": [ + 130, + 518, + 482, + 555 + ], + "type": "inline_equation", + "content": "P_{RGBW}" + }, + { + "bbox": [ + 130, + 518, + 482, + 555 + ], + "type": "text", + "content": ". PSNR plots are provided in the supplemental material. Figure 5 shows a qualitative comparison of energy-" + } + ] + } + ], + "index": 15 + }, + { + "type": "table", + "bbox": [ + 132, + 613, + 480, + 663 + ], + "blocks": [ + { + "bbox": [ + 130, + 567, + 482, + 612 + ], + "lines": [ + { + "bbox": [ + 130, + 567, + 482, + 612 + ], + "spans": [ + { + "bbox": [ + 130, + 567, + 482, + 612 + ], + "type": "text", + "content": "Table 4: PSNR-Y and SSIM quality scores for the energy-aware task for four reduction rates " + }, + { + "bbox": [ + 130, + 567, + 482, + 612 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 130, + 567, + 482, + 612 + ], + "type": "text", + "content": ". 3R-INN results are presented for two power consumption models, i.e. " + }, + { + "bbox": [ + 130, + 567, + 482, + 612 + ], + "type": "inline_equation", + "content": "P_{Y}" + }, + { + "bbox": [ + 130, + 567, + 482, + 612 + ], + "type": "text", + "content": " (for comparison with state-of-the-art methods) and " + }, + { + "bbox": [ + 130, + 567, + 482, + 612 + ], + "type": "inline_equation", + "content": "P_{RGBW}" + }, + { + "bbox": [ + 130, + 567, + 482, + 612 + ], + "type": "text", + "content": ", corresponding to RGB and RGBW OLED screens, respectively. InvEAN model is not available at " + }, + { + "bbox": [ + 130, + 567, + 482, + 612 + ], + "type": "inline_equation", + "content": "R = 5\\%" + }, + { + "bbox": [ + 130, + 567, + 482, + 612 + ], + "type": "text", + "content": " in [25]." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 132, + 613, + 480, + 663 + ], + "lines": [ + { + "bbox": [ + 132, + 613, + 480, + 663 + ], + "spans": [ + { + "bbox": [ + 132, + 613, + 480, + 663 + ], + "type": "table", + "html": "
<table><tr><th rowspan="2">Method</th><th colspan="4">DIV2K</th><th colspan="4">BSDS300</th><th colspan="4">Kodak24</th></tr>
<tr><th>R=5%</th><th>R=20%</th><th>R=40%</th><th>R=60%</th><th>R=5%</th><th>R=20%</th><th>R=40%</th><th>R=60%</th><th>R=5%</th><th>R=20%</th><th>R=40%</th><th>R=60%</th></tr>
<tr><td>LS</td><td>39.34/0.999</td><td>27.01/0.991</td><td>20.33/0.958</td><td>16.06/0.877</td><td>39.64/0.999</td><td>27.31/0.990</td><td>20.67/0.955</td><td>16.35/0.867</td><td>39.38/0.999</td><td>27.05/0.991</td><td>20.41/0.957</td><td>16.09/0.875</td></tr>
<tr><td>R-ACE [33]</td><td>41.53/0.995</td><td>26.59/0.967</td><td>20.05/0.901</td><td>15.92/0.788</td><td>40.55/0.997</td><td>26.90/0.978</td><td>20.24/0.915</td><td>16.12/0.806</td><td>40.70/0.997</td><td>26.74/0.983</td><td>20.08/0.930</td><td>15.98/0.830</td></tr>
<tr><td>DeepPVR [26]</td><td>39.37/0.996</td><td>27.12/0.983</td><td>21.04/0.952</td><td>15.81/0.890</td><td>39.63/0.997</td><td>27.53/0.989</td><td>21.13/0.959</td><td>16.36/0.894</td><td>39.27/0.997</td><td>27.17/0.989</td><td>20.61/0.955</td><td>16.00/0.892</td></tr>
<tr><td>InvEAN [25]</td><td>-</td><td>27.75/0.994</td><td>21.17/0.973</td><td>17.07/0.932</td><td>-</td><td>28.25/0.993</td><td>21.74/0.973</td><td>17.72/0.931</td><td>-</td><td>27.92/0.993</td><td>21.42/0.973</td><td>17.37/0.932</td></tr>
<tr><td>Ours (P_Y)</td><td>39.55/0.987</td><td>27.32/0.980</td><td>20.62/0.949</td><td>16.43/0.883</td><td>40.06/0.994</td><td>27.65/0.986</td><td>20.94/0.955</td><td>16.77/0.883</td><td>40.02/0.992</td><td>27.43/0.985</td><td>20.70/0.954</td><td>16.51/0.886</td></tr>
<tr><td>Ours (P_RGBW)</td><td>47.68/0.998</td><td>38.02/0.993</td><td>29.15/0.974</td><td>23.66/0.945</td><td>48.33/0.999</td><td>38.36/0.995</td><td>30.47/0.983</td><td>24.96/0.961</td><td>47.47/0.998</td><td>37.39/0.994</td><td>29.63/0.982</td><td>24.18/0.958</td></tr></table>
", + "image_path": "c8c73ddab39180629d23339a74d67f780dc97dd4b4c6722293867c0fc6f6269a.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_body" + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 230, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 230, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 230, + 100 + ], + "type": "text", + "content": "Z. Ameur et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 143, + 114, + 196, + 152 + ], + "blocks": [ + { + "bbox": [ + 143, + 114, + 196, + 152 + ], + "lines": [ + { + "bbox": [ + 143, + 114, + 196, + 152 + ], + "spans": [ + { + "bbox": [ + 143, + 114, + 196, + 152 + ], + "type": "image", + "image_path": "0e836ad70e4fe81b8e75b9db052a23a3c859f8aff9ce4301c94af8dde956a27b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 143, + 152, + 196, + 188 + ], + "blocks": [ + { + "bbox": [ + 143, + 152, + 196, + 188 + ], + "lines": [ + { + "bbox": [ + 143, + 152, + 196, + 188 + ], + "spans": [ + { + "bbox": [ + 143, + 152, + 196, + 188 + ], + "type": "image", + "image_path": "12de44a2c6babce9606368f13791d14d7bd329ee3fb2ef7ac68d570d882abf7f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 143, + 188, + 196, + 224 + ], + "blocks": [ + { + "bbox": [ + 143, + 188, + 196, + 224 + ], + "lines": [ + { + "bbox": [ + 143, + 188, + 196, + 224 + ], + "spans": [ + { + "bbox": [ + 143, + 188, + 196, + 224 + ], + "type": "image", + "image_path": "9228acd3f65cb119f5880aabed0d050481134619268fb2b0adc277b5c19f9ca9.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 155, + 225, + 184, + 234 + ], + "lines": [ + { + "bbox": [ + 155, + 225, + 184, + 234 + ], + "spans": [ + { + "bbox": [ + 155, + 225, + 184, + 234 + ], + "type": "text", + "content": "Original" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 197, + 114, + 251, + 151 + ], + "blocks": [ + { + "bbox": [ + 197, + 114, + 251, + 151 + ], + "lines": [ + { + "bbox": [ + 197, + 114, + 251, + 151 + ], + "spans": [ + { + "bbox": [ + 197, + 114, + 251, + 151 + ], + "type": "image", + "image_path": "8499ddd0eea85411321c447b70d152899b244e827b196272cb80e5ab1ca5fd28.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 197, + 151, + 251, + 188 + ], + "blocks": [ + { + "bbox": [ + 197, + 151, + 251, + 188 + ], + "lines": [ + { + "bbox": [ + 197, + 151, + 251, + 188 + ], + "spans": [ + { + "bbox": [ + 197, + 151, + 251, + 188 + ], + "type": "image", + "image_path": "9a55a29ec925a75151424a0050dcb22baf8510f0358c09db8ace0a2bb67fa878.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 197, + 188, + 251, + 224 + ], + "blocks": [ + { + "bbox": [ + 197, + 188, + 251, + 224 + ], + "lines": [ + { 
+ "bbox": [ + 197, + 188, + 251, + 224 + ], + "spans": [ + { + "bbox": [ + 197, + 188, + 251, + 224 + ], + "type": "image", + "image_path": "c8dc217e3958352259ec21c415b1e07adba4ada3317669afe13f4f1a34426048.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 219, + 225, + 230, + 233 + ], + "lines": [ + { + "bbox": [ + 219, + 225, + 230, + 233 + ], + "spans": [ + { + "bbox": [ + 219, + 225, + 230, + 233 + ], + "type": "text", + "content": "LS" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 252, + 114, + 306, + 151 + ], + "blocks": [ + { + "bbox": [ + 252, + 114, + 306, + 151 + ], + "lines": [ + { + "bbox": [ + 252, + 114, + 306, + 151 + ], + "spans": [ + { + "bbox": [ + 252, + 114, + 306, + 151 + ], + "type": "image", + "image_path": "298024752f2c570b5a2304bfb2668bd421157d22d537fb839a68d4dea8fb4897.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 252, + 151, + 306, + 187 + ], + "blocks": [ + { + "bbox": [ + 252, + 151, + 306, + 187 + ], + "lines": [ + { + "bbox": [ + 252, + 151, + 306, + 187 + ], + "spans": [ + { + "bbox": [ + 252, + 151, + 306, + 187 + ], + "type": "image", + "image_path": "abb7c855d56abda193c739564f63ab95a089ba580ace4a06bcac1341289eeb78.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 252, + 188, + 306, + 224 + ], + "blocks": [ + { + "bbox": [ + 252, + 188, + 306, + 224 + ], + "lines": [ + { + "bbox": [ + 252, + 188, + 306, + 224 + ], + "spans": [ + { + "bbox": [ + 252, + 188, + 306, + 224 + ], + "type": "image", + "image_path": "9d5b522312fe765cbb58901aa208da8d425dc7b9b410a776ab0340de5f807a94.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 268, + 225, + 291, + 234 + ], + "lines": [ + { + "bbox": [ + 268, + 225, + 291, + 234 + ], + "spans": [ + { + "bbox": [ + 268, + 225, + 291, + 234 + ], + "type": "text", + "content": "RACE" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 132, + 235, + 481, + 268 + ], + "lines": [ + { + "bbox": [ + 132, + 235, + 481, + 268 + ], + "spans": [ + { + "bbox": [ + 132, + 235, + 481, + 268 + ], + "type": "text", + "content": "Fig. 5: Comparison of generated energy-aware images with the state-of-the-art, for " + }, + { + "bbox": [ + 132, + 235, + 481, + 268 + ], + "type": "inline_equation", + "content": "R \\in \\{5\\%, 20\\%, 40\\% \\}" + }, + { + "bbox": [ + 132, + 235, + 481, + 268 + ], + "type": "text", + "content": " from first to third lines. Achieved rates computed by the power model in [11] are provided. InvEAN model is not available at " + }, + { + "bbox": [ + 132, + 235, + 481, + 268 + ], + "type": "inline_equation", + "content": "R = 5\\%" + }, + { + "bbox": [ + 132, + 235, + 481, + 268 + ], + "type": "text", + "content": " in [25]." 
+ } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 307, + 114, + 361, + 151 + ], + "blocks": [ + { + "bbox": [ + 307, + 114, + 361, + 151 + ], + "lines": [ + { + "bbox": [ + 307, + 114, + 361, + 151 + ], + "spans": [ + { + "bbox": [ + 307, + 114, + 361, + 151 + ], + "type": "image", + "image_path": "1ac6223cf4493bc5fbf77d2e3e898494b9a131c2975868da3cff4efa0839d86a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 307, + 151, + 361, + 188 + ], + "blocks": [ + { + "bbox": [ + 307, + 151, + 361, + 188 + ], + "lines": [ + { + "bbox": [ + 307, + 151, + 361, + 188 + ], + "spans": [ + { + "bbox": [ + 307, + 151, + 361, + 188 + ], + "type": "image", + "image_path": "709d471a3b2b8a99b3631a0e64ab66f830ecf12b85fcb9b6767412bb7f1867db.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 307, + 188, + 361, + 224 + ], + "blocks": [ + { + "bbox": [ + 307, + 188, + 361, + 224 + ], + "lines": [ + { + "bbox": [ + 307, + 188, + 361, + 224 + ], + "spans": [ + { + "bbox": [ + 307, + 188, + 361, + 224 + ], + "type": "image", + "image_path": "51376c1d436f64bb81967422f403c08cd985a116db5c3b990a413f3185134b9c.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 316, + 225, + 352, + 235 + ], + "lines": [ + { + "bbox": [ + 316, + 225, + 352, + 235 + ], + "spans": [ + { + "bbox": [ + 316, + 225, + 352, + 235 + ], + "type": "text", + "content": "DeepPVR" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 362, + 114, + 416, + 151 + ], + "blocks": [ + { + "bbox": [ + 362, + 114, + 416, + 151 + ], + "lines": [ + { + "bbox": [ + 362, + 114, + 416, + 151 + ], + "spans": [ + { + "bbox": [ + 362, + 114, + 416, + 151 + ], + "type": "image", + "image_path": "cfe1d9e2a331c333ef646c748f89dbbd8388af6bb037572ab239a62c4b5e925b.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 362, + 151, + 416, + 187 + ], + "blocks": [ + { + "bbox": [ + 362, + 151, + 416, + 187 + ], + "lines": [ + { + "bbox": [ + 362, + 151, + 416, + 187 + ], + "spans": [ + { + "bbox": [ + 362, + 151, + 416, + 187 + ], + "type": "image", + "image_path": "256ece244dc934c8e741968f8cf8119c4166d7048c4160be598295379e0a04ca.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 362, + 188, + 416, + 224 + ], + "blocks": [ + { + "bbox": [ + 362, + 188, + 416, + 224 + ], + "lines": [ + { + "bbox": [ + 362, + 188, + 416, + 224 + ], + "spans": [ + { + "bbox": [ + 362, + 188, + 416, + 224 + ], + "type": "image", + "image_path": "12f53dabffcff583a69d06a970d47b5ebd5f0adc2b0e9549551241ba289c7185.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 374, + 225, + 404, + 234 + ], + "lines": [ + { + "bbox": [ + 374, + 225, + 404, + 234 + ], + "spans": [ + { + "bbox": [ + 374, + 225, + 404, + 234 + ], + "type": "text", + "content": "InvEAN" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 417, + 114, + 471, + 151 + ], + "blocks": [ + { + "bbox": [ + 417, + 114, + 471, + 151 + ], + "lines": [ + { + "bbox": 
[ + 417, + 114, + 471, + 151 + ], + "spans": [ + { + "bbox": [ + 417, + 114, + 471, + 151 + ], + "type": "image", + "image_path": "2dfb3e5052e2321183f2def5c822ec13f542113eb7992ee4f4099313b04250d7.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 417, + 151, + 471, + 187 + ], + "blocks": [ + { + "bbox": [ + 417, + 151, + 471, + 187 + ], + "lines": [ + { + "bbox": [ + 417, + 151, + 471, + 187 + ], + "spans": [ + { + "bbox": [ + 417, + 151, + 471, + 187 + ], + "type": "image", + "image_path": "d2d7988097960325aa2e3d9b963cb0740416b4fcdc61fcf55a5b8afae13ffbde.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 417, + 188, + 471, + 224 + ], + "blocks": [ + { + "bbox": [ + 417, + 188, + 471, + 224 + ], + "lines": [ + { + "bbox": [ + 417, + 188, + 471, + 224 + ], + "spans": [ + { + "bbox": [ + 417, + 188, + 471, + 224 + ], + "type": "image", + "image_path": "c9067f867d96f7659e4a5d4706e2481b7db6ea3a6f727168a4f6f16262781955.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 435, + 225, + 454, + 234 + ], + "lines": [ + { + "bbox": [ + 435, + 225, + 454, + 234 + ], + "spans": [ + { + "bbox": [ + 435, + 225, + 454, + 234 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "bbox": [ + 130, + 274, + 482, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 274, + 482, + 369 + ], + "spans": [ + { + "bbox": [ + 130, + 274, + 482, + 369 + ], + "type": "text", + "content": "aware images. 3R-INN and LS respect the reduction rate targets better than other methods. Our method also exhibits a different behavior for high values of " + }, + { + "bbox": [ + 130, + 274, + 482, + 369 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 130, + 274, + 482, + 369 + ], + "type": "text", + "content": ", once again keeping the luminance but modifying the colors. The subjective comparison is however difficult since the achieved energy reduction rates vary from one method to another. Although not fully dedicated to the energy-reduction task, 3R-INN performs well compared to existing methods and similarly to InvEAN, it offers the possibility to recover the original image without any side-information." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 131, + 373, + 465, + 385 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 373, + 465, + 385 + ], + "spans": [ + { + "bbox": [ + 131, + 373, + 465, + 385 + ], + "type": "text", + "content": "4.3 Quantitative and qualitative evaluation of HR grainy images" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 130, + 388, + 482, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 388, + 482, + 508 + ], + "spans": [ + { + "bbox": [ + 130, + 388, + 482, + 508 + ], + "type": "text", + "content": "The reversibility property of 3R-INN is an important feature. To evaluate this property, we evaluated the HR grainy reconstruction with state-of-the-art film grain synthesis methods: VVC (Versatile Video Coding) implementation [34], Deep-FG [6] and Style-FG [6]. 
Table 5 summarizes the quantitative results for " + }, + { + "bbox": [ + 130, + 388, + 482, + 508 + ], + "type": "inline_equation", + "content": "R = 0" + }, + { + "bbox": [ + 130, + 388, + 482, + 508 + ], + "type": "text", + "content": ", in terms of fidelity of the synthesized grain using learned perceptual image patch similarity (LPIPS), JSDNSS and the KL divergence (KLD) [45], these last two being computed between the histograms of ground-truth and HR grainy images. All methods perform analysis and synthesis except Deep-FG for which we generated 5 versions of grain, one per available intensity level, and kept only the best performing image for each metric in the comparison." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 131, + 509, + 481, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 509, + 481, + 533 + ], + "spans": [ + { + "bbox": [ + 131, + 509, + 481, + 533 + ], + "type": "text", + "content": "Results show that the proposed method outperforms quantitatively VVC [34] and Deep-FG [6]. It also performs better than Style-FG [6] for LPIPS and KLD" + } + ] + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 169, + 558, + 445, + 649 + ], + "blocks": [ + { + "bbox": [ + 169, + 558, + 445, + 649 + ], + "lines": [ + { + "bbox": [ + 169, + 558, + 445, + 649 + ], + "spans": [ + { + "bbox": [ + 169, + 558, + 445, + 649 + ], + "type": "image", + "image_path": "932e2ce72c3513e3263ff1db0347547ee8771b1a57148bce63b3af12b1a10b8b.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 650, + 481, + 671 + ], + "lines": [ + { + "bbox": [ + 131, + 650, + 481, + 671 + ], + "spans": [ + { + "bbox": [ + 131, + 650, + 481, + 671 + ], + "type": "text", + "content": "Fig. 6: SSIM scores as function of the target power reduction, for the different energy-aware methods." + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 154, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 154, + 91, + 447, + 102 + ], + "type": "text", + "content": "3R-INN: How to be climate friendly while consuming/delivering videos?" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 201, + 125, + 410, + 178 + ], + "blocks": [ + { + "bbox": [ + 132, + 102, + 479, + 124 + ], + "lines": [ + { + "bbox": [ + 132, + 102, + 479, + 124 + ], + "spans": [ + { + "bbox": [ + 132, + 102, + 479, + 124 + ], + "type": "text", + "content": "Table 5: Comparison between reconstructed HR grainy images and ground-truth for different methods on DIV2K validation set." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 201, + 125, + 410, + 178 + ], + "lines": [ + { + "bbox": [ + 201, + 125, + 410, + 178 + ], + "spans": [ + { + "bbox": [ + 201, + 125, + 410, + 178 + ], + "type": "table", + "html": "
<table>
<tr><td>Method</td><td>Analysis</td><td>Auxiliary data</td><td>JSD-NSS ↓</td><td>LPIPS ↓</td><td>KLD ↓</td></tr>
<tr><td>VVC [34]</td><td>✓</td><td>set of params</td><td>0.0148</td><td>0.2981</td><td>0.0327</td></tr>
<tr><td>Deep-FG [6]</td><td>x</td><td>x</td><td>0.0134</td><td>0.3722</td><td>0.0260</td></tr>
<tr><td>Style-FG [5]</td><td>✓</td><td>style vector</td><td>0.0024</td><td>0.1592</td><td>0.0232</td></tr>
<tr><td>Ours</td><td>✓</td><td>none</td><td>0.0088</td><td>0.0445</td><td>0.0177</td></tr>
</table>
", + "image_path": "cf7a81f020ff91602fcaca59e5f22e4d82a1bcf3673c868115dca5f587cde288.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 205, + 482, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 205, + 482, + 301 + ], + "spans": [ + { + "bbox": [ + 130, + 205, + 482, + 301 + ], + "type": "text", + "content": "metrics which are representative of the quality of generated grain. The lower JSD-NSS value for Style-FG [6] could be explained by the fact that it is a GAN-based network which models the data distribution at the expense of the output quality. The qualitative comparison in Figure 7 confirms these observations (additional results in the supplemental material). Another advantage of 3R-INN is that no auxiliary data is required for grain synthesis, unlike VVC and Style-FG, which transmit a set of parameters and a style vector respectively. Similar results are obtained for " + }, + { + "bbox": [ + 130, + 205, + 482, + 301 + ], + "type": "inline_equation", + "content": "R > 0" + }, + { + "bbox": [ + 130, + 205, + 482, + 301 + ], + "type": "text", + "content": " and are presented as supplemental material." + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 135, + 327, + 202, + 389 + ], + "blocks": [ + { + "bbox": [ + 135, + 327, + 202, + 389 + ], + "lines": [ + { + "bbox": [ + 135, + 327, + 202, + 389 + ], + "spans": [ + { + "bbox": [ + 135, + 327, + 202, + 389 + ], + "type": "image", + "image_path": "94acb4fef121f6a6576c367eb884d717e2a55ff601e2ec677564155f3c266479.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 145, + 390, + 194, + 398 + ], + "lines": [ + { + "bbox": [ + 145, + 390, + 194, + 398 + ], + "spans": [ + { + "bbox": [ + 145, + 390, + 194, + 398 + ], + "type": "text", + "content": "Ground-truth" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 205, + 327, + 271, + 389 + ], + "blocks": [ + { + "bbox": [ + 205, + 327, + 271, + 389 + ], + "lines": [ + { + "bbox": [ + 205, + 327, + 271, + 389 + ], + "spans": [ + { + "bbox": [ + 205, + 327, + 271, + 389 + ], + "type": "image", + "image_path": "d1d4065c6f2443ba0c46eb445fd13c2ca22fdb9bf5295614a7029a5a427a4fb9.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 214, + 390, + 262, + 398 + ], + "lines": [ + { + "bbox": [ + 214, + 390, + 262, + 398 + ], + "spans": [ + { + "bbox": [ + 214, + 390, + 262, + 398 + ], + "type": "text", + "content": "VVC (0.3343)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 130, + 399, + 479, + 421 + ], + "lines": [ + { + "bbox": [ + 130, + 399, + 479, + 421 + ], + "spans": [ + { + "bbox": [ + 130, + 399, + 479, + 421 + ], + "type": "text", + "content": "Fig. 7: Qualitative evaluation of HR synthesized grainy images for different methods, with LPIPS values between parenthesis." 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 273, + 327, + 340, + 389 + ], + "blocks": [ + { + "bbox": [ + 273, + 327, + 340, + 389 + ], + "lines": [ + { + "bbox": [ + 273, + 327, + 340, + 389 + ], + "spans": [ + { + "bbox": [ + 273, + 327, + 340, + 389 + ], + "type": "image", + "image_path": "d33342e3aa7e2cd67444a8367880bc08fb19ce30ba13240ed9da328eb11f911b.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 277, + 390, + 336, + 398 + ], + "lines": [ + { + "bbox": [ + 277, + 390, + 336, + 398 + ], + "spans": [ + { + "bbox": [ + 277, + 390, + 336, + 398 + ], + "type": "text", + "content": "DeepFG (0.3533)" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 342, + 327, + 408, + 389 + ], + "blocks": [ + { + "bbox": [ + 342, + 327, + 408, + 389 + ], + "lines": [ + { + "bbox": [ + 342, + 327, + 408, + 389 + ], + "spans": [ + { + "bbox": [ + 342, + 327, + 408, + 389 + ], + "type": "image", + "image_path": "9199feb4d23090f8fa7b3c44a50fef6553598571e992eb083203eb50a08b87a6.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 345, + 390, + 406, + 398 + ], + "lines": [ + { + "bbox": [ + 345, + 390, + 406, + 398 + ], + "spans": [ + { + "bbox": [ + 345, + 390, + 406, + 398 + ], + "type": "text", + "content": "StyleFG (0.1693)" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 410, + 327, + 477, + 389 + ], + "blocks": [ + { + "bbox": [ + 410, + 327, + 477, + 389 + ], + "lines": [ + { + "bbox": [ + 410, + 327, + 477, + 389 + ], + "spans": [ + { + "bbox": [ + 410, + 327, + 477, + 389 + ], + "type": "image", + "image_path": "3d217dfcfe541c0fdae42ceb7d563e2c108b6a263b16955e7b2a5216369f26f7.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 420, + 390, + 468, + 398 + ], + "lines": [ + { + "bbox": [ + 420, + 390, + 468, + 398 + ], + "spans": [ + { + "bbox": [ + 420, + 390, + 468, + 398 + ], + "type": "text", + "content": "Ours (0.0508)" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 431, + 220, + 444 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 431, + 220, + 444 + ], + "spans": [ + { + "bbox": [ + 132, + 431, + 220, + 444 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 130, + 449, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 449, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 449, + 482, + 666 + ], + "type": "text", + "content": "This paper presents 3R-INN, the first network that enables to reduce the overall energy consumption in the video transmission chain. Given an HR grainy image, 3R-INN delivers a minimum viable quality, low-resolution, grain-free and energy-aware image, thus reducing the energy required for encoding, transmission, decoding and display. With multiple views of the same content, 3R-INN achieves a positive energy balance, far more efficient than current state-of-the-art systems. 
Furthermore it does not need to transmit auxiliary information to reconstruct the original grainy content, since all the lost information including details, film grain and brightness was encoded and disentangled in a standard Gaussian distribution, through a latent encoding block conditioned on the LR image. Experimental results demonstrate that 3R-INN outperforms the existing methods by a large margin for film grain synthesis, and achieves state-of-the-art performance in the rescaling and energy-aware tasks. For the latter, a fine-tuning for each value of energy reduction rate target " + }, + { + "bbox": [ + 130, + 449, + 482, + 666 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 130, + 449, + 482, + 666 + ], + "type": "text", + "content": " was conducted. Conditioning the network on " + }, + { + "bbox": [ + 130, + 449, + 482, + 666 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 130, + 449, + 482, + 666 + ], + "type": "text", + "content": " to avoid fine-tuning different networks for each value of " + }, + { + "bbox": [ + 130, + 449, + 482, + 666 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 130, + 449, + 482, + 666 + ], + "type": "text", + "content": ", will therefore be investigated in the future, as an extension of current work. Some subjective test will also be conducted to assess the acceptability by end users of the provided LR energy-aware images." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 230, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 230, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 230, + 100 + ], + "type": "text", + "content": "Z. Ameur et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 114, + 197, + 126 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 114, + 197, + 126 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 197, + 126 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 130, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 138, + 130, + 479, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 130, + 479, + 151 + ], + "spans": [ + { + "bbox": [ + 138, + 130, + 479, + 151 + ], + "type": "text", + "content": "1. Energy consumption household. https://www.energybot.com/blog/average-energy-consumption.html." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 152, + 480, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 152, + 480, + 184 + ], + "spans": [ + { + "bbox": [ + 138, + 152, + 480, + 184 + ], + "type": "text", + "content": "2. Netflix subscribers. https://www.usnews.com/news/business/articles/2024-01-23/netflixs-subscriber-growth-surges-as-streaming-service-unwraps-best-ever-holiday-season-results." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 185, + 480, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 185, + 480, + 205 + ], + "spans": [ + { + "bbox": [ + 138, + 185, + 480, + 205 + ], + "type": "text", + "content": "3. Vtm-19.0. https://vctgit.hhi.fraunhofer.de/jvet/VVCSoftware_VTM/~/tags/VTM-19.0" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 206, + 480, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 206, + 480, + 239 + ], + "spans": [ + { + "bbox": [ + 138, + 206, + 480, + 239 + ], + "type": "text", + "content": "4. Agustsson, E., Timofte, R.: Ntire 2017 challenge on single image super-resolution: Dataset and study. In: Proceedings of the IEEE conference on computer vision and pattern recognition workshops. pp. 126-135 (2017)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 239, + 480, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 239, + 480, + 272 + ], + "spans": [ + { + "bbox": [ + 138, + 239, + 480, + 272 + ], + "type": "text", + "content": "5. Ameur, Z., Demarty, C.H., Le Meur, O., Menard, D., François, E.: Style-based film grain analysis and synthesis. In: Proceedings of the 14th Conference on ACM Multimedia Systems. pp. 229-238 (2023)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 272, + 480, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 272, + 480, + 304 + ], + "spans": [ + { + "bbox": [ + 138, + 272, + 480, + 304 + ], + "type": "text", + "content": "6. Ameur, Z., Hamidouche, W., François, E., Radosavljevic, M., Menard, D., Demarty, C.H.: Deep-based film grain removal and synthesis. IEEE Transactions on Image Processing (2023)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 305, + 480, + 359 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 305, + 480, + 359 + ], + "spans": [ + { + "bbox": [ + 138, + 305, + 480, + 359 + ], + "type": "text", + "content": "7. Bonniveau, C., Hamidouche, W., Travers, J.F., Déforges, O.: Versatile video coding and super-resolution for efficient delivery of 8k video with 4k backward-compatibility. In: ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). pp. 2048-2052 (2020). https://doi.org/10.1109/ICASSP40776.2020.9054716" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 360, + 480, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 360, + 480, + 392 + ], + "spans": [ + { + "bbox": [ + 138, + 360, + 480, + 392 + ], + "type": "text", + "content": "8. Boyce, J., Suehring, K., Li, X., Seregin, V.: Jvet-j1010: Jvet common test conditions and software reference configurations. In: 10th Meeting of the Joint Video Experts Team. pp. JVET-J1010 (2018)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 392, + 480, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 392, + 480, + 425 + ], + "spans": [ + { + "bbox": [ + 138, + 392, + 480, + 425 + ], + "type": "text", + "content": "9. Chen, Z., Liu, T., Huang, J.J., Zhao, W., Bi, X., Wang, M.: Invertible mosaic image hiding network for very large capacity image steganography. 
arXiv preprint arXiv:2309.08987 (2023)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 426, + 480, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 426, + 480, + 458 + ], + "spans": [ + { + "bbox": [ + 138, + 426, + 480, + 458 + ], + "type": "text", + "content": "0. Dai, J., Au, O.C., Pang, C., Yang, W., Zou, F.: Film grain noise removal and synthesis in video coding. In: 2010 IEEE International Conference on Acoustics, Speech and Signal Processing. pp. 890-893. IEEE (2010)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 459, + 480, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 459, + 480, + 491 + ], + "spans": [ + { + "bbox": [ + 138, + 459, + 480, + 491 + ], + "type": "text", + "content": "1. Demarty, C.H., Blondé, L., Le Meur, O.: Display power modeling for energy consumption control. In: 2023 IEEE International Conference on Image Processing (ICIP). IEEE (2023)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 491, + 480, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 491, + 480, + 512 + ], + "spans": [ + { + "bbox": [ + 138, + 491, + 480, + 512 + ], + "type": "text", + "content": "2. Dinh, L., Sohl-Dickstein, J., Bengio, S.: Density estimation using real nvp. arXiv preprint arXiv:1605.08803 (2016)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 513, + 480, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 513, + 480, + 534 + ], + "spans": [ + { + "bbox": [ + 138, + 513, + 480, + 534 + ], + "type": "text", + "content": "3. Du, W., Chen, H., Zhang, Y., Yang, H.: Hierarchical disentangled representation for invertible image denoising and beyond. arXiv preprint arXiv:2301.13358 (2023)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 535, + 480, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 535, + 480, + 555 + ], + "spans": [ + { + "bbox": [ + 138, + 535, + 480, + 555 + ], + "type": "text", + "content": "4. Franzen, R.: Kodak lossless true color image suite. source: http://r0k.us/graphics/kodak 4(2), 9 (1999)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 556, + 480, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 556, + 480, + 567 + ], + "spans": [ + { + "bbox": [ + 138, + 556, + 480, + 567 + ], + "type": "text", + "content": "5. Gomila, C.: Sei message for film grain encoding. JVT document, May 2003 (2003)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 138, + 567, + 480, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 567, + 480, + 599 + ], + "spans": [ + { + "bbox": [ + 138, + 567, + 480, + 599 + ], + "type": "text", + "content": "6. Herglotz, C., Brand, F., Regensky, A., Rievel, F., Kaup, A.: Processing energy modeling for neural network based image compression. In: 2023 IEEE International Conference on Image Processing (ICIP). pp. 2390-2394. IEEE (2023)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 138, + 600, + 480, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 600, + 480, + 632 + ], + "spans": [ + { + "bbox": [ + 138, + 600, + 480, + 632 + ], + "type": "text", + "content": "7. Herglotz, C., Kränzler, M., Schober, R., Kaup, A.: Sweet streams are made of this: The system engineer's view on energy efficiency in video communications [feature]. 
IEEE Circuits and Systems Magazine 23(1), 57-77 (2023)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 138, + 632, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 632, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 632, + 480, + 665 + ], + "type": "text", + "content": "8. Huang, G., Liu, Z., Van Der Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 4700-4708 (2017)" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 154, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 154, + 91, + 447, + 102 + ], + "type": "text", + "content": "3R-INN: How to be climate friendly while consuming/delivering videos?" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 117, + 480, + 666 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 133, + 117, + 480, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 117, + 480, + 149 + ], + "spans": [ + { + "bbox": [ + 133, + 117, + 480, + 149 + ], + "type": "text", + "content": "19. Hwang, I., Jeong, J., Choi, J., Choe, Y.: Enhanced film grain noise removal for high fidelity video coding. In: 2013 International Conference on Information Science and Cloud Computing Companion. pp. 668-674. IEEE (2013)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 150, + 480, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 150, + 480, + 172 + ], + "spans": [ + { + "bbox": [ + 132, + 150, + 480, + 172 + ], + "type": "text", + "content": "20. Kang, S.J.: Image-quality-based power control technique for organic light emitting diode displays. Journal of Display Technology 11(1), 104-109 (2015)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 172, + 480, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 172, + 480, + 205 + ], + "spans": [ + { + "bbox": [ + 132, + 172, + 480, + 205 + ], + "type": "text", + "content": "21. Kang, S.j., Kim, Y.H.: Image integrity-based gray-level error control for low power liquid crystal displays. IEEE Transactions on Consumer Electronics 55(4), 2401-2406 (2009). https://doi.org/10.1109/TCE.2009.5373816" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 205, + 480, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 205, + 480, + 237 + ], + "spans": [ + { + "bbox": [ + 132, + 205, + 480, + 237 + ], + "type": "text", + "content": "22. Kim, H., Choi, M., Lim, B., Lee, K.M.: Task-aware image downscaling. In: Proceedings of the European conference on computer vision (ECCV). pp. 399-414 (2018)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 237, + 480, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 237, + 480, + 259 + ], + "spans": [ + { + "bbox": [ + 132, + 237, + 480, + 259 + ], + "type": "text", + "content": "23. 
Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 259, + 480, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 259, + 480, + 282 + ], + "spans": [ + { + "bbox": [ + 132, + 259, + 480, + 282 + ], + "type": "text", + "content": "24. Kingma, D.P., Dhariwal, P.: Glow: Generative flow with invertible 1x1 convolutions. Advances in neural information processing systems 31 (2018)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 282, + 480, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 282, + 480, + 304 + ], + "spans": [ + { + "bbox": [ + 132, + 282, + 480, + 304 + ], + "type": "text", + "content": "25. Le Meur, O., Demarty, C.H.: Invertible energy-aware images. IEEE Signal Processing Letters (2023)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 304, + 480, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 304, + 480, + 336 + ], + "spans": [ + { + "bbox": [ + 132, + 304, + 480, + 336 + ], + "type": "text", + "content": "26. Le Meur, O., Demarty, C.H., Blondé, L.: Deep-learning-based energy aware images. In: 2023 IEEE International Conference on Image Processing (ICIP). pp. 590-594. IEEE (2023)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 336, + 480, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 336, + 480, + 380 + ], + "spans": [ + { + "bbox": [ + 132, + 336, + 480, + 380 + ], + "type": "text", + "content": "27. Liu, Y., Qin, Z., Anwar, S., Ji, P., Kim, D., Caldwell, S., Gedeon, T.: Invertible denoising network: A light solution for real noise removal. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 13365-13374 (2021)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 380, + 480, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 380, + 480, + 413 + ], + "spans": [ + { + "bbox": [ + 132, + 380, + 480, + 413 + ], + "type": "text", + "content": "28. Lu, S.P., Wang, R., Zhong, T., Rosin, P.L.: Large-capacity image steganography based on invertible neural networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 10816-10825 (2021)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 413, + 480, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 413, + 480, + 445 + ], + "spans": [ + { + "bbox": [ + 132, + 413, + 480, + 445 + ], + "type": "text", + "content": "29. Malmodin, J.: The power consumption of mobile and fixed network data services—the case of streaming video and downloading large files. In: Electronics Goes Green. vol. 2020 (2020)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 446, + 480, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 446, + 480, + 490 + ], + "spans": [ + { + "bbox": [ + 132, + 446, + 480, + 490 + ], + "type": "text", + "content": "30. Martin, D., Fowlkes, C., Tal, D., Malik, J.: A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In: Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001. vol. 2, pp. 416-423. 
IEEE (2001)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 490, + 480, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 490, + 480, + 522 + ], + "spans": [ + { + "bbox": [ + 132, + 490, + 480, + 522 + ], + "type": "text", + "content": "31. Newson, A., Delon, J., Galerne, B.: A stochastic film grain model for resolution-independent rendering. In: Computer Graphics Forum. vol. 36, pp. 684-699. Wiley Online Library (2017)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 522, + 480, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 522, + 480, + 544 + ], + "spans": [ + { + "bbox": [ + 132, + 522, + 480, + 544 + ], + "type": "text", + "content": "32. Norkin, A., Birkbeck, N.: Film grain synthesis for av1 video codec. In: 2018 Data Compression Conference. pp. 3-12. IEEE (2018)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 544, + 480, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 544, + 480, + 578 + ], + "spans": [ + { + "bbox": [ + 132, + 544, + 480, + 578 + ], + "type": "text", + "content": "33. Nugroho, K.A., Ruan, S.J.: R-ace network for oled image power saving. In: 2022 IEEE 4th Global Conference on Life Sciences and Technologies (LifeTech). pp. 284-285. IEEE (2022)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 578, + 480, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 578, + 480, + 610 + ], + "spans": [ + { + "bbox": [ + 132, + 578, + 480, + 610 + ], + "type": "text", + "content": "34. Radosavljevic, M., François, E., Reinhard, E., Hamidouche, W., Amestoy, T.: Implementation of film-grain technology within vvc. In: Applications of Digital Image Processing XLIV. vol. 11842, pp. 85-95. SPIE (2021)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 610, + 480, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 610, + 480, + 632 + ], + "spans": [ + { + "bbox": [ + 132, + 610, + 480, + 632 + ], + "type": "text", + "content": "35. Reddi, S.J., Kale, S., Kumar, S.: On the convergence of adam and beyond. arXiv preprint arXiv:1904.09237 (2019)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 132, + 632, + 480, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 632, + 480, + 666 + ], + "spans": [ + { + "bbox": [ + 132, + 632, + 480, + 666 + ], + "type": "text", + "content": "36. Reinhard, E., Demarty, C.H., Blondé, L.: Pixel value adjustment to reduce the energy requirements of display devices. SMPTE Motion Imaging Journal 132(7), 10-19 (2023)" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 230, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 230, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 230, + 101 + ], + "type": "text", + "content": "Z. Ameur et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 117, + 480, + 369 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 132, + 117, + 480, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 117, + 480, + 149 + ], + "spans": [ + { + "bbox": [ + 132, + 117, + 480, + 149 + ], + "type": "text", + "content": "37. Robinson, D.: Greening of streaming: The less accord: Low energy sustainable streaming. In: Proceedings of the 2nd Mile-High Video Conference (MHV'23). p. 115 (2023)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 150, + 480, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 150, + 480, + 171 + ], + "spans": [ + { + "bbox": [ + 132, + 150, + 480, + 171 + ], + "type": "text", + "content": "38. Shin, Y.G., Park, S., Yoo, M.J., Ko, S.J.: Unsupervised deep power saving and contrast enhancement for oled displays. arXiv preprint arXiv:1905.05916 (2019)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 172, + 480, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 172, + 480, + 193 + ], + "spans": [ + { + "bbox": [ + 132, + 172, + 480, + 193 + ], + "type": "text", + "content": "39. Stoyan, D., Kendall, W.S., Chiu, S.N., Mecke, J.: Stochastic geometry and its applications. John Wiley & Sons (2013)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 194, + 480, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 194, + 480, + 216 + ], + "spans": [ + { + "bbox": [ + 132, + 194, + 480, + 216 + ], + "type": "text", + "content": "40. Sun, W., Chen, Z.: Learned image downscaling for upscaling using content adaptive resampler. IEEE Transactions on Image Processing 29, 4027-4040 (2020)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 216, + 480, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 216, + 480, + 237 + ], + "spans": [ + { + "bbox": [ + 132, + 216, + 480, + 237 + ], + "type": "text", + "content": "41. Trust, T.C.: Carbon impact of video streaming. https://www.carbontrust.com/eneu/node/1537 (2021)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 237, + 480, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 237, + 480, + 281 + ], + "spans": [ + { + "bbox": [ + 132, + 237, + 480, + 281 + ], + "type": "text", + "content": "42. Xiao, M., Zheng, S., Liu, C., Wang, Y., He, D., Ke, G., Bian, J., Lin, Z., Liu, T.Y.: Invertible image rescaling. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part I 16. pp. 126-144. Springer (2020)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 281, + 480, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 281, + 480, + 314 + ], + "spans": [ + { + "bbox": [ + 132, + 281, + 480, + 314 + ], + "type": "text", + "content": "43. Yin, J.L., Chen, B.H., Peng, Y.T., Tsai, C.C.: Deep battery saver: End-to-end learning for power constrained contrast enhancement. IEEE Transactions on Multimedia 23, 1049-1059 (2020)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 314, + 480, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 314, + 480, + 336 + ], + "spans": [ + { + "bbox": [ + 132, + 314, + 480, + 336 + ], + "type": "text", + "content": "44. 
Zhao, R., Liu, T., Xiao, J., Lun, D.P., Lam, K.M.: Invertible image decolorization. IEEE Transactions on Image Processing 30, 6081-6095 (2021)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 336, + 480, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 336, + 480, + 369 + ], + "spans": [ + { + "bbox": [ + 132, + 336, + 480, + 369 + ], + "type": "text", + "content": "45. Zhu, F., Chen, G., Hao, J., Heng, P.A.: Blind image denoising via dependent dirichlet process tree. IEEE transactions on pattern analysis and machine intelligence 39(8), 1518-1531 (2016)" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 154, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 154, + 91, + 447, + 102 + ], + "type": "text", + "content": "3R-INN: How to be climate friendly while consuming/delivering videos?" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3iGS_ Factorised Tensorial Illumination for 3D Gaussian Splatting/25df5a9d-fc43-4ff8-b4e4-a2a9b9e269ba_content_list.json b/2024/3iGS_ Factorised Tensorial Illumination for 3D Gaussian Splatting/25df5a9d-fc43-4ff8-b4e4-a2a9b9e269ba_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..4f6951f7c8bfeb158a915c7d134d5f7a4944f07a --- /dev/null +++ b/2024/3iGS_ Factorised Tensorial Illumination for 3D Gaussian Splatting/25df5a9d-fc43-4ff8-b4e4-a2a9b9e269ba_content_list.json @@ -0,0 +1,1589 @@ +[ + { + "type": "text", + "text": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splatting", + "text_level": 1, + "bbox": [ + 236, + 140, + 767, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhe Jun Tang1 and Tat-Jen Cham2", + "bbox": [ + 357, + 212, + 645, + 227 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ S-Lab, Nanyang Technological University \n $^{2}$ College of Computing & Data Science, Nanyang Technological University {zhejun001} at {e.ntu.edu.sg}", + "bbox": [ + 250, + 239, + 753, + 282 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. The use of 3D Gaussians as representation of radiance fields has enabled high quality novel view synthesis at real-time rendering speed. However, the choice of optimising the outgoing radiance of each Gaussian independently as spherical harmonics results in unsatisfactory view dependent effects. In response to these limitations, our work, Factorised Tensorial Illumination for 3D Gaussian Splatting, or 3iGS, improves upon 3D Gaussian Splatting (3DGS) rendering quality. Instead of optimising a single outgoing radiance parameter, 3iGS enhances 3DGS view-dependent effects by expressing the outgoing radiance as a function of a local illumination field and Bidirectional Reflectance Distribution Function (BRDF) features. 
We optimise a continuous incident illumination field through a Tensorial Factorisation representation, while separately fine-tuning the BRDF features of each 3D Gaussian relative to this illumination field. Our methodology significantly enhances the rendering quality of specular view-dependent effects of 3DGS, while maintaining rapid training and rendering speeds.", + "bbox": [ + 261, + 320, + 743, + 542 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: Gaussian Splatting $\\cdot$ Neural Radiance Field $\\cdot$ Novel View Synthesis", + "bbox": [ + 261, + 556, + 743, + 584 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 609, + 375, + 626 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D Gaussian Splatting (3DGS) has emerged as the standard method for representing 3D objects and scenes, trained from images, to render photorealistic novel views. Unlike the other popular method of Neural Radiance Field (NeRF) [22], which models a scene as an implicit continuous function, 3DGS represents surfaces with independent 3D Gaussians of different opacities, anisotropic covariances, and spherical harmonic coefficients. To render a pixel's colour, a fast, tile-based rasteriser performs alpha blending of anisotropic Gaussian splats, sorted in accordance with the visibility order.", + "bbox": [ + 212, + 643, + 787, + 763 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Although 3DGS shows promising performance in synthesising novel views of a scene at real-time rendering speeds, its renderings fall short in more challenging scenarios that involve complex, view-dependent surface effects. When observing images with reflective and specular surfaces, the changes in surface colour across viewing angles remain consistent, rather than exhibiting the complex variations", + "bbox": [ + 212, + 763, + 789, + 840 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0f403304dd76c3b34f1f7ff28c666f61dbe5538e0bec348337db4ec1f3e8b8a6.jpg", + "image_caption": [ + "Fig. 1: We present test renderings from the \"Drums\" scene within the blender dataset [22], comparing our technique against Gaussian Splatting (3DGS) [17] and the ground truth (G.T). As the perspective shifts around the scene, the colour of the Floor Tom's top changes from translucent to reflective, showcasing intricate effects that depend on the viewpoint. These effects result from the specular reflection of incoming light and the reflections within the scene from elements like the Cymbals. Contrary to 3DGS, which struggles to capture these complex variations in light reflection, our method, 3iGS, aligns more accurately with the ground truth." + ], + "image_footnote": [], + "bbox": [ + 233, + 141, + 787, + 329 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "in reflections observed in the dataset shown in Fig. 1. A logical solution is to adopt the strategy of Physically Based Rendering (PBR), which involves explicitly modeling the surface characteristics and performing ray marching from surfaces to calculate illumination effects. As part of the process, the Bidirectional Reflectance Distribution Function (BRDF) of surfaces are predicted and a shading function is applied to simulate view-dependent effects [7,10]. 
Nonetheless, accurately determining these physical properties is an ill-posed challenge, making it difficult to infer and model all the intricate rendering effects correctly.", + "bbox": [ + 212, + 486, + 787, + 607 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we draw inspiration from graphics engines that utilise illumination volumes or light probes that summarise illumination information directed towards a surface. These methods compute illumination either directly from the local illumination volume surrounding the surface [12] or from the nearest light probes [28], rather than sampling numerous outward rays from the surface's upper hemisphere. Such approaches allow fast rendering speed at run time, as illumination information is pre-calculated and stored in the volumes or light probes.", + "bbox": [ + 212, + 609, + 787, + 731 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our work, named Factorised Tensorial Illumination for Gaussian Splatting (3iGS), enhances 3DGS rendering quality. We introduce a continuous local illumination field of 3D Gaussians represented by compact factorised tensors for fast evaluation. The means of the 3D Gaussians serve as the input to these factorised tensors to calculate illumination features. Subsequently, each 3D Gaussian is refined through an optimisation of its mean, opacity, anisotropic covariance, diffused colour, and BRDF features. A neural renderer then maps the incident", + "bbox": [ + 212, + 734, + 787, + 839 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "ZJ. Tang, TJ. Cham", + "bbox": [ + 271, + 114, + 410, + 128 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "illumination neural field, Gaussian BRDF attributes, and viewing angle to the Gaussian's specular colour. Overall, our approach represents a Gaussian's outgoing radiance as a function of both a continuous local illumination field and the individual Gaussian's BRDF attributes relative to it. This is opposed to the conventional optimisation of the 3D Gaussians' outgoing radiance in isolation, without accounting for the effects of adjacent Gaussians or scene lighting conditions.", + "bbox": [ + 212, + 146, + 782, + 251 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3iGS significantly enhances the accuracy of 3DGS, offering clear advantages in scenes with reflective surfaces where surface colours change dramatically across viewing angles as shown in Fig. 1. In synthetic datasets, such as the NeRF Blender dataset and the Shiny Blender dataset, 3iGS surpasses 3DGS both quantitatively and qualitatively. Similarly, 3iGS demonstrates superior performance over 3DGS in real-world scenarios on the Tanks and Temples dataset. In summary our technical contributions are:", + "bbox": [ + 212, + 252, + 784, + 357 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. a method to optimise the outgoing radiance as an incident continuous illumination field and Gaussian BRDF features with a neural renderer;", + "2. an approach to model a continuous illumination field with Tensorial Factorisation for compactness and fast evaluation; and", + "3. superior performance in rendering quality over baseline 3D Gaussian Splatting while maintaining real time performance." 
+ ], + "bbox": [ + 222, + 369, + 784, + 460 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 215, + 487, + 387, + 503 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our work falls into the category of learning scene representation from multi-view input images. Here we review prior work on NeRF-based representations and Gaussian splatting. We also discuss other relevant topics pertaining to inverse rendering which aims to recover scene geometry, material properties, and scene lighting conditions in Sec. 2.1.", + "bbox": [ + 212, + 521, + 782, + 595 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Scene Representations for View Synthesis - One of the pioneering neural rendering techniques called Neural Radiance Fields (NeRF) [22] has achieved remarkable results in novel view synthesis from multi-view images. By sampling points along rays traced from the camera into the scene, NeRF reconstructs a scene as a continuous field of outgoing radiance. The technique employs volumetric rendering to determine the colour of each pixel. This method has inspired numerous developments of other scene representations [1,2,22,31,32]. However, the vanilla NeRF, which encodes the entire scene representation into a set of MLPs, requires multiple queries of points along rays during training and inference. This massively slows down the speed required for real time rendering. To address this, other neural scene representation techniques apply hash encoding [19, 23], triplanes or factorised tensors [9, 14], and gridding [3, 11, 27] to accelerate training and inference speeds.", + "bbox": [ + 212, + 598, + 784, + 792 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Tensorial Factorisation - In TensoRF [9], a feature grid can be represented as a 4D tensor of which the first 3 represents the XYZ spatial grid and the last represents the feature channel dimension. 
To model a radiance field with grid", + "bbox": [ + 212, + 795, + 782, + 839 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splatting", + "bbox": [ + 290, + 114, + 730, + 130 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "representation, [9] propose an extension of CANDECOMP/PARAFAC (CP)-Decomposition [8] to Vector-Matrix (VM) decomposition:", + "bbox": [ + 215, + 145, + 782, + 176 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {G} _ {c} = \\sum_ {r = 1} ^ {R _ {c}} \\mathbf {v} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {X}} \\circ \\mathbf {M} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {Y Z}} \\circ \\mathbf {b} _ {\\mathbf {3 r} - \\mathbf {2}} + \\mathbf {v} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {Y}} \\circ \\mathbf {M} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {X Z}} \\circ \\mathbf {b} _ {\\mathbf {3 r} - \\mathbf {1}} + \\mathbf {v} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {Z}} \\circ \\mathbf {M} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {X Y}} \\circ \\mathbf {b} _ {\\mathbf {3 r}} \\\\ = \\sum_ {r = 1} ^ {R _ {c}} \\mathbf {A} _ {C, r} ^ {X} \\circ \\mathbf {b} _ {3 r - 2} + \\mathbf {A} _ {C, r} ^ {Y} \\circ \\mathbf {b} _ {3 r - 1} + \\mathbf {A} _ {C, r} ^ {Z} \\circ \\mathbf {b} _ {3 r} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 235, + 198, + 743, + 282 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In Eq. (1), the inputs $\\mathbf{v}$ and $\\mathbf{M}$ corresponds to XYZ-mode vector and matrix factorisation and $\\mathbf{b}$ denotes the appearance feature mode vectors. Separately, $\\mathcal{G}_c$ and $R_{C}$ refers to the outgoing radiance and the colour feature channels.", + "bbox": [ + 212, + 291, + 782, + 335 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Gaussian Splatting - As opposed to ray marching, 3D Gaussian Splatting is a recent method for rendering scenes via rasterisation. To begin, Gaussians are fitted on a point cloud that are either initialised as a set of random points or bootstrapped with a sparse point cloud produced during the SfM process for free [17]. The Gaussians of the point cloud are defined by a function:", + "bbox": [ + 212, + 337, + 784, + 412 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ng (\\mathbf {x} | \\mu , \\boldsymbol {\\Sigma}) = e ^ {- \\frac {1}{2} (\\mathbf {x} - \\mu) ^ {T} \\boldsymbol {\\Sigma} ^ {- 1} (\\mathbf {x} - \\mu)} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 421, + 784, + 441 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where each point $\\mathbf{x}$ is centered at mean $\\mu \\in \\mathbb{R}^3$ with an anisotropic covariance matrix $\\pmb{\\Sigma} \\in \\mathbb{R}^{3x^3}$ . The mean of a Gaussian is parameterised by the coordinates $\\mu = (\\mu_x, \\mu_y, \\mu_z)$ that is scaled by the full 3D covariance matrix $\\pmb{\\Sigma}$ . As discussed in [17], these Gaussians have no physical meanings, given the difficulty of constraining $\\pmb{\\Sigma}$ to a valid semi-positive definite matrix during the optimisation process. 
Instead, to derive $\\pmb{\\Sigma}$ , a scaling matrix $S$ and a rotation matrix $R$ is learned during the optimisation process to scale the Gaussians:", + "bbox": [ + 212, + 450, + 787, + 555 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\Sigma} = \\mathbf {R} \\mathbf {S} \\mathbf {S} ^ {\\mathrm {T}} \\mathbf {R} ^ {\\mathrm {T}} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 447, + 564, + 784, + 580 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "With a viewing transformation $\\mathbf{W}$ and an affine approximation of the projective transformation $\\mathbf{J}$ , the covariance matrix is then expressed in camera coordinates as:", + "bbox": [ + 212, + 590, + 782, + 632 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\Sigma} ^ {\\prime} = \\mathbf {J} \\mathbf {W} \\boldsymbol {\\Sigma} \\mathbf {W} ^ {\\mathrm {T}} \\mathbf {J} ^ {\\mathrm {T}} \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 433, + 633, + 784, + 651 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Furthermore, each Gaussian is coloured via a set of Spherical Harmonics (SH) coefficients that represent the view dependent colour $c_{i}$ , also known as radiance field, multiplied by its opacity $\\alpha$ . To colour a pixel $u$ as $\\hat{C}$ , alpha blending of $N$ ordered Gaussians is applied:", + "bbox": [ + 212, + 656, + 782, + 715 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\mathbf {C}} = \\sum_ {i \\in N} T _ {i} g _ {i} \\left(\\mathbf {u} \\mid \\mu^ {\\prime}, \\boldsymbol {\\Sigma} ^ {\\prime}\\right) \\alpha_ {i} \\mathbf {c} _ {i}, \\quad T _ {i} = \\prod_ {j = 1} ^ {i - 1} \\left(1 - g _ {i} \\left(\\mathbf {u} \\mid \\mu^ {\\prime}, \\boldsymbol {\\Sigma} ^ {\\prime}\\right) \\alpha_ {i}\\right) \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 299, + 726, + 784, + 767 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.1 Preliminaries", + "text_level": 1, + "bbox": [ + 215, + 786, + 372, + 800 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As discussed, the direct optimisation of spherical harmonics to describe the outgoing radiance in individual Gaussians in 3DGS results in poor view-dependent", + "bbox": [ + 212, + 809, + 782, + 839 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "ZJ. Tang, TJ. Cham", + "bbox": [ + 271, + 114, + 408, + 128 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "effects. A crucial reason is that these Gaussians do not fully model scene properties [13] and thus fail to capture the specular effects which changes drastically across viewing angles.", + "bbox": [ + 212, + 146, + 782, + 191 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Therefore to account for the specular highlights, it is beneficial to model the underlying properties such as the BRDF and illumination effects of the scene. In conventional computer graphics, a rendering equation is commonly applied to simulate effects of specular and diffused shading [15]. For instance, rendering Eq. 
(6) describes an outgoing radiance of a surface point:", + "bbox": [ + 212, + 191, + 784, + 268 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL _ {o} (\\mathbf {x}, \\mathbf {v}) = \\int_ {\\Omega} L _ {i} (\\mathbf {x}, \\mathbf {l}) f _ {r} (\\mathbf {l}, \\mathbf {v}) (\\mathbf {l} \\cdot \\mathbf {n}) d \\mathbf {l}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 369, + 273, + 784, + 305 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The radiance $L_{o}$ emitted from a surface point $\\mathbf{x}$ , when observed from a viewing direction $\\mathbf{v}$ , is defined in Eq. (6). An integral is applied to accumulate the contribution of incident light at an incident angle $\\mathbf{l}$ across the upper hemisphere $\\Omega$ of $\\mathbf{x}$ . The function $f_{r}$ denotes the Bidirectional Radiance Distribution Function (BRDF), describing the reflection characteristics of incident radiance at $\\mathbf{x}$ viewed in direction $\\mathbf{v}$ . Lastly the inclusion of the cosine law with the normal vector $\\mathbf{n}$ ensures the energy conservation.", + "bbox": [ + 212, + 310, + 782, + 414 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "From a signal processing perspective, an alternative to Eq. (6) is expressed more generally in terms of spherical harmonic convolution [16, 21]:", + "bbox": [ + 212, + 415, + 782, + 446 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nB _ {l m} = \\Lambda_ {l} \\rho_ {l} L _ {l m} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 447, + 454, + 784, + 469 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In Eq. (7), $B_{lm}$ defines the outgoing reflected light as the product of BRDF filter $\\rho_l$ , spherical harmonic coefficients of lighting signal $L_{lm}$ , and the normalisation constant $\\varLambda_l$ .", + "bbox": [ + 212, + 474, + 782, + 518 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Some studies [13, 25] enhance 3DGS by expressing BRDF $f_{r}$ as a Cook-Torrance microfacet model [10] or the GGX Trowbridge-Reitz model [7]. In these approaches, physical attributes, including roughness $r$ , albedo $a$ , metallicity $m$ , and the normal vector $\\mathbf{n}$ are predicted and used in Eq. (6). Although these modifications marginally improve rendering quality metrics, they fail to accurately produce high-quality, view-dependent effects. This shortfall primarily stems from relying on estimated parameters for physical rendering within a simplified rendering equation [20]. Furthermore, these parameters are inherently challenging to be estimated accurately, due to the ill-posed nature of inverse rendering from multi-view images. Although numerous works [4-6, 14, 20, 26] also achieved success by exploring a neural representation of the rendering equation, these works either require prior information, such as known lighting conditions or a pre-trained model on a realistic dataset with known BRDF parameters. Furthermore these techniques are experimented with ray tracing based methods like NeRF. A work closest to ours in the area of rasterisation and Gaussian Splitting manner, is GaussianShader [13] which we compare against in Sec. 
5.2.", + "bbox": [ + 212, + 521, + 785, + 763 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 215, + 781, + 330, + 797 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Instead of predicting the physical BRDF properties of materials in the scene, our goal is to express the outgoing radiance of a Gaussian as a more general", + "bbox": [ + 212, + 809, + 782, + 839 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splitting", + "bbox": [ + 290, + 114, + 732, + 130 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/66926acba882902ca5c87775ec218e2fb80260c9e0daf408a4dc528a355dbb09.jpg", + "image_caption": [ + "Fig. 2: A visualisation of 3iGS pipeline to render a single Gaussian's colour. We interpolate an incident illumination $L_{i}$ from the factorised tensorial illumination field $\\mathcal{G}_l$ using a Gaussian mean $\\pmb{x}_i$ as input. A neural network $\\mathcal{F}$ maps the illumination field $L_{i}$ , the Gaussian BRDF features $\\rho_{i}$ , and the viewing direction $\\omega_{o}$ to Gaussian's specular colour $c_{s}$ . Following, the diffused colour $c_{d}$ and specular colour $c_{s}$ are added linearly to produce the final outgoing radiance field $c$ ." + ], + "image_footnote": [], + "bbox": [ + 217, + 146, + 774, + 328 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "expression of BRDF, and the incoming illumination as neural features. This idea is based on a generalized version of Eq. (7), where BRDF features modify an incoming illumination field, without the need for decomposing down to intrinsic material properties [21].", + "bbox": [ + 212, + 450, + 787, + 511 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Specifically for each 3D Gaussian in the scene, the outgoing radiance field is formed by:", + "bbox": [ + 212, + 511, + 785, + 541 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {c} \\left(\\omega_ {o}\\right) = \\mathbf {c} _ {\\mathbf {d}} + \\mathbf {c} _ {\\mathbf {s}} \\left(\\omega_ {o}\\right) \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 426, + 541, + 785, + 558 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For viewing angle $\\omega_{o}$ , a Gaussian is coloured by its constant diffused colour $c_{d}$ and a view dependent specular colour $c_{s}$ . At each Gaussian $i$ , a small neural network $\\mathcal{F}$ maps the Gaussian BRDF features $\\rho_{i}$ and the incoming illumination $L_{i}$ to its specular colour viewed at an angle $\\omega_{o}$ :", + "bbox": [ + 214, + 565, + 787, + 626 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {F}: \\left\\{\\rho_ {i}, L _ {i}, \\omega_ {o} \\right\\} \\mapsto \\mathbf {c} _ {\\mathbf {s}} \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 424, + 638, + 785, + 655 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1 Illumination Grid by Tensorial Factorisation", + "text_level": 1, + "bbox": [ + 214, + 676, + 627, + 693 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our work is largely inspired by conventional computer graphics engines for fast rendering of scene and objects in video games. 
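As a concrete reading of Eqs. (8) and (9), the snippet below sketches the per-Gaussian shading step with a small two-layer MLP. The class name `SpecularHead`, the feature sizes, and the layer widths are illustrative assumptions for exposition, not the released 3iGS architecture.

```python
import torch
import torch.nn as nn

class SpecularHead(nn.Module):
    """Toy neural renderer F of Eq. (9): maps (rho_i, L_i, omega_o) to the specular colour c_s."""
    def __init__(self, brdf_dim=52, illum_dim=27, dir_dim=3, hidden=64):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(brdf_dim + illum_dim + dir_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 3),
            nn.Sigmoid(),  # keep the specular colour in [0, 1]
        )

    def forward(self, rho, illum, view_dir):
        # rho:      (N, brdf_dim)  learnable per-Gaussian BRDF features
        # illum:    (N, illum_dim) interpolated incident illumination features L_i
        # view_dir: (N, dir_dim)   viewing direction omega_o (the paper encodes it with IDE)
        return self.mlp(torch.cat([rho, illum, view_dir], dim=-1))

def shade(c_d, rho, illum, view_dir, head):
    """Eq. (8): outgoing radiance = constant diffuse colour + view-dependent specular colour."""
    return c_d + head(rho, illum, view_dir)
```

The shading is evaluated once per Gaussian before rasterisation, so the alpha blending of Eq. (5) is left unchanged.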
The fundamental rendering equation highlights the role of multi-bounce lighting in achieving indirect illumination, wherein light bounces off one surface to illuminate another. However, the process of ray tracing from each Gaussian surface into the scene is notably resource-intensive, undermining the goal of quick rendering in 3D graphics systems. To facilitate real-time rendering, one strategy involves the use of baking techniques that employ irradiance volumes [12]. This method segments a scene into distinct volumes and pre-calculates irradiance data offline. An alternative", + "bbox": [ + 212, + 704, + 787, + 840 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "ZJ. Tang, TJ. Cham", + "bbox": [ + 271, + 114, + 410, + 128 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "strategy places light probes [28,29] throughout the scene to gather lighting information at specific spatial locations. When rendering the colour of a surface, the system quickly interpolates lighting information from the nearest light probes, ensuring swift rendering times.", + "bbox": [ + 212, + 146, + 782, + 205 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To maintain the fast rendering speed of 3DGS, our work describes a methodology of learning the illumination features of a Gaussian with a continuous grid based illumination field as:", + "bbox": [ + 212, + 207, + 782, + 251 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {G} _ {l}: \\left\\{\\mathbf {x} _ {\\mathbf {i}} \\right\\} \\mapsto L _ {i} \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 450, + 253, + 784, + 268 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Given a Gaussian's mean coordinate $\\mathbf{x_i}$ , we seek to compute an illumination field $L_{i}$ by interpolating from learnable grid representation. The illumination tensors $\\mathcal{G}_l$ is formulated similar to TensoRF [9] by a vector-matrix spatial factorisation as follows:", + "bbox": [ + 214, + 275, + 782, + 335 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {G} _ {l} = \\sum_ {r = 1} ^ {R _ {L}} \\mathbf {A} _ {L, r} ^ {X} \\circ \\mathbf {b} _ {3 r - 2} + \\mathbf {A} _ {L, r} ^ {Y} \\circ \\mathbf {b} _ {3 r - 1} + \\mathbf {A} _ {L, r} ^ {Z} \\circ \\mathbf {b} _ {3 r} \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 321, + 347, + 784, + 386 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In Eq. (11), $R_{L}$ represents the feature channels of the illumination components, $\\mathbf{A}$ as feature tensors and $\\mathbf{b}$ as feature vectors. The illumination feature grid is jointly learned end to end in the optimisation process together with each Gaussian in the scene. Unlike 3DGS, where each Gaussian is optimised independently, the illumination field is modelled as a continuous grid function. A Gaussian mean serves as the input to query from the factorised tensor grid via interpolation. The inclusion of this continuous incoming illumination field directed at each Gaussian is the core component of producing accurate view-dependent effects, as we show in the ablation study of Sec. 5.4. Furthermore, by formulating this field as factorised tensors, it allows the network to achieve fast rendering speed. Our illumination field is coarse, using $87.5\\%$ less voxels compared to TensoRF on synthetic datasets. 
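To illustrate how an illumination feature L_i might be queried from the vector-matrix factorised field of Eqs. (10) and (11), the sketch below bilinearly samples three plane factors and linearly samples three line factors at a Gaussian mean, then projects the concatenated result with stacked feature-mode vectors. The helper names, tensor shapes, and the use of `grid_sample` are expository assumptions rather than the paper's exact implementation.

```python
import torch
import torch.nn.functional as F

def sample_plane(plane, uv):
    # plane: (1, R, H, W) matrix factor (e.g. M^{YZ}); uv: (N, 2) coordinates in [-1, 1]
    out = F.grid_sample(plane, uv.view(1, -1, 1, 2), align_corners=True)  # (1, R, N, 1)
    return out.squeeze(3).squeeze(0).t()                                  # (N, R)

def sample_line(line, w):
    # line: (1, R, L, 1) vector factor (e.g. v^{X}); w: (N,) coordinate in [-1, 1]
    grid = torch.stack([torch.zeros_like(w), w], dim=-1).view(1, -1, 1, 2)
    out = F.grid_sample(line, grid, align_corners=True)                   # (1, R, N, 1)
    return out.squeeze(3).squeeze(0).t()                                  # (N, R)

def query_illumination(x, planes, lines, B):
    """Eq. (11)-style query. x: (N, 3) Gaussian means normalised to [-1, 1];
    planes/lines: the three matrix/vector factors; B: (3R, C) stacked feature-mode
    vectors b. Returns per-Gaussian illumination features L_i of shape (N, C)."""
    ax = sample_line(lines[0], x[:, 0]) * sample_plane(planes[0], x[:, [1, 2]])  # X mode
    ay = sample_line(lines[1], x[:, 1]) * sample_plane(planes[1], x[:, [0, 2]])  # Y mode
    az = sample_line(lines[2], x[:, 2]) * sample_plane(planes[2], x[:, [0, 1]])  # Z mode
    return torch.cat([ax, ay, az], dim=-1) @ B
```

Because the field is stored as low-rank planes and lines rather than a dense 3D grid, both the memory footprint and the per-Gaussian query cost remain small.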
This compact representation is also low in memory footprint compared to the number of optimised Gaussians, which is often a magnitude order or more higher. We refer readers to [9], which provides a comprehensive overview to describe how the tensors are factorised and interpolated.", + "bbox": [ + 212, + 398, + 787, + 625 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.2 3D Gaussian Features", + "text_level": 1, + "bbox": [ + 214, + 647, + 442, + 662 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In 3DGS [17], Gaussians are optimised with a set of parameters: 3D positions, opacity $\\alpha$ , anisotropic covariance, and spherical harmonics coefficients. In our work, instead of optimising spherical harmonics as an outgoing radiance, 3iGS characterises the Gaussians with a diffused colour and learnable BRDF features. Unlike [13, 25], we do not strictly enforce physically interpretable properties commonly used in shading techniques. Aforementioned, these techniques are often simplified, too ill-posed to be decomposed individually, and insufficient to encompass all complex rendering effects [20]. Rather, we loosely follow Eq. (7) and treat BRDF feature components as a set of weights that alter the incoming illumination field. Given a continuous illumination field obtained from Eq. (11), a Gaussian's BRDF is conditionally optimised against it. This is in contrast", + "bbox": [ + 212, + 672, + 787, + 840 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splitting", + "bbox": [ + 290, + 114, + 732, + 130 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "to 3DGS where the Gaussians' outgoing radiance are individually optimised without modelling the interdependencies that should arise from a shared scene illumination, resulting in detrimental view-dependent effects.", + "bbox": [ + 212, + 146, + 787, + 191 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.3 Shading Gaussians", + "text_level": 1, + "bbox": [ + 214, + 214, + 416, + 229 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Following Eq. (9), we shade each Gaussian by mapping its viewing directions encoded with Integrated Directional Encoding (IDE) [30], Gaussian features (obtained in Sec. 3.2), and its illumination field, to the specular colour output. We linearly add the diffused and specular colours to create its radiance field as per Eq. (8). To render the final scene, we follow the rasterisation pipeline proposed in the original 3DGS work.", + "bbox": [ + 212, + 239, + 787, + 330 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 Optimisation", + "text_level": 1, + "bbox": [ + 214, + 354, + 380, + 372 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In the previous Sec. 3, we described the necessary components to model a scene with Gaussians and render it via rasterisation. To improve the stability of training and to enhance the final rendering quality, we first train the model with the diffused colour in the first 3,000 iterations. Following, specular colours are added to the Gaussians as in Eq. (8).", + "bbox": [ + 212, + 387, + 785, + 462 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "While training the tensorial illumination grid, an initial boundary which encapsulate the scene bounding box is defined. 
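To summarise the per-Gaussian parameter set that Sec. 3.2 optimises in place of an outgoing spherical-harmonic radiance, a schematic layout is sketched below. The field names and the feature size K are illustrative assumptions, not the released data structure.

```python
from dataclasses import dataclass
import torch

@dataclass
class Gaussian3iGS:
    """Illustrative per-Gaussian parameters in 3iGS (cf. Sec. 3.2)."""
    mean: torch.Tensor      # (3,) position mu; also the query point for the illumination grid
    rotation: torch.Tensor  # (4,) quaternion giving the rotation matrix R of Eq. (3)
    scale: torch.Tensor     # (3,) diagonal of the scaling matrix S of Eq. (3)
    opacity: torch.Tensor   # (1,) alpha used in the blending of Eq. (5)
    diffuse: torch.Tensor   # (3,) view-independent colour c_d of Eq. (8)
    brdf: torch.Tensor      # (K,) learnable BRDF features rho; Sec. 5.2 repurposes the 16x3 SH channels for these
```

Everything except `diffuse` and `brdf` follows the original 3DGS parameterisation; only the spherical-harmonic radiance is replaced.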
Midway through training, we shrink the illumination grid to fit the Gaussians and resample the grid with the same number of voxels. We adopt the same adaptive control of Gaussians of 3DGS [17] to limit the number of Gaussians and the units per volume. We propose to train our model with the same loss function as 3DGS for a fair evaluation:", + "bbox": [ + 212, + 463, + 787, + 566 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = (1 - \\lambda) \\mathcal {L} _ {1} + \\lambda \\mathcal {L} _ {\\mathrm {D} - \\mathrm {S S I M}} \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 569, + 785, + 585 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where we combined the $\\mathcal{L}_1$ term with a D-SSIM term with $\\lambda$ set to 0.2.", + "bbox": [ + 214, + 590, + 730, + 606 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5 Experiments and Results", + "text_level": 1, + "bbox": [ + 214, + 631, + 496, + 648 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.1 Datasets", + "text_level": 1, + "bbox": [ + 214, + 662, + 336, + 676 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Synthetic scenes - We show experimental results of 3iGS based on the Blender dataset released in [22]. This dataset contains challenging scenes of complex geometries with realistic non-Lambertian materials. Similarly, we evaluate our model on the Shiny Blender dataset presented in [30]. Unlike the Blender dataset, Shiny Blender contains a singular object with simple geometries in each scene with more glossy effects.", + "bbox": [ + 212, + 688, + 787, + 779 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Real world complex scenes - To prove the effectiveness of our model in real world scenes, we evaluate our renderings on the Tanks and Temples dataset [18]. This dataset is obtained from video sequences of real world objects and environment.", + "bbox": [ + 214, + 779, + 787, + 839 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "ZJ. Tang, TJ. Cham", + "bbox": [ + 271, + 114, + 408, + 128 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.2 Comparisons", + "text_level": 1, + "bbox": [ + 215, + 146, + 370, + 161 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To evaluate our model, we compared against methods that apply both ray-tracing methods like NeRF, or rasterisation methods with Gaussian Splatting. Out of all prior work, 3DGS and GaussianShader is the closest work which offers real time inference speed which we will mainly compare against. On comparing the qualitative result figures, we re-ran the experiments of 3DGS [17] and GaussianShader [13] using their original repository code under settings specified by the authors. Ray-Tracing Methods such as [20,22,30] represent a scene as a radiance field using MLPs. By performing multiple samplings on rays marched from the camera into the scene, the sampled points are queried with MLP to obtain the opacity and radiance values. Volume rendering is performed to obtain the final pixel colour. Rasterisation Methods such as Gaussian Splatting (3DGS) [17] and GaussianShader [13] apply a rasterisation pipeline as opposed to ray tracing methods. These models represents a scene as Gaussians with radiance properties based on Spherical Harmonics. 
In, [13], 3DGS is extended by modelling a scene with additional material characteristics and a shading function is applied, as opposed to ours which uses an MLP as neural renderer. Furthermore, [13] shades Gaussians with a global differentiable environment light stored in cube maps, and optimises independent Gaussians with spherical harmonic-based color for unaccounted illumination. In our work, we represent incident illumination locally with grid-based tensors and optimise Gaussian BRDF features relative to this field.", + "bbox": [ + 212, + 171, + 787, + 487 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "For a fair comparison, 3iGS is trained with the same loss function as 3DGS as described in Sec. 4 and the same number of iterations of 30,000 steps. We repurposed the $16 \\times 3$ SH coefficients in 3DGS as BRDF feature channels and added 4 additional parameters of base colour and roughness for IDE view-directional encoding. The tensorial illumination field is set at a coarse resolution size of $150^{3}$ voxels.", + "bbox": [ + 212, + 488, + 787, + 580 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/8ce7991025814077ad57694b81dacd1ef8d5c33fe53f0afd139ae6e5d478ad4d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 272, + 602, + 385, + 654 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/8116bb6874e0729fb74de9f0e54aba1ba206c1fa2f1ae40a612f86ff2b3a7acf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 387, + 602, + 501, + 652 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/79f8cb7a84722ad8eb637b31f071f3efceb07cbc03aa12dbb3a95131eea56e4e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 602, + 612, + 652 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/56ba062ea5895beeba6c80f78ff50f889696d1a18399ede7980b1fc8cdba65e3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 614, + 602, + 728, + 652 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/b3dc5171bbf61f0175eb762d82e642437a838f3f710262ecda4503158f3d9d20.jpg", + "image_caption": [ + "3DGS" + ], + "image_footnote": [], + "bbox": [ + 272, + 656, + 380, + 705 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/bc7f0d57516468cc3a72d643e0a2e177f5ec85b9e73b9f702be2d0a094a632f2.jpg", + "image_caption": [ + "G.Shader", + "Fig. 3: Comparisons of test-set views of real world scenes. 3iGS enhances 3DGS renderings by producing clearer view dependent effects as shown." 
+ ], + "image_footnote": [], + "bbox": [ + 387, + 656, + 500, + 705 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/30920ddd5356e96e1cae80fbd12b008f55b92f1e9abf323b713b9dc4e7a7ceb0.jpg", + "image_caption": [ + "Ours" + ], + "image_footnote": [], + "bbox": [ + 500, + 656, + 609, + 705 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/8a1855c886cba3ea446e7412cf9d05c3ad1660491f57e037471de92349fcaa68.jpg", + "image_caption": [ + "Ground Truth" + ], + "image_footnote": [], + "bbox": [ + 616, + 656, + 725, + 705 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splitting", + "bbox": [ + 290, + 114, + 732, + 130 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/c96b59b0ab579676aba2db33b57eaaf8e4bdd61609d56a7721c436c3fe12c278.jpg", + "table_caption": [ + "Table 1: Our approach demonstrates superior quantitative performance over current methods when tested on Synthetic Datasets. Specifically, within the NeRF Synthetic dataset, our method surpasses all competitors across various image quality assessments (PSNR/SSIM/LPIPS). In the context of the Shiny Blender dataset, 3iGS matches the performance of existing rasterization techniques in terms of PSNR and SSIM but surpasses them in LPIPS for the majority of scenes. We encourage readers to examine the accompanying figure showcasing renderings of the Shiny Blender scene, where our method attains enhanced qualitative outcomes. Best results, benchmarked across real time rendering methods, are in bold." + ], + "table_footnote": [], + "table_body": "
NeRF Synthetic [22]
Method | Chair | Drums | Lego | Mic | Mats. | Ship | Hotdog | Ficus | Avg.
PSNR↑
NeRF [22] | 33.00 | 25.01 | 32.54 | 32.91 | 29.62 | 28.65 | 36.18 | 30.13 | 31.01
Ref-NeRF [30] | 33.98 | 25.43 | 35.10 | 33.65 | 27.10 | 29.24 | 37.04 | 28.74 | 31.29
ENVIDR [20] | 31.22 | 22.99 | 29.55 | 32.17 | 29.52 | 21.57 | 31.44 | 26.60 | 28.13
3DGS [17] | 35.82 | 26.17 | 35.69 | 35.34 | 30.00 | 30.87 | 37.67 | 34.83 | 33.30
G.Shader [13] | 35.83 | 26.36 | 35.87 | 35.23 | 30.07 | 30.82 | 37.85 | 34.97 | 33.38
G.Shader (reproduced) [13] | 33.70 | 25.50 | 32.99 | 34.07 | 28.87 | 28.37 | 35.29 | 33.05 | 31.48
Ours | 35.90 | 26.75 | 35.94 | 36.01 | 30.00 | 31.12 | 37.98 | 35.40 | 33.64
SSIM↑
NeRF [22] | 0.967 | 0.925 | 0.961 | 0.980 | 0.949 | 0.856 | 0.974 | 0.964 | 0.947
Ref-NeRF [30] | 0.974 | 0.929 | 0.975 | 0.983 | 0.921 | 0.864 | 0.979 | 0.954 | 0.947
ENVIDR [20] | 0.976 | 0.930 | 0.961 | 0.984 | 0.968 | 0.855 | 0.963 | 0.987 | 0.956
3DGS [17] | 0.987 | 0.954 | 0.983 | 0.991 | 0.960 | 0.907 | 0.985 | 0.987 | 0.969
G.Shader [13] | 0.987 | 0.949 | 0.983 | 0.991 | 0.960 | 0.905 | 0.985 | 0.985 | 0.968
G.Shader (reproduced) [13] | 0.980 | 0.945 | 0.972 | 0.989 | 0.951 | 0.881 | 0.980 | 0.982 | 0.960
Ours | 0.987 | 0.955 | 0.983 | 0.992 | 0.961 | 0.908 | 0.986 | 0.989 | 0.970
LPIPS↓
NeRF [22] | 0.046 | 0.091 | 0.050 | 0.028 | 0.063 | 0.206 | 0.121 | 0.044 | 0.081
Ref-NeRF [30] | 0.029 | 0.073 | 0.025 | 0.018 | 0.078 | 0.158 | 0.028 | 0.056 | 0.058
ENVIDR [20] | 0.031 | 0.080 | 0.054 | 0.021 | 0.045 | 0.228 | 0.072 | 0.010 | 0.067
3DGS [17] | 0.012 | 0.037 | 0.016 | 0.006 | 0.034 | 0.106 | 0.020 | 0.012 | 0.030
G.Shader [13] | 0.012 | 0.040 | 0.014 | 0.006 | 0.033 | 0.098 | 0.019 | 0.013 | 0.029
G.Shader (reproduced) [13] | 0.019 | 0.045 | 0.026 | 0.009 | 0.046 | 0.148 | 0.029 | 0.017 | 0.042
Ours | 0.012 | 0.036 | 0.015 | 0.005 | 0.034 | 0.102 | 0.019 | 0.010 | 0.029
Shiny Blender [30]
Method | Car | Ball | Helmet | Teapot | Toaster | Coffee | Avg.
PSNR↑
NVDiffRec [24] | 27.98 | 21.77 | 26.97 | 40.44 | 24.31 | 30.74 | 28.70
Ref-NeRF [30] | 30.41 | 29.14 | 29.92 | 45.19 | 25.29 | 33.99 | 32.32
ENVIDR [20] | 28.46 | 38.89 | 32.73 | 41.59 | 26.11 | 29.48 | 32.88
3DGS [17] | 27.24 | 27.69 | 28.32 | 45.68 | 20.99 | 32.32 | 30.37
G.Shader [13] | 27.90 | 30.98 | 28.32 | 45.86 | 26.21 | 32.39 | 31.94
G.Shader (reproduced) [13] | 27.51 | 29.02 | 28.73 | 43.05 | 22.86 | 31.34 | 30.41
Ours | 27.51 | 27.64 | 28.21 | 46.04 | 22.69 | 32.58 | 30.77
SSIM↑
NVDiffRec [24] | 0.963 | 0.858 | 0.951 | 0.996 | 0.928 | 0.973 | 0.945
Ref-NeRF [30] | 0.949 | 0.956 | 0.955 | 0.995 | 0.910 | 0.972 | 0.956
ENVIDR [20] | 0.961 | 0.991 | 0.980 | 0.996 | 0.939 | 0.949 | 0.969
3DGS [17] | 0.930 | 0.937 | 0.951 | 0.996 | 0.895 | 0.971 | 0.947
G.Shader [13] | 0.931 | 0.965 | 0.950 | 0.996 | 0.929 | 0.971 | 0.957
G.Shader (reproduced) [13] | 0.930 | 0.954 | 0.955 | 0.995 | 0.900 | 0.969 | 0.950
Ours | 0.930 | 0.938 | 0.951 | 0.997 | 0.908 | 0.973 | 0.949
LPIPS↓
NVDiffRec [24] | 0.045 | 0.297 | 0.118 | 0.011 | 0.169 | 0.076 | 0.119
Ref-NeRF [30] | 0.051 | 0.307 | 0.087 | 0.013 | 0.118 | 0.082 | 0.109
ENVIDR [20] | 0.049 | 0.067 | 0.051 | 0.011 | 0.116 | 0.139 | 0.072
3DGS [17] | 0.047 | 0.161 | 0.079 | 0.007 | 0.126 | 0.078 | 0.083
G.Shader [13] | 0.045 | 0.121 | 0.076 | 0.007 | 0.079 | 0.078 | 0.068
G.Shader (reproduced) [13] | 0.045 | 0.148 | 0.088 | 0.012 | 0.111 | 0.085 | 0.099
Ours | 0.045 | 0.156 | 0.073 | 0.006 | 0.099 | 0.076 | 0.075
", + "bbox": [ + 218, + 290, + 782, + 828 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "ZJ. Tang, TJ. Cham", + "bbox": [ + 271, + 114, + 408, + 128 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/96510cdb9df4a04f8a975529066e0e5d8b5e3ab590ff0943ead053b00ec4af6a.jpg", + "image_caption": [ + "Fig. 4: In evaluating test-set views from the Shiny Blender dataset, we compared the performance of 3DGS [17], GaussianShader [13], and our work 3iGS. The standard 3DGS method generally yields the least satisfactory renderings, with images often appearing blurry in areas of specular reflection. GaussianShader shows a slight improvement by incorporating the GGX BRDF model, leading to marginally better results in rendering specular regions. In contrast, 3iGS stands out by employing a general rendering function that predicts neural features of illumination field and BRDF instead of relying on physical parameters. This approach allows 3iGS to surpass existing methods significantly, capturing the intricate details within specular highlights with remarkable precision." + ], + "image_footnote": [], + "bbox": [ + 272, + 157, + 746, + 670 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splitting", + "bbox": [ + 290, + 114, + 732, + 128 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 114, + 782, + 126 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/f9b8852e7add543394a02e62c58ce73a6eb67eb4b859d24e22f2d74cb6906841.jpg", + "table_caption": [ + "Table 2: A quantitative comparisons (PSNR / SSIM / LPIPS) between 3DGS [17], GaussianShader [13], and our method on real world scenarios on Tanks and Temples Dataset [18]" + ], + "table_footnote": [], + "table_body": "
Tanks and Temples Dataset [18]
Method | Barn | Caterpillar | Family | Ignatius | Truck | Avg.
PSNR↑
3DGS [17] | 29.13 | 26.17 | 34.88 | 29.50 | 28.38 | 29.61
G.Shader (reproduced) [13] | 27.67 | 25.23 | 33.52 | 28.28 | 27.61 | 28.46
Ours | 29.73 | 27.04 | 35.36 | 30.04 | 28.82 | 30.20
SSIM↑
3DGS [17] | 0.920 | 0.932 | 0.982 | 0.973 | 0.945 | 0.950
G.Shader (reproduced) [13] | 0.897 | 0.915 | 0.977 | 0.968 | 0.935 | 0.938
Ours | 0.923 | 0.938 | 0.983 | 0.974 | 0.947 | 0.953
LPIPS↓
3DGS [17] | 0.113 | 0.074 | 0.023 | 0.032 | 0.059 | 0.060
G.Shader (reproduced) [13] | 0.147 | 0.098 | 0.029 | 0.039 | 0.071 | 0.077
Ours | 0.112 | 0.071 | 0.022 | 0.031 | 0.057 | 0.058
", + "bbox": [ + 218, + 207, + 784, + 375 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5.3 Discussion", + "text_level": 1, + "bbox": [ + 215, + 431, + 349, + 446 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In the comparisons detailed in Sec. 5.2, 3iGS demonstrates superior performance over the established baselines, delivering both quantitatively and qualitatively enhanced renderings in a majority of test cases on real time rendering rasterisation approaches. In the NeRF Synthetic dataset, 3iGS surpasses the prior 3DGS and GaussianShader. Although GaussianShader reportedly performs slightly better on the Shiny Blender dataset, we have included both reported and reproduced results based on the official code repository from the authors. We postulate that the Shiny Blender dataset scenes, which comprise single objects only, presents simpler geometries which facilitates an easier recovery of intrinsic material properties essential for rendering view-dependent effects. In addition, specular reflections in this dataset is primarily dominated by direct illumination from an external environment map. Thus GaussianShader which models direct lighting with a differentiable environment cube map performs well. However, when presented with a complex scene containing multiple objects, such as the NeRF Synthetic dataset shown in Fig. 1 with its intricate intra-scene interactions, GaussianShader struggles to accurately recover the physical rendering parameters. Furthermore these lighting scenarios are more complex due to indirect lighting. Therefore jointly modelling direct and indirect lighting using a continuous local incident field is crucial. NeRF based approaches reported above present competitive results. Yet, such methods are extremely slow to train, often requiring days, and are unable to perform real-time rendering needed for interactive applications.", + "bbox": [ + 212, + 460, + 787, + 792 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Comparing across all methodologies, our 3iGS method presents an attractive and pragmatic alternative to achieve excellent rendering quality while balancing rendering speed, as discussed in Sec. 5.4.", + "bbox": [ + 212, + 794, + 785, + 839 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "ZJ. Tang, TJ. Cham", + "bbox": [ + 271, + 114, + 408, + 128 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/1d015a307ed8d70196e68321e2845c04f15d6b6457fc6df4bea45d20dbad5a5a.jpg", + "image_caption": [ + "Fig. 5: In contrast to 3DGS [17] and GaussianShader [13], our 3iGS method uniquely identifies both the golden specular highlights and the reflections on the Medium Tom as seen in the plastic surface of the Floor Tom (top row). Our approach successfully captures the detailed specular highlights on every cymbal within the drum setup from the Blender dataset, as presented in [22]." 
+ ], + "image_footnote": [], + "bbox": [ + 243, + 143, + 406, + 297 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/d4f630b34d2a3017bd0d5e9c88bfc7ae22160bbe7ba4e155a34202af239a452a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 410, + 143, + 596, + 306 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/e90cd63d4a7140ffc259d6e11ca6c9411103f4d71f9d90d8e03f98e062cc6716.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 598, + 143, + 787, + 306 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5.4 Ablation Studies", + "text_level": 1, + "bbox": [ + 215, + 412, + 401, + 426 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/12eca1b3f281f5d531dc2f52b26b8a9ea273522007aebc501fab3ebf891f4be7.jpg", + "table_caption": [ + "Table 3: An ablation study of our model on the Blender synthetic dataset. We experiment 3iGS under a variety of model parameters. In the first row, we directly an outgoing radiance field similar to NeRF based methods. The second row omits the prediction of a BRDF roughness parameter which encodes the viewing direction as IDE. Both experimental results are inferior compared to our complete model." + ], + "table_footnote": [], + "table_body": "
Variant | PSNR | SSIM | LPIPS
Ours (outgoing radiance field) | 32.38 | 0.965 | 0.035
Ours (no roughness parameter, i.e. no IDE) | 33.26 | 0.967 | 0.031
Ours (complete model) | 33.64 | 0.970 | 0.029
", + "bbox": [ + 295, + 539, + 707, + 599 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In Tab. 3, we study the effectiveness of our design choices and parameters for 3iGS. In the first row, we use the Gaussian mean and interpolate features from the factorised tensors and predict the outgoing specular colours directly. In this scenario, we predict the outgoing radiance field similar to a NeRF like manner for specular colours. In the second row, we abandon the BRDF roughness parameters from the Gaussian features and apply a standard Fourier positional encoding of viewing direction. Both cases led to inferior renderings as compared to our complete model.", + "bbox": [ + 212, + 628, + 784, + 747 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In Tab. 4, we illustrate the training and rendering speed (test) of 3iGS against 3DGS and GaussianShader. We normalise the speed based on 3DGS. Our model performs competitively and achieve real time rendering speed although it is slower than 3DGS whereas GaussianShader performs much slower than the vanilla model. We attribute the efficient rendering speed to the use of factorised tensors for the illumination field.", + "bbox": [ + 212, + 750, + 787, + 839 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splitting", + "bbox": [ + 290, + 114, + 732, + 128 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/0f597eb59f61bec678f8ce8242b2c13e158cf0f4bc152f138359e179f2e2339e.jpg", + "table_caption": [ + "Table 4: We evaluate the test and train speed of 3DGS [17] and GaussianShader [13] on a single Tesla V100 32Gb VRAM GPU with the original codebase and settings advocated by the authors. We then report the results normalised with these rendering speed of 3DGS." + ], + "table_footnote": [], + "table_body": "
Method | Test | Train
3DGS | 1.0x | 1.0x
GaussianShader | 6.3x slower | 12.1x slower
Ours | 2.0x slower | 3.2x slower
", + "bbox": [ + 362, + 213, + 638, + 271 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "6 Limitations and Weaknesses", + "text_level": 1, + "bbox": [ + 215, + 297, + 524, + 314 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "3iGS inherits the main challenges of factorised tensors as [9]. Our model is limited to scenes that fit within a defined bounding box. Future works could explore this direction in warping unbounded scenes to fit a tensorial grid representation. Furthermore, 3iGS inherits the weaknesses of 3DGS; a large VRAM GPU is necessary to fit 3D Gaussians, and to evaluate the illumination field. A straightforward workaround is to reduce the number of Gaussians created by adding an upper bound on the number of produced Gaussians in the adaptive control step. Our work also inherits 3DGS's difficulty in producing accurate scene geometry.", + "bbox": [ + 212, + 330, + 787, + 454 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "7 Conclusion", + "text_level": 1, + "bbox": [ + 215, + 474, + 359, + 491 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We introduce our work, Factorised Tensorial Illumination for 3D Gaussian Splatting (3iGS), to enhance the view-dependent effects in rendering Gaussian radiance fields. Our approach overcomes the constraints of previous methods, which relied on optimising an outgoing radiance field of independent Gaussians with Spherical Harmonics (SH) parameters. We illustrate that superior view-dependent effects in 3DGS can be attained by depicting an outgoing radiance field as a continuous illumination field and the Gaussian's BRDF characteristics in relation to this field. Distinct from other methods depending on oversimplified yet restrictive rendering equations that require prediction of physical attributes of scene surfaces for shading, our methodology proves to be more efficacious. Furthermore, we have shown that fast rendering speeds are attainable through the representation of an illumination field with factorised tensors. We demonstrated our claims across diverse datasets, from synthetic to real-world environments, and compared against prior art on both quantitative and qualitative metrics. We also evaluate the effectiveness of our model parameters and design choices through an ablation study. Finally we acknowledge the limitations of our research as a catalyst for future investigative directions. Our code is released here. Acknowledgement This study is supported under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding initiative, as well as cash and in-kind collaboration from the industry partner(s). The computational work for this article was partially performed on resources of the National Supercomputing Centre, Singapore.", + "bbox": [ + 212, + 507, + 787, + 843 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "ZJ. Tang, TJ. Cham", + "bbox": [ + 271, + 114, + 408, + 128 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 215, + 143, + 321, + 159 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Barron, J.T., Mildenhall, B., Tancik, M., Hedman, P., Martin-Brualla, R., Srinivasan, P.P.: Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. 
pp. 5855-5864 (2021)", + "2. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mipnerf 360: Unbounded anti-aliased neural radiance fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5470-5479 (2022)", + "3. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Zip-nerf: Anti-aliased grid-based neural radiance fields. arXiv preprint arXiv:2304.06706 (2023)", + "4. Bi, S., Xu, Z., Srinivasan, P., Mildenhall, B., Sunkavalli, K., Hasan, M., Hold-Geoffroy, Y., Kriegman, D., Ramamoorthi, R.: Neural reflectance fields for appearance acquisition. arXiv preprint arXiv:2008.03824 (2020)", + "5. Boss, M., Braun, R., Jampani, V., Barron, J.T., Liu, C., Lensch, H.: Nerd: Neural reflectance decomposition from image collections. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 12684-12694 (2021)", + "6. Boss, M., Jampani, V., Braun, R., Liu, C., Barron, J., Lensch, H.: Neural-pil: Neural pre-integrated lighting for reflectance decomposition. Advances in Neural Information Processing Systems 34, 10691-10704 (2021)", + "7. Burley, B., Studios, W.D.A.: Physically-based shading at disney. In: Acm Siggraph. vol. 2012, pp. 1-7. vol. 2012 (2012)", + "8. Carroll, J.D., Chang, J.J.: Analysis of individual differences in multidimensional scaling via an n-way generalization of \"eckart-young\" decomposition. Psychometrika 35(3), 283-319 (1970)", + "9. Chen, A., Xu, Z., Geiger, A., Yu, J., Su, H.: Tensorf: Tensorial radiance fields. In: European Conference on Computer Vision. pp. 333-350. Springer (2022)", + "0. Cook, R.L., Torrance, K.E.: A reflectance model for computer graphics. ACM Transactions on Graphics (ToG) 1(1), 7-24 (1982)", + "1. Fridovich-Keil, S., Yu, A., Tancik, M., Chen, Q., Recht, B., Kanazawa, A.: Plenoxels: Radiance fields without neural networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5501-5510 (2022)", + "2. Greger, G., Shirley, P., Hubbard, P.M., Greenberg, D.P.: The irradiance volume. IEEE Computer Graphics and Applications 18(2), 32-43 (1998)", + "3. Jiang, Y., Tu, J., Liu, Y., Gao, X., Long, X., Wang, W., Ma, Y.: Gaussianshader: 3d gaussian splatting with shading functions for reflective surfaces (2023)", + "4. Jin, H., Liu, I., Xu, P., Zhang, X., Han, S., Bi, S., Zhou, X., Xu, Z., Su, H.: Tensoroir: Tensorial inverse rendering. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 165-174 (2023)", + "5. Kajiya, J.T.: The rendering equation. In: Proceedings of the 13th annual conference on Computer graphics and interactive techniques. pp. 143-150 (1986)", + "6. Kautz, J., Snyder, J., Sloan, P.P.J.: Fast arbitrary brdf shading for low-frequency lighting using spherical harmonics. Rendering Techniques 2(291-296), 1 (2002)", + "7. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (ToG) 42(4), 1-14 (2023)", + "8. Knapitsch, A., Park, J., Zhou, Q.Y., Koltun, V.: Tanks and temples: Benchmarking large-scale scene reconstruction. 
ACM Transactions on Graphics 36(4) (2017)" + ], + "bbox": [ + 225, + 176, + 785, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splitting", + "bbox": [ + 290, + 114, + 730, + 128 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "19. Li, Z., Müller, T., Evans, A., Taylor, R.H., Unberath, M., Liu, M.Y., Lin, C.H.: Neuralangelo: High-fidelity neural surface reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8456-8465 (2023)", + "20. Liang, R., Chen, H., Li, C., Chen, F., Panneer, S., Vijaykumar, N.: Envidr: Implicit differentiable renderer with neural environment lighting. arXiv preprint arXiv:2303.13022 (2023)", + "21. Mahajan, D., Ramamoorthi, R., Curless, B.: A theory of frequency domain invariants: Spherical harmonic identities for brdf/lighting transfer and image consistency. IEEE transactions on pattern analysis and machine intelligence 30(2), 197-213 (2007)", + "22. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. In: European Conference on Computer Vision. pp. 405-421 (2020)", + "23. Müller, T., Evans, A., Schied, C., Keller, A.: Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG) 41(4), 1-15 (2022)", + "24. Munkberg, J., Hasselgren, J., Shen, T., Gao, J., Chen, W., Evans, A., Müller, T., Fidler, S.: Extracting triangular 3d models, materials, and lighting from images. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8280-8290 (2022)", + "25. Shi, Y., Wu, Y., Wu, C., Liu, X., Zhao, C., Feng, H., Liu, J., Zhang, L., Zhang, J., Zhou, B., Ding, E., Wang, J.: Gir: 3d gaussian inverse rendering for relightable scene factorization (2023)", + "26. Srinivasan, P.P., Deng, B., Zhang, X., Tancik, M., Mildenhall, B., Barron, J.T.: Nerv: Neural reflectance and visibility fields for relighting and view synthesis. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7495-7504 (2021)", + "27. Sun, C., Sun, M., Chen, H.T.: Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5459-5469 (2022)", + "28. Technologies, U.: Light probes, https://docsunity3d.com/Manual/LightProbes.html", + "29. Technologies, U.: Reflection probe, https://docs.unity3d.com/Manual/class-ReflectionProbe.html", + "30. Verbin, D., Hedman, P., Mildenhall, B., Zickler, T., Barron, J.T., Srinivasan, P.P.: Ref-nerf: Structured view-dependent appearance for neural radiance fields. In: 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 5481-5490. IEEE (2022)", + "31. Wang, P., Liu, L., Liu, Y., Theobalt, C., Komura, T., Wang, W.: Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689 (2021)", + "32. Zhang, K., Riegler, G., Snavely, N., Koltun, V.: Nerf++: Analyzing and improving neural radiance fields. 
arXiv preprint arXiv:2010.07492 (2020)" + ], + "bbox": [ + 215, + 146, + 785, + 756 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "ZJ. Tang, TJ. Cham", + "bbox": [ + 271, + 114, + 408, + 127 + ], + "page_idx": 15 + } +] \ No newline at end of file diff --git a/2024/3iGS_ Factorised Tensorial Illumination for 3D Gaussian Splatting/25df5a9d-fc43-4ff8-b4e4-a2a9b9e269ba_model.json b/2024/3iGS_ Factorised Tensorial Illumination for 3D Gaussian Splatting/25df5a9d-fc43-4ff8-b4e4-a2a9b9e269ba_model.json new file mode 100644 index 0000000000000000000000000000000000000000..e6c2de51bc5c13b8ccf277bdd6004bf06593a792 --- /dev/null +++ b/2024/3iGS_ Factorised Tensorial Illumination for 3D Gaussian Splatting/25df5a9d-fc43-4ff8-b4e4-a2a9b9e269ba_model.json @@ -0,0 +1,2014 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.238, + 0.141, + 0.768, + 0.187 + ], + "angle": 0, + "content": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splatting" + }, + { + "type": "text", + "bbox": [ + 0.358, + 0.213, + 0.646, + 0.228 + ], + "angle": 0, + "content": "Zhe Jun Tang1 and Tat-Jen Cham2" + }, + { + "type": "text", + "bbox": [ + 0.25, + 0.24, + 0.754, + 0.284 + ], + "angle": 0, + "content": "\\(^{1}\\) S-Lab, Nanyang Technological University \n\\(^{2}\\) College of Computing & Data Science, Nanyang Technological University {zhejun001} at {e.ntu.edu.sg}" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.321, + 0.744, + 0.543 + ], + "angle": 0, + "content": "Abstract. The use of 3D Gaussians as representation of radiance fields has enabled high quality novel view synthesis at real-time rendering speed. However, the choice of optimising the outgoing radiance of each Gaussian independently as spherical harmonics results in unsatisfactory view dependent effects. In response to these limitations, our work, Factorised Tensorial Illumination for 3D Gaussian Splatting, or 3iGS, improves upon 3D Gaussian Splatting (3DGS) rendering quality. Instead of optimising a single outgoing radiance parameter, 3iGS enhances 3DGS view-dependent effects by expressing the outgoing radiance as a function of a local illumination field and Bidirectional Reflectance Distribution Function (BRDF) features. We optimise a continuous incident illumination field through a Tensorial Factorisation representation, while separately fine-tuning the BRDF features of each 3D Gaussian relative to this illumination field. Our methodology significantly enhances the rendering quality of specular view-dependent effects of 3DGS, while maintaining rapid training and rendering speeds." + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.557, + 0.744, + 0.585 + ], + "angle": 0, + "content": "Keywords: Gaussian Splatting \\(\\cdot\\) Neural Radiance Field \\(\\cdot\\) Novel View Synthesis" + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.611, + 0.377, + 0.627 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.644, + 0.788, + 0.764 + ], + "angle": 0, + "content": "3D Gaussian Splatting (3DGS) has emerged as the standard method for representing 3D objects and scenes, trained from images, to render photorealistic novel views. 
Unlike the other popular method of Neural Radiance Field (NeRF) [22], which models a scene as an implicit continuous function, 3DGS represents surfaces with independent 3D Gaussians of different opacities, anisotropic covariances, and spherical harmonic coefficients. To render a pixel's colour, a fast, tile-based rasteriser performs alpha blending of anisotropic Gaussian splats, sorted in accordance with the visibility order." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.79, + 0.841 + ], + "angle": 0, + "content": "Although 3DGS shows promising performance in synthesising novel views of a scene at real-time rendering speeds, its renderings fall short in more challenging scenarios that involve complex, view-dependent surface effects. When observing images with reflective and specular surfaces, the changes in surface colour across viewing angles remain consistent, rather than exhibiting the complex variations" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.411, + 0.129 + ], + "angle": 0, + "content": "ZJ. Tang, TJ. Cham" + }, + { + "type": "image", + "bbox": [ + 0.234, + 0.142, + 0.788, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.339, + 0.788, + 0.452 + ], + "angle": 0, + "content": "Fig. 1: We present test renderings from the \"Drums\" scene within the blender dataset [22], comparing our technique against Gaussian Splatting (3DGS) [17] and the ground truth (G.T). As the perspective shifts around the scene, the colour of the Floor Tom's top changes from translucent to reflective, showcasing intricate effects that depend on the viewpoint. These effects result from the specular reflection of incoming light and the reflections within the scene from elements like the Cymbals. Contrary to 3DGS, which struggles to capture these complex variations in light reflection, our method, 3iGS, aligns more accurately with the ground truth." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.487, + 0.788, + 0.608 + ], + "angle": 0, + "content": "in reflections observed in the dataset shown in Fig. 1. A logical solution is to adopt the strategy of Physically Based Rendering (PBR), which involves explicitly modeling the surface characteristics and performing ray marching from surfaces to calculate illumination effects. As part of the process, the Bidirectional Reflectance Distribution Function (BRDF) of surfaces are predicted and a shading function is applied to simulate view-dependent effects [7,10]. Nonetheless, accurately determining these physical properties is an ill-posed challenge, making it difficult to infer and model all the intricate rendering effects correctly." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.611, + 0.788, + 0.732 + ], + "angle": 0, + "content": "In this paper, we draw inspiration from graphics engines that utilise illumination volumes or light probes that summarise illumination information directed towards a surface. These methods compute illumination either directly from the local illumination volume surrounding the surface [12] or from the nearest light probes [28], rather than sampling numerous outward rays from the surface's upper hemisphere. Such approaches allow fast rendering speed at run time, as illumination information is pre-calculated and stored in the volumes or light probes." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.735, + 0.788, + 0.84 + ], + "angle": 0, + "content": "Our work, named Factorised Tensorial Illumination for Gaussian Splatting (3iGS), enhances 3DGS rendering quality. We introduce a continuous local illumination field of 3D Gaussians represented by compact factorised tensors for fast evaluation. The means of the 3D Gaussians serve as the input to these factorised tensors to calculate illumination features. Subsequently, each 3D Gaussian is refined through an optimisation of its mean, opacity, anisotropic covariance, diffused colour, and BRDF features. A neural renderer then maps the incident" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.292, + 0.115, + 0.732, + 0.131 + ], + "angle": 0, + "content": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splatting" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.252 + ], + "angle": 0, + "content": "illumination neural field, Gaussian BRDF attributes, and viewing angle to the Gaussian's specular colour. Overall, our approach represents a Gaussian's outgoing radiance as a function of both a continuous local illumination field and the individual Gaussian's BRDF attributes relative to it. This is opposed to the conventional optimisation of the 3D Gaussians' outgoing radiance in isolation, without accounting for the effects of adjacent Gaussians or scene lighting conditions." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.253, + 0.785, + 0.358 + ], + "angle": 0, + "content": "3iGS significantly enhances the accuracy of 3DGS, offering clear advantages in scenes with reflective surfaces where surface colours change dramatically across viewing angles as shown in Fig. 1. In synthetic datasets, such as the NeRF Blender dataset and the Shiny Blender dataset, 3iGS surpasses 3DGS both quantitatively and qualitatively. Similarly, 3iGS demonstrates superior performance over 3DGS in real-world scenarios on the Tanks and Temples dataset. In summary our technical contributions are:" + }, + { + "type": "text", + "bbox": [ + 0.223, + 0.371, + 0.784, + 0.4 + ], + "angle": 0, + "content": "1. a method to optimise the outgoing radiance as an incident continuous illumination field and Gaussian BRDF features with a neural renderer;" + }, + { + "type": "text", + "bbox": [ + 0.223, + 0.402, + 0.785, + 0.431 + ], + "angle": 0, + "content": "2. an approach to model a continuous illumination field with Tensorial Factorisation for compactness and fast evaluation; and" + }, + { + "type": "text", + "bbox": [ + 0.223, + 0.433, + 0.785, + 0.462 + ], + "angle": 0, + "content": "3. superior performance in rendering quality over baseline 3D Gaussian Splatting while maintaining real time performance." + }, + { + "type": "list", + "bbox": [ + 0.223, + 0.371, + 0.785, + 0.462 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.488, + 0.388, + 0.505 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.522, + 0.784, + 0.597 + ], + "angle": 0, + "content": "Our work falls into the category of learning scene representation from multi-view input images. Here we review prior work on NeRF-based representations and Gaussian splatting. We also discuss other relevant topics pertaining to inverse rendering which aims to recover scene geometry, material properties, and scene lighting conditions in Sec. 
2.1." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.599, + 0.785, + 0.794 + ], + "angle": 0, + "content": "Scene Representations for View Synthesis - One of the pioneering neural rendering techniques called Neural Radiance Fields (NeRF) [22] has achieved remarkable results in novel view synthesis from multi-view images. By sampling points along rays traced from the camera into the scene, NeRF reconstructs a scene as a continuous field of outgoing radiance. The technique employs volumetric rendering to determine the colour of each pixel. This method has inspired numerous developments of other scene representations [1,2,22,31,32]. However, the vanilla NeRF, which encodes the entire scene representation into a set of MLPs, requires multiple queries of points along rays during training and inference. This massively slows down the speed required for real time rendering. To address this, other neural scene representation techniques apply hash encoding [19, 23], triplanes or factorised tensors [9, 14], and gridding [3, 11, 27] to accelerate training and inference speeds." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.796, + 0.784, + 0.84 + ], + "angle": 0, + "content": "Tensorial Factorisation - In TensoRF [9], a feature grid can be represented as a 4D tensor of which the first 3 represents the XYZ spatial grid and the last represents the feature channel dimension. To model a radiance field with grid" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.41, + 0.129 + ], + "angle": 0, + "content": "ZJ. Tang, TJ. Cham" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.146, + 0.784, + 0.178 + ], + "angle": 0, + "content": "representation, [9] propose an extension of CANDECOMP/PARAFAC (CP)-Decomposition [8] to Vector-Matrix (VM) decomposition:" + }, + { + "type": "equation", + "bbox": [ + 0.236, + 0.199, + 0.745, + 0.283 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {G} _ {c} = \\sum_ {r = 1} ^ {R _ {c}} \\mathbf {v} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {X}} \\circ \\mathbf {M} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {Y Z}} \\circ \\mathbf {b} _ {\\mathbf {3 r} - \\mathbf {2}} + \\mathbf {v} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {Y}} \\circ \\mathbf {M} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {X Z}} \\circ \\mathbf {b} _ {\\mathbf {3 r} - \\mathbf {1}} + \\mathbf {v} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {Z}} \\circ \\mathbf {M} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {X Y}} \\circ \\mathbf {b} _ {\\mathbf {3 r}} \\\\ = \\sum_ {r = 1} ^ {R _ {c}} \\mathbf {A} _ {C, r} ^ {X} \\circ \\mathbf {b} _ {3 r - 2} + \\mathbf {A} _ {C, r} ^ {Y} \\circ \\mathbf {b} _ {3 r - 1} + \\mathbf {A} _ {C, r} ^ {Z} \\circ \\mathbf {b} _ {3 r} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.292, + 0.784, + 0.337 + ], + "angle": 0, + "content": "In Eq. (1), the inputs \\(\\mathbf{v}\\) and \\(\\mathbf{M}\\) corresponds to XYZ-mode vector and matrix factorisation and \\(\\mathbf{b}\\) denotes the appearance feature mode vectors. Separately, \\(\\mathcal{G}_c\\) and \\(R_{C}\\) refers to the outgoing radiance and the colour feature channels." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.338, + 0.785, + 0.413 + ], + "angle": 0, + "content": "Gaussian Splatting - As opposed to ray marching, 3D Gaussian Splatting is a recent method for rendering scenes via rasterisation. 
To begin, Gaussians are fitted on a point cloud that are either initialised as a set of random points or bootstrapped with a sparse point cloud produced during the SfM process for free [17]. The Gaussians of the point cloud are defined by a function:" + }, + { + "type": "equation", + "bbox": [ + 0.389, + 0.422, + 0.785, + 0.442 + ], + "angle": 0, + "content": "\\[\ng (\\mathbf {x} | \\mu , \\boldsymbol {\\Sigma}) = e ^ {- \\frac {1}{2} (\\mathbf {x} - \\mu) ^ {T} \\boldsymbol {\\Sigma} ^ {- 1} (\\mathbf {x} - \\mu)} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.451, + 0.788, + 0.556 + ], + "angle": 0, + "content": "where each point \\(\\mathbf{x}\\) is centered at mean \\(\\mu \\in \\mathbb{R}^3\\) with an anisotropic covariance matrix \\(\\pmb{\\Sigma} \\in \\mathbb{R}^{3x^3}\\). The mean of a Gaussian is parameterised by the coordinates \\(\\mu = (\\mu_x, \\mu_y, \\mu_z)\\) that is scaled by the full 3D covariance matrix \\(\\pmb{\\Sigma}\\). As discussed in [17], these Gaussians have no physical meanings, given the difficulty of constraining \\(\\pmb{\\Sigma}\\) to a valid semi-positive definite matrix during the optimisation process. Instead, to derive \\(\\pmb{\\Sigma}\\), a scaling matrix \\(S\\) and a rotation matrix \\(R\\) is learned during the optimisation process to scale the Gaussians:" + }, + { + "type": "equation", + "bbox": [ + 0.449, + 0.565, + 0.785, + 0.581 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\Sigma} = \\mathbf {R} \\mathbf {S} \\mathbf {S} ^ {\\mathrm {T}} \\mathbf {R} ^ {\\mathrm {T}} \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.591, + 0.784, + 0.633 + ], + "angle": 0, + "content": "With a viewing transformation \\(\\mathbf{W}\\) and an affine approximation of the projective transformation \\(\\mathbf{J}\\), the covariance matrix is then expressed in camera coordinates as:" + }, + { + "type": "equation", + "bbox": [ + 0.434, + 0.635, + 0.785, + 0.652 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\Sigma} ^ {\\prime} = \\mathbf {J} \\mathbf {W} \\boldsymbol {\\Sigma} \\mathbf {W} ^ {\\mathrm {T}} \\mathbf {J} ^ {\\mathrm {T}} \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.657, + 0.784, + 0.717 + ], + "angle": 0, + "content": "Furthermore, each Gaussian is coloured via a set of Spherical Harmonics (SH) coefficients that represent the view dependent colour \\( c_{i} \\), also known as radiance field, multiplied by its opacity \\( \\alpha \\). 
To colour a pixel \\( u \\) as \\( \\hat{C} \\), alpha blending of \\( N \\) ordered Gaussians is applied:" + }, + { + "type": "equation", + "bbox": [ + 0.3, + 0.727, + 0.785, + 0.768 + ], + "angle": 0, + "content": "\\[\n\\hat {\\mathbf {C}} = \\sum_ {i \\in N} T _ {i} g _ {i} \\left(\\mathbf {u} \\mid \\mu^ {\\prime}, \\boldsymbol {\\Sigma} ^ {\\prime}\\right) \\alpha_ {i} \\mathbf {c} _ {i}, \\quad T _ {i} = \\prod_ {j = 1} ^ {i - 1} \\left(1 - g _ {i} \\left(\\mathbf {u} \\mid \\mu^ {\\prime}, \\boldsymbol {\\Sigma} ^ {\\prime}\\right) \\alpha_ {i}\\right) \\tag {5}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.787, + 0.373, + 0.801 + ], + "angle": 0, + "content": "2.1 Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.784, + 0.84 + ], + "angle": 0, + "content": "As discussed, the direct optimisation of spherical harmonics to describe the outgoing radiance in individual Gaussians in 3DGS results in poor view-dependent" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.291, + 0.115, + 0.733, + 0.131 + ], + "angle": 0, + "content": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splitting" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.192 + ], + "angle": 0, + "content": "effects. A crucial reason is that these Gaussians do not fully model scene properties [13] and thus fail to capture the specular effects which changes drastically across viewing angles." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.192, + 0.785, + 0.269 + ], + "angle": 0, + "content": "Therefore to account for the specular highlights, it is beneficial to model the underlying properties such as the BRDF and illumination effects of the scene. In conventional computer graphics, a rendering equation is commonly applied to simulate effects of specular and diffused shading [15]. For instance, rendering Eq. (6) describes an outgoing radiance of a surface point:" + }, + { + "type": "equation", + "bbox": [ + 0.37, + 0.275, + 0.785, + 0.306 + ], + "angle": 0, + "content": "\\[\nL _ {o} (\\mathbf {x}, \\mathbf {v}) = \\int_ {\\Omega} L _ {i} (\\mathbf {x}, \\mathbf {l}) f _ {r} (\\mathbf {l}, \\mathbf {v}) (\\mathbf {l} \\cdot \\mathbf {n}) d \\mathbf {l}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.311, + 0.784, + 0.415 + ], + "angle": 0, + "content": "The radiance \\( L_{o} \\) emitted from a surface point \\( \\mathbf{x} \\), when observed from a viewing direction \\( \\mathbf{v} \\), is defined in Eq. (6). An integral is applied to accumulate the contribution of incident light at an incident angle \\( \\mathbf{l} \\) across the upper hemisphere \\( \\Omega \\) of \\( \\mathbf{x} \\). The function \\( f_{r} \\) denotes the Bidirectional Radiance Distribution Function (BRDF), describing the reflection characteristics of incident radiance at \\( \\mathbf{x} \\) viewed in direction \\( \\mathbf{v} \\). Lastly the inclusion of the cosine law with the normal vector \\( \\mathbf{n} \\) ensures the energy conservation." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.416, + 0.784, + 0.448 + ], + "angle": 0, + "content": "From a signal processing perspective, an alternative to Eq. 
(6) is expressed more generally in terms of spherical harmonic convolution [16, 21]:" + }, + { + "type": "equation", + "bbox": [ + 0.449, + 0.455, + 0.785, + 0.47 + ], + "angle": 0, + "content": "\\[\nB _ {l m} = \\Lambda_ {l} \\rho_ {l} L _ {l m} \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.476, + 0.784, + 0.52 + ], + "angle": 0, + "content": "In Eq. (7), \\( B_{lm} \\) defines the outgoing reflected light as the product of BRDF filter \\( \\rho_l \\), spherical harmonic coefficients of lighting signal \\( L_{lm} \\), and the normalisation constant \\( \\varLambda_l \\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.522, + 0.786, + 0.764 + ], + "angle": 0, + "content": "Some studies [13, 25] enhance 3DGS by expressing BRDF \\( f_{r} \\) as a Cook-Torrance microfacet model [10] or the GGX Trowbridge-Reitz model [7]. In these approaches, physical attributes, including roughness \\( r \\), albedo \\( a \\), metallicity \\( m \\), and the normal vector \\( \\mathbf{n} \\) are predicted and used in Eq. (6). Although these modifications marginally improve rendering quality metrics, they fail to accurately produce high-quality, view-dependent effects. This shortfall primarily stems from relying on estimated parameters for physical rendering within a simplified rendering equation [20]. Furthermore, these parameters are inherently challenging to be estimated accurately, due to the ill-posed nature of inverse rendering from multi-view images. Although numerous works [4-6, 14, 20, 26] also achieved success by exploring a neural representation of the rendering equation, these works either require prior information, such as known lighting conditions or a pre-trained model on a realistic dataset with known BRDF parameters. Furthermore these techniques are experimented with ray tracing based methods like NeRF. A work closest to ours in the area of rasterisation and Gaussian Splitting manner, is GaussianShader [13] which we compare against in Sec. 5.2." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.782, + 0.331, + 0.798 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.784, + 0.84 + ], + "angle": 0, + "content": "Instead of predicting the physical BRDF properties of materials in the scene, our goal is to express the outgoing radiance of a Gaussian as a more general" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.411, + 0.129 + ], + "angle": 0, + "content": "ZJ. Tang, TJ. Cham" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.147, + 0.775, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.337, + 0.788, + 0.421 + ], + "angle": 0, + "content": "Fig. 2: A visualisation of 3iGS pipeline to render a single Gaussian's colour. We interpolate an incident illumination \\( L_{i} \\) from the factorised tensorial illumination field \\( \\mathcal{G}_l \\) using a Gaussian mean \\( \\pmb{x}_i \\) as input. A neural network \\( \\mathcal{F} \\) maps the illumination field \\( L_{i} \\), the Gaussian BRDF features \\( \\rho_{i} \\), and the viewing direction \\( \\omega_{o} \\) to Gaussian's specular colour \\( c_{s} \\). Following, the diffused colour \\( c_{d} \\) and specular colour \\( c_{s} \\) are added linearly to produce the final outgoing radiance field \\( c \\)." 
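To make the shading path summarised in Fig. 2 concrete, here is a minimal NumPy sketch of colouring a single Gaussian: interpolated illumination features, per-Gaussian BRDF features and an encoded viewing direction are fed to a small MLP for the specular colour, which is then added to the diffuse colour. The feature sizes, the two-layer MLP and the plain Fourier directional encoding (standing in for IDE [30]) are illustrative assumptions, not the released 3iGS implementation.

```python
import numpy as np

def encode_direction(omega_o, degree=4):
    """Toy sin/cos encoding of the unit view direction.
    3iGS uses Integrated Directional Encoding (IDE) [30]; this stand-in keeps the sketch short."""
    freqs = 2.0 ** np.arange(degree)            # (degree,)
    angles = omega_o[None, :] * freqs[:, None]  # (degree, 3)
    return np.concatenate([np.sin(angles), np.cos(angles)]).ravel()

def specular_mlp(features, w1, b1, w2, b2):
    """A tiny two-layer MLP standing in for the neural renderer F of Fig. 2."""
    h = np.maximum(features @ w1 + b1, 0.0)      # ReLU hidden layer
    return 1.0 / (1.0 + np.exp(-(h @ w2 + b2)))  # sigmoid -> RGB in [0, 1]

def shade_gaussian(c_d, rho_i, L_i, omega_o, params):
    """Outgoing radiance of one Gaussian = constant diffuse colour + view-dependent specular colour."""
    feats = np.concatenate([rho_i, L_i, encode_direction(omega_o)])
    return c_d + specular_mlp(feats, *params)

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    rho_i = rng.normal(size=48)              # per-Gaussian BRDF features (size is illustrative)
    L_i = rng.normal(size=27)                # interpolated illumination features (size is illustrative)
    omega_o = np.array([0.0, 0.0, 1.0])      # unit viewing direction
    in_dim = rho_i.size + L_i.size + 24      # 24 = 2 (sin, cos) * 4 freqs * 3 dims
    params = (0.1 * rng.normal(size=(in_dim, 64)), np.zeros(64),
              0.1 * rng.normal(size=(64, 3)), np.zeros(3))
    c_d = np.array([0.4, 0.3, 0.2])          # diffuse colour of this Gaussian
    print(shade_gaussian(c_d, rho_i, L_i, omega_o, params))
```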
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.451, + 0.788, + 0.512 + ], + "angle": 0, + "content": "expression of BRDF, and the incoming illumination as neural features. This idea is based on a generalized version of Eq. (7), where BRDF features modify an incoming illumination field, without the need for decomposing down to intrinsic material properties [21]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.512, + 0.787, + 0.542 + ], + "angle": 0, + "content": "Specifically for each 3D Gaussian in the scene, the outgoing radiance field is formed by:" + }, + { + "type": "equation", + "bbox": [ + 0.427, + 0.542, + 0.786, + 0.559 + ], + "angle": 0, + "content": "\\[\n\\mathbf {c} \\left(\\omega_ {o}\\right) = \\mathbf {c} _ {\\mathbf {d}} + \\mathbf {c} _ {\\mathbf {s}} \\left(\\omega_ {o}\\right) \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.566, + 0.788, + 0.627 + ], + "angle": 0, + "content": "For viewing angle \\(\\omega_{o}\\), a Gaussian is coloured by its constant diffused colour \\(c_{d}\\) and a view dependent specular colour \\(c_{s}\\). At each Gaussian \\(i\\), a small neural network \\(\\mathcal{F}\\) maps the Gaussian BRDF features \\(\\rho_{i}\\) and the incoming illumination \\(L_{i}\\) to its specular colour viewed at an angle \\(\\omega_{o}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.425, + 0.639, + 0.786, + 0.656 + ], + "angle": 0, + "content": "\\[\n\\mathcal {F}: \\left\\{\\rho_ {i}, L _ {i}, \\omega_ {o} \\right\\} \\mapsto \\mathbf {c} _ {\\mathbf {s}} \\tag {9}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.678, + 0.628, + 0.694 + ], + "angle": 0, + "content": "3.1 Illumination Grid by Tensorial Factorisation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.705, + 0.789, + 0.841 + ], + "angle": 0, + "content": "Our work is largely inspired by conventional computer graphics engines for fast rendering of scene and objects in video games. The fundamental rendering equation highlights the role of multi-bounce lighting in achieving indirect illumination, wherein light bounces off one surface to illuminate another. However, the process of ray tracing from each Gaussian surface into the scene is notably resource-intensive, undermining the goal of quick rendering in 3D graphics systems. To facilitate real-time rendering, one strategy involves the use of baking techniques that employ irradiance volumes [12]. This method segments a scene into distinct volumes and pre-calculates irradiance data offline. An alternative" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.291, + 0.115, + 0.733, + 0.131 + ], + "angle": 0, + "content": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splitting" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.207 + ], + "angle": 0, + "content": "strategy places light probes [28,29] throughout the scene to gather lighting information at specific spatial locations. When rendering the colour of a surface, the system quickly interpolates lighting information from the nearest light probes, ensuring swift rendering times." 
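In the same probe-like spirit, the illumination features directed at a Gaussian can be fetched by interpolating a compact vector-matrix factorised grid at the Gaussian's mean rather than tracing rays outward. The sketch below shows one way such a query could look under that assumption: each rank component multiplies a vector factor along one axis with a matrix factor over the complementary plane, and the result weights a feature-mode vector. The grid resolution, rank and feature width are made-up values; this is not the actual 3iGS or TensoRF code.

```python
import numpy as np

def lin_interp(values, coord):
    """1D linear interpolation on a uniform grid; coord is normalised to [0, 1]."""
    x = coord * (len(values) - 1)
    i0 = int(np.floor(x))
    i1 = min(i0 + 1, len(values) - 1)
    t = x - i0
    return (1 - t) * values[i0] + t * values[i1]

def bilin_interp(plane, u, v):
    """2D bilinear interpolation built from two passes of lin_interp."""
    row = np.array([lin_interp(plane[:, j], u) for j in range(plane.shape[1])])
    return lin_interp(row, v)

def query_illumination(x, vecs, mats, b, rank):
    """Evaluate a VM-factorised feature grid at point x (normalised coords in [0, 1]^3).

    vecs[axis][r]: vector factor along `axis`; mats[axis][r]: matrix factor over the
    complementary plane; b: feature-mode vectors of shape (3 * rank, F)."""
    xs, ys, zs = x
    points = {0: xs, 1: ys, 2: zs}
    planes = {0: (ys, zs), 1: (xs, zs), 2: (xs, ys)}
    feat = np.zeros(b.shape[1])
    for r in range(rank):
        for axis in range(3):
            scalar = lin_interp(vecs[axis][r], points[axis]) * \
                     bilin_interp(mats[axis][r], *planes[axis])
            feat += scalar * b[3 * r + axis]
    return feat

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    N, rank, F = 16, 4, 27   # coarse grid, low rank, small feature width (all illustrative)
    vecs = [[rng.normal(size=N) for _ in range(rank)] for _ in range(3)]
    mats = [[rng.normal(size=(N, N)) for _ in range(rank)] for _ in range(3)]
    b = rng.normal(size=(3 * rank, F))
    print(query_illumination(np.array([0.3, 0.7, 0.5]), vecs, mats, b, rank))
```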
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.208, + 0.784, + 0.252 + ], + "angle": 0, + "content": "To maintain the fast rendering speed of 3DGS, our work describes a methodology of learning the illumination features of a Gaussian with a continuous grid based illumination field as:" + }, + { + "type": "equation", + "bbox": [ + 0.451, + 0.254, + 0.785, + 0.27 + ], + "angle": 0, + "content": "\\[\n\\mathcal {G} _ {l}: \\left\\{\\mathbf {x} _ {\\mathbf {i}} \\right\\} \\mapsto L _ {i} \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.276, + 0.784, + 0.336 + ], + "angle": 0, + "content": "Given a Gaussian's mean coordinate \\(\\mathbf{x_i}\\), we seek to compute an illumination field \\(L_{i}\\) by interpolating from learnable grid representation. The illumination tensors \\(\\mathcal{G}_l\\) is formulated similar to TensoRF [9] by a vector-matrix spatial factorisation as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.323, + 0.348, + 0.785, + 0.387 + ], + "angle": 0, + "content": "\\[\n\\mathcal {G} _ {l} = \\sum_ {r = 1} ^ {R _ {L}} \\mathbf {A} _ {L, r} ^ {X} \\circ \\mathbf {b} _ {3 r - 2} + \\mathbf {A} _ {L, r} ^ {Y} \\circ \\mathbf {b} _ {3 r - 1} + \\mathbf {A} _ {L, r} ^ {Z} \\circ \\mathbf {b} _ {3 r} \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.399, + 0.788, + 0.626 + ], + "angle": 0, + "content": "In Eq. (11), \\( R_{L} \\) represents the feature channels of the illumination components, \\( \\mathbf{A} \\) as feature tensors and \\( \\mathbf{b} \\) as feature vectors. The illumination feature grid is jointly learned end to end in the optimisation process together with each Gaussian in the scene. Unlike 3DGS, where each Gaussian is optimised independently, the illumination field is modelled as a continuous grid function. A Gaussian mean serves as the input to query from the factorised tensor grid via interpolation. The inclusion of this continuous incoming illumination field directed at each Gaussian is the core component of producing accurate view-dependent effects, as we show in the ablation study of Sec. 5.4. Furthermore, by formulating this field as factorised tensors, it allows the network to achieve fast rendering speed. Our illumination field is coarse, using \\( 87.5\\% \\) less voxels compared to TensoRF on synthetic datasets. This compact representation is also low in memory footprint compared to the number of optimised Gaussians, which is often a magnitude order or more higher. We refer readers to [9], which provides a comprehensive overview to describe how the tensors are factorised and interpolated." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.648, + 0.444, + 0.663 + ], + "angle": 0, + "content": "3.2 3D Gaussian Features" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.674, + 0.788, + 0.841 + ], + "angle": 0, + "content": "In 3DGS [17], Gaussians are optimised with a set of parameters: 3D positions, opacity \\(\\alpha\\), anisotropic covariance, and spherical harmonics coefficients. In our work, instead of optimising spherical harmonics as an outgoing radiance, 3iGS characterises the Gaussians with a diffused colour and learnable BRDF features. Unlike [13, 25], we do not strictly enforce physically interpretable properties commonly used in shading techniques. Aforementioned, these techniques are often simplified, too ill-posed to be decomposed individually, and insufficient to encompass all complex rendering effects [20]. Rather, we loosely follow Eq. 
(7) and treat BRDF feature components as a set of weights that alter the incoming illumination field. Given a continuous illumination field obtained from Eq. (11), a Gaussian's BRDF is conditionally optimised against it. This is in contrast" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.41, + 0.129 + ], + "angle": 0, + "content": "ZJ. Tang, TJ. Cham" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.193 + ], + "angle": 0, + "content": "to 3DGS where the Gaussians' outgoing radiance are individually optimised without modelling the interdependencies that should arise from a shared scene illumination, resulting in detrimental view-dependent effects." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.215, + 0.418, + 0.23 + ], + "angle": 0, + "content": "3.3 Shading Gaussians" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.241, + 0.788, + 0.332 + ], + "angle": 0, + "content": "Following Eq. (9), we shade each Gaussian by mapping its viewing directions encoded with Integrated Directional Encoding (IDE) [30], Gaussian features (obtained in Sec. 3.2), and its illumination field, to the specular colour output. We linearly add the diffused and specular colours to create its radiance field as per Eq. (8). To render the final scene, we follow the rasterisation pipeline proposed in the original 3DGS work." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.356, + 0.381, + 0.373 + ], + "angle": 0, + "content": "4 Optimisation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.388, + 0.787, + 0.463 + ], + "angle": 0, + "content": "In the previous Sec. 3, we described the necessary components to model a scene with Gaussians and render it via rasterisation. To improve the stability of training and to enhance the final rendering quality, we first train the model with the diffused colour in the first 3,000 iterations. Following, specular colours are added to the Gaussians as in Eq. (8)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.464, + 0.788, + 0.568 + ], + "angle": 0, + "content": "While training the tensorial illumination grid, an initial boundary which encapsulate the scene bounding box is defined. Midway through training, we shrink the illumination grid to fit the Gaussians and resample the grid with the same number of voxels. We adopt the same adaptive control of Gaussians of 3DGS [17] to limit the number of Gaussians and the units per volume. We propose to train our model with the same loss function as 3DGS for a fair evaluation:" + }, + { + "type": "equation", + "bbox": [ + 0.408, + 0.57, + 0.786, + 0.587 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = (1 - \\lambda) \\mathcal {L} _ {1} + \\lambda \\mathcal {L} _ {\\mathrm {D} - \\mathrm {S S I M}} \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.592, + 0.731, + 0.607 + ], + "angle": 0, + "content": "where we combined the \\(\\mathcal{L}_1\\) term with a D-SSIM term with \\(\\lambda\\) set to 0.2." 
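A minimal NumPy sketch of this objective is given below. It takes the D-SSIM term as 1 - SSIM, which is how 3DGS-style training code commonly implements it, and the SSIM here uses global image statistics instead of the standard 11x11 Gaussian window, so treat it as an illustration of the loss rather than a drop-in replacement for the real metric.

```python
import numpy as np

def ssim_global(img_a, img_b, c1=0.01 ** 2, c2=0.03 ** 2):
    """Simplified SSIM from global image statistics (the standard metric is windowed)."""
    mu_a, mu_b = img_a.mean(), img_b.mean()
    var_a, var_b = img_a.var(), img_b.var()
    cov = ((img_a - mu_a) * (img_b - mu_b)).mean()
    return ((2 * mu_a * mu_b + c1) * (2 * cov + c2)) / \
           ((mu_a ** 2 + mu_b ** 2 + c1) * (var_a + var_b + c2))

def training_loss(render, gt, lam=0.2):
    """(1 - lambda) * L1 + lambda * D-SSIM, with D-SSIM taken as 1 - SSIM and lambda = 0.2."""
    l1 = np.abs(render - gt).mean()
    d_ssim = 1.0 - ssim_global(render, gt)
    return (1.0 - lam) * l1 + lam * d_ssim

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    gt = rng.random((64, 64, 3))
    render = np.clip(gt + 0.05 * rng.normal(size=gt.shape), 0.0, 1.0)
    print(training_loss(render, gt))
```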
+ }, + { + "type": "title", + "bbox": [ + 0.215, + 0.632, + 0.498, + 0.649 + ], + "angle": 0, + "content": "5 Experiments and Results" + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.664, + 0.337, + 0.678 + ], + "angle": 0, + "content": "5.1 Datasets" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.689, + 0.788, + 0.78 + ], + "angle": 0, + "content": "Synthetic scenes - We show experimental results of 3iGS based on the Blender dataset released in [22]. This dataset contains challenging scenes of complex geometries with realistic non-Lambertian materials. Similarly, we evaluate our model on the Shiny Blender dataset presented in [30]. Unlike the Blender dataset, Shiny Blender contains a singular object with simple geometries in each scene with more glossy effects." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.78, + 0.788, + 0.84 + ], + "angle": 0, + "content": "Real world complex scenes - To prove the effectiveness of our model in real world scenes, we evaluate our renderings on the Tanks and Temples dataset [18]. This dataset is obtained from video sequences of real world objects and environment." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.291, + 0.115, + 0.733, + 0.131 + ], + "angle": 0, + "content": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splitting" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.147, + 0.371, + 0.162 + ], + "angle": 0, + "content": "5.2 Comparisons" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.172, + 0.788, + 0.488 + ], + "angle": 0, + "content": "To evaluate our model, we compared against methods that apply both ray-tracing methods like NeRF, or rasterisation methods with Gaussian Splatting. Out of all prior work, 3DGS and GaussianShader is the closest work which offers real time inference speed which we will mainly compare against. On comparing the qualitative result figures, we re-ran the experiments of 3DGS [17] and GaussianShader [13] using their original repository code under settings specified by the authors. Ray-Tracing Methods such as [20,22,30] represent a scene as a radiance field using MLPs. By performing multiple samplings on rays marched from the camera into the scene, the sampled points are queried with MLP to obtain the opacity and radiance values. Volume rendering is performed to obtain the final pixel colour. Rasterisation Methods such as Gaussian Splatting (3DGS) [17] and GaussianShader [13] apply a rasterisation pipeline as opposed to ray tracing methods. These models represents a scene as Gaussians with radiance properties based on Spherical Harmonics. In, [13], 3DGS is extended by modelling a scene with additional material characteristics and a shading function is applied, as opposed to ours which uses an MLP as neural renderer. Furthermore, [13] shades Gaussians with a global differentiable environment light stored in cube maps, and optimises independent Gaussians with spherical harmonic-based color for unaccounted illumination. In our work, we represent incident illumination locally with grid-based tensors and optimise Gaussian BRDF features relative to this field." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.489, + 0.788, + 0.581 + ], + "angle": 0, + "content": "For a fair comparison, 3iGS is trained with the same loss function as 3DGS as described in Sec. 4 and the same number of iterations of 30,000 steps. 
We repurposed the \\(16 \\times 3\\) SH coefficients in 3DGS as BRDF feature channels and added 4 additional parameters of base colour and roughness for IDE view-directional encoding. The tensorial illumination field is set at a coarse resolution size of \\(150^{3}\\) voxels." + }, + { + "type": "image", + "bbox": [ + 0.273, + 0.603, + 0.387, + 0.655 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.388, + 0.603, + 0.502, + 0.654 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.603, + 0.614, + 0.654 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.616, + 0.603, + 0.729, + 0.654 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.274, + 0.657, + 0.382, + 0.706 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.388, + 0.657, + 0.5, + 0.706 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.657, + 0.61, + 0.706 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.617, + 0.657, + 0.726, + 0.706 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.32, + 0.707, + 0.341, + 0.714 + ], + "angle": 0, + "content": "3DGS" + }, + { + "type": "image_caption", + "bbox": [ + 0.429, + 0.707, + 0.464, + 0.714 + ], + "angle": 0, + "content": "G.Shader" + }, + { + "type": "image_caption", + "bbox": [ + 0.55, + 0.707, + 0.569, + 0.714 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.648, + 0.707, + 0.696, + 0.714 + ], + "angle": 0, + "content": "Ground Truth" + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.724, + 0.787, + 0.753 + ], + "angle": 0, + "content": "Fig. 3: Comparisons of test-set views of real world scenes. 3iGS enhances 3DGS renderings by producing clearer view dependent effects as shown." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.41, + 0.129 + ], + "angle": 0, + "content": "ZJ. Tang, TJ. Cham" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.154, + 0.788, + 0.281 + ], + "angle": 0, + "content": "Table 1: Our approach demonstrates superior quantitative performance over current methods when tested on Synthetic Datasets. Specifically, within the NeRF Synthetic dataset, our method surpasses all competitors across various image quality assessments (PSNR/SSIM/LPIPS). In the context of the Shiny Blender dataset, 3iGS matches the performance of existing rasterization techniques in terms of PSNR and SSIM but surpasses them in LPIPS for the majority of scenes. We encourage readers to examine the accompanying figure showcasing renderings of the Shiny Blender scene, where our method attains enhanced qualitative outcomes. Best results, benchmarked across real time rendering methods, are in bold." + }, + { + "type": "table", + "bbox": [ + 0.219, + 0.291, + 0.784, + 0.829 + ], + "angle": 0, + "content": "
NeRF Synthetic [22]
ChairDrumsLegoMicMats.ShipHotdogFicusAvg.
PSNR↑
NeRF [22]33.0025.0132.5432.9129.6228.6536.1830.1331.01
Ref-NeRF [30]33.9825.4335.1033.6527.1029.2437.0428.7431.29
ENVIDR [20]31.2222.9929.5532.1729.5221.5731.4426.6028.13
3DGS [17]35.8226.1735.6935.3430.0030.8737.6734.8333.30
G.Shader [13]35.8326.3635.8735.2330.0730.8237.8534.9733.38
G.Shader(reproduced) [13]33.7025.5032.9934.0728.8728.3735.2933.0531.48
Ours35.9026.7535.9436.0130.0031.1237.9835.4033.64
SSIM↑
NeRF [22]0.9670.9250.9610.9800.9490.8560.9740.9640.947
Ref-NeRF [30]0.9740.9290.9750.9830.9210.8640.9790.9540.947
ENVIDR [20]0.9760.9300.9610.9840.9680.8550.9630.9870.956
3DGS [17]0.9870.9540.9830.9910.9600.9070.9850.9870.969
G.Shader [13]0.9870.9490.9830.9910.9600.9050.9850.9850.968
G.Shader(reproduced) [13]0.9800.9450.9720.9890.9510.8810.9800.9820.960
Ours0.9870.9550.9830.9920.9610.9080.9860.9890.970
LPIPS↓
NeRF [22]0.0460.0910.0500.0280.0630.2060.1210.0440.081
Ref-NeRF [30]0.0290.0730.0250.0180.0780.1580.0280.0560.058
ENVIDR [20]0.0310.0800.0540.0210.0450.2280.0720.0100.067
3DGS [17]0.0120.0370.0160.0060.0340.1060.0200.0120.030
G.Shader [13]0.0120.0400.0140.0060.0330.0980.0190.0130.029
G.Shader(reproduced) [13]0.0190.0450.0260.0090.0460.1480.0290.0170.042
Ours0.0120.0360.0150.0050.0340.1020.0190.0100.029
Shiny Blender [30]
CarBallHelmetTeapotToasterCoffeeAvg.
PSNR↑
NVDiffRec [24]27.9821.7726.9740.4424.3130.7428.70
Ref-NeRF [30]30.4129.1429.9245.1925.2933.9932.32
ENVIDR [20]28.4638.8932.7341.5926.1129.4832.88
3DGS [17]27.2427.6928.3245.6820.9932.3230.37
G.Shader [13]27.9030.9828.3245.8626.2132.3931.94
G.Shader(reproduced) [13]27.5129.0228.7343.0522.8631.3430.41
Ours27.5127.6428.2146.0422.6932.5830.77
SSIM↑
NVDiffRec [24]0.9630.8580.9510.9960.9280.9730.945
Ref-NeRF [30]0.9490.9560.9550.9950.9100.9720.956
ENVIDR [20]0.9610.9910.9800.9960.9390.9490.969
3DGS [17]0.9300.9370.9510.9960.8950.9710.947
G.Shader [13]0.9310.9650.9500.9960.9290.9710.957
G.Shader(reproduced) [13]0.9300.9540.9550.9950.9000.9690.950
Ours0.9300.9380.9510.9970.9080.9730.949
LPIPS↓
NVDiffRec [24]0.0450.2970.1180.0110.1690.0760.119
Ref-NeRF [30]0.0510.3070.0870.0130.1180.0820.109
ENVIDR [20]0.0490.0670.0510.0110.1160.1390.072
3DGS [17]0.0470.1610.0790.0070.1260.0780.083
G.Shader [13]0.0450.1210.0760.0070.0790.0780.068
G.Shader(reproduced) [13]0.0450.1480.0880.0120.1110.0850.099
Ours0.0450.1560.0730.0060.0990.0760.075
" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.292, + 0.115, + 0.733, + 0.13 + ], + "angle": 0, + "content": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splitting" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "image", + "bbox": [ + 0.273, + 0.159, + 0.747, + 0.671 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.679, + 0.788, + 0.818 + ], + "angle": 0, + "content": "Fig. 4: In evaluating test-set views from the Shiny Blender dataset, we compared the performance of 3DGS [17], GaussianShader [13], and our work 3iGS. The standard 3DGS method generally yields the least satisfactory renderings, with images often appearing blurry in areas of specular reflection. GaussianShader shows a slight improvement by incorporating the GGX BRDF model, leading to marginally better results in rendering specular regions. In contrast, 3iGS stands out by employing a general rendering function that predicts neural features of illumination field and BRDF instead of relying on physical parameters. This approach allows 3iGS to surpass existing methods significantly, capturing the intricate details within specular highlights with remarkable precision." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.41, + 0.129 + ], + "angle": 0, + "content": "ZJ. Tang, TJ. Cham" + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.144, + 0.788, + 0.189 + ], + "angle": 0, + "content": "Table 2: A quantitative comparisons (PSNR / SSIM / LPIPS) between 3DGS [17], GaussianShader [13], and our method on real world scenarios on Tanks and Temples Dataset [18]" + }, + { + "type": "table", + "bbox": [ + 0.219, + 0.208, + 0.785, + 0.376 + ], + "angle": 0, + "content": "
Tanks and Temples Dataset [18]
BarnCaterpillarFamilyIgnatiusTruckAvg.
PSNR↑
3DGS [17]29.1326.1734.8829.5028.3829.61
G.Shader(reproduced) [13]27.6725.2333.5228.2827.6128.46
Ours29.7327.0435.3630.0428.8230.20
SSIM↑
3DGS [17]0.9200.9320.9820.9730.9450.950
G.Shader(reproduced) [13]0.8970.9150.9770.9680.9350.938
Ours0.9230.9380.9830.9740.9470.953
LPIPS↓
3DGS [17]0.1130.0740.0230.0320.0590.060
G.Shader(reproduced) [13]0.1470.0980.0290.0390.0710.077
Ours0.1120.0710.0220.0310.0570.058
" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.433, + 0.35, + 0.447 + ], + "angle": 0, + "content": "5.3 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.462, + 0.788, + 0.793 + ], + "angle": 0, + "content": "In the comparisons detailed in Sec. 5.2, 3iGS demonstrates superior performance over the established baselines, delivering both quantitatively and qualitatively enhanced renderings in a majority of test cases on real time rendering rasterisation approaches. In the NeRF Synthetic dataset, 3iGS surpasses the prior 3DGS and GaussianShader. Although GaussianShader reportedly performs slightly better on the Shiny Blender dataset, we have included both reported and reproduced results based on the official code repository from the authors. We postulate that the Shiny Blender dataset scenes, which comprise single objects only, presents simpler geometries which facilitates an easier recovery of intrinsic material properties essential for rendering view-dependent effects. In addition, specular reflections in this dataset is primarily dominated by direct illumination from an external environment map. Thus GaussianShader which models direct lighting with a differentiable environment cube map performs well. However, when presented with a complex scene containing multiple objects, such as the NeRF Synthetic dataset shown in Fig. 1 with its intricate intra-scene interactions, GaussianShader struggles to accurately recover the physical rendering parameters. Furthermore these lighting scenarios are more complex due to indirect lighting. Therefore jointly modelling direct and indirect lighting using a continuous local incident field is crucial. NeRF based approaches reported above present competitive results. Yet, such methods are extremely slow to train, often requiring days, and are unable to perform real-time rendering needed for interactive applications." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Comparing across all methodologies, our 3iGS method presents an attractive and pragmatic alternative to achieve excellent rendering quality while balancing rendering speed, as discussed in Sec. 5.4." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.292, + 0.115, + 0.733, + 0.13 + ], + "angle": 0, + "content": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splitting" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "image", + "bbox": [ + 0.244, + 0.144, + 0.408, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.411, + 0.145, + 0.597, + 0.308 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.599, + 0.145, + 0.788, + 0.308 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.317, + 0.785, + 0.388 + ], + "angle": 0, + "content": "Fig. 5: In contrast to 3DGS [17] and GaussianShader [13], our 3iGS method uniquely identifies both the golden specular highlights and the reflections on the Medium Tom as seen in the plastic surface of the Floor Tom (top row). Our approach successfully captures the detailed specular highlights on every cymbal within the drum setup from the Blender dataset, as presented in [22]." 
+ }, + { + "type": "title", + "bbox": [ + 0.216, + 0.414, + 0.402, + 0.428 + ], + "angle": 0, + "content": "5.4 Ablation Studies" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.459, + 0.785, + 0.528 + ], + "angle": 0, + "content": "Table 3: An ablation study of our model on the Blender synthetic dataset. We experiment 3iGS under a variety of model parameters. In the first row, we directly an outgoing radiance field similar to NeRF based methods. The second row omits the prediction of a BRDF roughness parameter which encodes the viewing direction as IDE. Both experimental results are inferior compared to our complete model." + }, + { + "type": "table", + "bbox": [ + 0.297, + 0.54, + 0.709, + 0.601 + ], + "angle": 0, + "content": "
PSNRSSIMLPIPS
Ours (outgoing radiance field)32.380.9650.035
Ours (no roughness parameter, i.e IDE)33.260.9670.031
Ours (complete model)33.640.9700.029
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.629, + 0.785, + 0.748 + ], + "angle": 0, + "content": "In Tab. 3, we study the effectiveness of our design choices and parameters for 3iGS. In the first row, we use the Gaussian mean and interpolate features from the factorised tensors and predict the outgoing specular colours directly. In this scenario, we predict the outgoing radiance field similar to a NeRF like manner for specular colours. In the second row, we abandon the BRDF roughness parameters from the Gaussian features and apply a standard Fourier positional encoding of viewing direction. Both cases led to inferior renderings as compared to our complete model." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.788, + 0.84 + ], + "angle": 0, + "content": "In Tab. 4, we illustrate the training and rendering speed (test) of 3iGS against 3DGS and GaussianShader. We normalise the speed based on 3DGS. Our model performs competitively and achieve real time rendering speed although it is slower than 3DGS whereas GaussianShader performs much slower than the vanilla model. We attribute the efficient rendering speed to the use of factorised tensors for the illumination field." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.41, + 0.129 + ], + "angle": 0, + "content": "ZJ. Tang, TJ. Cham" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.145, + 0.788, + 0.201 + ], + "angle": 0, + "content": "Table 4: We evaluate the test and train speed of 3DGS [17] and GaussianShader [13] on a single Tesla V100 32Gb VRAM GPU with the original codebase and settings advocated by the authors. We then report the results normalised with these rendering speed of 3DGS." + }, + { + "type": "table", + "bbox": [ + 0.364, + 0.214, + 0.64, + 0.272 + ], + "angle": 0, + "content": "
TestTrain
3DGS1.0x1.0x
GaussianShader6.3x slower12.1x slower
Ours2.0x slower3.2x Slower
" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.299, + 0.525, + 0.315 + ], + "angle": 0, + "content": "6 Limitations and Weaknesses" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.331, + 0.788, + 0.455 + ], + "angle": 0, + "content": "3iGS inherits the main challenges of factorised tensors as [9]. Our model is limited to scenes that fit within a defined bounding box. Future works could explore this direction in warping unbounded scenes to fit a tensorial grid representation. Furthermore, 3iGS inherits the weaknesses of 3DGS; a large VRAM GPU is necessary to fit 3D Gaussians, and to evaluate the illumination field. A straightforward workaround is to reduce the number of Gaussians created by adding an upper bound on the number of produced Gaussians in the adaptive control step. Our work also inherits 3DGS's difficulty in producing accurate scene geometry." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.476, + 0.36, + 0.492 + ], + "angle": 0, + "content": "7 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.508, + 0.789, + 0.844 + ], + "angle": 0, + "content": "We introduce our work, Factorised Tensorial Illumination for 3D Gaussian Splatting (3iGS), to enhance the view-dependent effects in rendering Gaussian radiance fields. Our approach overcomes the constraints of previous methods, which relied on optimising an outgoing radiance field of independent Gaussians with Spherical Harmonics (SH) parameters. We illustrate that superior view-dependent effects in 3DGS can be attained by depicting an outgoing radiance field as a continuous illumination field and the Gaussian's BRDF characteristics in relation to this field. Distinct from other methods depending on oversimplified yet restrictive rendering equations that require prediction of physical attributes of scene surfaces for shading, our methodology proves to be more efficacious. Furthermore, we have shown that fast rendering speeds are attainable through the representation of an illumination field with factorised tensors. We demonstrated our claims across diverse datasets, from synthetic to real-world environments, and compared against prior art on both quantitative and qualitative metrics. We also evaluate the effectiveness of our model parameters and design choices through an ablation study. Finally we acknowledge the limitations of our research as a catalyst for future investigative directions. Our code is released here. Acknowledgement This study is supported under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding initiative, as well as cash and in-kind collaboration from the industry partner(s). The computational work for this article was partially performed on resources of the National Supercomputing Centre, Singapore." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.292, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splitting" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.145, + 0.323, + 0.16 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.177, + 0.786, + 0.232 + ], + "angle": 0, + "content": "1. Barron, J.T., Mildenhall, B., Tancik, M., Hedman, P., Martin-Brualla, R., Srinivasan, P.P.: Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. 
In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5855-5864 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.233, + 0.786, + 0.287 + ], + "angle": 0, + "content": "2. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mipnerf 360: Unbounded anti-aliased neural radiance fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5470-5479 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.288, + 0.786, + 0.328 + ], + "angle": 0, + "content": "3. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Zip-nerf: Anti-aliased grid-based neural radiance fields. arXiv preprint arXiv:2304.06706 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.329, + 0.786, + 0.37 + ], + "angle": 0, + "content": "4. Bi, S., Xu, Z., Srinivasan, P., Mildenhall, B., Sunkavalli, K., Hasan, M., Hold-Geoffroy, Y., Kriegman, D., Ramamoorthi, R.: Neural reflectance fields for appearance acquisition. arXiv preprint arXiv:2008.03824 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.371, + 0.786, + 0.412 + ], + "angle": 0, + "content": "5. Boss, M., Braun, R., Jampani, V., Barron, J.T., Liu, C., Lensch, H.: Nerd: Neural reflectance decomposition from image collections. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 12684-12694 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.413, + 0.786, + 0.453 + ], + "angle": 0, + "content": "6. Boss, M., Jampani, V., Braun, R., Liu, C., Barron, J., Lensch, H.: Neural-pil: Neural pre-integrated lighting for reflectance decomposition. Advances in Neural Information Processing Systems 34, 10691-10704 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.455, + 0.786, + 0.481 + ], + "angle": 0, + "content": "7. Burley, B., Studios, W.D.A.: Physically-based shading at disney. In: Acm Siggraph. vol. 2012, pp. 1-7. vol. 2012 (2012)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.482, + 0.786, + 0.522 + ], + "angle": 0, + "content": "8. Carroll, J.D., Chang, J.J.: Analysis of individual differences in multidimensional scaling via an n-way generalization of \"eckart-young\" decomposition. Psychometrika 35(3), 283-319 (1970)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.523, + 0.786, + 0.55 + ], + "angle": 0, + "content": "9. Chen, A., Xu, Z., Geiger, A., Yu, J., Su, H.: Tensorf: Tensorial radiance fields. In: European Conference on Computer Vision. pp. 333-350. Springer (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.551, + 0.786, + 0.578 + ], + "angle": 0, + "content": "0. Cook, R.L., Torrance, K.E.: A reflectance model for computer graphics. ACM Transactions on Graphics (ToG) 1(1), 7-24 (1982)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.579, + 0.786, + 0.619 + ], + "angle": 0, + "content": "1. Fridovich-Keil, S., Yu, A., Tancik, M., Chen, Q., Recht, B., Kanazawa, A.: Plenoxels: Radiance fields without neural networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5501-5510 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.62, + 0.786, + 0.646 + ], + "angle": 0, + "content": "2. Greger, G., Shirley, P., Hubbard, P.M., Greenberg, D.P.: The irradiance volume. IEEE Computer Graphics and Applications 18(2), 32-43 (1998)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.647, + 0.786, + 0.674 + ], + "angle": 0, + "content": "3. 
Jiang, Y., Tu, J., Liu, Y., Gao, X., Long, X., Wang, W., Ma, Y.: Gaussianshader: 3d gaussian splatting with shading functions for reflective surfaces (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.675, + 0.786, + 0.716 + ], + "angle": 0, + "content": "4. Jin, H., Liu, I., Xu, P., Zhang, X., Han, S., Bi, S., Zhou, X., Xu, Z., Su, H.: Tensoroir: Tensorial inverse rendering. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 165-174 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.717, + 0.786, + 0.744 + ], + "angle": 0, + "content": "5. Kajiya, J.T.: The rendering equation. In: Proceedings of the 13th annual conference on Computer graphics and interactive techniques. pp. 143-150 (1986)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.745, + 0.786, + 0.771 + ], + "angle": 0, + "content": "6. Kautz, J., Snyder, J., Sloan, P.P.J.: Fast arbitrary brdf shading for low-frequency lighting using spherical harmonics. Rendering Techniques 2(291-296), 1 (2002)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.772, + 0.786, + 0.812 + ], + "angle": 0, + "content": "7. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (ToG) 42(4), 1-14 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.813, + 0.786, + 0.84 + ], + "angle": 0, + "content": "8. Knapitsch, A., Park, J., Zhou, Q.Y., Koltun, V.: Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Transactions on Graphics 36(4) (2017)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.177, + 0.786, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.41, + 0.128 + ], + "angle": 0, + "content": "ZJ. Tang, TJ. Cham" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.147, + 0.787, + 0.203 + ], + "angle": 0, + "content": "19. Li, Z., Müller, T., Evans, A., Taylor, R.H., Unberath, M., Liu, M.Y., Lin, C.H.: Neuralangelo: High-fidelity neural surface reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8456-8465 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.204, + 0.787, + 0.245 + ], + "angle": 0, + "content": "20. Liang, R., Chen, H., Li, C., Chen, F., Panneer, S., Vijaykumar, N.: Envidr: Implicit differentiable renderer with neural environment lighting. arXiv preprint arXiv:2303.13022 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.246, + 0.787, + 0.3 + ], + "angle": 0, + "content": "21. Mahajan, D., Ramamoorthi, R., Curless, B.: A theory of frequency domain invariants: Spherical harmonic identities for brdf/lighting transfer and image consistency. IEEE transactions on pattern analysis and machine intelligence 30(2), 197-213 (2007)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.3, + 0.787, + 0.342 + ], + "angle": 0, + "content": "22. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. In: European Conference on Computer Vision. pp. 405-421 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.342, + 0.787, + 0.383 + ], + "angle": 0, + "content": "23. 
Müller, T., Evans, A., Schied, C., Keller, A.: Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG) 41(4), 1-15 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.383, + 0.787, + 0.438 + ], + "angle": 0, + "content": "24. Munkberg, J., Hasselgren, J., Shen, T., Gao, J., Chen, W., Evans, A., Müller, T., Fidler, S.: Extracting triangular 3d models, materials, and lighting from images. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8280-8290 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.439, + 0.787, + 0.481 + ], + "angle": 0, + "content": "25. Shi, Y., Wu, Y., Wu, C., Liu, X., Zhao, C., Feng, H., Liu, J., Zhang, L., Zhang, J., Zhou, B., Ding, E., Wang, J.: Gir: 3d gaussian inverse rendering for relightable scene factorization (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.481, + 0.787, + 0.535 + ], + "angle": 0, + "content": "26. Srinivasan, P.P., Deng, B., Zhang, X., Tancik, M., Mildenhall, B., Barron, J.T.: Nerv: Neural reflectance and visibility fields for relighting and view synthesis. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7495-7504 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.536, + 0.787, + 0.578 + ], + "angle": 0, + "content": "27. Sun, C., Sun, M., Chen, H.T.: Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5459-5469 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.578, + 0.787, + 0.604 + ], + "angle": 0, + "content": "28. Technologies, U.: Light probes, https://docsunity3d.com/Manual/LightProbes.html" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.605, + 0.787, + 0.631 + ], + "angle": 0, + "content": "29. Technologies, U.: Reflection probe, https://docs.unity3d.com/Manual/class-ReflectionProbe.html" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.632, + 0.787, + 0.687 + ], + "angle": 0, + "content": "30. Verbin, D., Hedman, P., Mildenhall, B., Zickler, T., Barron, J.T., Srinivasan, P.P.: Ref-nerf: Structured view-dependent appearance for neural radiance fields. In: 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 5481-5490. IEEE (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.688, + 0.787, + 0.73 + ], + "angle": 0, + "content": "31. Wang, P., Liu, L., Liu, Y., Theobalt, C., Komura, T., Wang, W.: Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.73, + 0.787, + 0.757 + ], + "angle": 0, + "content": "32. Zhang, K., Riegler, G., Snavely, N., Koltun, V.: Nerf++: Analyzing and improving neural radiance fields. 
arXiv preprint arXiv:2010.07492 (2020)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.757 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/3iGS_ Factorised Tensorial Illumination for 3D Gaussian Splatting/25df5a9d-fc43-4ff8-b4e4-a2a9b9e269ba_origin.pdf b/2024/3iGS_ Factorised Tensorial Illumination for 3D Gaussian Splatting/25df5a9d-fc43-4ff8-b4e4-a2a9b9e269ba_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..bd016cf61ece2b2ba7c5841258bac850bde4d52e --- /dev/null +++ b/2024/3iGS_ Factorised Tensorial Illumination for 3D Gaussian Splatting/25df5a9d-fc43-4ff8-b4e4-a2a9b9e269ba_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c26b6f1e38f877f077b2e18c45b79ada093a2f6fc59033aa5cc1d5b51d46d73c +size 7427446 diff --git a/2024/3iGS_ Factorised Tensorial Illumination for 3D Gaussian Splatting/full.md b/2024/3iGS_ Factorised Tensorial Illumination for 3D Gaussian Splatting/full.md new file mode 100644 index 0000000000000000000000000000000000000000..6c52d87f6e6f8de700a165c215b5677e175e342e --- /dev/null +++ b/2024/3iGS_ Factorised Tensorial Illumination for 3D Gaussian Splatting/full.md @@ -0,0 +1,277 @@ +# 3iGS: Factorised Tensorial Illumination for 3D Gaussian Splatting + +Zhe Jun Tang1 and Tat-Jen Cham2 + +$^{1}$ S-Lab, Nanyang Technological University + $^{2}$ College of Computing & Data Science, Nanyang Technological University {zhejun001} at {e.ntu.edu.sg} + +Abstract. The use of 3D Gaussians as representation of radiance fields has enabled high quality novel view synthesis at real-time rendering speed. However, the choice of optimising the outgoing radiance of each Gaussian independently as spherical harmonics results in unsatisfactory view dependent effects. In response to these limitations, our work, Factorised Tensorial Illumination for 3D Gaussian Splatting, or 3iGS, improves upon 3D Gaussian Splatting (3DGS) rendering quality. Instead of optimising a single outgoing radiance parameter, 3iGS enhances 3DGS view-dependent effects by expressing the outgoing radiance as a function of a local illumination field and Bidirectional Reflectance Distribution Function (BRDF) features. We optimise a continuous incident illumination field through a Tensorial Factorisation representation, while separately fine-tuning the BRDF features of each 3D Gaussian relative to this illumination field. Our methodology significantly enhances the rendering quality of specular view-dependent effects of 3DGS, while maintaining rapid training and rendering speeds. + +Keywords: Gaussian Splatting $\cdot$ Neural Radiance Field $\cdot$ Novel View Synthesis + +# 1 Introduction + +3D Gaussian Splatting (3DGS) has emerged as the standard method for representing 3D objects and scenes, trained from images, to render photorealistic novel views. Unlike the other popular method of Neural Radiance Field (NeRF) [22], which models a scene as an implicit continuous function, 3DGS represents surfaces with independent 3D Gaussians of different opacities, anisotropic covariances, and spherical harmonic coefficients. To render a pixel's colour, a fast, tile-based rasteriser performs alpha blending of anisotropic Gaussian splats, sorted in accordance with the visibility order. + +Although 3DGS shows promising performance in synthesising novel views of a scene at real-time rendering speeds, its renderings fall short in more challenging scenarios that involve complex, view-dependent surface effects. 
When observing images with reflective and specular surfaces, the changes in surface colour across viewing angles remain consistent, rather than exhibiting the complex variations + +![](images/0f403304dd76c3b34f1f7ff28c666f61dbe5538e0bec348337db4ec1f3e8b8a6.jpg) +Fig. 1: We present test renderings from the "Drums" scene within the blender dataset [22], comparing our technique against Gaussian Splatting (3DGS) [17] and the ground truth (G.T). As the perspective shifts around the scene, the colour of the Floor Tom's top changes from translucent to reflective, showcasing intricate effects that depend on the viewpoint. These effects result from the specular reflection of incoming light and the reflections within the scene from elements like the Cymbals. Contrary to 3DGS, which struggles to capture these complex variations in light reflection, our method, 3iGS, aligns more accurately with the ground truth. + +in reflections observed in the dataset shown in Fig. 1. A logical solution is to adopt the strategy of Physically Based Rendering (PBR), which involves explicitly modeling the surface characteristics and performing ray marching from surfaces to calculate illumination effects. As part of the process, the Bidirectional Reflectance Distribution Function (BRDF) of surfaces are predicted and a shading function is applied to simulate view-dependent effects [7,10]. Nonetheless, accurately determining these physical properties is an ill-posed challenge, making it difficult to infer and model all the intricate rendering effects correctly. + +In this paper, we draw inspiration from graphics engines that utilise illumination volumes or light probes that summarise illumination information directed towards a surface. These methods compute illumination either directly from the local illumination volume surrounding the surface [12] or from the nearest light probes [28], rather than sampling numerous outward rays from the surface's upper hemisphere. Such approaches allow fast rendering speed at run time, as illumination information is pre-calculated and stored in the volumes or light probes. + +Our work, named Factorised Tensorial Illumination for Gaussian Splatting (3iGS), enhances 3DGS rendering quality. We introduce a continuous local illumination field of 3D Gaussians represented by compact factorised tensors for fast evaluation. The means of the 3D Gaussians serve as the input to these factorised tensors to calculate illumination features. Subsequently, each 3D Gaussian is refined through an optimisation of its mean, opacity, anisotropic covariance, diffused colour, and BRDF features. A neural renderer then maps the incident + +illumination neural field, Gaussian BRDF attributes, and viewing angle to the Gaussian's specular colour. Overall, our approach represents a Gaussian's outgoing radiance as a function of both a continuous local illumination field and the individual Gaussian's BRDF attributes relative to it. This is opposed to the conventional optimisation of the 3D Gaussians' outgoing radiance in isolation, without accounting for the effects of adjacent Gaussians or scene lighting conditions. + +3iGS significantly enhances the accuracy of 3DGS, offering clear advantages in scenes with reflective surfaces where surface colours change dramatically across viewing angles as shown in Fig. 1. In synthetic datasets, such as the NeRF Blender dataset and the Shiny Blender dataset, 3iGS surpasses 3DGS both quantitatively and qualitatively. 
Similarly, 3iGS demonstrates superior performance over 3DGS in real-world scenarios on the Tanks and Temples dataset. In summary our technical contributions are: + +1. a method to optimise the outgoing radiance as an incident continuous illumination field and Gaussian BRDF features with a neural renderer; +2. an approach to model a continuous illumination field with Tensorial Factorisation for compactness and fast evaluation; and +3. superior performance in rendering quality over baseline 3D Gaussian Splatting while maintaining real time performance. + +# 2 Related Work + +Our work falls into the category of learning scene representation from multi-view input images. Here we review prior work on NeRF-based representations and Gaussian splatting. We also discuss other relevant topics pertaining to inverse rendering which aims to recover scene geometry, material properties, and scene lighting conditions in Sec. 2.1. + +Scene Representations for View Synthesis - One of the pioneering neural rendering techniques called Neural Radiance Fields (NeRF) [22] has achieved remarkable results in novel view synthesis from multi-view images. By sampling points along rays traced from the camera into the scene, NeRF reconstructs a scene as a continuous field of outgoing radiance. The technique employs volumetric rendering to determine the colour of each pixel. This method has inspired numerous developments of other scene representations [1,2,22,31,32]. However, the vanilla NeRF, which encodes the entire scene representation into a set of MLPs, requires multiple queries of points along rays during training and inference. This massively slows down the speed required for real time rendering. To address this, other neural scene representation techniques apply hash encoding [19, 23], triplanes or factorised tensors [9, 14], and gridding [3, 11, 27] to accelerate training and inference speeds. + +Tensorial Factorisation - In TensoRF [9], a feature grid can be represented as a 4D tensor of which the first 3 represents the XYZ spatial grid and the last represents the feature channel dimension. To model a radiance field with grid + +representation, [9] propose an extension of CANDECOMP/PARAFAC (CP)-Decomposition [8] to Vector-Matrix (VM) decomposition: + +$$ +\begin{array}{l} \mathcal {G} _ {c} = \sum_ {r = 1} ^ {R _ {c}} \mathbf {v} _ {\mathbf {c}, \mathbf {r}} ^ {\mathbf {X}} \circ \mathbf {M} _ {\mathbf {c}, \mathbf {r}} ^ {\mathbf {Y Z}} \circ \mathbf {b} _ {\mathbf {3 r} - \mathbf {2}} + \mathbf {v} _ {\mathbf {c}, \mathbf {r}} ^ {\mathbf {Y}} \circ \mathbf {M} _ {\mathbf {c}, \mathbf {r}} ^ {\mathbf {X Z}} \circ \mathbf {b} _ {\mathbf {3 r} - \mathbf {1}} + \mathbf {v} _ {\mathbf {c}, \mathbf {r}} ^ {\mathbf {Z}} \circ \mathbf {M} _ {\mathbf {c}, \mathbf {r}} ^ {\mathbf {X Y}} \circ \mathbf {b} _ {\mathbf {3 r}} \\ = \sum_ {r = 1} ^ {R _ {c}} \mathbf {A} _ {C, r} ^ {X} \circ \mathbf {b} _ {3 r - 2} + \mathbf {A} _ {C, r} ^ {Y} \circ \mathbf {b} _ {3 r - 1} + \mathbf {A} _ {C, r} ^ {Z} \circ \mathbf {b} _ {3 r} \\ \end{array} +$$ + +In Eq. (1), the inputs $\mathbf{v}$ and $\mathbf{M}$ corresponds to XYZ-mode vector and matrix factorisation and $\mathbf{b}$ denotes the appearance feature mode vectors. Separately, $\mathcal{G}_c$ and $R_{C}$ refers to the outgoing radiance and the colour feature channels. + +Gaussian Splatting - As opposed to ray marching, 3D Gaussian Splatting is a recent method for rendering scenes via rasterisation. 
Gaussian Splatting - As opposed to ray marching, 3D Gaussian Splatting is a recent method for rendering scenes via rasterisation. To begin, Gaussians are fitted to a point cloud that is either initialised as a set of random points or bootstrapped, for free, with the sparse point cloud produced during the SfM process [17]. The Gaussians of the point cloud are defined by the function:

$$
g(\mathbf{x} \mid \mu, \boldsymbol{\Sigma}) = e^{-\frac{1}{2}(\mathbf{x} - \mu)^{T} \boldsymbol{\Sigma}^{-1}(\mathbf{x} - \mu)} \tag{2}
$$

where each point $\mathbf{x}$ is centred at the mean $\mu \in \mathbb{R}^3$ with an anisotropic covariance matrix $\boldsymbol{\Sigma} \in \mathbb{R}^{3 \times 3}$. The mean of a Gaussian is parameterised by the coordinates $\mu = (\mu_x, \mu_y, \mu_z)$, and its extent is governed by the full 3D covariance matrix $\boldsymbol{\Sigma}$. As discussed in [17], directly optimising $\boldsymbol{\Sigma}$ is problematic, given the difficulty of constraining it to a valid positive semi-definite matrix during the optimisation process. Instead, to derive $\boldsymbol{\Sigma}$, a scaling matrix $\mathbf{S}$ and a rotation matrix $\mathbf{R}$ are learned during optimisation to scale the Gaussians:

$$
\boldsymbol{\Sigma} = \mathbf{R}\mathbf{S}\mathbf{S}^{\mathrm{T}}\mathbf{R}^{\mathrm{T}} \tag{3}
$$

With a viewing transformation $\mathbf{W}$ and an affine approximation of the projective transformation $\mathbf{J}$, the covariance matrix is then expressed in camera coordinates as:

$$
\boldsymbol{\Sigma}^{\prime} = \mathbf{J}\mathbf{W}\boldsymbol{\Sigma}\mathbf{W}^{\mathrm{T}}\mathbf{J}^{\mathrm{T}} \tag{4}
$$

Furthermore, each Gaussian is coloured via a set of Spherical Harmonics (SH) coefficients that represent its view-dependent colour $c_i$, also known as its radiance, multiplied by its opacity $\alpha$. To colour a pixel $\mathbf{u}$ as $\hat{\mathbf{C}}$, alpha blending of $N$ ordered Gaussians is applied:

$$
\hat{\mathbf{C}} = \sum_{i \in N} T_{i}\, g_{i}\!\left(\mathbf{u} \mid \mu^{\prime}, \boldsymbol{\Sigma}^{\prime}\right) \alpha_{i}\, \mathbf{c}_{i}, \quad T_{i} = \prod_{j=1}^{i-1} \left(1 - g_{j}\!\left(\mathbf{u} \mid \mu^{\prime}, \boldsymbol{\Sigma}^{\prime}\right) \alpha_{j}\right) \tag{5}
$$
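To make Eqs. (3) and (5) concrete, here is a minimal sketch, assuming per-Gaussian scale factors and rotation matrices and a single pixel's depth-sorted splats. The function names are hypothetical, and the real 3DGS rasteriser performs this compositing tile-wise in CUDA rather than per pixel in PyTorch.

```python
import torch

def covariance_from_scaling_rotation(S_diag, R):
    """Eq. (3): Sigma = R S S^T R^T, positive semi-definite by construction.

    S_diag: (N, 3) per-Gaussian scale factors.
    R:      (N, 3, 3) rotation matrices (in practice derived from learned
            quaternions).
    """
    S = torch.diag_embed(S_diag)             # (N, 3, 3) diagonal scaling
    M = R @ S
    return M @ M.transpose(1, 2)             # R S S^T R^T

def composite_pixel(g, alpha, colour):
    """Eq. (5): front-to-back alpha blending at a single pixel u.

    g:      (N,) densities g_i(u | mu', Sigma'), sorted front to back.
    alpha:  (N,) per-Gaussian opacities.
    colour: (N, 3) view-dependent colours c_i.
    """
    w = g * alpha                            # effective opacity per splat
    # T_i = prod_{j < i} (1 - w_j): transmittance reaching splat i.
    T = torch.cumprod(torch.cat([w.new_ones(1), 1.0 - w[:-1]]), dim=0)
    return (T * w).unsqueeze(-1).mul(colour).sum(dim=0)   # (3,) pixel colour
```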
# 2.1 Preliminaries

As discussed, the direct optimisation of spherical harmonics to describe the outgoing radiance of individual Gaussians in 3DGS results in poor view-dependent effects. A crucial reason is that these Gaussians do not fully model scene properties [13] and thus fail to capture specular effects, which change drastically across viewing angles.

Therefore, to account for specular highlights, it is beneficial to model the underlying properties of the scene, such as its BRDF and illumination effects. In conventional computer graphics, a rendering equation is commonly applied to simulate the effects of specular and diffused shading [15]. For instance, the rendering equation in Eq. (6) describes the outgoing radiance of a surface point:

$$
L_{o}(\mathbf{x}, \mathbf{v}) = \int_{\Omega} L_{i}(\mathbf{x}, \mathbf{l})\, f_{r}(\mathbf{l}, \mathbf{v})\, (\mathbf{l} \cdot \mathbf{n})\, d\mathbf{l} \tag{6}
$$

The radiance $L_o$ emitted from a surface point $\mathbf{x}$, when observed from a viewing direction $\mathbf{v}$, is defined in Eq. (6). An integral accumulates the contribution of incident light from each incident angle $\mathbf{l}$ across the upper hemisphere $\Omega$ of $\mathbf{x}$. The function $f_r$ denotes the Bidirectional Reflectance Distribution Function (BRDF), describing how incident radiance at $\mathbf{x}$ is reflected towards the viewing direction $\mathbf{v}$. Lastly, the cosine term involving the normal vector $\mathbf{n}$ ensures energy conservation.

From a signal processing perspective, an alternative to Eq. (6) is expressed more generally in terms of spherical harmonic convolution [16,21]:

$$
B_{lm} = \Lambda_{l}\, \rho_{l}\, L_{lm} \tag{7}
$$

In Eq. (7), $B_{lm}$ defines the outgoing reflected light as the product of the BRDF filter $\rho_l$, the spherical harmonic coefficients of the lighting signal $L_{lm}$, and the normalisation constant $\Lambda_l$.

Some studies [13,25] enhance 3DGS by expressing the BRDF $f_r$ as a Cook-Torrance microfacet model [10] or the GGX Trowbridge-Reitz model [7]. In these approaches, physical attributes, including roughness $r$, albedo $a$, metalness $m$, and the normal vector $\mathbf{n}$, are predicted and used in Eq. (6). Although these modifications marginally improve rendering quality metrics, they fail to accurately produce high-quality, view-dependent effects. This shortfall primarily stems from relying on estimated parameters for physical rendering within a simplified rendering equation [20]. Furthermore, these parameters are inherently challenging to estimate accurately, due to the ill-posed nature of inverse rendering from multi-view images. Although numerous works [4-6,14,20,26] have also achieved success by exploring a neural representation of the rendering equation, they either require prior information, such as known lighting conditions, or a model pre-trained on a realistic dataset with known BRDF parameters. Furthermore, these techniques are built on ray-tracing methods like NeRF. The work closest to ours in the rasterisation and Gaussian Splatting setting is GaussianShader [13], which we compare against in Sec. 5.2.

# 3 Method

Instead of predicting the physical BRDF properties of materials in the scene, our goal is to express the outgoing radiance of a Gaussian through a more general expression of the BRDF and the incoming illumination as neural features. This idea is based on a generalised version of Eq. (7), where BRDF features modify an incoming illumination field without the need to decompose down to intrinsic material properties [21].

![](images/66926acba882902ca5c87775ec218e2fb80260c9e0daf408a4dc528a355dbb09.jpg)
Fig. 2: A visualisation of the 3iGS pipeline for rendering a single Gaussian's colour. We interpolate an incident illumination $L_i$ from the factorised tensorial illumination field $\mathcal{G}_l$ using the Gaussian mean $\pmb{x}_i$ as input. A neural network $\mathcal{F}$ maps the illumination $L_i$, the Gaussian BRDF features $\rho_i$, and the viewing direction $\omega_o$ to the Gaussian's specular colour $c_s$. The diffused colour $c_d$ and specular colour $c_s$ are then added linearly to produce the final outgoing radiance $c$.

Specifically, for each 3D Gaussian in the scene, the outgoing radiance is formed by:

$$
\mathbf{c}(\omega_{o}) = \mathbf{c}_{\mathbf{d}} + \mathbf{c}_{\mathbf{s}}(\omega_{o}) \tag{8}
$$

For a viewing angle $\omega_o$, a Gaussian is coloured by its constant diffused colour $c_d$ and a view-dependent specular colour $c_s$. At each Gaussian $i$, a small neural network $\mathcal{F}$ maps the Gaussian BRDF features $\rho_i$ and the incoming illumination $L_i$ to its specular colour viewed at an angle $\omega_o$:

$$
\mathcal{F}: \left\{\rho_{i}, L_{i}, \omega_{o}\right\} \mapsto \mathbf{c}_{\mathbf{s}} \tag{9}
$$
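As a minimal sketch of the shading model in Eqs. (8) and (9): a small MLP maps the concatenated BRDF features, illumination features, and encoded viewing direction to a specular colour, which is added to the diffused colour. The class name, layer widths, and the illumination feature dimension are assumptions for illustration, not the paper's exact architecture; the 48-dimensional BRDF features follow the repurposed $16 \times 3$ SH channels mentioned in Sec. 5.2, and the raw 3D direction stands in for the IDE encoding of Sec. 3.3.

```python
import torch
import torch.nn as nn

class SpecularRenderer(nn.Module):
    """Hypothetical neural renderer F: {rho_i, L_i, omega_o} -> c_s (Eq. 9)."""

    def __init__(self, brdf_dim=48, illum_dim=27, dir_dim=3, hidden=64):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(brdf_dim + illum_dim + dir_dim, hidden),
            nn.ReLU(inplace=True),
            nn.Linear(hidden, hidden),
            nn.ReLU(inplace=True),
            nn.Linear(hidden, 3),    # RGB specular colour c_s
        )

    def forward(self, rho, L, view_dir, c_d):
        # rho:      (N, brdf_dim)  per-Gaussian BRDF features
        # L:        (N, illum_dim) illumination features from the field
        # view_dir: (N, dir_dim)   encoded viewing direction omega_o
        # c_d:      (N, 3)         per-Gaussian diffused colour
        c_s = self.mlp(torch.cat([rho, L, view_dir], dim=-1))
        return c_d + c_s             # Eq. (8): c = c_d + c_s(omega_o)
```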
# 3.1 Illumination Grid by Tensorial Factorisation

Our work is largely inspired by conventional computer graphics engines for the fast rendering of scenes and objects in video games. The fundamental rendering equation highlights the role of multi-bounce lighting in achieving indirect illumination, wherein light bounces off one surface to illuminate another. However, ray tracing from each Gaussian surface into the scene is notably resource-intensive, undermining the goal of fast rendering in 3D graphics systems. To facilitate real-time rendering, one strategy involves baking techniques that employ irradiance volumes [12]. This method segments a scene into distinct volumes and pre-calculates irradiance data offline. An alternative strategy places light probes [28,29] throughout the scene to gather lighting information at specific spatial locations. When rendering the colour of a surface, the system quickly interpolates lighting information from the nearest light probes, ensuring swift rendering times.

To maintain the fast rendering speed of 3DGS, our work learns the illumination features of a Gaussian with a continuous grid-based illumination field:

$$
\mathcal{G}_{l}: \left\{\mathbf{x}_{\mathbf{i}}\right\} \mapsto L_{i} \tag{10}
$$

Given a Gaussian's mean coordinate $\mathbf{x_i}$, we compute its illumination features $L_i$ by interpolating from a learnable grid representation. The illumination tensor $\mathcal{G}_l$ is formulated similarly to TensoRF [9] by a vector-matrix spatial factorisation as follows:

$$
\mathcal{G}_{l} = \sum_{r=1}^{R_{L}} \mathbf{A}_{L,r}^{X} \circ \mathbf{b}_{3r-2} + \mathbf{A}_{L,r}^{Y} \circ \mathbf{b}_{3r-1} + \mathbf{A}_{L,r}^{Z} \circ \mathbf{b}_{3r} \tag{11}
$$

In Eq. (11), $R_L$ represents the number of feature channels of the illumination components, with $\mathbf{A}$ as the feature tensors and $\mathbf{b}$ as the feature vectors. The illumination feature grid is learned end to end in the optimisation process, jointly with each Gaussian in the scene. Unlike 3DGS, where each Gaussian is optimised independently, the illumination field is modelled as a continuous grid function, with a Gaussian's mean serving as the input to query the factorised tensor grid via interpolation. The inclusion of this continuous incoming illumination field directed at each Gaussian is the core component in producing accurate view-dependent effects, as we show in the ablation study of Sec. 5.4. Furthermore, formulating this field as factorised tensors allows the network to achieve fast rendering speeds. Our illumination field is coarse, using 87.5% fewer voxels than TensoRF on synthetic datasets. This compact representation also has a low memory footprint compared to the number of optimised Gaussians, which is often an order of magnitude or more higher. We refer readers to [9] for a comprehensive overview of how the tensors are factorised and interpolated.
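Concretely, Eq. (10) amounts to normalising each Gaussian mean into the grid's bounding box and interpolating from the factorised tensors, for instance reusing the hypothetical query_vm_grid helper sketched after Eq. (1):

```python
import torch

def illumination_features(means, vecs, mats, basis, aabb_min, aabb_max):
    """Sketch of G_l: {x_i} -> L_i (Eq. 10) over all Gaussian means.

    means: (N, 3) world-space Gaussian means x_i.
    aabb_min, aabb_max: (3,) corners of the illumination grid's bounding
    box (which is shrunk to fit the Gaussians midway through training).
    """
    # Normalise means into the [-1, 1]^3 cube expected by grid sampling.
    xyz = 2.0 * (means - aabb_min) / (aabb_max - aabb_min) - 1.0
    L = query_vm_grid(xyz, vecs, mats, basis)   # (C, N), Eq. (11) factors
    return L.T                                  # (N, C): one L_i per Gaussian
```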
# 3.2 3D Gaussian Features

In 3DGS [17], Gaussians are optimised with a set of parameters: 3D position, opacity $\alpha$, anisotropic covariance, and spherical harmonics coefficients. In our work, instead of optimising spherical harmonics as an outgoing radiance, 3iGS characterises each Gaussian with a diffused colour and learnable BRDF features. Unlike [13,25], we do not strictly enforce the physically interpretable properties commonly used in shading techniques. As mentioned above, such properties are often simplified, too ill-posed to be decomposed individually, and insufficient to encompass all complex rendering effects [20]. Rather, we loosely follow Eq. (7) and treat the BRDF feature components as a set of weights that alter the incoming illumination field. Given the continuous illumination field obtained from Eq. (11), a Gaussian's BRDF features are conditionally optimised against it. This is in contrast to 3DGS, where each Gaussian's outgoing radiance is individually optimised without modelling the interdependencies that should arise from a shared scene illumination, resulting in poor view-dependent effects.

# 3.3 Shading Gaussians

Following Eq. (9), we shade each Gaussian by mapping its viewing direction, encoded with Integrated Directional Encoding (IDE) [30], its Gaussian features (obtained in Sec. 3.2), and its illumination features to the specular colour output. We linearly add the diffused and specular colours to create its radiance as per Eq. (8). To render the final scene, we follow the rasterisation pipeline proposed in the original 3DGS work.

# 4 Optimisation

In Sec. 3, we described the components necessary to model a scene with Gaussians and render it via rasterisation. To improve training stability and enhance the final rendering quality, we train the model with only the diffused colour for the first 3,000 iterations; specular colours are then added to the Gaussians as in Eq. (8).

While training the tensorial illumination grid, an initial boundary that encapsulates the scene bounding box is defined. Midway through training, we shrink the illumination grid to fit the Gaussians and resample the grid with the same number of voxels. We adopt the same adaptive control of Gaussians as 3DGS [17] to limit the number of Gaussians and the units per volume. We train our model with the same loss function as 3DGS for a fair evaluation:

$$
\mathcal{L} = (1 - \lambda)\mathcal{L}_{1} + \lambda \mathcal{L}_{\mathrm{D\text{-}SSIM}} \tag{12}
$$

where we combine the $\mathcal{L}_1$ term with a D-SSIM term, with $\lambda$ set to 0.2.
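A minimal sketch of Eq. (12), assuming a differentiable SSIM implementation (for example, the one bundled with the 3DGS codebase) is available as ssim_fn:

```python
import torch

def training_loss(render, gt, ssim_fn, lam=0.2):
    """Eq. (12): L = (1 - lambda) * L1 + lambda * D-SSIM, lambda = 0.2.

    render, gt: (3, H, W) rendered and ground-truth images.
    ssim_fn:    assumed differentiable SSIM returning a scalar in [0, 1].
    """
    l1 = torch.abs(render - gt).mean()
    d_ssim = 1.0 - ssim_fn(render, gt)     # D-SSIM term
    return (1.0 - lam) * l1 + lam * d_ssim
```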
# 5 Experiments and Results

# 5.1 Datasets

Synthetic scenes - We show experimental results of 3iGS on the Blender dataset released in [22]. This dataset contains challenging scenes of complex geometries with realistic non-Lambertian materials. Similarly, we evaluate our model on the Shiny Blender dataset presented in [30]. Unlike the Blender dataset, Shiny Blender contains a single object with simple geometry in each scene, with more glossy effects.

Real-world complex scenes - To prove the effectiveness of our model on real-world scenes, we evaluate our renderings on the Tanks and Temples dataset [18]. This dataset is obtained from video sequences of real-world objects and environments.

# 5.2 Comparisons

To evaluate our model, we compare against methods based on ray tracing, like NeRF, as well as rasterisation methods based on Gaussian Splatting. Among prior work, 3DGS and GaussianShader are the closest methods offering real-time inference speed, and we mainly compare against them. For the qualitative result figures, we re-ran the experiments of 3DGS [17] and GaussianShader [13] using their original repository code under the settings specified by the authors. Ray-Tracing Methods such as [20,22,30] represent a scene as a radiance field using MLPs. Points are sampled along rays marched from the camera into the scene and queried with the MLPs to obtain opacity and radiance values; volume rendering is then performed to obtain the final pixel colour. Rasterisation Methods such as Gaussian Splatting (3DGS) [17] and GaussianShader [13] apply a rasterisation pipeline as opposed to ray tracing. These models represent a scene as Gaussians with radiance properties based on Spherical Harmonics. In [13], 3DGS is extended by modelling a scene with additional material characteristics to which a shading function is applied, as opposed to ours, which uses an MLP as a neural renderer. Furthermore, [13] shades Gaussians with a global differentiable environment light stored in cube maps, and optimises independent Gaussians with spherical harmonic-based colour for unaccounted illumination. In our work, we represent incident illumination locally with grid-based tensors and optimise Gaussian BRDF features relative to this field.

For a fair comparison, 3iGS is trained with the same loss function as 3DGS, as described in Sec. 4, and for the same number of 30,000 iterations. We repurposed the $16 \times 3$ SH coefficients in 3DGS as BRDF feature channels and added 4 additional parameters of base colour and roughness for the IDE view-directional encoding. The tensorial illumination field is set at a coarse resolution of $150^3$ voxels.

![](images/8ce7991025814077ad57694b81dacd1ef8d5c33fe53f0afd139ae6e5d478ad4d.jpg)

![](images/8116bb6874e0729fb74de9f0e54aba1ba206c1fa2f1ae40a612f86ff2b3a7acf.jpg)

![](images/79f8cb7a84722ad8eb637b31f071f3efceb07cbc03aa12dbb3a95131eea56e4e.jpg)

![](images/56ba062ea5895beeba6c80f78ff50f889696d1a18399ede7980b1fc8cdba65e3.jpg)

![](images/b3dc5171bbf61f0175eb762d82e642437a838f3f710262ecda4503158f3d9d20.jpg)
3DGS

![](images/bc7f0d57516468cc3a72d643e0a2e177f5ec85b9e73b9f702be2d0a094a632f2.jpg)
G.Shader

![](images/30920ddd5356e96e1cae80fbd12b008f55b92f1e9abf323b713b9dc4e7a7ceb0.jpg)
Ours

![](images/8a1855c886cba3ea446e7412cf9d05c3ad1660491f57e037471de92349fcaa68.jpg)
Ground Truth

Fig. 3: Comparisons of test-set views of real-world scenes. 3iGS enhances 3DGS renderings by producing clearer view-dependent effects, as shown.

Table 1: Our approach demonstrates superior quantitative performance over current methods when tested on synthetic datasets. Specifically, on the NeRF Synthetic dataset, our method surpasses all competitors across the image quality metrics (PSNR/SSIM/LPIPS). On the Shiny Blender dataset, 3iGS matches the performance of existing rasterisation techniques in terms of PSNR and SSIM but surpasses them in LPIPS for the majority of scenes. We encourage readers to examine the accompanying figure showcasing renderings of the Shiny Blender scenes, where our method attains enhanced qualitative outcomes. Best results among real-time rendering methods are in bold.
NeRF Synthetic [22]

PSNR↑

| Method | Chair | Drums | Lego | Mic | Mats. | Ship | Hotdog | Ficus | Avg. |
|---|---|---|---|---|---|---|---|---|---|
| NeRF [22] | 33.00 | 25.01 | 32.54 | 32.91 | 29.62 | 28.65 | 36.18 | 30.13 | 31.01 |
| Ref-NeRF [30] | 33.98 | 25.43 | 35.10 | 33.65 | 27.10 | 29.24 | 37.04 | 28.74 | 31.29 |
| ENVIDR [20] | 31.22 | 22.99 | 29.55 | 32.17 | 29.52 | 21.57 | 31.44 | 26.60 | 28.13 |
| 3DGS [17] | 35.82 | 26.17 | 35.69 | 35.34 | 30.00 | 30.87 | 37.67 | 34.83 | 33.30 |
| G.Shader [13] | 35.83 | 26.36 | 35.87 | 35.23 | **30.07** | 30.82 | 37.85 | 34.97 | 33.38 |
| G.Shader (reproduced) [13] | 33.70 | 25.50 | 32.99 | 34.07 | 28.87 | 28.37 | 35.29 | 33.05 | 31.48 |
| Ours | **35.90** | **26.75** | **35.94** | **36.01** | 30.00 | **31.12** | **37.98** | **35.40** | **33.64** |

SSIM↑

| Method | Chair | Drums | Lego | Mic | Mats. | Ship | Hotdog | Ficus | Avg. |
|---|---|---|---|---|---|---|---|---|---|
| NeRF [22] | 0.967 | 0.925 | 0.961 | 0.980 | 0.949 | 0.856 | 0.974 | 0.964 | 0.947 |
| Ref-NeRF [30] | 0.974 | 0.929 | 0.975 | 0.983 | 0.921 | 0.864 | 0.979 | 0.954 | 0.947 |
| ENVIDR [20] | 0.976 | 0.930 | 0.961 | 0.984 | 0.968 | 0.855 | 0.963 | 0.987 | 0.956 |
| 3DGS [17] | **0.987** | 0.954 | **0.983** | 0.991 | 0.960 | 0.907 | 0.985 | 0.987 | 0.969 |
| G.Shader [13] | **0.987** | 0.949 | **0.983** | 0.991 | 0.960 | 0.905 | 0.985 | 0.985 | 0.968 |
| G.Shader (reproduced) [13] | 0.980 | 0.945 | 0.972 | 0.989 | 0.951 | 0.881 | 0.980 | 0.982 | 0.960 |
| Ours | **0.987** | **0.955** | **0.983** | **0.992** | **0.961** | **0.908** | **0.986** | **0.989** | **0.970** |

LPIPS↓

| Method | Chair | Drums | Lego | Mic | Mats. | Ship | Hotdog | Ficus | Avg. |
|---|---|---|---|---|---|---|---|---|---|
| NeRF [22] | 0.046 | 0.091 | 0.050 | 0.028 | 0.063 | 0.206 | 0.121 | 0.044 | 0.081 |
| Ref-NeRF [30] | 0.029 | 0.073 | 0.025 | 0.018 | 0.078 | 0.158 | 0.028 | 0.056 | 0.058 |
| ENVIDR [20] | 0.031 | 0.080 | 0.054 | 0.021 | 0.045 | 0.228 | 0.072 | 0.010 | 0.067 |
| 3DGS [17] | **0.012** | 0.037 | 0.016 | 0.006 | 0.034 | 0.106 | 0.020 | 0.012 | 0.030 |
| G.Shader [13] | **0.012** | 0.040 | **0.014** | 0.006 | **0.033** | **0.098** | **0.019** | 0.013 | **0.029** |
| G.Shader (reproduced) [13] | 0.019 | 0.045 | 0.026 | 0.009 | 0.046 | 0.148 | 0.029 | 0.017 | 0.042 |
| Ours | **0.012** | **0.036** | 0.015 | **0.005** | 0.034 | 0.102 | **0.019** | **0.010** | **0.029** |
Shiny Blender [30]

PSNR↑

| Method | Car | Ball | Helmet | Teapot | Toaster | Coffee | Avg. |
|---|---|---|---|---|---|---|---|
| NVDiffRec [24] | 27.98 | 21.77 | 26.97 | 40.44 | 24.31 | 30.74 | 28.70 |
| Ref-NeRF [30] | 30.41 | 29.14 | 29.92 | 45.19 | 25.29 | 33.99 | 32.32 |
| ENVIDR [20] | 28.46 | 38.89 | 32.73 | 41.59 | 26.11 | 29.48 | 32.88 |
| 3DGS [17] | 27.24 | 27.69 | 28.32 | 45.68 | 20.99 | 32.32 | 30.37 |
| G.Shader [13] | **27.90** | **30.98** | 28.32 | 45.86 | **26.21** | 32.39 | **31.94** |
| G.Shader (reproduced) [13] | 27.51 | 29.02 | **28.73** | 43.05 | 22.86 | 31.34 | 30.41 |
| Ours | 27.51 | 27.64 | 28.21 | **46.04** | 22.69 | **32.58** | 30.77 |

SSIM↑

| Method | Car | Ball | Helmet | Teapot | Toaster | Coffee | Avg. |
|---|---|---|---|---|---|---|---|
| NVDiffRec [24] | 0.963 | 0.858 | 0.951 | 0.996 | 0.928 | 0.973 | 0.945 |
| Ref-NeRF [30] | 0.949 | 0.956 | 0.955 | 0.995 | 0.910 | 0.972 | 0.956 |
| ENVIDR [20] | 0.961 | 0.991 | 0.980 | 0.996 | 0.939 | 0.949 | 0.969 |
| 3DGS [17] | 0.930 | 0.937 | 0.951 | 0.996 | 0.895 | 0.971 | 0.947 |
| G.Shader [13] | **0.931** | **0.965** | 0.950 | 0.996 | **0.929** | 0.971 | **0.957** |
| G.Shader (reproduced) [13] | 0.930 | 0.954 | **0.955** | 0.995 | 0.900 | 0.969 | 0.950 |
| Ours | 0.930 | 0.938 | 0.951 | **0.997** | 0.908 | **0.973** | 0.949 |

LPIPS↓

| Method | Car | Ball | Helmet | Teapot | Toaster | Coffee | Avg. |
|---|---|---|---|---|---|---|---|
| NVDiffRec [24] | 0.045 | 0.297 | 0.118 | 0.011 | 0.169 | 0.076 | 0.119 |
| Ref-NeRF [30] | 0.051 | 0.307 | 0.087 | 0.013 | 0.118 | 0.082 | 0.109 |
| ENVIDR [20] | 0.049 | 0.067 | 0.051 | 0.011 | 0.116 | 0.139 | 0.072 |
| 3DGS [17] | 0.047 | 0.161 | 0.079 | 0.007 | 0.126 | 0.078 | 0.083 |
| G.Shader [13] | **0.045** | **0.121** | 0.076 | 0.007 | **0.079** | 0.078 | **0.068** |
| G.Shader (reproduced) [13] | **0.045** | 0.148 | 0.088 | 0.012 | 0.111 | 0.085 | 0.099 |
| Ours | **0.045** | 0.156 | **0.073** | **0.006** | 0.099 | **0.076** | 0.075 |
![](images/96510cdb9df4a04f8a975529066e0e5d8b5e3ab590ff0943ead053b00ec4af6a.jpg)
Fig. 4: In evaluating test-set views from the Shiny Blender dataset, we compare the performance of 3DGS [17], GaussianShader [13], and our work, 3iGS. The standard 3DGS method generally yields the least satisfactory renderings, with images often appearing blurry in areas of specular reflection. GaussianShader shows a slight improvement by incorporating the GGX BRDF model, leading to marginally better results in specular regions. In contrast, 3iGS stands out by employing a general rendering function that predicts neural features of the illumination field and BRDF instead of relying on physical parameters. This approach allows 3iGS to surpass existing methods significantly, capturing the intricate details within specular highlights with remarkable precision.

Table 2: A quantitative comparison (PSNR/SSIM/LPIPS) between 3DGS [17], GaussianShader [13], and our method on real-world scenes from the Tanks and Temples dataset [18].
Tanks and Temples Dataset [18]

PSNR↑

| Method | Barn | Caterpillar | Family | Ignatius | Truck | Avg. |
|---|---|---|---|---|---|---|
| 3DGS [17] | 29.13 | 26.17 | 34.88 | 29.50 | 28.38 | 29.61 |
| G.Shader (reproduced) [13] | 27.67 | 25.23 | 33.52 | 28.28 | 27.61 | 28.46 |
| Ours | 29.73 | 27.04 | 35.36 | 30.04 | 28.82 | 30.20 |

SSIM↑

| Method | Barn | Caterpillar | Family | Ignatius | Truck | Avg. |
|---|---|---|---|---|---|---|
| 3DGS [17] | 0.920 | 0.932 | 0.982 | 0.973 | 0.945 | 0.950 |
| G.Shader (reproduced) [13] | 0.897 | 0.915 | 0.977 | 0.968 | 0.935 | 0.938 |
| Ours | 0.923 | 0.938 | 0.983 | 0.974 | 0.947 | 0.953 |

LPIPS↓

| Method | Barn | Caterpillar | Family | Ignatius | Truck | Avg. |
|---|---|---|---|---|---|---|
| 3DGS [17] | 0.113 | 0.074 | 0.023 | 0.032 | 0.059 | 0.060 |
| G.Shader (reproduced) [13] | 0.147 | 0.098 | 0.029 | 0.039 | 0.071 | 0.077 |
| Ours | 0.112 | 0.071 | 0.022 | 0.031 | 0.057 | 0.058 |
# 5.3 Discussion

In the comparisons detailed in Sec. 5.2, 3iGS demonstrates superior performance over the established baselines, delivering quantitatively and qualitatively enhanced renderings in a majority of test cases among real-time rasterisation approaches. On the NeRF Synthetic dataset, 3iGS surpasses the prior 3DGS and GaussianShader. Although GaussianShader reportedly performs slightly better on the Shiny Blender dataset, we have included both the reported results and results reproduced with the authors' official code repository. We postulate that the Shiny Blender scenes, which comprise single objects only, present simpler geometries that facilitate an easier recovery of the intrinsic material properties essential for rendering view-dependent effects. In addition, specular reflections in this dataset are primarily dominated by direct illumination from an external environment map; thus GaussianShader, which models direct lighting with a differentiable environment cube map, performs well. However, when presented with a complex scene containing multiple objects, such as the NeRF Synthetic scene shown in Fig. 1 with its intricate intra-scene interactions, GaussianShader struggles to accurately recover the physical rendering parameters. Furthermore, these lighting scenarios are more complex due to indirect lighting, so jointly modelling direct and indirect lighting using a continuous local incident field is crucial. The NeRF-based approaches reported above present competitive results, yet such methods are extremely slow to train, often requiring days, and are unable to deliver the real-time rendering needed for interactive applications.

Comparing across all methodologies, our 3iGS method presents an attractive and pragmatic alternative that achieves excellent rendering quality while balancing rendering speed, as discussed in Sec. 5.4.

![](images/1d015a307ed8d70196e68321e2845c04f15d6b6457fc6df4bea45d20dbad5a5a.jpg)
Fig. 5: In contrast to 3DGS [17] and GaussianShader [13], our 3iGS method uniquely identifies both the golden specular highlights and the reflections of the Medium Tom seen in the plastic surface of the Floor Tom (top row). Our approach successfully captures the detailed specular highlights on every cymbal within the drum setup from the Blender dataset [22].

![](images/d4f630b34d2a3017bd0d5e9c88bfc7ae22160bbe7ba4e155a34202af239a452a.jpg)

![](images/e90cd63d4a7140ffc259d6e11ca6c9411103f4d71f9d90d8e03f98e062cc6716.jpg)

# 5.4 Ablation Studies

Table 3: An ablation study of our model on the Blender synthetic dataset, experimenting with 3iGS under a variety of model parameters. In the first row, we directly predict an outgoing radiance field similar to NeRF-based methods. In the second row, we omit the BRDF roughness parameter used to encode the viewing direction with IDE. Both variants are inferior to our complete model.
| Variant | PSNR↑ | SSIM↑ | LPIPS↓ |
|---|---|---|---|
| Ours (outgoing radiance field) | 32.38 | 0.965 | 0.035 |
| Ours (no roughness parameter, no IDE) | 33.26 | 0.967 | 0.031 |
| Ours (complete model) | 33.64 | 0.970 | 0.029 |
In Tab. 3, we study the effectiveness of our design choices and parameters for 3iGS. In the first row, we use the Gaussian mean to interpolate features from the factorised tensors and predict the outgoing specular colours directly; in this scenario, the specular colour is predicted as an outgoing radiance field in a NeRF-like manner. In the second row, we abandon the BRDF roughness parameter from the Gaussian features and apply a standard Fourier positional encoding of the viewing direction instead of IDE. Both cases led to inferior renderings compared to our complete model.

In Tab. 4, we illustrate the training and rendering (test) speeds of 3iGS against 3DGS and GaussianShader, normalised to the speed of 3DGS. Our model performs competitively and achieves real-time rendering speed, although it is slower than 3DGS, whereas GaussianShader is much slower than the vanilla model. We attribute our efficient rendering speed to the use of factorised tensors for the illumination field.

Table 4: We evaluate the test and train speeds of 3DGS [17], GaussianShader [13], and our method on a single Tesla V100 32GB GPU with the original codebases and settings advocated by the authors, and report the results normalised to the rendering speed of 3DGS.
| Method | Test | Train |
|---|---|---|
| 3DGS | 1.0x | 1.0x |
| GaussianShader | 6.3x slower | 12.1x slower |
| Ours | 2.0x slower | 3.2x slower |
# 6 Limitations and Weaknesses

3iGS inherits the main challenges of factorised tensors, as in [9]. Our model is limited to scenes that fit within a defined bounding box; future work could explore warping unbounded scenes to fit a tensorial grid representation. Furthermore, 3iGS inherits the weaknesses of 3DGS: a GPU with large VRAM is necessary to fit the 3D Gaussians and to evaluate the illumination field. A straightforward workaround is to reduce the number of Gaussians by adding an upper bound on the number of Gaussians produced in the adaptive control step. Our work also inherits 3DGS's difficulty in producing accurate scene geometry.

# 7 Conclusion

We introduce our work, Factorised Tensorial Illumination for 3D Gaussian Splatting (3iGS), to enhance the view-dependent effects in rendering Gaussian radiance fields. Our approach overcomes the constraints of previous methods, which relied on optimising an outgoing radiance field of independent Gaussians with Spherical Harmonics (SH) parameters. We illustrate that superior view-dependent effects in 3DGS can be attained by depicting an outgoing radiance field as a continuous illumination field together with each Gaussian's BRDF characteristics in relation to this field. Distinct from other methods that depend on oversimplified yet restrictive rendering equations requiring the prediction of physical attributes of scene surfaces for shading, our methodology proves more effective. Furthermore, we have shown that fast rendering speeds are attainable through the representation of an illumination field with factorised tensors. We demonstrated our claims across diverse datasets, from synthetic to real-world environments, and compared against prior art on both quantitative and qualitative metrics. We also evaluated the effectiveness of our model parameters and design choices through an ablation study. Finally, we acknowledge the limitations of our research as a catalyst for future investigative directions. Our code is released here.

Acknowledgement. This study is supported under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding initiative, as well as cash and in-kind collaboration from the industry partner(s). The computational work for this article was partially performed on resources of the National Supercomputing Centre, Singapore.

# References

1. Barron, J.T., Mildenhall, B., Tancik, M., Hedman, P., Martin-Brualla, R., Srinivasan, P.P.: Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5855-5864 (2021)
2. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5470-5479 (2022)
3. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Zip-nerf: Anti-aliased grid-based neural radiance fields. arXiv preprint arXiv:2304.06706 (2023)
4. Bi, S., Xu, Z., Srinivasan, P., Mildenhall, B., Sunkavalli, K., Hasan, M., Hold-Geoffroy, Y., Kriegman, D., Ramamoorthi, R.: Neural reflectance fields for appearance acquisition. arXiv preprint arXiv:2008.03824 (2020)
5. Boss, M., Braun, R., Jampani, V., Barron, J.T., Liu, C., Lensch, H.: Nerd: Neural reflectance decomposition from image collections. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 12684-12694 (2021)
6. Boss, M., Jampani, V., Braun, R., Liu, C., Barron, J., Lensch, H.: Neural-pil: Neural pre-integrated lighting for reflectance decomposition. Advances in Neural Information Processing Systems 34, 10691-10704 (2021)
7. Burley, B., Studios, W.D.A.: Physically-based shading at disney. In: ACM SIGGRAPH. vol. 2012, pp. 1-7 (2012)
8. Carroll, J.D., Chang, J.J.: Analysis of individual differences in multidimensional scaling via an n-way generalization of "eckart-young" decomposition. Psychometrika 35(3), 283-319 (1970)
9. Chen, A., Xu, Z., Geiger, A., Yu, J., Su, H.: Tensorf: Tensorial radiance fields. In: European Conference on Computer Vision. pp. 333-350. Springer (2022)
10. Cook, R.L., Torrance, K.E.: A reflectance model for computer graphics. ACM Transactions on Graphics (ToG) 1(1), 7-24 (1982)
11. Fridovich-Keil, S., Yu, A., Tancik, M., Chen, Q., Recht, B., Kanazawa, A.: Plenoxels: Radiance fields without neural networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5501-5510 (2022)
12. Greger, G., Shirley, P., Hubbard, P.M., Greenberg, D.P.: The irradiance volume. IEEE Computer Graphics and Applications 18(2), 32-43 (1998)
13. Jiang, Y., Tu, J., Liu, Y., Gao, X., Long, X., Wang, W., Ma, Y.: Gaussianshader: 3d gaussian splatting with shading functions for reflective surfaces (2023)
14. Jin, H., Liu, I., Xu, P., Zhang, X., Han, S., Bi, S., Zhou, X., Xu, Z., Su, H.: Tensoir: Tensorial inverse rendering. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 165-174 (2023)
15. Kajiya, J.T.: The rendering equation. In: Proceedings of the 13th annual conference on Computer graphics and interactive techniques. pp. 143-150 (1986)
16. Kautz, J., Snyder, J., Sloan, P.P.J.: Fast arbitrary brdf shading for low-frequency lighting using spherical harmonics. Rendering Techniques 2(291-296), 1 (2002)
17. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (ToG) 42(4), 1-14 (2023)
18. Knapitsch, A., Park, J., Zhou, Q.Y., Koltun, V.: Tanks and temples: Benchmarking large-scale scene reconstruction. ACM Transactions on Graphics 36(4) (2017)
19. Li, Z., Müller, T., Evans, A., Taylor, R.H., Unberath, M., Liu, M.Y., Lin, C.H.: Neuralangelo: High-fidelity neural surface reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8456-8465 (2023)
20. Liang, R., Chen, H., Li, C., Chen, F., Panneer, S., Vijaykumar, N.: Envidr: Implicit differentiable renderer with neural environment lighting. arXiv preprint arXiv:2303.13022 (2023)
21. Mahajan, D., Ramamoorthi, R., Curless, B.: A theory of frequency domain invariants: Spherical harmonic identities for brdf/lighting transfer and image consistency. IEEE Transactions on Pattern Analysis and Machine Intelligence 30(2), 197-213 (2007)
22. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. In: European Conference on Computer Vision. pp. 405-421 (2020)
23. Müller, T., Evans, A., Schied, C., Keller, A.: Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG) 41(4), 1-15 (2022)
24. Munkberg, J., Hasselgren, J., Shen, T., Gao, J., Chen, W., Evans, A., Müller, T., Fidler, S.: Extracting triangular 3d models, materials, and lighting from images. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8280-8290 (2022)
25. Shi, Y., Wu, Y., Wu, C., Liu, X., Zhao, C., Feng, H., Liu, J., Zhang, L., Zhang, J., Zhou, B., Ding, E., Wang, J.: Gir: 3d gaussian inverse rendering for relightable scene factorization (2023)
26. Srinivasan, P.P., Deng, B., Zhang, X., Tancik, M., Mildenhall, B., Barron, J.T.: Nerv: Neural reflectance and visibility fields for relighting and view synthesis. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7495-7504 (2021)
27. Sun, C., Sun, M., Chen, H.T.: Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5459-5469 (2022)
28. Technologies, U.: Light probes, https://docs.unity3d.com/Manual/LightProbes.html
29. Technologies, U.: Reflection probe, https://docs.unity3d.com/Manual/class-ReflectionProbe.html
30. Verbin, D., Hedman, P., Mildenhall, B., Zickler, T., Barron, J.T., Srinivasan, P.P.: Ref-nerf: Structured view-dependent appearance for neural radiance fields. In: 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 5481-5490. IEEE (2022)
31. Wang, P., Liu, L., Liu, Y., Theobalt, C., Komura, T., Wang, W.: Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689 (2021)
32. Zhang, K., Riegler, G., Snavely, N., Koltun, V.: Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492 (2020)
\ No newline at end of file
diff --git a/2024/3iGS_ Factorised Tensorial Illumination for 3D Gaussian Splatting/images.zip b/2024/3iGS_ Factorised Tensorial Illumination for 3D Gaussian Splatting/images.zip
new file mode 100644
index 0000000000000000000000000000000000000000..3544081747bb79d49a6e8edbbedee4b61f349fd7
--- /dev/null
+++ b/2024/3iGS_ Factorised Tensorial Illumination for 3D Gaussian Splatting/images.zip
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:931aa697772ca5414f38bb2f0755d50ad170f091526ee3e4d78ba1bca6ea964a
+size 629439
[ + 153, + 190, + 461, + 224 + ], + "type": "text", + "content": " S-Lab, Nanyang Technological University \n" + }, + { + "bbox": [ + 153, + 190, + 461, + 224 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 153, + 190, + 461, + 224 + ], + "type": "text", + "content": " College of Computing & Data Science, Nanyang Technological University {zhejun001} at {e.ntu.edu.sg}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 160, + 254, + 455, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 254, + 455, + 430 + ], + "spans": [ + { + "bbox": [ + 160, + 254, + 455, + 430 + ], + "type": "text", + "content": "Abstract. The use of 3D Gaussians as representation of radiance fields has enabled high quality novel view synthesis at real-time rendering speed. However, the choice of optimising the outgoing radiance of each Gaussian independently as spherical harmonics results in unsatisfactory view dependent effects. In response to these limitations, our work, Factorised Tensorial Illumination for 3D Gaussian Splatting, or 3iGS, improves upon 3D Gaussian Splatting (3DGS) rendering quality. Instead of optimising a single outgoing radiance parameter, 3iGS enhances 3DGS view-dependent effects by expressing the outgoing radiance as a function of a local illumination field and Bidirectional Reflectance Distribution Function (BRDF) features. We optimise a continuous incident illumination field through a Tensorial Factorisation representation, while separately fine-tuning the BRDF features of each 3D Gaussian relative to this illumination field. Our methodology significantly enhances the rendering quality of specular view-dependent effects of 3DGS, while maintaining rapid training and rendering speeds." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 160, + 441, + 455, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 441, + 455, + 463 + ], + "spans": [ + { + "bbox": [ + 160, + 441, + 455, + 463 + ], + "type": "text", + "content": "Keywords: Gaussian Splatting " + }, + { + "bbox": [ + 160, + 441, + 455, + 463 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 441, + 455, + 463 + ], + "type": "text", + "content": " Neural Radiance Field " + }, + { + "bbox": [ + 160, + 441, + 455, + 463 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 441, + 455, + 463 + ], + "type": "text", + "content": " Novel View Synthesis" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 483, + 230, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 483, + 230, + 496 + ], + "spans": [ + { + "bbox": [ + 132, + 483, + 230, + 496 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 510, + 482, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 482, + 605 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 482, + 605 + ], + "type": "text", + "content": "3D Gaussian Splatting (3DGS) has emerged as the standard method for representing 3D objects and scenes, trained from images, to render photorealistic novel views. Unlike the other popular method of Neural Radiance Field (NeRF) [22], which models a scene as an implicit continuous function, 3DGS represents surfaces with independent 3D Gaussians of different opacities, anisotropic covariances, and spherical harmonic coefficients. 
To render a pixel's colour, a fast, tile-based rasteriser performs alpha blending of anisotropic Gaussian splats, sorted in accordance with the visibility order." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 605, + 483, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 483, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 483, + 666 + ], + "type": "text", + "content": "Although 3DGS shows promising performance in synthesising novel views of a scene at real-time rendering speeds, its renderings fall short in more challenging scenarios that involve complex, view-dependent surface effects. When observing images with reflective and specular surfaces, the changes in surface colour across viewing angles remain consistent, rather than exhibiting the complex variations" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 143, + 112, + 482, + 261 + ], + "blocks": [ + { + "bbox": [ + 143, + 112, + 482, + 261 + ], + "lines": [ + { + "bbox": [ + 143, + 112, + 482, + 261 + ], + "spans": [ + { + "bbox": [ + 143, + 112, + 482, + 261 + ], + "type": "image", + "image_path": "0f403304dd76c3b34f1f7ff28c666f61dbe5538e0bec348337db4ec1f3e8b8a6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 268, + 482, + 357 + ], + "lines": [ + { + "bbox": [ + 130, + 268, + 482, + 357 + ], + "spans": [ + { + "bbox": [ + 130, + 268, + 482, + 357 + ], + "type": "text", + "content": "Fig. 1: We present test renderings from the \"Drums\" scene within the blender dataset [22], comparing our technique against Gaussian Splatting (3DGS) [17] and the ground truth (G.T). As the perspective shifts around the scene, the colour of the Floor Tom's top changes from translucent to reflective, showcasing intricate effects that depend on the viewpoint. These effects result from the specular reflection of incoming light and the reflections within the scene from elements like the Cymbals. Contrary to 3DGS, which struggles to capture these complex variations in light reflection, our method, 3iGS, aligns more accurately with the ground truth." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 385, + 482, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 385, + 482, + 481 + ], + "spans": [ + { + "bbox": [ + 130, + 385, + 482, + 481 + ], + "type": "text", + "content": "in reflections observed in the dataset shown in Fig. 1. A logical solution is to adopt the strategy of Physically Based Rendering (PBR), which involves explicitly modeling the surface characteristics and performing ray marching from surfaces to calculate illumination effects. As part of the process, the Bidirectional Reflectance Distribution Function (BRDF) of surfaces are predicted and a shading function is applied to simulate view-dependent effects [7,10]. Nonetheless, accurately determining these physical properties is an ill-posed challenge, making it difficult to infer and model all the intricate rendering effects correctly." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 483, + 482, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 483, + 482, + 579 + ], + "spans": [ + { + "bbox": [ + 130, + 483, + 482, + 579 + ], + "type": "text", + "content": "In this paper, we draw inspiration from graphics engines that utilise illumination volumes or light probes that summarise illumination information directed towards a surface. These methods compute illumination either directly from the local illumination volume surrounding the surface [12] or from the nearest light probes [28], rather than sampling numerous outward rays from the surface's upper hemisphere. Such approaches allow fast rendering speed at run time, as illumination information is pre-calculated and stored in the volumes or light probes." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 582, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 582, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 582, + 482, + 665 + ], + "type": "text", + "content": "Our work, named Factorised Tensorial Illumination for Gaussian Splatting (3iGS), enhances 3DGS rendering quality. We introduce a continuous local illumination field of 3D Gaussians represented by compact factorised tensors for fast evaluation. The means of the 3D Gaussians serve as the input to these factorised tensors to calculate illumination features. Subsequently, each 3D Gaussian is refined through an optimisation of its mean, opacity, anisotropic covariance, diffused colour, and BRDF features. A neural renderer then maps the incident" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 251, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 251, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 251, + 102 + ], + "type": "text", + "content": "ZJ. Tang, TJ. Cham" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 199 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 199 + ], + "type": "text", + "content": "illumination neural field, Gaussian BRDF attributes, and viewing angle to the Gaussian's specular colour. Overall, our approach represents a Gaussian's outgoing radiance as a function of both a continuous local illumination field and the individual Gaussian's BRDF attributes relative to it. This is opposed to the conventional optimisation of the 3D Gaussians' outgoing radiance in isolation, without accounting for the effects of adjacent Gaussians or scene lighting conditions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 200, + 480, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 200, + 480, + 283 + ], + "spans": [ + { + "bbox": [ + 130, + 200, + 480, + 283 + ], + "type": "text", + "content": "3iGS significantly enhances the accuracy of 3DGS, offering clear advantages in scenes with reflective surfaces where surface colours change dramatically across viewing angles as shown in Fig. 1. 
In synthetic datasets, such as the NeRF Blender dataset and the Shiny Blender dataset, 3iGS surpasses 3DGS both quantitatively and qualitatively. Similarly, 3iGS demonstrates superior performance over 3DGS in real-world scenarios on the Tanks and Temples dataset. In summary our technical contributions are:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 136, + 293, + 480, + 365 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 136, + 293, + 479, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 293, + 479, + 316 + ], + "spans": [ + { + "bbox": [ + 136, + 293, + 479, + 316 + ], + "type": "text", + "content": "1. a method to optimise the outgoing radiance as an incident continuous illumination field and Gaussian BRDF features with a neural renderer;" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 136, + 318, + 480, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 318, + 480, + 341 + ], + "spans": [ + { + "bbox": [ + 136, + 318, + 480, + 341 + ], + "type": "text", + "content": "2. an approach to model a continuous illumination field with Tensorial Factorisation for compactness and fast evaluation; and" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 136, + 342, + 480, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 342, + 480, + 365 + ], + "spans": [ + { + "bbox": [ + 136, + 342, + 480, + 365 + ], + "type": "text", + "content": "3. superior performance in rendering quality over baseline 3D Gaussian Splatting while maintaining real time performance." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 386, + 237, + 399 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 386, + 237, + 399 + ], + "spans": [ + { + "bbox": [ + 132, + 386, + 237, + 399 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 413, + 479, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 413, + 479, + 472 + ], + "spans": [ + { + "bbox": [ + 130, + 413, + 479, + 472 + ], + "type": "text", + "content": "Our work falls into the category of learning scene representation from multi-view input images. Here we review prior work on NeRF-based representations and Gaussian splatting. We also discuss other relevant topics pertaining to inverse rendering which aims to recover scene geometry, material properties, and scene lighting conditions in Sec. 2.1." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 474, + 480, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 474, + 480, + 628 + ], + "spans": [ + { + "bbox": [ + 130, + 474, + 480, + 628 + ], + "type": "text", + "content": "Scene Representations for View Synthesis - One of the pioneering neural rendering techniques called Neural Radiance Fields (NeRF) [22] has achieved remarkable results in novel view synthesis from multi-view images. By sampling points along rays traced from the camera into the scene, NeRF reconstructs a scene as a continuous field of outgoing radiance. The technique employs volumetric rendering to determine the colour of each pixel. This method has inspired numerous developments of other scene representations [1,2,22,31,32]. However, the vanilla NeRF, which encodes the entire scene representation into a set of MLPs, requires multiple queries of points along rays during training and inference. 
This massively slows down the speed required for real time rendering. To address this, other neural scene representation techniques apply hash encoding [19, 23], triplanes or factorised tensors [9, 14], and gridding [3, 11, 27] to accelerate training and inference speeds." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 630, + 479, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 630, + 479, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 630, + 479, + 665 + ], + "type": "text", + "content": "Tensorial Factorisation - In TensoRF [9], a feature grid can be represented as a 4D tensor of which the first 3 represents the XYZ spatial grid and the last represents the feature channel dimension. To model a radiance field with grid" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 178, + 91, + 447, + 103 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 91, + 447, + 103 + ], + "spans": [ + { + "bbox": [ + 178, + 91, + 447, + 103 + ], + "type": "text", + "content": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splatting" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 115, + 479, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 115, + 479, + 140 + ], + "spans": [ + { + "bbox": [ + 132, + 115, + 479, + 140 + ], + "type": "text", + "content": "representation, [9] propose an extension of CANDECOMP/PARAFAC (CP)-Decomposition [8] to Vector-Matrix (VM) decomposition:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 144, + 157, + 455, + 224 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 157, + 455, + 224 + ], + "spans": [ + { + "bbox": [ + 144, + 157, + 455, + 224 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {G} _ {c} = \\sum_ {r = 1} ^ {R _ {c}} \\mathbf {v} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {X}} \\circ \\mathbf {M} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {Y Z}} \\circ \\mathbf {b} _ {\\mathbf {3 r} - \\mathbf {2}} + \\mathbf {v} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {Y}} \\circ \\mathbf {M} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {X Z}} \\circ \\mathbf {b} _ {\\mathbf {3 r} - \\mathbf {1}} + \\mathbf {v} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {Z}} \\circ \\mathbf {M} _ {\\mathbf {c}, \\mathbf {r}} ^ {\\mathbf {X Y}} \\circ \\mathbf {b} _ {\\mathbf {3 r}} \\\\ = \\sum_ {r = 1} ^ {R _ {c}} \\mathbf {A} _ {C, r} ^ {X} \\circ \\mathbf {b} _ {3 r - 2} + \\mathbf {A} _ {C, r} ^ {Y} \\circ \\mathbf {b} _ {3 r - 1} + \\mathbf {A} _ {C, r} ^ {Z} \\circ \\mathbf {b} _ {3 r} \\\\ \\end{array}", + "image_path": "a6f45aef1c3c065845e99f2cacfc7b7208d7340af44b7ead1b6292170953ff44.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 231, + 479, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 231, + 479, + 266 + ], + "spans": [ + { + "bbox": [ + 130, + 231, + 479, + 266 + ], + "type": "text", + "content": "In Eq. 
(1), the inputs " + }, + { + "bbox": [ + 130, + 231, + 479, + 266 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 130, + 231, + 479, + 266 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 231, + 479, + 266 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 130, + 231, + 479, + 266 + ], + "type": "text", + "content": " corresponds to XYZ-mode vector and matrix factorisation and " + }, + { + "bbox": [ + 130, + 231, + 479, + 266 + ], + "type": "inline_equation", + "content": "\\mathbf{b}" + }, + { + "bbox": [ + 130, + 231, + 479, + 266 + ], + "type": "text", + "content": " denotes the appearance feature mode vectors. Separately, " + }, + { + "bbox": [ + 130, + 231, + 479, + 266 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_c" + }, + { + "bbox": [ + 130, + 231, + 479, + 266 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 231, + 479, + 266 + ], + "type": "inline_equation", + "content": "R_{C}" + }, + { + "bbox": [ + 130, + 231, + 479, + 266 + ], + "type": "text", + "content": " refers to the outgoing radiance and the colour feature channels." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 267, + 480, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 267, + 480, + 327 + ], + "spans": [ + { + "bbox": [ + 130, + 267, + 480, + 327 + ], + "type": "text", + "content": "Gaussian Splatting - As opposed to ray marching, 3D Gaussian Splatting is a recent method for rendering scenes via rasterisation. To begin, Gaussians are fitted on a point cloud that are either initialised as a set of random points or bootstrapped with a sparse point cloud produced during the SfM process for free [17]. The Gaussians of the point cloud are defined by a function:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 238, + 334, + 480, + 350 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 334, + 480, + 350 + ], + "spans": [ + { + "bbox": [ + 238, + 334, + 480, + 350 + ], + "type": "interline_equation", + "content": "g (\\mathbf {x} | \\mu , \\boldsymbol {\\Sigma}) = e ^ {- \\frac {1}{2} (\\mathbf {x} - \\mu) ^ {T} \\boldsymbol {\\Sigma} ^ {- 1} (\\mathbf {x} - \\mu)} \\tag {2}", + "image_path": "c5033e81a804426494fcd2527b79e58f487b6d9882daf74a1542c7551594bceb.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "spans": [ + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "text", + "content": "where each point " + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "text", + "content": " is centered at mean " + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "inline_equation", + "content": "\\mu \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "text", + "content": " with an anisotropic covariance matrix " + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma} \\in \\mathbb{R}^{3x^3}" + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "text", + "content": ". 
The mean of a Gaussian is parameterised by the coordinates " + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "inline_equation", + "content": "\\mu = (\\mu_x, \\mu_y, \\mu_z)" + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "text", + "content": " that is scaled by the full 3D covariance matrix " + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}" + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "text", + "content": ". As discussed in [17], these Gaussians have no physical meanings, given the difficulty of constraining " + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}" + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "text", + "content": " to a valid semi-positive definite matrix during the optimisation process. Instead, to derive " + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}" + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "text", + "content": ", a scaling matrix " + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "text", + "content": " and a rotation matrix " + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 130, + 357, + 482, + 440 + ], + "type": "text", + "content": " is learned during the optimisation process to scale the Gaussians:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 274, + 447, + 480, + 460 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 274, + 447, + 480, + 460 + ], + "spans": [ + { + "bbox": [ + 274, + 447, + 480, + 460 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\Sigma} = \\mathbf {R} \\mathbf {S} \\mathbf {S} ^ {\\mathrm {T}} \\mathbf {R} ^ {\\mathrm {T}} \\tag {3}", + "image_path": "4739fc0e8c8346975f37276d80c15799fd6831f6d2735f367c8878940cc939b1.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 468, + 479, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 468, + 479, + 501 + ], + "spans": [ + { + "bbox": [ + 130, + 468, + 479, + 501 + ], + "type": "text", + "content": "With a viewing transformation " + }, + { + "bbox": [ + 130, + 468, + 479, + 501 + ], + "type": "inline_equation", + "content": "\\mathbf{W}" + }, + { + "bbox": [ + 130, + 468, + 479, + 501 + ], + "type": "text", + "content": " and an affine approximation of the projective transformation " + }, + { + "bbox": [ + 130, + 468, + 479, + 501 + ], + "type": "inline_equation", + "content": "\\mathbf{J}" + }, + { + "bbox": [ + 130, + 468, + 479, + 501 + ], + "type": "text", + "content": ", the covariance matrix is then expressed in camera coordinates as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 265, + 502, + 480, + 516 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 265, + 502, + 480, + 516 + ], + "spans": [ + { + "bbox": [ + 265, + 502, + 480, + 516 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\Sigma} ^ {\\prime} = \\mathbf {J} \\mathbf {W} \\boldsymbol {\\Sigma} \\mathbf {W} ^ {\\mathrm {T}} \\mathbf {J} ^ {\\mathrm {T}} \\tag {4}", + "image_path": "e655a2d4024683538aa89645bb842b9d1a6fa08c667623ba86879a1c072c8031.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 520, + 479, + 567 + 
], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 520, + 479, + 567 + ], + "spans": [ + { + "bbox": [ + 130, + 520, + 479, + 567 + ], + "type": "text", + "content": "Furthermore, each Gaussian is coloured via a set of Spherical Harmonics (SH) coefficients that represent the view-dependent colour " + }, + { + "bbox": [ + 130, + 520, + 479, + 567 + ], + "type": "inline_equation", + "content": "c_{i}" + }, + { + "bbox": [ + 130, + 520, + 479, + 567 + ], + "type": "text", + "content": ", also known as the radiance field, multiplied by its opacity " + }, + { + "bbox": [ + 130, + 520, + 479, + 567 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 130, + 520, + 479, + 567 + ], + "type": "text", + "content": ". To colour a pixel " + }, + { + "bbox": [ + 130, + 520, + 479, + 567 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 130, + 520, + 479, + 567 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 130, + 520, + 479, + 567 + ], + "type": "inline_equation", + "content": "\\hat{C}" + }, + { + "bbox": [ + 130, + 520, + 479, + 567 + ], + "type": "text", + "content": ", alpha blending of " + }, + { + "bbox": [ + 130, + 520, + 479, + 567 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 130, + 520, + 479, + 567 + ], + "type": "text", + "content": " ordered Gaussians is applied:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 183, + 575, + 480, + 608 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 183, + 575, + 480, + 608 + ], + "spans": [ + { + "bbox": [ + 183, + 575, + 480, + 608 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {C}} = \\sum_ {i \\in N} T _ {i} g _ {i} \\left(\\mathbf {u} \\mid \\mu^ {\\prime}, \\boldsymbol {\\Sigma} ^ {\\prime}\\right) \\alpha_ {i} \\mathbf {c} _ {i}, \\quad T _ {i} = \\prod_ {j = 1} ^ {i - 1} \\left(1 - g _ {j} \\left(\\mathbf {u} \\mid \\mu^ {\\prime}, \\boldsymbol {\\Sigma} ^ {\\prime}\\right) \\alpha_ {j}\\right) \\tag {5}", + "image_path": "17c14dea4648e8dfce950e53d110e7a8b615b12201bb761e59b94807566c3f96.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 623, + 228, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 623, + 228, + 634 + ], + "spans": [ + { + "bbox": [ + 132, + 623, + 228, + 634 + ], + "type": "text", + "content": "2.1 Preliminaries" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 130, + 641, + 479, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 479, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 479, + 665 + ], + "type": "text", + "content": "As discussed, the direct optimisation of spherical harmonics to describe the outgoing radiance of individual Gaussians in 3DGS results in poor view-dependent" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 250, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 250, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 250, + 102 + ], + "type": "text", + "content": "ZJ. Tang, TJ. 
Cham" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 152 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 152 + ], + "type": "text", + "content": "effects. A crucial reason is that these Gaussians do not fully model scene properties [13] and thus fail to capture the specular effects which changes drastically across viewing angles." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 152, + 480, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 152, + 480, + 213 + ], + "spans": [ + { + "bbox": [ + 130, + 152, + 480, + 213 + ], + "type": "text", + "content": "Therefore to account for the specular highlights, it is beneficial to model the underlying properties such as the BRDF and illumination effects of the scene. In conventional computer graphics, a rendering equation is commonly applied to simulate effects of specular and diffused shading [15]. For instance, rendering Eq. (6) describes an outgoing radiance of a surface point:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 226, + 217, + 480, + 242 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 226, + 217, + 480, + 242 + ], + "spans": [ + { + "bbox": [ + 226, + 217, + 480, + 242 + ], + "type": "interline_equation", + "content": "L _ {o} (\\mathbf {x}, \\mathbf {v}) = \\int_ {\\Omega} L _ {i} (\\mathbf {x}, \\mathbf {l}) f _ {r} (\\mathbf {l}, \\mathbf {v}) (\\mathbf {l} \\cdot \\mathbf {n}) d \\mathbf {l}, \\tag {6}", + "image_path": "31a915d18e193d35316e3efc223ed3f1a1145eea4899b6efbe58dad75128fa22.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "spans": [ + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "text", + "content": "The radiance " + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "inline_equation", + "content": "L_{o}" + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "text", + "content": " emitted from a surface point " + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "text", + "content": ", when observed from a viewing direction " + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "text", + "content": ", is defined in Eq. (6). An integral is applied to accumulate the contribution of incident light at an incident angle " + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "inline_equation", + "content": "\\mathbf{l}" + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "text", + "content": " across the upper hemisphere " + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "inline_equation", + "content": "\\Omega" + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "text", + "content": ". 
The function " + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "inline_equation", + "content": "f_{r}" + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "text", + "content": " denotes the Bidirectional Radiance Distribution Function (BRDF), describing the reflection characteristics of incident radiance at " + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "text", + "content": " viewed in direction " + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "text", + "content": ". Lastly the inclusion of the cosine law with the normal vector " + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "inline_equation", + "content": "\\mathbf{n}" + }, + { + "bbox": [ + 130, + 246, + 479, + 328 + ], + "type": "text", + "content": " ensures the energy conservation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 329, + 479, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 329, + 479, + 354 + ], + "spans": [ + { + "bbox": [ + 130, + 329, + 479, + 354 + ], + "type": "text", + "content": "From a signal processing perspective, an alternative to Eq. (6) is expressed more generally in terms of spherical harmonic convolution [16, 21]:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 274, + 360, + 480, + 372 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 274, + 360, + 480, + 372 + ], + "spans": [ + { + "bbox": [ + 274, + 360, + 480, + 372 + ], + "type": "interline_equation", + "content": "B _ {l m} = \\Lambda_ {l} \\rho_ {l} L _ {l m} \\tag {7}", + "image_path": "362fec6ffb456574494d34e0c4768dcf12b6a6cf301c1e1da24ad2c06903ba97.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 376, + 479, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 376, + 479, + 411 + ], + "spans": [ + { + "bbox": [ + 130, + 376, + 479, + 411 + ], + "type": "text", + "content": "In Eq. (7), " + }, + { + "bbox": [ + 130, + 376, + 479, + 411 + ], + "type": "inline_equation", + "content": "B_{lm}" + }, + { + "bbox": [ + 130, + 376, + 479, + 411 + ], + "type": "text", + "content": " defines the outgoing reflected light as the product of BRDF filter " + }, + { + "bbox": [ + 130, + 376, + 479, + 411 + ], + "type": "inline_equation", + "content": "\\rho_l" + }, + { + "bbox": [ + 130, + 376, + 479, + 411 + ], + "type": "text", + "content": ", spherical harmonic coefficients of lighting signal " + }, + { + "bbox": [ + 130, + 376, + 479, + 411 + ], + "type": "inline_equation", + "content": "L_{lm}" + }, + { + "bbox": [ + 130, + 376, + 479, + 411 + ], + "type": "text", + "content": ", and the normalisation constant " + }, + { + "bbox": [ + 130, + 376, + 479, + 411 + ], + "type": "inline_equation", + "content": "\\varLambda_l" + }, + { + "bbox": [ + 130, + 376, + 479, + 411 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 413, + 481, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 413, + 481, + 605 + ], + "spans": [ + { + "bbox": [ + 130, + 413, + 481, + 605 + ], + "type": "text", + "content": "Some studies [13, 25] enhance 3DGS by expressing BRDF " + }, + { + "bbox": [ + 130, + 413, + 481, + 605 + ], + "type": "inline_equation", + "content": "f_{r}" + }, + { + "bbox": [ + 130, + 413, + 481, + 605 + ], + "type": "text", + "content": " as a Cook-Torrance microfacet model [10] or the GGX Trowbridge-Reitz model [7]. In these approaches, physical attributes, including roughness " + }, + { + "bbox": [ + 130, + 413, + 481, + 605 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 130, + 413, + 481, + 605 + ], + "type": "text", + "content": ", albedo " + }, + { + "bbox": [ + 130, + 413, + 481, + 605 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 130, + 413, + 481, + 605 + ], + "type": "text", + "content": ", metallicity " + }, + { + "bbox": [ + 130, + 413, + 481, + 605 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 130, + 413, + 481, + 605 + ], + "type": "text", + "content": ", and the normal vector " + }, + { + "bbox": [ + 130, + 413, + 481, + 605 + ], + "type": "inline_equation", + "content": "\\mathbf{n}" + }, + { + "bbox": [ + 130, + 413, + 481, + 605 + ], + "type": "text", + "content": " are predicted and used in Eq. (6). Although these modifications marginally improve rendering quality metrics, they fail to accurately produce high-quality, view-dependent effects. This shortfall primarily stems from relying on estimated parameters for physical rendering within a simplified rendering equation [20]. Furthermore, these parameters are inherently challenging to be estimated accurately, due to the ill-posed nature of inverse rendering from multi-view images. Although numerous works [4-6, 14, 20, 26] also achieved success by exploring a neural representation of the rendering equation, these works either require prior information, such as known lighting conditions or a pre-trained model on a realistic dataset with known BRDF parameters. Furthermore these techniques are experimented with ray tracing based methods like NeRF. A work closest to ours in the area of rasterisation and Gaussian Splitting manner, is GaussianShader [13] which we compare against in Sec. 5.2." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 619, + 202, + 632 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 619, + 202, + 632 + ], + "spans": [ + { + "bbox": [ + 132, + 619, + 202, + 632 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 641, + 479, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 479, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 479, + 665 + ], + "type": "text", + "content": "Instead of predicting the physical BRDF properties of materials in the scene, our goal is to express the outgoing radiance of a Gaussian as a more general" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 178, + 91, + 448, + 103 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 91, + 448, + 103 + ], + "spans": [ + { + "bbox": [ + 178, + 91, + 448, + 103 + ], + "type": "text", + "content": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splitting" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 116, + 474, + 260 + ], + "blocks": [ + { + "bbox": [ + 133, + 116, + 474, + 260 + ], + "lines": [ + { + "bbox": [ + 133, + 116, + 474, + 260 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 474, + 260 + ], + "type": "image", + "image_path": "66926acba882902ca5c87775ec218e2fb80260c9e0daf408a4dc528a355dbb09.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "lines": [ + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "spans": [ + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "text", + "content": "Fig. 2: A visualisation of 3iGS pipeline to render a single Gaussian's colour. We interpolate an incident illumination " + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "inline_equation", + "content": "L_{i}" + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "text", + "content": " from the factorised tensorial illumination field " + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_l" + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "text", + "content": " using a Gaussian mean " + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "inline_equation", + "content": "\\pmb{x}_i" + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "text", + "content": " as input. 
A neural network " + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "text", + "content": " maps the illumination field " + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "inline_equation", + "content": "L_{i}" + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "text", + "content": ", the Gaussian BRDF features " + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "inline_equation", + "content": "\\rho_{i}" + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "text", + "content": ", and the viewing direction " + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "inline_equation", + "content": "\\omega_{o}" + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "text", + "content": " to the Gaussian's specular colour " + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "inline_equation", + "content": "c_{s}" + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "text", + "content": ". The diffused colour " + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "inline_equation", + "content": "c_{d}" + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "text", + "content": " and specular colour " + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "inline_equation", + "content": "c_{s}" + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "text", + "content": " are then added linearly to produce the final outgoing radiance field " + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 130, + 266, + 482, + 333 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 357, + 482, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 357, + 482, + 405 + ], + "spans": [ + { + "bbox": [ + 130, + 357, + 482, + 405 + ], + "type": "text", + "content": "expression of BRDF, and the incoming illumination as neural features. This idea is based on a generalised version of Eq. (7), where BRDF features modify an incoming illumination field, without the need to decompose them into intrinsic material properties [21]."
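As a concrete reading of this idea, here is a minimal PyTorch sketch (our assumption, not the released implementation) of the neural renderer: an MLP mapping per-Gaussian BRDF features, interpolated illumination features, and an encoded view direction to a specular colour, which is then added to the diffused colour as in Eq. (8). The feature dimensions are illustrative only:

```python
import torch
import torch.nn as nn

class SpecularHead(nn.Module):
    """Minimal stand-in for the neural renderer F in Eq. (9)."""
    def __init__(self, brdf_dim=48, illum_dim=27, view_dim=16, hidden=64):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(brdf_dim + illum_dim + view_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 3),  # RGB specular colour c_s
        )

    def forward(self, rho, L_i, view_enc):
        # rho: (N, brdf_dim) per-Gaussian BRDF features
        # L_i: (N, illum_dim) illumination features queried at Gaussian means
        # view_enc: (N, view_dim) encoded viewing direction (e.g. IDE)
        return torch.sigmoid(self.mlp(torch.cat([rho, L_i, view_enc], dim=-1)))

# Eq. (8): final colour is the diffused term plus the view-dependent term.
head = SpecularHead()
N = 1024
c_d = torch.rand(N, 3)  # per-Gaussian diffused colour
c_s = head(torch.randn(N, 48), torch.randn(N, 27), torch.randn(N, 16))
c = c_d + c_s
```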
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 405, + 481, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 405, + 481, + 429 + ], + "spans": [ + { + "bbox": [ + 130, + 405, + 481, + 429 + ], + "type": "text", + "content": "Specifically for each 3D Gaussian in the scene, the outgoing radiance field is formed by:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 261, + 429, + 481, + 442 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 429, + 481, + 442 + ], + "spans": [ + { + "bbox": [ + 261, + 429, + 481, + 442 + ], + "type": "interline_equation", + "content": "\\mathbf {c} \\left(\\omega_ {o}\\right) = \\mathbf {c} _ {\\mathbf {d}} + \\mathbf {c} _ {\\mathbf {s}} \\left(\\omega_ {o}\\right) \\tag {8}", + "image_path": "c493fd0e058ddb4fd275d4c209139550845057b2ffda67e6ff5b702f190dce3b.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "spans": [ + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "text", + "content": "For viewing angle " + }, + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "inline_equation", + "content": "\\omega_{o}" + }, + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "text", + "content": ", a Gaussian is coloured by its constant diffused colour " + }, + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "inline_equation", + "content": "c_{d}" + }, + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "text", + "content": " and a view dependent specular colour " + }, + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "inline_equation", + "content": "c_{s}" + }, + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "text", + "content": ". 
At each Gaussian " + }, + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "text", + "content": ", a small neural network " + }, + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "text", + "content": " maps the Gaussian BRDF features " + }, + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "inline_equation", + "content": "\\rho_{i}" + }, + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "text", + "content": " and the incoming illumination " + }, + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "inline_equation", + "content": "L_{i}" + }, + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "text", + "content": " to its specular colour viewed at an angle " + }, + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "inline_equation", + "content": "\\omega_{o}" + }, + { + "bbox": [ + 131, + 448, + 482, + 496 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 260, + 506, + 481, + 519 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 260, + 506, + 481, + 519 + ], + "spans": [ + { + "bbox": [ + 260, + 506, + 481, + 519 + ], + "type": "interline_equation", + "content": "\\mathcal {F}: \\left\\{\\rho_ {i}, L _ {i}, \\omega_ {o} \\right\\} \\mapsto \\mathbf {c} _ {\\mathbf {s}} \\tag {9}", + "image_path": "112ebcc72f46976cc817765068a38154b74350b45135f49bb9a49fdc15202e5c.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 131, + 536, + 384, + 549 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 536, + 384, + 549 + ], + "spans": [ + { + "bbox": [ + 131, + 536, + 384, + 549 + ], + "type": "text", + "content": "3.1 Illumination Grid by Tensorial Factorisation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 558, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 558, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 558, + 482, + 666 + ], + "type": "text", + "content": "Our work is largely inspired by conventional computer graphics engines for fast rendering of scenes and objects in video games. The fundamental rendering equation highlights the role of multi-bounce lighting in achieving indirect illumination, wherein light bounces off one surface to illuminate another. However, the process of ray tracing from each Gaussian surface into the scene is notably resource-intensive, undermining the goal of quick rendering in 3D graphics systems. To facilitate real-time rendering, one strategy involves the use of baking techniques that employ irradiance volumes [12]. This method segments a scene into distinct volumes and pre-calculates irradiance data offline. An alternative" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 251, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 251, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 251, + 102 + ], + "type": "text", + "content": "ZJ. Tang, TJ. 
Cham" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 163 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 163 + ], + "type": "text", + "content": "strategy places light probes [28,29] throughout the scene to gather lighting information at specific spatial locations. When rendering the colour of a surface, the system quickly interpolates lighting information from the nearest light probes, ensuring swift rendering times." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 164, + 479, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 164, + 479, + 199 + ], + "spans": [ + { + "bbox": [ + 130, + 164, + 479, + 199 + ], + "type": "text", + "content": "To maintain the fast rendering speed of 3DGS, our work describes a methodology of learning the illumination features of a Gaussian with a continuous grid based illumination field as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 276, + 201, + 480, + 213 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 201, + 480, + 213 + ], + "spans": [ + { + "bbox": [ + 276, + 201, + 480, + 213 + ], + "type": "interline_equation", + "content": "\\mathcal {G} _ {l}: \\left\\{\\mathbf {x} _ {\\mathbf {i}} \\right\\} \\mapsto L _ {i} \\tag {10}", + "image_path": "dfe16a657cbbc9820833759023d5c4959acfcaf562c282281e5a9e5510565f93.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 218, + 479, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 218, + 479, + 266 + ], + "spans": [ + { + "bbox": [ + 131, + 218, + 479, + 266 + ], + "type": "text", + "content": "Given a Gaussian's mean coordinate " + }, + { + "bbox": [ + 131, + 218, + 479, + 266 + ], + "type": "inline_equation", + "content": "\\mathbf{x_i}" + }, + { + "bbox": [ + 131, + 218, + 479, + 266 + ], + "type": "text", + "content": ", we seek to compute an illumination field " + }, + { + "bbox": [ + 131, + 218, + 479, + 266 + ], + "type": "inline_equation", + "content": "L_{i}" + }, + { + "bbox": [ + 131, + 218, + 479, + 266 + ], + "type": "text", + "content": " by interpolating from learnable grid representation. 
The illumination tensor " + }, + { + "bbox": [ + 131, + 218, + 479, + 266 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_l" + }, + { + "bbox": [ + 131, + 218, + 479, + 266 + ], + "type": "text", + "content": " is formulated similarly to TensoRF [9] by a vector-matrix spatial factorisation as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 197, + 275, + 480, + 306 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 275, + 480, + 306 + ], + "spans": [ + { + "bbox": [ + 197, + 275, + 480, + 306 + ], + "type": "interline_equation", + "content": "\\mathcal {G} _ {l} = \\sum_ {r = 1} ^ {R _ {L}} \\mathbf {A} _ {L, r} ^ {X} \\circ \\mathbf {b} _ {3 r - 2} + \\mathbf {A} _ {L, r} ^ {Y} \\circ \\mathbf {b} _ {3 r - 1} + \\mathbf {A} _ {L, r} ^ {Z} \\circ \\mathbf {b} _ {3 r} \\tag {11}", + "image_path": "21ff3c8ffa4d066cdaa5338d453ab734ddf2eb6192fef37531ae713ce5ca64fc.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 316, + 482, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 316, + 482, + 495 + ], + "spans": [ + { + "bbox": [ + 130, + 316, + 482, + 495 + ], + "type": "text", + "content": "In Eq. (11), " + }, + { + "bbox": [ + 130, + 316, + 482, + 495 + ], + "type": "inline_equation", + "content": "R_{L}" + }, + { + "bbox": [ + 130, + 316, + 482, + 495 + ], + "type": "text", + "content": " represents the number of feature channels of the illumination components, with " + }, + { + "bbox": [ + 130, + 316, + 482, + 495 + ], + "type": "inline_equation", + "content": "\\mathbf{A}" + }, + { + "bbox": [ + 130, + 316, + 482, + 495 + ], + "type": "text", + "content": " as feature tensors and " + }, + { + "bbox": [ + 130, + 316, + 482, + 495 + ], + "type": "inline_equation", + "content": "\\mathbf{b}" + }, + { + "bbox": [ + 130, + 316, + 482, + 495 + ], + "type": "text", + "content": " as feature vectors. The illumination feature grid is jointly learned end-to-end in the optimisation process together with each Gaussian in the scene. Unlike 3DGS, where each Gaussian is optimised independently, the illumination field is modelled as a continuous grid function. A Gaussian mean serves as the input to query from the factorised tensor grid via interpolation. The inclusion of this continuous incoming illumination field directed at each Gaussian is the core component for producing accurate view-dependent effects, as we show in the ablation study of Sec. 5.4. Furthermore, formulating this field as factorised tensors allows the network to achieve fast rendering speeds. Our illumination field is coarse, using " + }, + { + "bbox": [ + 130, + 316, + 482, + 495 + ], + "type": "inline_equation", + "content": "87.5\\%" + }, + { + "bbox": [ + 130, + 316, + 482, + 495 + ], + "type": "text", + "content": " fewer voxels compared to TensoRF on synthetic datasets. This compact representation also has a low memory footprint compared to the number of optimised Gaussians, which is often an order of magnitude or more higher. We refer readers to [9], which provides a comprehensive overview of how the tensors are factorised and interpolated."
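To illustrate the kind of query Eq. (11) enables, here is a minimal NumPy sketch (our interpretation under stated assumptions, not the authors' code) of interpolating a feature vector from vector-matrix factorised components: each term pairs a 1D line factor along one axis with a 2D plane factor over the other two axes, weighted by a learned feature basis vector b:

```python
import numpy as np

def lerp_1d(vec, t):
    # Linear interpolation into a 1D factor of length D at t in [0, 1].
    x = t * (len(vec) - 1)
    i = min(int(x), len(vec) - 2)
    w = x - i
    return (1 - w) * vec[i] + w * vec[i + 1]

def bilerp_2d(mat, u, v):
    # Bilinear interpolation into a 2D factor (D, D) at (u, v) in [0, 1]^2.
    col = np.array([lerp_1d(row, v) for row in mat])  # along second axis
    return lerp_1d(col, u)                            # then along first axis

def query_illumination(components, x):
    # components: list of terms of Eq. (11), each with 'axis' (where the
    # line factor lives), 'line' (D,), 'plane' (D, D) on the other two
    # axes, and 'basis' (C,), the learned feature vector b.
    # x: normalised Gaussian mean in [0, 1]^3.
    feat = np.zeros(len(components[0]["basis"]))
    for c in components:
        others = [a for a in range(3) if a != c["axis"]]
        s = lerp_1d(c["line"], x[c["axis"]]) * \
            bilerp_2d(c["plane"], x[others[0]], x[others[1]])
        feat += s * c["basis"]
    return feat

# Toy usage: 2 components per axis, resolution D = 8, C = 27 channels.
rng = np.random.default_rng(0)
comps = [dict(axis=a, line=rng.normal(size=8),
              plane=rng.normal(size=(8, 8)), basis=rng.normal(size=27))
         for a in (0, 1, 2) for _ in range(2)]
print(query_illumination(comps, np.array([0.3, 0.7, 0.5])).shape)  # (27,)
```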
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 131, + 513, + 271, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 513, + 271, + 525 + ], + "spans": [ + { + "bbox": [ + 131, + 513, + 271, + 525 + ], + "type": "text", + "content": "3.2 3D Gaussian Features" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "type": "text", + "content": "In 3DGS [17], Gaussians are optimised with a set of parameters: 3D positions, opacity " + }, + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "type": "text", + "content": ", anisotropic covariance, and spherical harmonics coefficients. In our work, instead of optimising spherical harmonics as an outgoing radiance, 3iGS characterises the Gaussians with a diffused colour and learnable BRDF features. Unlike [13, 25], we do not strictly enforce physically interpretable properties commonly used in shading techniques. Aforementioned, these techniques are often simplified, too ill-posed to be decomposed individually, and insufficient to encompass all complex rendering effects [20]. Rather, we loosely follow Eq. (7) and treat BRDF feature components as a set of weights that alter the incoming illumination field. Given a continuous illumination field obtained from Eq. (11), a Gaussian's BRDF is conditionally optimised against it. This is in contrast" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 178, + 91, + 448, + 103 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 91, + 448, + 103 + ], + "spans": [ + { + "bbox": [ + 178, + 91, + 448, + 103 + ], + "type": "text", + "content": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splitting" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 152 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 152 + ], + "type": "text", + "content": "to 3DGS where the Gaussians' outgoing radiance are individually optimised without modelling the interdependencies that should arise from a shared scene illumination, resulting in detrimental view-dependent effects." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 131, + 170, + 255, + 182 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 170, + 255, + 182 + ], + "spans": [ + { + "bbox": [ + 131, + 170, + 255, + 182 + ], + "type": "text", + "content": "3.3 Shading Gaussians" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 190, + 482, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 190, + 482, + 262 + ], + "spans": [ + { + "bbox": [ + 130, + 190, + 482, + 262 + ], + "type": "text", + "content": "Following Eq. 
(9), we shade each Gaussian by mapping its viewing direction encoded with Integrated Directional Encoding (IDE) [30], Gaussian features (obtained in Sec. 3.2), and its illumination field, to the specular colour output. We linearly add the diffused and specular colours to create its radiance field as per Eq. (8). To render the final scene, we follow the rasterisation pipeline proposed in the original 3DGS work." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 281, + 233, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 281, + 233, + 295 + ], + "spans": [ + { + "bbox": [ + 131, + 281, + 233, + 295 + ], + "type": "text", + "content": "4 Optimisation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 307, + 481, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 307, + 481, + 366 + ], + "spans": [ + { + "bbox": [ + 130, + 307, + 481, + 366 + ], + "type": "text", + "content": "In Sec. 3, we described the necessary components to model a scene with Gaussians and render it via rasterisation. To improve the stability of training and to enhance the final rendering quality, we first train the model with only the diffused colour for the first 3,000 iterations. Thereafter, specular colours are added to the Gaussians as in Eq. (8)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 367, + 482, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 367, + 482, + 449 + ], + "spans": [ + { + "bbox": [ + 130, + 367, + 482, + 449 + ], + "type": "text", + "content": "While training the tensorial illumination grid, an initial boundary which encapsulates the scene bounding box is defined. Midway through training, we shrink the illumination grid to fit the Gaussians and resample the grid with the same number of voxels. We adopt the same adaptive control of Gaussians as 3DGS [17] to limit the number of Gaussians and the units per volume. We train our model with the same loss function as 3DGS for a fair evaluation:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 249, + 451, + 481, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 451, + 481, + 464 + ], + "spans": [ + { + "bbox": [ + 249, + 451, + 481, + 464 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = (1 - \\lambda) \\mathcal {L} _ {1} + \\lambda \\mathcal {L} _ {\\mathrm {D} - \\mathrm {S S I M}} \\tag {12}", + "image_path": "2b3f72f5fead8e5f7c069f5013af59c1c90926b58ae74c74ac4d79050b281ca9.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 131, + 468, + 447, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 468, + 447, + 480 + ], + "spans": [ + { + "bbox": [ + 131, + 468, + 447, + 480 + ], + "type": "text", + "content": "where we combine the " + }, + { + "bbox": [ + 131, + 468, + 447, + 480 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_1" + }, + { + "bbox": [ + 131, + 468, + 447, + 480 + ], + "type": "text", + "content": " term with a D-SSIM term, with " + }, + { + "bbox": [ + 131, + 468, + 447, + 480 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 131, + 468, + 447, + 480 + ], + "type": "text", + "content": " set to 0.2."
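A minimal PyTorch sketch of this training recipe, assuming an `ssim` helper (e.g. from the pytorch-msssim package) and following the common 3DGS convention that the D-SSIM term equals 1 - SSIM; the constants follow the text above:

```python
import torch
from pytorch_msssim import ssim  # assumed helper; any SSIM impl works

LAMBDA = 0.2         # lambda in Eq. (12), as stated in the text
WARMUP_ITERS = 3000  # diffused-only phase from Sec. 4

def photometric_loss(render, gt):
    # Eq. (12): L = (1 - lambda) * L1 + lambda * D-SSIM, D-SSIM = 1 - SSIM.
    # render, gt: (C, H, W) images in [0, 1]; SSIM expects a batch dim.
    l1 = (render - gt).abs().mean()
    d_ssim = 1.0 - ssim(render.unsqueeze(0), gt.unsqueeze(0), data_range=1.0)
    return (1.0 - LAMBDA) * l1 + LAMBDA * d_ssim

def shade(iteration, c_d, c_s):
    # Diffused-only warm-up, then add the specular term as per Eq. (8).
    return c_d if iteration < WARMUP_ITERS else c_d + c_s
```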
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 500, + 304, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 500, + 304, + 514 + ], + "spans": [ + { + "bbox": [ + 131, + 500, + 304, + 514 + ], + "type": "text", + "content": "5 Experiments and Results" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 131, + 525, + 206, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 525, + 206, + 536 + ], + "spans": [ + { + "bbox": [ + 131, + 525, + 206, + 536 + ], + "type": "text", + "content": "5.1 Datasets" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 545, + 482, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 545, + 482, + 617 + ], + "spans": [ + { + "bbox": [ + 130, + 545, + 482, + 617 + ], + "type": "text", + "content": "Synthetic scenes - We show experimental results of 3iGS based on the Blender dataset released in [22]. This dataset contains challenging scenes of complex geometries with realistic non-Lambertian materials. Similarly, we evaluate our model on the Shiny Blender dataset presented in [30]. Unlike the Blender dataset, Shiny Blender contains a singular object with simple geometries in each scene with more glossy effects." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 131, + 617, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 617, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 131, + 617, + 482, + 665 + ], + "type": "text", + "content": "Real world complex scenes - To prove the effectiveness of our model in real world scenes, we evaluate our renderings on the Tanks and Temples dataset [18]. This dataset is obtained from video sequences of real world objects and environment." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 250, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 250, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 250, + 102 + ], + "type": "text", + "content": "ZJ. Tang, TJ. Cham" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 227, + 128 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 227, + 128 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 227, + 128 + ], + "type": "text", + "content": "5.2 Comparisons" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 136, + 482, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 136, + 482, + 386 + ], + "spans": [ + { + "bbox": [ + 130, + 136, + 482, + 386 + ], + "type": "text", + "content": "To evaluate our model, we compared against methods that apply both ray-tracing methods like NeRF, or rasterisation methods with Gaussian Splatting. Out of all prior work, 3DGS and GaussianShader is the closest work which offers real time inference speed which we will mainly compare against. On comparing the qualitative result figures, we re-ran the experiments of 3DGS [17] and GaussianShader [13] using their original repository code under settings specified by the authors. 
Ray-Tracing Methods such as [20,22,30] represent a scene as a radiance field using MLPs. By performing multiple samplings on rays marched from the camera into the scene, the sampled points are queried with an MLP to obtain the opacity and radiance values. Volume rendering is performed to obtain the final pixel colour. Rasterisation Methods such as Gaussian Splatting (3DGS) [17] and GaussianShader [13] apply a rasterisation pipeline as opposed to ray tracing. These models represent a scene as Gaussians with radiance properties based on Spherical Harmonics. In [13], 3DGS is extended by modelling a scene with additional material characteristics and applying a shading function, as opposed to ours, which uses an MLP as a neural renderer. Furthermore, [13] shades Gaussians with a global differentiable environment light stored in cube maps, and optimises independent Gaussians with spherical harmonic-based colour for unaccounted illumination. In our work, we represent incident illumination locally with grid-based tensors and optimise Gaussian BRDF features relative to this field." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 387, + 482, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 387, + 482, + 460 + ], + "spans": [ + { + "bbox": [ + 130, + 387, + 482, + 460 + ], + "type": "text", + "content": "For a fair comparison, 3iGS is trained with the same loss function as 3DGS as described in Sec. 4 and for the same number of iterations (30,000 steps). We repurposed the " + }, + { + "bbox": [ + 130, + 387, + 482, + 460 + ], + "type": "inline_equation", + "content": "16 \\times 3" + }, + { + "bbox": [ + 130, + 387, + 482, + 460 + ], + "type": "text", + "content": " SH coefficients in 3DGS as BRDF feature channels and added 4 additional parameters of base colour and roughness for the IDE view-directional encoding. The tensorial illumination field is set at a coarse resolution of " + }, + { + "bbox": [ + 130, + 387, + 482, + 460 + ], + "type": "inline_equation", + "content": "150^{3}" + }, + { + "bbox": [ + 130, + 387, + 482, + 460 + ], + "type": "text", + "content": " voxels."
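Summarising the experimental setup above as a small, hypothetical Python configuration (our reading of the text, not the authors' code):

```python
from dataclasses import dataclass

@dataclass
class Config3iGS:
    # Per-Gaussian features: the 16 x 3 SH coefficients of 3DGS repurposed
    # as BRDF feature channels, plus base colour (3) and roughness (1)
    # used for the IDE view-directional encoding.
    brdf_feature_channels: int = 16 * 3
    extra_params: int = 4
    # Tensorial illumination field at a coarse 150^3 voxel resolution.
    illumination_grid_resolution: int = 150
    # Training matches 3DGS: Eq. (12) loss over 30,000 iterations,
    # with a diffused-only warm-up for the first 3,000 (Sec. 4).
    total_iterations: int = 30_000
    diffuse_warmup_iterations: int = 3_000
    loss_lambda: float = 0.2
```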
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 167, + 477, + 236, + 518 + ], + "blocks": [ + { + "bbox": [ + 167, + 477, + 236, + 518 + ], + "lines": [ + { + "bbox": [ + 167, + 477, + 236, + 518 + ], + "spans": [ + { + "bbox": [ + 167, + 477, + 236, + 518 + ], + "type": "image", + "image_path": "8ce7991025814077ad57694b81dacd1ef8d5c33fe53f0afd139ae6e5d478ad4d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 237, + 477, + 307, + 517 + ], + "blocks": [ + { + "bbox": [ + 237, + 477, + 307, + 517 + ], + "lines": [ + { + "bbox": [ + 237, + 477, + 307, + 517 + ], + "spans": [ + { + "bbox": [ + 237, + 477, + 307, + 517 + ], + "type": "image", + "image_path": "8116bb6874e0729fb74de9f0e54aba1ba206c1fa2f1ae40a612f86ff2b3a7acf.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 307, + 477, + 375, + 517 + ], + "blocks": [ + { + "bbox": [ + 307, + 477, + 375, + 517 + ], + "lines": [ + { + "bbox": [ + 307, + 477, + 375, + 517 + ], + "spans": [ + { + "bbox": [ + 307, + 477, + 375, + 517 + ], + "type": "image", + "image_path": "79f8cb7a84722ad8eb637b31f071f3efceb07cbc03aa12dbb3a95131eea56e4e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 376, + 477, + 446, + 517 + ], + "blocks": [ + { + "bbox": [ + 376, + 477, + 446, + 517 + ], + "lines": [ + { + "bbox": [ + 376, + 477, + 446, + 517 + ], + "spans": [ + { + "bbox": [ + 376, + 477, + 446, + 517 + ], + "type": "image", + "image_path": "56ba062ea5895beeba6c80f78ff50f889696d1a18399ede7980b1fc8cdba65e3.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 167, + 520, + 233, + 559 + ], + "blocks": [ + { + "bbox": [ + 167, + 520, + 233, + 559 + ], + "lines": [ + { + "bbox": [ + 167, + 520, + 233, + 559 + ], + "spans": [ + { + "bbox": [ + 167, + 520, + 233, + 559 + ], + "type": "image", + "image_path": "b3dc5171bbf61f0175eb762d82e642437a838f3f710262ecda4503158f3d9d20.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 195, + 559, + 208, + 565 + ], + "lines": [ + { + "bbox": [ + 195, + 559, + 208, + 565 + ], + "spans": [ + { + "bbox": [ + 195, + 559, + 208, + 565 + ], + "type": "text", + "content": "3DGS" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 237, + 520, + 306, + 559 + ], + "blocks": [ + { + "bbox": [ + 237, + 520, + 306, + 559 + ], + "lines": [ + { + "bbox": [ + 237, + 520, + 306, + 559 + ], + "spans": [ + { + "bbox": [ + 237, + 520, + 306, + 559 + ], + "type": "image", + "image_path": "bc7f0d57516468cc3a72d643e0a2e177f5ec85b9e73b9f702be2d0a094a632f2.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 262, + 559, + 283, + 565 + ], + "lines": [ + { + "bbox": [ + 262, + 559, + 283, + 565 + ], + "spans": [ + { + "bbox": [ + 262, + 559, + 283, + 565 + ], + "type": "text", + "content": "G.Shader" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 131, + 573, + 481, + 596 + ], + "lines": [ + { + "bbox": [ + 131, + 573, + 481, + 596 + ], + "spans": [ + { + "bbox": [ + 131, + 573, + 481, + 596 + ], + "type": "text", + "content": "Fig. 
3: Comparisons of test-set views of real world scenes. 3iGS enhances 3DGS renderings by producing clearer view-dependent effects, as shown." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 306, + 520, + 373, + 559 + ], + "blocks": [ + { + "bbox": [ + 306, + 520, + 373, + 559 + ], + "lines": [ + { + "bbox": [ + 306, + 520, + 373, + 559 + ], + "spans": [ + { + "bbox": [ + 306, + 520, + 373, + 559 + ], + "type": "image", + "image_path": "30920ddd5356e96e1cae80fbd12b008f55b92f1e9abf323b713b9dc4e7a7ceb0.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 336, + 559, + 348, + 565 + ], + "lines": [ + { + "bbox": [ + 336, + 559, + 348, + 565 + ], + "spans": [ + { + "bbox": [ + 336, + 559, + 348, + 565 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 377, + 520, + 444, + 559 + ], + "blocks": [ + { + "bbox": [ + 377, + 520, + 444, + 559 + ], + "lines": [ + { + "bbox": [ + 377, + 520, + 444, + 559 + ], + "spans": [ + { + "bbox": [ + 377, + 520, + 444, + 559 + ], + "type": "image", + "image_path": "8a1855c886cba3ea446e7412cf9d05c3ad1660491f57e037471de92349fcaa68.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 396, + 559, + 425, + 565 + ], + "lines": [ + { + "bbox": [ + 396, + 559, + 425, + 565 + ], + "spans": [ + { + "bbox": [ + 396, + 559, + 425, + 565 + ], + "type": "text", + "content": "Ground Truth" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 178, + 91, + 448, + 103 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 91, + 448, + 103 + ], + "spans": [ + { + "bbox": [ + 178, + 91, + 448, + 103 + ], + "type": "text", + "content": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splatting" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 134, + 230, + 479, + 656 + ], + "blocks": [ + { + "bbox": [ + 130, + 121, + 482, + 222 + ], + "lines": [ + { + "bbox": [ + 130, + 121, + 482, + 222 + ], + "spans": [ + { + "bbox": [ + 130, + 121, + 482, + 222 + ], + "type": "text", + "content": "Table 1: Our approach demonstrates superior quantitative performance over current methods when tested on synthetic datasets. Specifically, within the NeRF Synthetic dataset, our method surpasses all competitors across various image quality assessments (PSNR/SSIM/LPIPS). In the context of the Shiny Blender dataset, 3iGS matches the performance of existing rasterisation techniques in terms of PSNR and SSIM but surpasses them in LPIPS for the majority of scenes. We encourage readers to examine the accompanying figure showcasing renderings of the Shiny Blender scenes, where our method attains enhanced qualitative outcomes. Best results, benchmarked across real-time rendering methods, are in bold."
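For reference, the PSNR metric reported in Table 1 (and Tables 2-3 below) can be computed with a few lines of NumPy; SSIM and LPIPS typically come from packages such as scikit-image and lpips. A minimal sketch, assuming images as float arrays in [0, 1]:

```python
import numpy as np

def psnr(render, gt, data_range=1.0):
    # Peak Signal-to-Noise Ratio between a render and ground truth.
    mse = np.mean((render.astype(np.float64) - gt.astype(np.float64)) ** 2)
    if mse == 0:
        return float("inf")
    return 10.0 * np.log10((data_range ** 2) / mse)

# Toy usage: a noisy render of a synthetic ground-truth image.
rng = np.random.default_rng(0)
gt = rng.random((64, 64, 3))
render = np.clip(gt + rng.normal(scale=0.05, size=gt.shape), 0.0, 1.0)
print(f"PSNR: {psnr(render, gt):.2f} dB")
```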
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 134, + 230, + 479, + 656 + ], + "lines": [ + { + "bbox": [ + 134, + 230, + 479, + 656 + ], + "spans": [ + { + "bbox": [ + 134, + 230, + 479, + 656 + ], + "type": "table", + "html": "
<table><tr><td colspan="10">NeRF Synthetic [22]</td></tr>
<tr><td></td><td>Chair</td><td>Drums</td><td>Lego</td><td>Mic</td><td>Mats.</td><td>Ship</td><td>Hotdog</td><td>Ficus</td><td>Avg.</td></tr>
<tr><td colspan="10">PSNR↑</td></tr>
<tr><td>NeRF [22]</td><td>33.00</td><td>25.01</td><td>32.54</td><td>32.91</td><td>29.62</td><td>28.65</td><td>36.18</td><td>30.13</td><td>31.01</td></tr>
<tr><td>Ref-NeRF [30]</td><td>33.98</td><td>25.43</td><td>35.10</td><td>33.65</td><td>27.10</td><td>29.24</td><td>37.04</td><td>28.74</td><td>31.29</td></tr>
<tr><td>ENVIDR [20]</td><td>31.22</td><td>22.99</td><td>29.55</td><td>32.17</td><td>29.52</td><td>21.57</td><td>31.44</td><td>26.60</td><td>28.13</td></tr>
<tr><td>3DGS [17]</td><td>35.82</td><td>26.17</td><td>35.69</td><td>35.34</td><td>30.00</td><td>30.87</td><td>37.67</td><td>34.83</td><td>33.30</td></tr>
<tr><td>G.Shader [13]</td><td>35.83</td><td>26.36</td><td>35.87</td><td>35.23</td><td>30.07</td><td>30.82</td><td>37.85</td><td>34.97</td><td>33.38</td></tr>
<tr><td>G.Shader (reproduced) [13]</td><td>33.70</td><td>25.50</td><td>32.99</td><td>34.07</td><td>28.87</td><td>28.37</td><td>35.29</td><td>33.05</td><td>31.48</td></tr>
<tr><td>Ours</td><td>35.90</td><td>26.75</td><td>35.94</td><td>36.01</td><td>30.00</td><td>31.12</td><td>37.98</td><td>35.40</td><td>33.64</td></tr>
<tr><td colspan="10">SSIM↑</td></tr>
<tr><td>NeRF [22]</td><td>0.967</td><td>0.925</td><td>0.961</td><td>0.980</td><td>0.949</td><td>0.856</td><td>0.974</td><td>0.964</td><td>0.947</td></tr>
<tr><td>Ref-NeRF [30]</td><td>0.974</td><td>0.929</td><td>0.975</td><td>0.983</td><td>0.921</td><td>0.864</td><td>0.979</td><td>0.954</td><td>0.947</td></tr>
<tr><td>ENVIDR [20]</td><td>0.976</td><td>0.930</td><td>0.961</td><td>0.984</td><td>0.968</td><td>0.855</td><td>0.963</td><td>0.987</td><td>0.956</td></tr>
<tr><td>3DGS [17]</td><td>0.987</td><td>0.954</td><td>0.983</td><td>0.991</td><td>0.960</td><td>0.907</td><td>0.985</td><td>0.987</td><td>0.969</td></tr>
<tr><td>G.Shader [13]</td><td>0.987</td><td>0.949</td><td>0.983</td><td>0.991</td><td>0.960</td><td>0.905</td><td>0.985</td><td>0.985</td><td>0.968</td></tr>
<tr><td>G.Shader (reproduced) [13]</td><td>0.980</td><td>0.945</td><td>0.972</td><td>0.989</td><td>0.951</td><td>0.881</td><td>0.980</td><td>0.982</td><td>0.960</td></tr>
<tr><td>Ours</td><td>0.987</td><td>0.955</td><td>0.983</td><td>0.992</td><td>0.961</td><td>0.908</td><td>0.986</td><td>0.989</td><td>0.970</td></tr>
<tr><td colspan="10">LPIPS↓</td></tr>
<tr><td>NeRF [22]</td><td>0.046</td><td>0.091</td><td>0.050</td><td>0.028</td><td>0.063</td><td>0.206</td><td>0.121</td><td>0.044</td><td>0.081</td></tr>
<tr><td>Ref-NeRF [30]</td><td>0.029</td><td>0.073</td><td>0.025</td><td>0.018</td><td>0.078</td><td>0.158</td><td>0.028</td><td>0.056</td><td>0.058</td></tr>
<tr><td>ENVIDR [20]</td><td>0.031</td><td>0.080</td><td>0.054</td><td>0.021</td><td>0.045</td><td>0.228</td><td>0.072</td><td>0.010</td><td>0.067</td></tr>
<tr><td>3DGS [17]</td><td>0.012</td><td>0.037</td><td>0.016</td><td>0.006</td><td>0.034</td><td>0.106</td><td>0.020</td><td>0.012</td><td>0.030</td></tr>
<tr><td>G.Shader [13]</td><td>0.012</td><td>0.040</td><td>0.014</td><td>0.006</td><td>0.033</td><td>0.098</td><td>0.019</td><td>0.013</td><td>0.029</td></tr>
<tr><td>G.Shader (reproduced) [13]</td><td>0.019</td><td>0.045</td><td>0.026</td><td>0.009</td><td>0.046</td><td>0.148</td><td>0.029</td><td>0.017</td><td>0.042</td></tr>
<tr><td>Ours</td><td>0.012</td><td>0.036</td><td>0.015</td><td>0.005</td><td>0.034</td><td>0.102</td><td>0.019</td><td>0.010</td><td>0.029</td></tr>
<tr><td colspan="10">Shiny Blender [30]</td></tr>
<tr><td></td><td>Car</td><td>Ball</td><td>Helmet</td><td>Teapot</td><td>Toaster</td><td>Coffee</td><td>Avg.</td><td></td><td></td></tr>
<tr><td colspan="10">PSNR↑</td></tr>
<tr><td>NVDiffRec [24]</td><td>27.98</td><td>21.77</td><td>26.97</td><td>40.44</td><td>24.31</td><td>30.74</td><td>28.70</td><td></td><td></td></tr>
<tr><td>Ref-NeRF [30]</td><td>30.41</td><td>29.14</td><td>29.92</td><td>45.19</td><td>25.29</td><td>33.99</td><td>32.32</td><td></td><td></td></tr>
<tr><td>ENVIDR [20]</td><td>28.46</td><td>38.89</td><td>32.73</td><td>41.59</td><td>26.11</td><td>29.48</td><td>32.88</td><td></td><td></td></tr>
<tr><td>3DGS [17]</td><td>27.24</td><td>27.69</td><td>28.32</td><td>45.68</td><td>20.99</td><td>32.32</td><td>30.37</td><td></td><td></td></tr>
<tr><td>G.Shader [13]</td><td>27.90</td><td>30.98</td><td>28.32</td><td>45.86</td><td>26.21</td><td>32.39</td><td>31.94</td><td></td><td></td></tr>
<tr><td>G.Shader (reproduced) [13]</td><td>27.51</td><td>29.02</td><td>28.73</td><td>43.05</td><td>22.86</td><td>31.34</td><td>30.41</td><td></td><td></td></tr>
<tr><td>Ours</td><td>27.51</td><td>27.64</td><td>28.21</td><td>46.04</td><td>22.69</td><td>32.58</td><td>30.77</td><td></td><td></td></tr>
<tr><td colspan="10">SSIM↑</td></tr>
<tr><td>NVDiffRec [24]</td><td>0.963</td><td>0.858</td><td>0.951</td><td>0.996</td><td>0.928</td><td>0.973</td><td>0.945</td><td></td><td></td></tr>
<tr><td>Ref-NeRF [30]</td><td>0.949</td><td>0.956</td><td>0.955</td><td>0.995</td><td>0.910</td><td>0.972</td><td>0.956</td><td></td><td></td></tr>
<tr><td>ENVIDR [20]</td><td>0.961</td><td>0.991</td><td>0.980</td><td>0.996</td><td>0.939</td><td>0.949</td><td>0.969</td><td></td><td></td></tr>
<tr><td>3DGS [17]</td><td>0.930</td><td>0.937</td><td>0.951</td><td>0.996</td><td>0.895</td><td>0.971</td><td>0.947</td><td></td><td></td></tr>
<tr><td>G.Shader [13]</td><td>0.931</td><td>0.965</td><td>0.950</td><td>0.996</td><td>0.929</td><td>0.971</td><td>0.957</td><td></td><td></td></tr>
<tr><td>G.Shader (reproduced) [13]</td><td>0.930</td><td>0.954</td><td>0.955</td><td>0.995</td><td>0.900</td><td>0.969</td><td>0.950</td><td></td><td></td></tr>
<tr><td>Ours</td><td>0.930</td><td>0.938</td><td>0.951</td><td>0.997</td><td>0.908</td><td>0.973</td><td>0.949</td><td></td><td></td></tr>
<tr><td colspan="10">LPIPS↓</td></tr>
<tr><td>NVDiffRec [24]</td><td>0.045</td><td>0.297</td><td>0.118</td><td>0.011</td><td>0.169</td><td>0.076</td><td>0.119</td><td></td><td></td></tr>
<tr><td>Ref-NeRF [30]</td><td>0.051</td><td>0.307</td><td>0.087</td><td>0.013</td><td>0.118</td><td>0.082</td><td>0.109</td><td></td><td></td></tr>
<tr><td>ENVIDR [20]</td><td>0.049</td><td>0.067</td><td>0.051</td><td>0.011</td><td>0.116</td><td>0.139</td><td>0.072</td><td></td><td></td></tr>
<tr><td>3DGS [17]</td><td>0.047</td><td>0.161</td><td>0.079</td><td>0.007</td><td>0.126</td><td>0.078</td><td>0.083</td><td></td><td></td></tr>
<tr><td>G.Shader [13]</td><td>0.045</td><td>0.121</td><td>0.076</td><td>0.007</td><td>0.079</td><td>0.078</td><td>0.068</td><td></td><td></td></tr>
<tr><td>G.Shader (reproduced) [13]</td><td>0.045</td><td>0.148</td><td>0.088</td><td>0.012</td><td>0.111</td><td>0.085</td><td>0.099</td><td></td><td></td></tr>
<tr><td>Ours</td><td>0.045</td><td>0.156</td><td>0.073</td><td>0.006</td><td>0.099</td><td>0.076</td><td>0.075</td><td></td><td></td></tr>
</table>
", + "image_path": "c96b59b0ab579676aba2db33b57eaaf8e4bdd61609d56a7721c436c3fe12c278.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 250, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 250, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 250, + 102 + ], + "type": "text", + "content": "ZJ. Tang, TJ. Cham" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 167, + 125, + 457, + 531 + ], + "blocks": [ + { + "bbox": [ + 167, + 125, + 457, + 531 + ], + "lines": [ + { + "bbox": [ + 167, + 125, + 457, + 531 + ], + "spans": [ + { + "bbox": [ + 167, + 125, + 457, + 531 + ], + "type": "image", + "image_path": "96510cdb9df4a04f8a975529066e0e5d8b5e3ab590ff0943ead053b00ec4af6a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 537, + 482, + 647 + ], + "lines": [ + { + "bbox": [ + 130, + 537, + 482, + 647 + ], + "spans": [ + { + "bbox": [ + 130, + 537, + 482, + 647 + ], + "type": "text", + "content": "Fig. 4: In evaluating test-set views from the Shiny Blender dataset, we compared the performance of 3DGS [17], GaussianShader [13], and our work 3iGS. The standard 3DGS method generally yields the least satisfactory renderings, with images often appearing blurry in areas of specular reflection. GaussianShader shows a slight improvement by incorporating the GGX BRDF model, leading to marginally better results in rendering specular regions. In contrast, 3iGS stands out by employing a general rendering function that predicts neural features of illumination field and BRDF instead of relying on physical parameters. This approach allows 3iGS to surpass existing methods significantly, capturing the intricate details within specular highlights with remarkable precision." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 178, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 178, + 91, + 448, + 102 + ], + "type": "text", + "content": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splitting" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 134, + 164, + 480, + 297 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 482, + 149 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 482, + 149 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 482, + 149 + ], + "type": "text", + "content": "Table 2: A quantitative comparisons (PSNR / SSIM / LPIPS) between 3DGS [17], GaussianShader [13], and our method on real world scenarios on Tanks and Temples Dataset [18]" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 134, + 164, + 480, + 297 + ], + "lines": [ + { + "bbox": [ + 134, + 164, + 480, + 297 + ], + "spans": [ + { + "bbox": [ + 134, + 164, + 480, + 297 + ], + "type": "table", + "html": "
<table><tr><td colspan="7">Tanks and Temples Dataset [18]</td></tr>
<tr><td></td><td>Barn</td><td>Caterpillar</td><td>Family</td><td>Ignatius</td><td>Truck</td><td>Avg.</td></tr>
<tr><td colspan="7">PSNR↑</td></tr>
<tr><td>3DGS [17]</td><td>29.13</td><td>26.17</td><td>34.88</td><td>29.50</td><td>28.38</td><td>29.61</td></tr>
<tr><td>G.Shader (reproduced) [13]</td><td>27.67</td><td>25.23</td><td>33.52</td><td>28.28</td><td>27.61</td><td>28.46</td></tr>
<tr><td>Ours</td><td>29.73</td><td>27.04</td><td>35.36</td><td>30.04</td><td>28.82</td><td>30.20</td></tr>
<tr><td colspan="7">SSIM↑</td></tr>
<tr><td>3DGS [17]</td><td>0.920</td><td>0.932</td><td>0.982</td><td>0.973</td><td>0.945</td><td>0.950</td></tr>
<tr><td>G.Shader (reproduced) [13]</td><td>0.897</td><td>0.915</td><td>0.977</td><td>0.968</td><td>0.935</td><td>0.938</td></tr>
<tr><td>Ours</td><td>0.923</td><td>0.938</td><td>0.983</td><td>0.974</td><td>0.947</td><td>0.953</td></tr>
<tr><td colspan="7">LPIPS↓</td></tr>
<tr><td>3DGS [17]</td><td>0.113</td><td>0.074</td><td>0.023</td><td>0.032</td><td>0.059</td><td>0.060</td></tr>
<tr><td>G.Shader (reproduced) [13]</td><td>0.147</td><td>0.098</td><td>0.029</td><td>0.039</td><td>0.071</td><td>0.077</td></tr>
<tr><td>Ours</td><td>0.112</td><td>0.071</td><td>0.022</td><td>0.031</td><td>0.057</td><td>0.058</td></tr>
</table>
", + "image_path": "f9b8852e7add543394a02e62c58ce73a6eb67eb4b859d24e22f2d74cb6906841.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 342, + 214, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 342, + 214, + 354 + ], + "spans": [ + { + "bbox": [ + 132, + 342, + 214, + 354 + ], + "type": "text", + "content": "5.3 Discussion" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 365, + 482, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 365, + 482, + 628 + ], + "spans": [ + { + "bbox": [ + 130, + 365, + 482, + 628 + ], + "type": "text", + "content": "In the comparisons detailed in Sec. 5.2, 3iGS demonstrates superior performance over the established baselines, delivering both quantitatively and qualitatively enhanced renderings in a majority of test cases on real time rendering rasterisation approaches. In the NeRF Synthetic dataset, 3iGS surpasses the prior 3DGS and GaussianShader. Although GaussianShader reportedly performs slightly better on the Shiny Blender dataset, we have included both reported and reproduced results based on the official code repository from the authors. We postulate that the Shiny Blender dataset scenes, which comprise single objects only, presents simpler geometries which facilitates an easier recovery of intrinsic material properties essential for rendering view-dependent effects. In addition, specular reflections in this dataset is primarily dominated by direct illumination from an external environment map. Thus GaussianShader which models direct lighting with a differentiable environment cube map performs well. However, when presented with a complex scene containing multiple objects, such as the NeRF Synthetic dataset shown in Fig. 1 with its intricate intra-scene interactions, GaussianShader struggles to accurately recover the physical rendering parameters. Furthermore these lighting scenarios are more complex due to indirect lighting. Therefore jointly modelling direct and indirect lighting using a continuous local incident field is crucial. NeRF based approaches reported above present competitive results. Yet, such methods are extremely slow to train, often requiring days, and are unable to perform real-time rendering needed for interactive applications." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 629, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 481, + 665 + ], + "type": "text", + "content": "Comparing across all methodologies, our 3iGS method presents an attractive and pragmatic alternative to achieve excellent rendering quality while balancing rendering speed, as discussed in Sec. 5.4." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 250, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 250, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 250, + 102 + ], + "type": "text", + "content": "ZJ. Tang, TJ. 
Cham" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 149, + 114, + 249, + 236 + ], + "blocks": [ + { + "bbox": [ + 149, + 114, + 249, + 236 + ], + "lines": [ + { + "bbox": [ + 149, + 114, + 249, + 236 + ], + "spans": [ + { + "bbox": [ + 149, + 114, + 249, + 236 + ], + "type": "image", + "image_path": "1d015a307ed8d70196e68321e2845c04f15d6b6457fc6df4bea45d20dbad5a5a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 251, + 480, + 307 + ], + "lines": [ + { + "bbox": [ + 130, + 251, + 480, + 307 + ], + "spans": [ + { + "bbox": [ + 130, + 251, + 480, + 307 + ], + "type": "text", + "content": "Fig. 5: In contrast to 3DGS [17] and GaussianShader [13], our 3iGS method uniquely identifies both the golden specular highlights and the reflections on the Medium Tom as seen in the plastic surface of the Floor Tom (top row). Our approach successfully captures the detailed specular highlights on every cymbal within the drum setup from the Blender dataset, as presented in [22]." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 251, + 114, + 365, + 243 + ], + "blocks": [ + { + "bbox": [ + 251, + 114, + 365, + 243 + ], + "lines": [ + { + "bbox": [ + 251, + 114, + 365, + 243 + ], + "spans": [ + { + "bbox": [ + 251, + 114, + 365, + 243 + ], + "type": "image", + "image_path": "d4f630b34d2a3017bd0d5e9c88bfc7ae22160bbe7ba4e155a34202af239a452a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 366, + 114, + 482, + 243 + ], + "blocks": [ + { + "bbox": [ + 366, + 114, + 482, + 243 + ], + "lines": [ + { + "bbox": [ + 366, + 114, + 482, + 243 + ], + "spans": [ + { + "bbox": [ + 366, + 114, + 482, + 243 + ], + "type": "image", + "image_path": "e90cd63d4a7140ffc259d6e11ca6c9411103f4d71f9d90d8e03f98e062cc6716.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 327, + 246, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 327, + 246, + 338 + ], + "spans": [ + { + "bbox": [ + 132, + 327, + 246, + 338 + ], + "type": "text", + "content": "5.4 Ablation Studies" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 181, + 427, + 433, + 475 + ], + "blocks": [ + { + "bbox": [ + 130, + 363, + 480, + 418 + ], + "lines": [ + { + "bbox": [ + 130, + 363, + 480, + 418 + ], + "spans": [ + { + "bbox": [ + 130, + 363, + 480, + 418 + ], + "type": "text", + "content": "Table 3: An ablation study of our model on the Blender synthetic dataset. We experiment 3iGS under a variety of model parameters. In the first row, we directly an outgoing radiance field similar to NeRF based methods. The second row omits the prediction of a BRDF roughness parameter which encodes the viewing direction as IDE. Both experimental results are inferior compared to our complete model." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 181, + 427, + 433, + 475 + ], + "lines": [ + { + "bbox": [ + 181, + 427, + 433, + 475 + ], + "spans": [ + { + "bbox": [ + 181, + 427, + 433, + 475 + ], + "type": "table", + "html": "
<tr><td></td><td>PSNR</td><td>SSIM</td><td>LPIPS</td></tr>
<tr><td>Ours (outgoing radiance field)</td><td>32.38</td><td>0.965</td><td>0.035</td></tr>
<tr><td>Ours (no roughness parameter, i.e. IDE)</td><td>33.26</td><td>0.967</td><td>0.031</td></tr>
<tr><td>Ours (complete model)</td><td>33.64</td><td>0.970</td><td>0.029</td></tr>
", + "image_path": "12eca1b3f281f5d531dc2f52b26b8a9ea273522007aebc501fab3ebf891f4be7.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 498, + 480, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 498, + 480, + 592 + ], + "spans": [ + { + "bbox": [ + 130, + 498, + 480, + 592 + ], + "type": "text", + "content": "In Tab. 3, we study the effectiveness of our design choices and parameters for 3iGS. In the first row, we use the Gaussian mean and interpolate features from the factorised tensors and predict the outgoing specular colours directly. In this scenario, we predict the outgoing radiance field similar to a NeRF like manner for specular colours. In the second row, we abandon the BRDF roughness parameters from the Gaussian features and apply a standard Fourier positional encoding of viewing direction. Both cases led to inferior renderings as compared to our complete model." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 594, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 482, + 665 + ], + "type": "text", + "content": "In Tab. 4, we illustrate the training and rendering speed (test) of 3iGS against 3DGS and GaussianShader. We normalise the speed based on 3DGS. Our model performs competitively and achieve real time rendering speed although it is slower than 3DGS whereas GaussianShader performs much slower than the vanilla model. We attribute the efficient rendering speed to the use of factorised tensors for the illumination field." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 178, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 178, + 91, + 448, + 102 + ], + "type": "text", + "content": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splitting" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 222, + 169, + 391, + 215 + ], + "blocks": [ + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "lines": [ + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "spans": [ + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "type": "text", + "content": "Table 4: We evaluate the test and train speed of 3DGS [17] and GaussianShader [13] on a single Tesla V100 32Gb VRAM GPU with the original codebase and settings advocated by the authors. We then report the results normalised with these rendering speed of 3DGS." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 222, + 169, + 391, + 215 + ], + "lines": [ + { + "bbox": [ + 222, + 169, + 391, + 215 + ], + "spans": [ + { + "bbox": [ + 222, + 169, + 391, + 215 + ], + "type": "table", + "html": "
<tr><td></td><td>Test</td><td>Train</td></tr>
<tr><td>3DGS</td><td>1.0x</td><td>1.0x</td></tr>
<tr><td>GaussianShader</td><td>6.3x slower</td><td>12.1x slower</td></tr>
<tr><td>Ours</td><td>2.0x slower</td><td>3.2x slower</td></tr>
", + "image_path": "0f597eb59f61bec678f8ce8242b2c13e158cf0f4bc152f138359e179f2e2339e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 236, + 321, + 249 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 236, + 321, + 249 + ], + "spans": [ + { + "bbox": [ + 132, + 236, + 321, + 249 + ], + "type": "text", + "content": "6 Limitations and Weaknesses" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 262, + 482, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 262, + 482, + 360 + ], + "spans": [ + { + "bbox": [ + 130, + 262, + 482, + 360 + ], + "type": "text", + "content": "3iGS inherits the main challenges of factorised tensors as [9]. Our model is limited to scenes that fit within a defined bounding box. Future works could explore this direction in warping unbounded scenes to fit a tensorial grid representation. Furthermore, 3iGS inherits the weaknesses of 3DGS; a large VRAM GPU is necessary to fit 3D Gaussians, and to evaluate the illumination field. A straightforward workaround is to reduce the number of Gaussians created by adding an upper bound on the number of produced Gaussians in the adaptive control step. Our work also inherits 3DGS's difficulty in producing accurate scene geometry." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 376, + 220, + 389 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 376, + 220, + 389 + ], + "spans": [ + { + "bbox": [ + 132, + 376, + 220, + 389 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 402, + 482, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 402, + 482, + 668 + ], + "spans": [ + { + "bbox": [ + 130, + 402, + 482, + 668 + ], + "type": "text", + "content": "We introduce our work, Factorised Tensorial Illumination for 3D Gaussian Splatting (3iGS), to enhance the view-dependent effects in rendering Gaussian radiance fields. Our approach overcomes the constraints of previous methods, which relied on optimising an outgoing radiance field of independent Gaussians with Spherical Harmonics (SH) parameters. We illustrate that superior view-dependent effects in 3DGS can be attained by depicting an outgoing radiance field as a continuous illumination field and the Gaussian's BRDF characteristics in relation to this field. Distinct from other methods depending on oversimplified yet restrictive rendering equations that require prediction of physical attributes of scene surfaces for shading, our methodology proves to be more efficacious. Furthermore, we have shown that fast rendering speeds are attainable through the representation of an illumination field with factorised tensors. We demonstrated our claims across diverse datasets, from synthetic to real-world environments, and compared against prior art on both quantitative and qualitative metrics. We also evaluate the effectiveness of our model parameters and design choices through an ablation study. Finally we acknowledge the limitations of our research as a catalyst for future investigative directions. Our code is released here. Acknowledgement This study is supported under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding initiative, as well as cash and in-kind collaboration from the industry partner(s). 
The computational work for this article was partially performed on resources of the National Supercomputing Centre, Singapore." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 250, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 250, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 250, + 102 + ], + "type": "text", + "content": "ZJ. Tang, TJ. Cham" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 114, + 197, + 126 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 114, + 197, + 126 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 197, + 126 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 140, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 138, + 140, + 481, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 140, + 481, + 183 + ], + "spans": [ + { + "bbox": [ + 138, + 140, + 481, + 183 + ], + "type": "text", + "content": "1. Barron, J.T., Mildenhall, B., Tancik, M., Hedman, P., Martin-Brualla, R., Srinivasan, P.P.: Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5855-5864 (2021)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 184, + 481, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 184, + 481, + 227 + ], + "spans": [ + { + "bbox": [ + 138, + 184, + 481, + 227 + ], + "type": "text", + "content": "2. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mipnerf 360: Unbounded anti-aliased neural radiance fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5470-5479 (2022)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 228, + 481, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 228, + 481, + 259 + ], + "spans": [ + { + "bbox": [ + 138, + 228, + 481, + 259 + ], + "type": "text", + "content": "3. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Zip-nerf: Anti-aliased grid-based neural radiance fields. arXiv preprint arXiv:2304.06706 (2023)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 260, + 481, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 260, + 481, + 293 + ], + "spans": [ + { + "bbox": [ + 138, + 260, + 481, + 293 + ], + "type": "text", + "content": "4. Bi, S., Xu, Z., Srinivasan, P., Mildenhall, B., Sunkavalli, K., Hasan, M., Hold-Geoffroy, Y., Kriegman, D., Ramamoorthi, R.: Neural reflectance fields for appearance acquisition. arXiv preprint arXiv:2008.03824 (2020)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 293, + 481, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 293, + 481, + 326 + ], + "spans": [ + { + "bbox": [ + 138, + 293, + 481, + 326 + ], + "type": "text", + "content": "5. 
Boss, M., Braun, R., Jampani, V., Barron, J.T., Liu, C., Lensch, H.: Nerd: Neural reflectance decomposition from image collections. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 12684-12694 (2021)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 327, + 481, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 327, + 481, + 358 + ], + "spans": [ + { + "bbox": [ + 138, + 327, + 481, + 358 + ], + "type": "text", + "content": "6. Boss, M., Jampani, V., Braun, R., Liu, C., Barron, J., Lensch, H.: Neural-pil: Neural pre-integrated lighting for reflectance decomposition. Advances in Neural Information Processing Systems 34, 10691-10704 (2021)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 360, + 481, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 360, + 481, + 380 + ], + "spans": [ + { + "bbox": [ + 138, + 360, + 481, + 380 + ], + "type": "text", + "content": "7. Burley, B., Studios, W.D.A.: Physically-based shading at disney. In: Acm Siggraph. vol. 2012, pp. 1-7. vol. 2012 (2012)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 381, + 481, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 381, + 481, + 413 + ], + "spans": [ + { + "bbox": [ + 138, + 381, + 481, + 413 + ], + "type": "text", + "content": "8. Carroll, J.D., Chang, J.J.: Analysis of individual differences in multidimensional scaling via an n-way generalization of \"eckart-young\" decomposition. Psychometrika 35(3), 283-319 (1970)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 414, + 481, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 414, + 481, + 435 + ], + "spans": [ + { + "bbox": [ + 138, + 414, + 481, + 435 + ], + "type": "text", + "content": "9. Chen, A., Xu, Z., Geiger, A., Yu, J., Su, H.: Tensorf: Tensorial radiance fields. In: European Conference on Computer Vision. pp. 333-350. Springer (2022)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 436, + 481, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 436, + 481, + 457 + ], + "spans": [ + { + "bbox": [ + 138, + 436, + 481, + 457 + ], + "type": "text", + "content": "0. Cook, R.L., Torrance, K.E.: A reflectance model for computer graphics. ACM Transactions on Graphics (ToG) 1(1), 7-24 (1982)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 458, + 481, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 458, + 481, + 490 + ], + "spans": [ + { + "bbox": [ + 138, + 458, + 481, + 490 + ], + "type": "text", + "content": "1. Fridovich-Keil, S., Yu, A., Tancik, M., Chen, Q., Recht, B., Kanazawa, A.: Plenoxels: Radiance fields without neural networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5501-5510 (2022)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 491, + 481, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 491, + 481, + 511 + ], + "spans": [ + { + "bbox": [ + 138, + 491, + 481, + 511 + ], + "type": "text", + "content": "2. Greger, G., Shirley, P., Hubbard, P.M., Greenberg, D.P.: The irradiance volume. 
IEEE Computer Graphics and Applications 18(2), 32-43 (1998)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 512, + 481, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 512, + 481, + 533 + ], + "spans": [ + { + "bbox": [ + 138, + 512, + 481, + 533 + ], + "type": "text", + "content": "3. Jiang, Y., Tu, J., Liu, Y., Gao, X., Long, X., Wang, W., Ma, Y.: Gaussianshader: 3d gaussian splatting with shading functions for reflective surfaces (2023)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 534, + 481, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 534, + 481, + 567 + ], + "spans": [ + { + "bbox": [ + 138, + 534, + 481, + 567 + ], + "type": "text", + "content": "4. Jin, H., Liu, I., Xu, P., Zhang, X., Han, S., Bi, S., Zhou, X., Xu, Z., Su, H.: Tensoroir: Tensorial inverse rendering. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 165-174 (2023)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 567, + 481, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 567, + 481, + 589 + ], + "spans": [ + { + "bbox": [ + 138, + 567, + 481, + 589 + ], + "type": "text", + "content": "5. Kajiya, J.T.: The rendering equation. In: Proceedings of the 13th annual conference on Computer graphics and interactive techniques. pp. 143-150 (1986)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 138, + 590, + 481, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 590, + 481, + 610 + ], + "spans": [ + { + "bbox": [ + 138, + 590, + 481, + 610 + ], + "type": "text", + "content": "6. Kautz, J., Snyder, J., Sloan, P.P.J.: Fast arbitrary brdf shading for low-frequency lighting using spherical harmonics. Rendering Techniques 2(291-296), 1 (2002)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 138, + 611, + 481, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 611, + 481, + 643 + ], + "spans": [ + { + "bbox": [ + 138, + 611, + 481, + 643 + ], + "type": "text", + "content": "7. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (ToG) 42(4), 1-14 (2023)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 138, + 643, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 643, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 643, + 481, + 665 + ], + "type": "text", + "content": "8. Knapitsch, A., Park, J., Zhou, Q.Y., Koltun, V.: Tanks and temples: Benchmarking large-scale scene reconstruction. 
ACM Transactions on Graphics 36(4) (2017)" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 178, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 178, + 91, + 447, + 102 + ], + "type": "text", + "content": "3iGS: Factorised Tensorial Illumination for 3D Gaussian Splitting" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 599 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 133, + 116, + 481, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 116, + 481, + 160 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 481, + 160 + ], + "type": "text", + "content": "19. Li, Z., Müller, T., Evans, A., Taylor, R.H., Unberath, M., Liu, M.Y., Lin, C.H.: Neuralangelo: High-fidelity neural surface reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8456-8465 (2023)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 161, + 481, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 161, + 481, + 194 + ], + "spans": [ + { + "bbox": [ + 132, + 161, + 481, + 194 + ], + "type": "text", + "content": "20. Liang, R., Chen, H., Li, C., Chen, F., Panneer, S., Vijaykumar, N.: Envidr: Implicit differentiable renderer with neural environment lighting. arXiv preprint arXiv:2303.13022 (2023)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 194, + 481, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 194, + 481, + 237 + ], + "spans": [ + { + "bbox": [ + 132, + 194, + 481, + 237 + ], + "type": "text", + "content": "21. Mahajan, D., Ramamoorthi, R., Curless, B.: A theory of frequency domain invariants: Spherical harmonic identities for brdf/lighting transfer and image consistency. IEEE transactions on pattern analysis and machine intelligence 30(2), 197-213 (2007)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 237, + 481, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 237, + 481, + 270 + ], + "spans": [ + { + "bbox": [ + 132, + 237, + 481, + 270 + ], + "type": "text", + "content": "22. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. In: European Conference on Computer Vision. pp. 405-421 (2020)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 270, + 481, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 270, + 481, + 303 + ], + "spans": [ + { + "bbox": [ + 132, + 270, + 481, + 303 + ], + "type": "text", + "content": "23. Müller, T., Evans, A., Schied, C., Keller, A.: Instant neural graphics primitives with a multiresolution hash encoding. 
ACM Transactions on Graphics (ToG) 41(4), 1-15 (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 303, + 481, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 303, + 481, + 346 + ], + "spans": [ + { + "bbox": [ + 132, + 303, + 481, + 346 + ], + "type": "text", + "content": "24. Munkberg, J., Hasselgren, J., Shen, T., Gao, J., Chen, W., Evans, A., Müller, T., Fidler, S.: Extracting triangular 3d models, materials, and lighting from images. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8280-8290 (2022)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 347, + 481, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 347, + 481, + 380 + ], + "spans": [ + { + "bbox": [ + 132, + 347, + 481, + 380 + ], + "type": "text", + "content": "25. Shi, Y., Wu, Y., Wu, C., Liu, X., Zhao, C., Feng, H., Liu, J., Zhang, L., Zhang, J., Zhou, B., Ding, E., Wang, J.: Gir: 3d gaussian inverse rendering for relightable scene factorization (2023)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 380, + 481, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 380, + 481, + 423 + ], + "spans": [ + { + "bbox": [ + 132, + 380, + 481, + 423 + ], + "type": "text", + "content": "26. Srinivasan, P.P., Deng, B., Zhang, X., Tancik, M., Mildenhall, B., Barron, J.T.: Nerv: Neural reflectance and visibility fields for relighting and view synthesis. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7495-7504 (2021)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 424, + 481, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 424, + 481, + 457 + ], + "spans": [ + { + "bbox": [ + 132, + 424, + 481, + 457 + ], + "type": "text", + "content": "27. Sun, C., Sun, M., Chen, H.T.: Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5459-5469 (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 457, + 481, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 457, + 481, + 478 + ], + "spans": [ + { + "bbox": [ + 132, + 457, + 481, + 478 + ], + "type": "text", + "content": "28. Technologies, U.: Light probes, https://docsunity3d.com/Manual/LightProbes.html" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 479, + 481, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 479, + 481, + 499 + ], + "spans": [ + { + "bbox": [ + 132, + 479, + 481, + 499 + ], + "type": "text", + "content": "29. Technologies, U.: Reflection probe, https://docs.unity3d.com/Manual/class-ReflectionProbe.html" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 500, + 481, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 500, + 481, + 544 + ], + "spans": [ + { + "bbox": [ + 132, + 500, + 481, + 544 + ], + "type": "text", + "content": "30. Verbin, D., Hedman, P., Mildenhall, B., Zickler, T., Barron, J.T., Srinivasan, P.P.: Ref-nerf: Structured view-dependent appearance for neural radiance fields. In: 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 5481-5490. 
IEEE (2022)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 544, + 481, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 544, + 481, + 578 + ], + "spans": [ + { + "bbox": [ + 132, + 544, + 481, + 578 + ], + "type": "text", + "content": "31. Wang, P., Liu, L., Liu, Y., Theobalt, C., Komura, T., Wang, W.: Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689 (2021)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 578, + 481, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 578, + 481, + 599 + ], + "spans": [ + { + "bbox": [ + 132, + 578, + 481, + 599 + ], + "type": "text", + "content": "32. Zhang, K., Riegler, G., Snavely, N., Koltun, V.: Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492 (2020)" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 250, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 250, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 250, + 101 + ], + "type": "text", + "content": "ZJ. Tang, TJ. Cham" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/4a9028d8-b05c-4422-ac23-0a7be9202087_content_list.json b/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/4a9028d8-b05c-4422-ac23-0a7be9202087_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..b0a8d31be1fa3e0ad2826e883c2d95fee4883e3d --- /dev/null +++ b/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/4a9028d8-b05c-4422-ac23-0a7be9202087_content_list.json @@ -0,0 +1,1744 @@ +[ + { + "type": "text", + "text": "$3 \\times 2$ : 3D Object Part Segmentation by 2D Semantic Correspondences", + "text_level": 1, + "bbox": [ + 133, + 48, + 866, + 107 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Anh Thai $^{1,2}$ , Weiyao Wang $^{2}$ , Hao Tang $^{2}$ , Stefan Stojanov $^{1}$ , James M. Rehg $^{3}$ , and Matt Feiszli $^{2}$", + "bbox": [ + 87, + 139, + 910, + 178 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Georgia Institute of Technology", + "bbox": [ + 329, + 193, + 665, + 213 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2 Meta AI, FAIR", + "bbox": [ + 409, + 213, + 585, + 230 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3 University of Illinois Urbana-Champaign", + "bbox": [ + 285, + 230, + 709, + 250 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/cfcd26146e1a2e80dc84caa2831a7bb7f368e50237efbc29024997c17fd81f1b.jpg", + "image_caption": [ + "Fig. 1: We propose 3-By-2, a novel training-free method for low-shot 3D object part segmentation that achieves SOTA performance on both zero-shot and few-shot settings." + ], + "image_footnote": [], + "bbox": [ + 89, + 266, + 903, + 476 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. 
3D object part segmentation is essential in computer vision applications. While substantial progress has been made in 2D object part segmentation, the 3D counterpart has received less attention, in part due to the scarcity of annotated 3D datasets, which are expensive to collect. In this work, we propose to leverage a few annotated 3D shapes or richly annotated 2D datasets to perform 3D object part segmentation. We present our novel approach, termed 3-By-2 that achieves SOTA performance on different benchmarks with various granularity levels. By using features from pretrained foundation models and exploiting semantic and geometric correspondences, we are able to overcome the challenges of limited 3D annotations. Our approach leverages available 2D labels, enabling effective 3D object part segmentation. Our method 3-By-2 can accommodate various part taxonomies and granularities, demonstrating part label transfer ability across different object categories. Project website: https://ngailapdi.github.io/projects/3by2/.", + "bbox": [ + 142, + 544, + 854, + 813 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 77, + 840, + 312, + 861 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3D object part understanding is essential in various research fields and applications, such as robotics [20, 25, 37] and graphics [14]. Through our understanding", + "bbox": [ + 75, + 879, + 922, + 920 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Work done as an intern at Meta AI (FAIR).", + "bbox": [ + 99, + 928, + 535, + 947 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "of the world, objects can be decomposed into parts based on diverse properties (e.g., geometry or affordance [8, 21]). However, these different decompositions do not always align with one another—the same object can be segmented into parts differently depending on the specific use case. For instance, a driver might perceive a car in terms of its functional components like the steering wheel, accelerator pedal, and brake pedal. Conversely, a manufacturing worker may view the car as an assembly of structural parts, such as the frame, bumper, and windshield. Further, various parts with similar functionalities or structures can be shared among different object classes (e.g., the term \"leg\" can apply to multiple furniture items). How can we design a 3D part segmentation system that has high performance across such different requirements and scenarios?", + "bbox": [ + 77, + 55, + 917, + 269 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recent works in 3D part segmentation have integrated language as an additional input [1, 19, 52] by leveraging vision-language models to prompt the segmentation. However, grounding visual parts using language is inherently ambiguous. This is because parts can be described using diverse phrases that may include synonyms, various levels of detail, and differences in terminology (structural vs functional), which presents challenges for these models [19]. In contrast, images capture rich information about object shapes, textures and spatial part relationships. These properties can directly be parsed and compared using visual similarities between objects despite differences in linguistic expression. 
Therefore, it is important to study the limits and potentials of reasoning about visual similarity for generalization across different objects and categories.", + "bbox": [ + 77, + 274, + 917, + 487 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we investigate the 3D part segmentation task from this different perspective and propose a novel method called 3-By-2. Since labeling 3D data is expensive, we design 3-by-2 to leverage existing extensively annotated 2D part segmentation datasets [10,31] or a few-labeled 3D shapes to perform object part segmentation without additional training or finetuning. Our method does not need any language input and can flexibly handle segmentation tasks at various levels of granularity.", + "bbox": [ + 77, + 492, + 917, + 627 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We build our method based on the observation that because objects are constructed from parts, and because various objects often share a common set of parts with similar visual structures, this should allow part label transfer from one object to another without any language description. Recent studies [36, 47] have demonstrated the strong 2D semantic correspondences encoded by features of image diffusion models that generalize across different domains (e.g. sketch vs real images). To label a query 3D object point cloud, we leverage these strong representations to perform 2D pixel correspondence-based label transfer from in-the-wild 2D datasets or 2D renders of a few labeled 3D objects. To the best of our knowledge, we are the first to use diffusion model features for semantic label transfer in the context of 3D part segmentation.", + "bbox": [ + 77, + 632, + 917, + 845 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "While it might seem that obtaining 2D part labels for multi-view renders of an object through label transfer and back-projection into 3D is intuitively straightforward, a high performance and efficient implementation requires careful consideration of the challenges of 3D part segmentation: 1) Precise determination of 3D object part boundaries, which is particularly challenging for unstructured", + "bbox": [ + 79, + 850, + 917, + 947 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 79, + 17, + 94, + 29 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "A. Thai et al.", + "bbox": [ + 159, + 16, + 292, + 30 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/206dbe6a15b8c3e45c52762fb89e8f2882ccc9f4b2f5c9e661a279ea8635f83d.jpg", + "image_caption": [ + "Fig. 2: Overview of our proposed method 3-By-2. (1) Render the input object in multiple camera viewpoints, (2) Perform 2D part segmentation on each view individually by leveraging 2D semantic correspondences and 2D class-agnostic segmentation model, (3) Aggregate the 2D predictions from multiple views using our proposed mask-consistency module, (4) Back-project the predictions to 3D using depth information." + ], + "image_footnote": [], + "bbox": [ + 82, + 53, + 915, + 152 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "data like point clouds, and 2) Flexible adaptation to different levels of part granularity. To this end, we introduce three novel elements of our method: non-overlapping generation, mask-level label transfer and mask-consistency modules (see Fig. 2). 
These components work efficiently together to ensure precise 3D part segmentation masks and boundaries across a range of object categories and part levels (Fig. 1 and Tables 1, 2, 3).", + "bbox": [ + 72, + 266, + 917, + 383 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Overall, 3-By-2 is a training-free method independent of language inputs, instead relying solely on the 2D labels provided by a 2D database. Unlike previous methods that require 3D segmentation priors like point-cloud clusters [19] or mesh surface information [1,32], our approach has only a single requirement: calibrated cameras for back-projection. This can be known during the rendering process or predicted using SfM approaches.", + "bbox": [ + 72, + 383, + 917, + 500 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We validate the performance of our approach with PartNet-Ensembled [19], a dataset tailored for language-input models, and PartNet [24], which is not tailored for language. These datasets exhibit multiple levels of granularity. Notably, unlike previous approaches that require category-specific fine-tuning for few-shot scenarios [19, 32], 3-By-2 achieves SOTA performance without any training or fine-tuning requirements in either a zero-shot or few-shot setting. Additionally, we identify that models with language inputs exhibit suboptimal performance with highly fine-grained part terminologies. This highlights the advantages of our approach, which effectively handles these fine-grained object parts. Furthermore, we conduct comprehensive ablation studies and demonstrate the transferability of parts across different object categories, which benefits the understanding of object part compositionality.", + "bbox": [ + 72, + 500, + 920, + 734 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In summary, our contributions are 4-fold:", + "bbox": [ + 111, + 736, + 554, + 754 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- A novel, training-free method, 3-By-2, that achieves SOTA performance on benchmarks with different levels of granularity for zero-shot and few-shot 3D object part segmentation.", + "- The first to provide an effective approach for leveraging image diffusion model's features [36] to establish 2D semantic correspondences in the context of 3D part segmentation.", + "- Novel non-overlapping mask generation, mask-level label transfer, and mask-consistency modules that effectively transfer part labels from 2D database and extrapolate them to 3D." + ], + "bbox": [ + 89, + 772, + 917, + 947 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "3-By-2", + "bbox": [ + 769, + 16, + 840, + 32 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 903, + 16, + 917, + 30 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- Demonstrating the flexibility of 3-By-2 in accommodating various database settings and in generalizing between different object categories.", + "bbox": [ + 92, + 53, + 917, + 92 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 79, + 125, + 326, + 147 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.1 3D Part Segmentation", + "text_level": 1, + "bbox": [ + 79, + 167, + 414, + 188 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In contrast to its 2D counterpart, the progress in this field has been relatively limited, primarily due to the high cost associated with collecting and annotating 3D datasets. 
Currently, all of the available large-scale annotated 3D object part datasets are synthetic [18,24,40,45]. The most widely used benchmarks [24,45] are predominantly derived from objects within the ShapeNetCore [5] dataset. This problem has been tackled using architectures that take 3D representations [24,29] as inputs. These methods were trained in a supervised manner, requiring large-scale annotated data. More recent approaches have attempted to investigate data-efficient training scenarios where only a few 3D shapes are annotated [19,32,39,50].", + "bbox": [ + 79, + 203, + 917, + 396 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 Multi-view 2D-3D Segmentation Using Foundation Models", + "text_level": 1, + "bbox": [ + 79, + 426, + 857, + 445 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Although multi-view approaches have been widely utilized in the past for 3D segmentation [7,13,48], the rapid advancement of 2D foundation models [16,17] has encouraged more SOTA research aimed at leveraging these models to perform 3D segmentation in a multi-view fashion. CLIP [30] and GLIP [17] have been employed to integrate language information from multiple 2D views into 3D for open-vocabulary segmentation [1,19,27,35,50]. SAM [16], due to its ability to output per-pixel masks, has been used as an effective tool for multi-view 2D-3D segmentation, both on 3D structures like point clouds [35,42,44,46,50] or in NeRF-style [4].", + "bbox": [ + 79, + 461, + 917, + 635 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Scene Segmentation. Various combinations of foundation models have been explored for this task. While [35] leverages CLIP and SAM to support open-vocabulary 3D part segmentation, others use SAM with carefully designed prompts [4] or post-processing techniques [44]. Building upon these successes, concurrent works [12,26,42] seek to improve SAM utilization strategies. Our work differs by focusing on part segmentation, which requires finer granularity. This distinction in objectives directly influences the processing of SAM predictions, tailored to suit their specific characteristics. For example, while scene segmentation methods may disregard or merge masks covering parts of objects, part segmentation approaches might encourage splitting, depending on the desired level of detail.", + "bbox": [ + 79, + 637, + 934, + 830 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Part Segmentation. PartSLIP [19] and SATR [1] were among the first to employ foundation models for this task, pioneering the use of GLIP for open-vocabulary segmentation. Concurrent works have seen the integration of SAM into their pipelines [15,43,50]. Zhou et al. [50] and Kim et al. [15] use SAM with GLIP-predicted bounding boxes, while Xue et al. [43] employ SAM with furthest point sampling for each view, extending predictions to 3D with GLIP labels. Our", + "bbox": [ + 79, + 832, + 917, + 947 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 79, + 17, + 94, + 30 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "A. Thai et al.", + "bbox": [ + 159, + 16, + 292, + 30 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "approach shares with these works the use of SAM for 2D segmentation before 3D aggregation. In contrast, our method focuses solely on visual cues without language inputs, employing image diffusion model's features [36]. 
To improve SAM's accuracy, we introduce a novel non-overlapping mask generation module, eliminating the need for GLIP-generated bounding boxes.", + "bbox": [ + 77, + 55, + 917, + 152 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.3 Part Label Transfer using Correspondences", + "text_level": 1, + "bbox": [ + 79, + 177, + 668, + 196 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Transferring labels from annotated datasets to non-annotated datasets has been considered recently in [34] for open-vocabulary 2D part segmentation and previously in [6, 51] for 3D part segmentation. While [34] used DINOv1 [3] feature representations for dense label transfer between related objects in the base classes and novel object classes, Zhu et al. [51] relied on classical SIFT [22] features for establishing correspondences in 2D images. Chen et al. [6], in contrast, train a network to regress the correspondences directly on the input point cloud.", + "bbox": [ + 79, + 206, + 917, + 343 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We share with these approaches the use of semantic correspondences to identify optimal candidates for label transfer. However, our primary objective sets us apart significantly from [34], as we focus on segmenting 3D objects. Compared to [51], we leverage class-agnostic segmentation models to avoid dense pixel/patch sampling. Furthermore, unlike [6], we do not require direct operations on 3D point clouds or any specific 3D representations. Additionally, we introduce a mask-consistency module for per mask label voting, rather than relying solely on small local patches.", + "bbox": [ + 79, + 344, + 917, + 497 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Semantic Correspondences from Foundation Models. Many vision foundation models have demonstrated an inherent capability to implicitly capture semantic correspondences across different instances within the same category (e.g., matching chair backs) and across diverse categories (e.g., aligning dog's legs with cat's legs) [2,11,36,47]. In this work, we leverage semantic correspondences established by [36] to transfer part labels from annotated 2D datasets to query 3D objects.", + "bbox": [ + 79, + 499, + 917, + 635 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 79, + 660, + 239, + 682 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Given a database $\\mathcal{D}$ consisting of 2D part annotations, our goal is to segment each query object $q$ into parts using the visual part vocabulary provided by $\\mathcal{D}$ . Note that $\\mathcal{D}$ can either be gathered from 2D (image) part datasets or from renders of a few 3D objects captured at different view-points. Our method consists of three main steps (Fig. 
2): (1) render a set of 2D RGB images $\\mathcal{I}q$ of 3D object $q$ from $K$ distinct camera viewpoints; (2) perform 2D part segmentation on the rendered images; (3) aggregate image-level predictions through a mask-consistency aggregation module to obtain 3D predictions.", + "bbox": [ + 79, + 697, + 917, + 853 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 2D Part Segmentation", + "text_level": 1, + "bbox": [ + 79, + 879, + 414, + 899 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "There are two primary approaches to tackle this task: (1) Top-down, using segmentation mechanisms such as SAM [16], or (2) Bottom-up, which involves la", + "bbox": [ + 79, + 908, + 917, + 947 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "3-By-2", + "bbox": [ + 769, + 14, + 837, + 30 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 903, + 16, + 917, + 30 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/e97d61a381c5ee17448db1eb3f59252539cc7b90918569145f2c2bc1daf3098b.jpg", + "image_caption": [ + "Fig. 3: The process of pixel-level part label transferring. For each pixel $p$ in the query image $I_{k}$ , we perform the following: (1) Extract the feature $f(p)$ , along with the feature grid for each image $I_{\\mathcal{D}}$ in the database $\\mathcal{D}$ ; (2) Measure cosine similarity between $f(p)$ and the feature of each pixel within each feature grid, (3) Obtain the best match of $p$ over $\\mathcal{D}$ by determining the most similar pixel $p_{\\mathcal{D}}$ over all images $I_{\\mathcal{D}}$ ; (4) Assign the label of $p$ is to be the label of $p_{\\mathcal{D}}$ ." + ], + "image_footnote": [], + "bbox": [ + 99, + 58, + 883, + 178 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8a5d33ac48e0f1040cfad7f99a5f9d27919b1e194577f94e0c4c190dc4c0840a.jpg", + "image_caption": [ + "(a)" + ], + "image_footnote": [], + "bbox": [ + 99, + 294, + 479, + 380 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/3f62c6385a69e3e5d61fd0b77403fb45581133ec2efe76e9c4111693130faab8.jpg", + "image_caption": [ + "(b)", + "Fig. 4: (a) Non-overlapping 2D Mask Proposal. We address the issue of overlapping masks produced by SAM. The masks are first sorted by their areas. Subsequently, the smaller masks are stacked on top of the larger ones. Non-overlapping masks are obtained by taking the visible segment of each mask. (b) Different mask sampling strategies for label transfer. Our strategy provides accurate, dense prediction with clear part boundaries." + ], + "image_footnote": [], + "bbox": [ + 542, + 295, + 912, + 380 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "beling each pixel individually. While SAM produces high-quality 2D masks with sharp boundaries, it operates in a class-agnostic manner, often leading to high overlap between sub-parts, parts, and instances. Simply selecting the mask with the highest score may result in incorrect granularity and lacks the flexibility required for part segmentation.", + "bbox": [ + 72, + 533, + 917, + 630 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Conversely, doing label transfer for each pixel individually in the image is computationally impractical, particularly for part segmentation tasks where high resolution is preferred. Sparsely sampling and labeling pixels can result in under-segmented masks, particularly for smaller parts that are less likely to be sampled compared to larger parts (see Fig. 4b). 
Moreover, accurately determining part boundaries for individual pixels can be challenging, which may result in increased errors when extrapolating to 3D, particularly with unstructured 3D representations like point clouds. These issues raise the important question: how do we transfer part labels and preserve part boundaries without sacrificing computational resources?", + "bbox": [ + 72, + 634, + 917, + 827 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To address this question, we propose a 2D segmentation method that combines the strengths of both approaches which consists of 3 novel components: (1) Single-pixel 2D label transfer using semantic correspondences derived from DIFT [36], (2) Non-overlapping 2D mask proposal module, which refines SAM's multi-granularity predicted masks into non-overlapping part masks, and (3) Mask-level label transfer by integrating (1) and (2).", + "bbox": [ + 72, + 830, + 920, + 949 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 77, + 16, + 94, + 29 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "A. Thai et al.", + "bbox": [ + 159, + 14, + 292, + 30 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/4e6332bb0c5ce2a514edb1b89e96aaafb0f172c60357b240c7623744e91f2126.jpg", + "image_caption": [ + "Fig.5: Two approaches to aggregate 3D part labels from multiple 2D views. Aggregating 3D part labels from multiple 2D views through geometric correspondence can be achieved by either point or mask label consistency." + ], + "image_footnote": [], + "bbox": [ + 116, + 81, + 188, + 141 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/dea239a485f5790362d5a94a9cb7b3df903b1fb4e4ce9b1a9495e58d7d10c807.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 198, + 60, + 489, + 105 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b65086ae59d580d41b24b0db1194cc8a82383dcc510ce4ca72fde3180a51c29e.jpg", + "image_caption": [ + "Mask label consistency between multiple views" + ], + "image_footnote": [], + "bbox": [ + 200, + 113, + 489, + 157 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8b9f13c2b24dc9d116ff4abd011e4138bf2f10a61eac193fd4a50dcb2196ad78.jpg", + "image_caption": [ + "Fig.6: Effectiveness of mask label consistency. Enforcing consistency at the mask level can mitigate discrepancies at each individual point and contributes to smoother segmentation." + ], + "image_footnote": [], + "bbox": [ + 539, + 65, + 917, + 159 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Single-pixel 2D Label Transfer. At the core of our method is the 2D label transfer process. The goal is to transfer pixel labels from the annotated 2D database $\\mathcal{D}$ to the query RGB image $I_{k} \\in \\mathcal{I}q$ : for a pixel $p$ in the foreground object in $I_{k}$ , we aim to identify the best-matched pixel $p'$ in each image $I_{\\mathcal{D}}$ in the database $\\mathcal{D}$ and assign initial label to $p$ by $p'$ . To this end, we leverage the established semantic correspondence of DIFT [36]. While recent works have demonstrated the effectiveness of image diffusion models in extracting semantic correspondences, as evidenced by evaluations on datasets like SPair-71K [23], we are the first to leverage these features for transferring semantic labels in the context of 3D part segmentation. 
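To make this transfer step concrete, the following is a minimal sketch of the per-pixel nearest-neighbour matching in plain NumPy. It assumes a placeholder `extract_features(image)` returning a dense per-pixel feature map (a stand-in for the DIFT extractor [36]; the real interface may differ) and database images paired with per-pixel part-label grids; names such as `transfer_pixel_label` are illustrative only, and the exhaustive search shown here is the unoptimized variant that the coarse-to-fine procedure described below accelerates.

```python
import numpy as np

def extract_features(image: np.ndarray) -> np.ndarray:
    """Placeholder for a DIFT-style dense feature extractor [36].

    Expected to return an (H, W, C) array with one feature vector per pixel.
    The paper uses features from a pretrained image diffusion model; this stub
    only fixes the interface assumed by the sketch below.
    """
    raise NotImplementedError("plug in a diffusion-feature backbone here")

def transfer_pixel_label(query_feat, database):
    """Label one query pixel p by its best semantic correspondence p_D.

    query_feat: (C,) feature vector f(p) of the query pixel.
    database:   iterable of (feat_grid, label_grid) pairs, one per annotated
                2D image I_D, with feat_grid of shape (H, W, C) and
                label_grid of shape (H, W) holding integer part labels.
    Returns (label of p_D, best cosine similarity over the database).
    """
    q = query_feat / (np.linalg.norm(query_feat) + 1e-8)
    best_label, best_sim = None, -np.inf
    for feat_grid, label_grid in database:
        feats = feat_grid.reshape(-1, feat_grid.shape[-1])
        feats = feats / (np.linalg.norm(feats, axis=1, keepdims=True) + 1e-8)
        sims = feats @ q                      # cosine similarity to every pixel of I_D
        idx = int(np.argmax(sims))            # best-matched pixel p' within I_D
        if sims[idx] > best_sim:              # keep the best match over all images
            best_sim = float(sims[idx])
            best_label = int(label_grid.reshape(-1)[idx])
    return best_label, best_sim
```

The best similarity is returned alongside the label because the mask-level assignment described later weights each sampled pixel's vote by this confidence score.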
Specifically, $p' = \\arg \\max_{p' \\in I_{\\mathcal{D}}} \\cos(f(p), f(p'))$ where $\\cos$ and $f(x)$ denotes the cosine similarity score and the feature representing pixel $x$ . The best pixel correspondence $p_{\\mathcal{D}}$ of $p$ over the entire database is obtained by taking the most similar match within all the images in the database. Formally, $p_{\\mathcal{D}} = \\arg \\max_{\\mathcal{D}} p'$ . The label of $p$ is then assigned to be the label of $p_{\\mathcal{D}}$ (see Fig. 3).", + "bbox": [ + 72, + 297, + 920, + 588 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Coarse-to-fine correspondence search. Finding the nearest neighbor for a query pixel across the entire database can be prohibitively costly, especially for part segmentation which operates in high resolutions. We propose a coarse-to-fine strategy: using the coarse feature maps generated by DIFT [36], we first conduct the search at the coarse level to localize the region of the best match. We then extract the $3 \\times 3$ window centered at this region (in feature space) for a fine search (see Fig. 7). This approach ensures that we compute per-pixel similarity scores only within the region of interest, rather than across the entire image, improving computational efficiency. For instance, when processing a pair of images with a resolution of $800 \\times 800$ , coarse-to-fine correspondence search achieves a speed improvement of approximately 2000 times in terms of wall clock time.", + "bbox": [ + 72, + 593, + 917, + 808 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Non-overlapping 2D Mask Proposal. We propose the use of class-agnostic 2D part mask proposal, specifically from SAM [16]. By assuming that each mask proposal corresponds to a subset of a part, we can then selectively sample pixels within each mask proposal for label transferring. The labels are subsequently propagated to each pixel of the 2D masks through a majority voting process based on the sampled pixels within the mask. To address the issue posed by the highly overlapping predictions from SAM's multi-granularity model, we intro", + "bbox": [ + 72, + 811, + 917, + 949 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "3-By-2", + "bbox": [ + 769, + 16, + 840, + 32 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 903, + 16, + 917, + 30 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Input 3D Object", + "bbox": [ + 113, + 53, + 184, + 63 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/cab047ae98ccbf6f9876a4d6488f212bdc2cb48e5e983dabf86d9cc85aa54e7d.jpg", + "image_caption": [ + "Fig. 7: Coase-to-fine correspondence search. We first conduct searching on a coarse level to identify the region of best match. We then extract the $3 \\times 3$ window centered at this region in feature space for a fine search. This approach is approximately 2000 times faster in terms of wall time for large $N$ ( $800 \\times 800$ )." + ], + "image_footnote": [], + "bbox": [ + 133, + 53, + 864, + 206 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "duce a non-overlapping 2D mask generation module. This module takes SAM masks as inputs and outputs a set of mutually exclusive 2D masks.", + "bbox": [ + 72, + 289, + 920, + 328 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We arrange the SAM output masks in descending order of mask area and stack smaller masks on top of larger ones. 
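A hypothetical sketch of this area-sorted stacking is given below; it assumes the SAM proposals are available as equally sized boolean arrays, and the helper name `flatten_sam_masks` is illustrative rather than part of any released code.

```python
import numpy as np

def flatten_sam_masks(masks):
    """Convert overlapping SAM proposals into mutually exclusive 2D masks.

    masks: list of (H, W) boolean arrays (SAM's multi-granularity output).
    Masks are sorted by area in descending order and painted onto a canvas,
    so smaller masks end up on top of larger ones; the visible segment of
    each mask is then returned, making the result pairwise disjoint.
    """
    order = sorted(range(len(masks)), key=lambda i: int(masks[i].sum()), reverse=True)
    canvas = np.full(masks[0].shape, -1, dtype=np.int64)  # id of the top-most mask per pixel
    for i in order:                                       # large first, small stacked on top
        canvas[masks[i]] = i
    visible = [canvas == i for i in range(len(masks))]
    return [seg for seg in visible if seg.any()]          # drop masks that are fully covered
```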
This ensures that if mask $A$ is a subset of mask $B$, stacking $A$ on top of $B$ results in non-overlapping masks, namely $A$ and $B \\setminus A$. Non-overlapping masks are finally obtained by taking the visible segments of each mask (see Fig. 4a).", + "bbox": [ + 72, + 330, + 920, + 427 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "2D Mask Label Assignment. After obtaining the non-overlapping masks, we sparsely sample pixels in each mask to transfer labels. We then perform majority voting to assign the dominant label to each 2D mask, weighted by the confidence score (cosine similarity) of the best pixel correspondence matches.", + "bbox": [ + 72, + 429, + 920, + 505 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.2 Mask-consistency Aggregation Module", + "text_level": 1, + "bbox": [ + 75, + 539, + 617, + 560 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Given a set of 2D RGB images with part segmentation predictions, we aim to extrapolate these segmentation labels to 3D using geometric correspondences. Prior works [19,32] aggregate multi-view information for each 3D point or mesh triangle face through a weighted sum of multi-view 2D predictions. To fully maintain the high-quality part boundaries predicted by SAM in 2D, we choose to aggregate multi-view predictions for each 2D mask instead. This choice is based on the fact that part identities remain constant across multiple views (e.g., the seat in view 1 should be segmented as the seat in view 2, see Fig. 5). Intuitively, mask consistency can be seen as an additional constraint on point consistency, encouraging points within the same 2D mask to remain associated with the same masks in 3D space.", + "bbox": [ + 72, + 578, + 917, + 791 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We present a novel mask-consistency aggregation module that takes a set of 2D part segmentation predictions for multiple views as input. Our approach involves constructing an undirected, unweighted graph, denoted as $G = (V, E)$, where each vertex corresponds to a 2D mask in a given view. The edges of the graph connect masks from different views that capture the projection of the same 3D points. We construct a set of mask correspondences for each vertex $v\\in V$, $\\mathcal{M}_v = \\{v, u_1, u_2, \\dots, u_N\\}$, where an edge $e_i$ connects $v$ and $u_{i}$. A mask $v$ is defined as under-segmented when there exist at least two masks in $\\mathcal{M}_v$ that", + "bbox": [ + 72, + 793, + 920, + 949 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 77, + 16, + 94, + 30 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "A. Thai et al.", + "bbox": [ + 159, + 14, + 292, + 30 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1c9607d3b815359e95575fa7a7897f879f58857de511a2a43f1191a0afe5f972.jpg", + "image_caption": [ + "Fig. 8: Mask-consistency process. (1) Each vertex of $G$ corresponds to a mask in a given image. The edge connecting each pair of vertices denotes that the pair contains the projection of the same 3D points. The mask consistency set $\\mathcal{M}_v$ for each $v$ is obtained via the first-order neighborhood of $v$. (2) $v_1$ is detected as under-segmented since $\\mathcal{M}_{v_1}$ consists of masks from the same view with different labels $(v_2, v_3)$ and hence is discarded. (3) Traverse $\\mathcal{M}_{v_i}$ to obtain labels for $\\mathcal{M}_{v_i}$. (4) Obtain the label for each mask by majority voting. 
Here we show a simple example for visualization purposes." + ], + "image_footnote": [], + "bbox": [ + 121, + 43, + 871, + 177 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "belong to the same image but are assigned different labels. For instance, in Fig. 8, vertex $v_{1}$ corresponds to an under-segmented mask. Formally,", + "bbox": [ + 75, + 315, + 917, + 354 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{S}_{v} = \\left\\{ (u_{i}, u_{j}) \\mid u_{i}, u_{j} \\in \\mathcal{M}_{v},\\ u_{i}, u_{j} \\in I_{k},\\ l(u_{i}) \\neq l(u_{j}) \\right\\} \\tag{1}\n$$\n", + "text_format": "latex", + "bbox": [ + 237, + 370, + 917, + 393 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "where $l(x)$ denotes the label of $x$. We discard $v$ if $|\\mathcal{S}_v| > \\epsilon$. That is, if $v$ is consistently determined to be under-segmented across multiple views, we discard the contribution of $v$ in the final label assignment. We then traverse the graph simultaneously from each vertex using breadth-first search to accumulate the labels for each $\\mathcal{M}_v$. Subsequently, we perform majority voting to assign labels to each $\\mathcal{M}_v$. Finally, for each mask, we identify the most frequently assigned label as the final label.", + "bbox": [ + 72, + 406, + 917, + 541 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The simple intuition behind this approach is: if a part occasionally receives incorrect labels in some challenging views, employing majority voting within the mask correspondence set can calibrate these errors. Further, performing this on the mask level ensures that if two 2D points share the same mask label in the majority of the views, they will ultimately be assigned the same final label. This approach calibrates potential discrepancies in individual point-wise aggregations (see Fig. 6).", + "bbox": [ + 72, + 544, + 917, + 679 + ], + "page_idx": 8 + }
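A compact sketch of this filter-and-vote step is given below, simplified to first-order neighborhoods rather than the full breadth-first traversal described above; the container names are hypothetical.

```python
from collections import Counter

def aggregate_mask_labels(corr_sets, view_of, label_of, eps=0):
    """Vote a final label per mask from its correspondence set M_v.

    corr_sets: {mask_id: set of corresponding mask_ids across views}.
    view_of:   {mask_id: index of the view the mask comes from}.
    label_of:  {mask_id: initial 2D-transferred label}.
    eps:       tolerance on |S_v| (Eq. 1) before a mask is discarded.
    """
    # 1) Discard under-segmented masks: M_v contains two masks from the
    #    same view that carry different labels (Eq. 1).
    kept = {}
    for v, m_v in corr_sets.items():
        s_v = [(a, b) for a in m_v for b in m_v
               if a < b and view_of[a] == view_of[b]
               and label_of[a] != label_of[b]]
        if len(s_v) <= eps:
            kept[v] = m_v

    # 2) Majority vote over each surviving correspondence set.
    return {v: Counter(label_of[u] for u in m_v | {v}).most_common(1)[0][0]
            for v, m_v in kept.items()}

# Toy example in the spirit of Fig. 8: v1 is under-segmented.
view_of  = {"v1": 0, "v2": 1, "v3": 1, "v4": 2}
label_of = {"v1": "seat", "v2": "seat", "v3": "back", "v4": "seat"}
corr     = {"v1": {"v2", "v3"}, "v2": {"v4"}, "v3": set(), "v4": {"v2"}}
print(aggregate_mask_labels(corr, view_of, label_of))
```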
+ { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 75, + 710, + 309, + 733 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this section, we first report the performance of 3-By-2 against baselines on PartNet-Ensembled (PartNetE) [19] in Sec. 4.1 and on PartNet [24] with \"level-3\" annotation in Sec. 4.2. Note the distinction between these datasets, since PartNetE consists of a distinct set of articulated objects from [41]. These datasets also exhibit different granularities of part annotation. While PartNetE contains both basic parts like chair back and fine-grained parts like scissors screw, PartNet with \"level-3\" annotation contains only fine-grained parts such as \"back_frame_vertical_bar\". In Sec. 4.3, we conduct comprehensive ablation studies to verify the necessity of each component in 3-By-2. Our few-shot experiments refer to the setting where a few labeled 3D objects are available for", + "bbox": [ + 72, + 754, + 920, + 949 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "3-By-2", + "bbox": [ + 769, + 14, + 840, + 34 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 900, + 16, + 917, + 30 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 1: Few-shot performance on the PartNetE [19] dataset. The left columns show performance on the 17 categories that the supervised methods [28,29,38] (first 3 rows) were trained on with an additional 28K objects. The right columns show performance on the 28 categories with only 8 objects/category in the training set. [19,33,49,50] and ours (last 5 rows) only have access to 8 objects/category during training for all 45 categories. Please refer to the Supplement for the full table on all 45 categories.", + "bbox": [ + 72, + 53, + 920, + 159 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/f91cdc4b60d83063b55ff820c0e8d341935a251d55476949aa921afe5314faa9.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodsChairScissorsLaptopDoorMicrowaveKeyboardAvg. (17)CameraUSBStaplerDispenserKettleEyegl.Avg. (28)Avg. (45)
PointNext [29]0.9180.5730.3250.4380.4050.4500.5910.3320.6790.8860.2600.4510.8810.4570.502
PointNet++ [28]0.8470.5000.5540.4570.4360.7450.5330.0650.5240.5160.1210.2090.7620.2500.365
SoftGroup [38]0.8830.7600.1840.5310.3830.5890.5050.2360.4410.8010.1890.5740.7240.3130.384
ACD [33]0.3900.3910.1110.1890.0660.2610.1960.1010.2520.5000.1940.4020.7820.2590.235
Prototype [49]0.7080.4300.2790.3340.2700.4490.4190.3200.6540.8070.5340.6070.7790.4700.451
PartSLIP [19]0.8540.6030.2970.4080.4270.5360.5670.5830.5610.8480.7380.7700.8830.6250.603
PartSLIP++ [50]0.8530.6050.2970.4510.4950.7240.5740.6320.5750.6300.7200.8560.8830.6420.615
3-By-2 (ours)0.8440.6570.4530.5440.4020.8960.6040.6260.7900.9010.7820.8150.9280.6650.642
", + "bbox": [ + 82, + 164, + 910, + 300 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "each object category while there is no annotated 3D part labels in the zero-shot setting. In this setting, we leverage labels from the 2D domain instead.", + "bbox": [ + 72, + 321, + 917, + 360 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.1 Performance on PartNet-Ensembled", + "text_level": 1, + "bbox": [ + 75, + 390, + 585, + 409 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Data & Metric. We use the dataset provided by Liu et al. [19] for both the few-shot and zero-shot settings. For each object in both few-shot and test sets, we render 20 RGB images from different views with resolution $800 \\times 800$ . We report mean IoU (mIoU) performance of all baselines using the evaluation protocol provided by [19] on the input point clouds. Specifically, the performance of a part is not considered if it does not exist in the queried object.", + "bbox": [ + 72, + 422, + 920, + 539 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Few-shot Baselines. We compare 3-By-2 against fully-supervised semantic segmentation [28, 29, 38], few-shot semantic segmentation [33, 49] and language-based [19, 50] methods. The fully supervised methods [28, 29, 38] were trained on 28K objects of 17 overlapping categories between PartNetE [19], in addition to the few-shot set consisting of 8 objects/category. The second group of baselines [19, 33, 49, 50] were only trained on the few-shot set. PartSLIP and PartSLIP++, a concurrent work, rely on large vision-language model (GLIP [17]) to guide the 2D part detection before extending to the 3D point cloud segmentation. We provide more detailed descriptions in the Supplement. We omit the evaluation of MvDeCor [32] on this benchmark since it requires ground-truth 3D meshes, whereas PartNetE only provides dense point clouds as inputs.", + "bbox": [ + 72, + 541, + 917, + 752 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Few-shot Setting. In this setting, 8 objects/category serve as the few-shot set. We evaluate on the entire test set of PartNetE [19]. For a fair comparison, we remove part labels in the test set that do not exist in the few-shot set. We present our few-shot results in Table 1. Compared to fully-supervised 3D methods, we outperform by $1 - 10\\%$ mIoU on these categories. Additionally, we demonstrate a significant performance boost on the remaining 28 categories (21-41% mIoU). We further outperform PartSLIP and PartSLIP++ on both subsets, achieving $\\sim 3\\%$ mIoU improvements overall.", + "bbox": [ + 72, + 754, + 917, + 908 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Performance on Real-world Scans. Please note that there is currently no publicly available real-world 3D part segmentation dataset for direct comparison. How-", + "bbox": [ + 75, + 908, + 917, + 947 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 79, + 16, + 104, + 30 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "A. Thai et al.", + "bbox": [ + 159, + 14, + 292, + 30 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/7e835724b9e05921d14103290237e05ef4c2255e1f6f1071cb0d2913a2dff458.jpg", + "table_caption": [ + "Table 2: Zero-shot performance on the subset of PartNetE [19] that overlaps with PACO [31]. Our method effectively leverages 2D in-the-wild part segmentation dataset to perform 3D part segmentation." + ], + "table_footnote": [], + "table_body": "
MethodsKettleMicrowaveScissorsF.-ChairMouseBottleLaptopClockRemoteLampAvg. (18)
SAMPro3D [42] + OpenMask3D [35]0.0260.0010.1180.4370.0190.1030.0170.0070.0840.0740.146
PartSLIP [19]0.2080.1660.2180.9170.2700.7630.2700.2670.1150.3710.341
VLPart [34]-MC0.2110.1920.1930.8130.0000.2160.0600.2050.1320.1660.222
3-By-2 (ours)0.7650.3480.5940.7120.3070.8070.3940.2530.2390.5000.430
", + "bbox": [ + 138, + 107, + 854, + 211 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/b42c4d8ce612c331e0e89dde43624d8ae0ab5ef0ca9296bb575ebcc2f789aca7.jpg", + "table_caption": [ + "Table 3: Performance on PartNet dataset with \"level-3\" annotations in the few-shot setting. Bold and underline denote best and second best performance respectively." + ], + "table_footnote": [], + "table_body": "
MethodsBottleMicrowaveDisplayDishwasherFaucetKnifeEarphoneClockBedTrashcanAvg.
MvDeCor [32]0.4210.3770.6000.3270.2120.1870.2050.1430.0990.1990.277
PartSLIP [19]0.3440.1430.3860.2280.0090.0230.0640.0170.0030.0310.125
3-By-2 (ours)0.4540.3890.5670.4290.2030.1960.2250.1160.0960.1340.281
", + "bbox": [ + 164, + 256, + 825, + 331 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "ever, we demonstrate the robustness of our method using real-world objects, as shown in Fig. 1. These objects were originally introduced by Liu et al. [19] and captured using an iPhone12 camera.", + "bbox": [ + 77, + 352, + 917, + 409 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Zero-shot Baselines. We compare 3-By-2 with PartSLIP [19], VLPart [34]-MC and SAMPro3D [42] + OpenMask3D [35]. For PartSLIP, we prompt the pre-trained GLIP model with the language inputs without finetuning, following Liu et al. [19]. VLPart [34] is a SOTA 2D part segmentation method that was trained on a combination of various large-scale 2D part datasets. We replace our 2D part segmentation module with a pre-trained VLPart model, retaining the 3D mask-consistency aggregation module as 3-By-2, and term this baseline VLPart-MC. During inference, to guide VLPart effectively, we prompt the model with language inputs as in PartSLIP. SAMPro3D [42] is a SOTA zero-shot instance segmentation method for 3D scenes using SAM at its core. For semantic segmentation evaluation, we integrate SAMPro3D with OpenMask3D [35], an open-vocabulary 3D scene segmentation method.", + "bbox": [ + 77, + 414, + 917, + 648 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Zero-shot Setting. Since we do not have access to any labeled 3D objects in this setting, to effectively transfer part labels, we leverage PACO [31]. This dataset is a fine-grained and richly annotated 2D datasets consisting of objects from COCO-LVIS [9]. We crop and mask each annotated object using the provided object bounding box and segmentation mask to form the database. Further, we filter out small objects or objects with limited visibility, using the area of the object segmentation mask as a criterion.", + "bbox": [ + 77, + 652, + 917, + 786 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In Table 2 we show the performance of all baselines and 3-By-2 on the subset of PartNetE that overlaps with PACO dataset [31]. By leveraging the abundance and fine-grained of 2D in-the-wild part segmentation datasets, we achieve superior performance compared to all baselines (9-29% mIoU). We significantly outperform PartSLIP on challenging categories with small or thin parts (e.g. scissors and lamp by 28% and 13% mIoU respectively). These results highlight the effectiveness of 3-By-2 even when the database includes challenging real-world images with partial occlusion and truncation.", + "bbox": [ + 79, + 791, + 917, + 946 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "3-By-2", + "bbox": [ + 769, + 16, + 837, + 32 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 893, + 16, + 915, + 30 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Effectiveness of Our 2D Segmentation Module. We demonstrate the effectiveness of our 2D segmenter, leveraging SAM and DIFT, by showcasing its strong performance against VLPart [34], a SOTA 2D part segmentation method (see Table 2, last 2 rows). Note that VLPart was trained on PACO [31] among other 2D part datasets. Therefore, it is reasonable to anticipate that this method can effectively use knowledge from PACO to accurately segment the 18 overlapping categories between PartNetE and PACO. For both VLPart-MC and 3-By-2, we maintain the same 3D aggregation module. 
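 As a concrete reference for the zero-shot database construction described earlier in this section, the following is a hypothetical sketch of the crop-mask-filter step over PACO-style annotations; the field names are illustrative, not the actual PACO schema. ```python import numpy as np def build_database(images, annotations, min_area_frac=0.01): """Crop, mask, and filter annotated objects into a transfer database. images: {image_id: (H, W, 3) uint8 array}. annotations: dicts with illustrative keys 'image_id', 'bbox' (x, y, w, h) and 'mask' ((H, W) bool). min_area_frac: drop objects whose mask covers less than this fraction of the image (small / barely visible objects). """ db = [] for ann in annotations: img = images[ann["image_id"]] h, w = img.shape[:2] if ann["mask"].sum() < min_area_frac * h * w: continue # filter out small or mostly occluded objects x, y, bw, bh = (int(v) for v in ann["bbox"]) masked = img * ann["mask"][..., None] # zero out the background db.append(masked[y:y + bh, x:x + bw]) # crop to the object box return db # Toy usage on a random image with one synthetic annotation. rng = np.random.default_rng(0) img = rng.integers(0, 255, (64, 64, 3), dtype=np.uint8) mask = np.zeros((64, 64), dtype=bool) mask[10:40, 10:40] = True db = build_database({0: img}, [{"image_id": 0, "bbox": (10, 10, 30, 30), "mask": mask}]) print(len(db), db[0].shape) ``` 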
Our method significantly outperforms VLPart-MC, demonstrating the advantage of our proposed 2D segmentation module.", + "bbox": [ + 72, + 55, + 922, + 230 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Comparison to SOTA Scene Segmentation Approach. SAMPro3D [42] is a concurrent work with SOTA performance on zero-shot instance segmentation in 3D scene. This is a training-free model that effectively prompts SAM within the 2D domain using 3D point projections. As in Table 2, we outperform this baseline by a significant margin, highlighting the non-trivial nature of adapting scene segmentation methods for 3D part segmentation tasks, particularly those involving post-processing of 2D foundation models.", + "bbox": [ + 72, + 232, + 922, + 369 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.2 Performance on Level-3 PartNet", + "text_level": 1, + "bbox": [ + 75, + 401, + 542, + 422 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In this experiment, we select 10 categories from PartNet [24] that come with fine-grained (\"level-3\") annotations. We randomly select 10 objects per category from the training set (following [32]) to form our few-shot set, and up to 50 objects per category from the test set for evaluation, ensuring overlap with ShapeNetCore.v2 [5]. Given that PartSLIP [19] employs point cloud RGB for superpoint generation, which serves as 3D priors, our decision to choose overlapping objects with ShapeNetCore.v2 is to preserve object texture information. We use the same few-shot and test set for all baselines.", + "bbox": [ + 72, + 440, + 922, + 595 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Data. As inputs to our approach, we render 15 overlapping views for each textured mesh using Blender cycle renderer with realistic lighting from HDRI environment maps.", + "bbox": [ + 72, + 596, + 922, + 655 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Baselines. The baselines are reproduced following the papers' recommended training procedure. Specifically, we pre-train MvDeCor [32] on the entire training set of the selected categories consisting of 86 views per non-textured object, with rendered RGB, depth and normal maps as inputs. We then fine-tune the segmentation heads for each individual object category in the few-shot set with 15 views per object. Note that the input for this stage also includes RGB, depth and normal maps.", + "bbox": [ + 72, + 656, + 922, + 791 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "For PartSLIP [19], we derive the language prompt by traversing the part hierarchy and concatenating labels from each level along the path, spanning from root to leaf. For example, the path \"bottle/jug/handle\" is transformed into \"bottle jug handle\". This adaptation is due to the potential for different leaf nodes to share identical labels (e.g., bottle/normal_bottle/handle and bottle/jug/handle), as relying solely on the leaf node label could introduce confusion in predictions. We adopt PartSLIP's point cloud, image rendering and data processing pipeline with default parameters.", + "bbox": [ + 72, + 793, + 922, + 949 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 79, + 16, + 104, + 30 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "A. 
Thai et al.", + "bbox": [ + 159, + 14, + 292, + 30 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/1f15ac454461a3021fee1a45e302b62048560c94c0320667450bbfeefe46f97c.jpg", + "table_caption": [ + "Table 4: Ablation of the non-overlapping mask generation module." + ], + "table_footnote": [], + "table_body": "
2D Mask ProposalScissorsMouseSuitcaseBottleChair
SAM0.4570.4400.2850.0040.638
Non-overlap0.6750.6840.8130.8100.844
", + "bbox": [ + 84, + 91, + 498, + 141 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/2143a2074b2fd8888de2ac37932188cb45da51e6c407142baaf352c914ce3b63.jpg", + "table_caption": [ + "Table 5: Ablation of our proposed mask-consistency component." + ], + "table_footnote": [], + "table_body": "
3D Label AggregationScissorsSuitcasePrinterClock
Point-Consistency0.6190.5790.0090.363
Mask-Consistency0.6750.6840.0850.458
", + "bbox": [ + 527, + 91, + 929, + 139 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Evaluation & Metric. We uniformly sample 300K points on the surface of each labeled ground truth mesh and employ nearest neighbor assignment to associate a ground-truth label with each point. This point set is used for evaluating all methods for a fair comparison and eliminating any randomness introduced by the point cloud sampling step. We use part mIoU on the sampled point set as the evaluation metric. We employ the standard mIoU calculation, which considers the performance of all parts in the vocabulary, even in cases where they may not exist in certain objects. Additionally, different from MvDeCor, we do not exclude the \"others\" label during evaluation based on ground-truth labels. For a fair comparison, we applied the same evaluation approach across all methods.", + "bbox": [ + 72, + 164, + 922, + 357 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Results. We show results in Table 3. Compared to PartSLIP [19], we outperformed on all categories by a significant margin (16% mIoU on average), demonstrating the challenges posed by fine-grained settings for GLIP [17]. While our performance is on par with MvDeCor [32], it is important to note that MvDeCor is both pretrained and finetuned on PartNet [24], using ground truth depth and normal maps as additional inputs. In contrast, our method requires no training on the target data distribution.", + "bbox": [ + 72, + 359, + 922, + 495 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.3 Ablation Study", + "text_level": 1, + "bbox": [ + 75, + 525, + 331, + 544 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Non-overlapping Mask Generation. In Table 4, we illustrate the effectiveness of our proposed non-overlapping mask generation module. The comparison involves evaluating the performance of our method with and without this module. In the case of the model without the non-overlapping mask generation module, we directly utilize the predicted SAM outputs for label transferring. The results indicate that our non-overlapping mask generation module is necessary for achieving an optimal performance.", + "bbox": [ + 72, + 559, + 922, + 694 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Mask-consistency Module. In Table 5, we demonstrate the effectiveness of our proposed mask-consistency component, which improves the final performance especially on objects with small parts.", + "bbox": [ + 72, + 695, + 922, + 752 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Properties of Database. In this section, we investigate two key questions: 1) Can 3-By-2 accurately segment the query object within a database containing multiple object categories? and 2) Is it possible to transfer parts with the same semantic meaning between different object categories?", + "bbox": [ + 72, + 754, + 920, + 830 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Multi-category database. To address question 1, we perform experiments using databases containing 1, 2, and 8 categories respectively (see Table 6). Specifically, taking the query category as \"Kettle\", for the 2-category setting we construct a database consisting of \"Kettle, Kitchen Pot\". We selected these categories due to their shared semantic parts with \"Kettle\", which could potentially lead to confusion (e.g., kettle lid vs. kitchen pot lid). 
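 For reference, the nearest-neighbor ground-truth assignment used in the Level-3 PartNet evaluation above can be sketched as follows; this is a minimal version with scipy, and the exact surface sampler is not specified here. ```python import numpy as np from scipy.spatial import cKDTree def assign_gt_labels(sampled_pts, labeled_pts, labels): """Label each sampled surface point by its nearest labeled point. sampled_pts: (N, 3) points uniformly sampled on the mesh surface. labeled_pts: (M, 3) reference points with ground-truth part labels. labels: (M,) integer labels aligned with labeled_pts. """ _, idx = cKDTree(labeled_pts).query(sampled_pts, k=1) return labels[idx] rng = np.random.default_rng(0) ref = rng.random((1000, 3)) ref_labels = rng.integers(0, 5, 1000) pts = rng.random((300_000, 3)) # the evaluation samples 300K points print(assign_gt_labels(pts, ref, ref_labels).shape) ``` 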
With 8-category setting, we add in", + "bbox": [ + 72, + 832, + 922, + 949 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "3-By-2", + "bbox": [ + 769, + 16, + 837, + 32 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 893, + 16, + 917, + 30 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/e4363b7de9a57c8627dfd3e17b08fda9540213ab7b8a09ddb016d96d540490ed.jpg", + "table_caption": [ + "Table 6: Multi-category database experiment. Performance of Kettle in various database settings is reported with mIoU. Our method shows robustness in performance even when more categories are added in the database." + ], + "table_footnote": [], + "table_body": "
DatabaseLidHandleSpoutAvg.
1-category0.7590.9040.7830.815
2-category0.7030.8200.7480.757
8-category0.7270.7730.7560.752
", + "bbox": [ + 142, + 159, + 469, + 230 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/5f373a99fa6a259990e70929b60edc6f489a697f39f9947ab6c0356cc9b045e7.jpg", + "table_caption": [ + "Table 7: Cross-category database experiment. We report the performance of 18 tables with wheels in Part-NetE. Results show that our method can transfer wheel annotations from Chair to correct the prediction on Table wheels." + ], + "table_footnote": [], + "table_body": "
DatabaseLegTabletopWheelAvg.
Table only0.5860.6470.0000.411
Chair & Table0.6410.6330.6000.625
", + "bbox": [ + 588, + 177, + 915, + 224 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "categories that are completely different and do not share any parts with \"Kettle\" (e.g. \"Eyeglasses\"). In general, with more categories in the database, there is a slight decrease in the average performance. Notably, there are marginal differences between 2-category and 8-category (second and third rows), highlighting the ability of 3-By-2 in handling both diverse object taxonomy and part segmentation. This finding is particularly interesting since many prior works [19,32] require finetuning each category separately for few-shot evaluation.", + "bbox": [ + 72, + 237, + 917, + 372 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Cross-category database. Considering question 2, we note that the few-shot set of \"Table\" in PartNetE lacks objects with wheels as a part, whereas such objects are present in the test set. To address this, we incorporate the \"Chair\" category where the wheel part exists in the database. We evaluate on 18 tables in PartNetE test set with the \"wheel\" part annotated (see Table 7). Compared to the table only few-shot set, combining the database with \"Chair\" improves the performance on \"leg\" by $\\sim 6\\%$ mIoU. The improvement in the \"leg\" part can be attributed to the inclusion of \"Chair\" in the database, which reduces the likelihood of the model incorrectly associating \"wheel\" with \"leg\" due to the absence of \"wheel\" in the few-shot set. Interestingly, the performance for \"wheel\" increases significantly, $+60\\%$ mIoU through the label transfer from chair wheels.", + "bbox": [ + 72, + 373, + 920, + 586 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "While the concept may seem intuitive, our findings shed new light on object part compositionality. Despite the diversity in appearances and shapes across various object categories, there exists a finite set of object parts that are shared among them. Recognizing the transferability of these parts is important for facilitating rapid learning of novel objects across a range of tasks. Further, our results show the ability to correct wrong predictions of our approach by transferring labels from another category. Please refer to the Sup. for additional studies.", + "bbox": [ + 72, + 586, + 917, + 723 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 75, + 752, + 285, + 773 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this work, we propose 3-By-2, a novel, training-free method that achieves SOTA performance on benchmarks with diverse levels of part granularity without the need for language inputs, on both zero-shot and few-shot settings. We demonstrate the flexibility of 3-By-2 in transferring part labels between different object categories. We hope the development of 3-By-2 can encourage further exploration of visual similarities for this task.", + "bbox": [ + 72, + 793, + 917, + 910 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 79, + 16, + 104, + 30 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "A. 
Thai et al.", + "bbox": [ + 159, + 14, + 292, + 30 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgement", + "text_level": 1, + "bbox": [ + 77, + 53, + 338, + 76 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This work was partly supported by NIH R01HD104624-01A1.", + "bbox": [ + 75, + 94, + 731, + 113 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 77, + 143, + 237, + 165 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Abdelreheem, A., Skorokhodov, I., Ovsjanikov, M., Wonka, P.: Satr: Zero-shot semantic segmentation of 3d shapes. arXiv preprint arXiv:2304.04909 (2023) 2, 3, 4", + "2. Amir, S., Gandelsman, Y., Bagon, S., Dekel, T.: Deep vit features as dense visual descriptors. arXiv preprint arXiv:2112.05814 2(3), 4 (2021) 5", + "3. Caron, M., Touvron, H., Misra, I., Jégou, H., Mairal, J., Bojanowski, P., Joulin, A.: Emerging properties in self-supervised vision transformers. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 9650-9660 (2021) 5", + "4. Cen, J., Zhou, Z., Fang, J., Shen, W., Xie, L., Jiang, D., Zhang, X., Tian, Q., et al.: Segment anything in 3d with nerfs. Advances in Neural Information Processing Systems 36 (2024) 4", + "5. Chang, A.X., Funkhouser, T., Guibas, L., Hanrahan, P., Huang, Q., Li, Z., Savarese, S., Savva, M., Song, S., Su, H., et al.: Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012 (2015) 4, 12", + "6. Chen, N., Liu, L., Cui, Z., Chen, R., Ceylan, D., Tu, C., Wang, W.: Unsupervised learning of intrinsic structural representation points. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 9121-9130 (2020) 5", + "7. Dai, A., Nießner, M.: 3dmv: Joint 3d-multi-view prediction for 3d semantic scene segmentation. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 452-468 (2018) 4", + "8. Deng, S., Xu, X., Wu, C., Chen, K., Jia, K.: 3d affordancenet: A benchmark for visual object affordance understanding. In: proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 1778-1787 (2021) 2", + "9. Gupta, A., Dollar, P., Girshick, R.: Lvis: A dataset for large vocabulary instance segmentation. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 5356-5364 (2019) 11", + "10. He, J., Yang, S., Yang, S., Kortylewski, A., Yuan, X., Chen, J.N., Liu, S., Yang, C., Yu, Q., Yuille, A.: Partimagenet: A large, high-quality dataset of parts. In: European Conference on Computer Vision. pp. 128-145. Springer (2022) 2", + "11. Hedlin, E., Sharma, G., Mahajan, S., Isack, H., Kar, A., Tagliasacchi, A., Yi, K.M.: Unsupervised semantic correspondence using stable diffusion. arXiv preprint arXiv:2305.15581 (2023) 5", + "12. Huang, R., Peng, S., Takmaz, A., Tombari, F., Pollefeys, M., Song, S., Huang, G., Engelmann, F.: Segment3d: Learning fine-grained class-agnostic 3d segmentation without manual labels. arXiv preprint arXiv:2312.17232 (2023) 4", + "13. Jaritz, M., Gu, J., Su, H.: Multi-view pointnet for 3d scene understanding. 2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW) pp. 3995-4003 (2019), https://apisemanticscholar.org/CorpusID:203593088", + "14. Kalogerakis, E., Hertzmann, A., Singh, K.: Learning 3D Mesh Segmentation and Labeling. 
ACM Transactions on Graphics 29(3) (2010) 1" + ], + "bbox": [ + 79, + 183, + 920, + 947 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "3-By-2", + "bbox": [ + 767, + 14, + 840, + 30 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 893, + 16, + 917, + 30 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "15. Kim, H., Sung, M.: Partstad: 2d-to-3d part segmentation task adaptation (2024) 4", + "16. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., et al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023) 4, 5, 7", + "17. Li, L.H., Zhang, P., Zhang, H., Yang, J., Li, C., Zhong, Y., Wang, L., Yuan, L., Zhang, L., Hwang, J.N., et al.: Grounded language-image pre-training. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 10965-10975 (2022) 4, 10, 13", + "18. Li, Y., Upadhyay, U., Habib Slim, A.A., Arpit Prajapati, S.P., Wonka, P., Elhoseiny, M.: 3d compat: Composition of materials on parts of 3d things (eccv 2022). ECCV (2022) 4", + "19. Liu, M., Zhu, Y., Cai, H., Han, S., Ling, Z., Porikli, F., Su, H.: Partslip: Low-shot part segmentation for 3d point clouds via pretrained image-language models. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21736-21746 (2023) 2, 3, 4, 8, 9, 10, 11, 12, 13, 14", + "20. Liu, W., Mao, J., Hsu, J., Hermans, T., Garg, A., Wu, J.: Composable part-based manipulation. In: 7th Annual Conference on Robot Learning (2023), https://openreview.net/forum?id=o-K3HVUeEw1", + "21. Liu, X., Xu, X., Rao, A., Gan, C., Yi, L.: Autogpart: Intermediate supervision search for generalizable 3d part segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11624-11634 (2022) 2", + "22. Lowe, D.G.: Distinctive image features from scale-invariant keypoints. International journal of computer vision 60, 91-110 (2004) 5", + "23. Min, J., Lee, J., Ponce, J., Cho, M.: Spair-71k: A large-scale benchmark for semantic correspondence. arXiv preprint arXiv:1908.10543 (2019) 7", + "24. Mo, K., Zhu, S., Chang, A.X., Yi, L., Tripathi, S., Guibas, L.J., Su, H.: Partnet: A large-scale benchmark for fine-grained and hierarchical part-level 3d object understanding. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 909-918 (2019) 3, 4, 9, 12, 13", + "25. Nadeau, P., Giamou, M., Kelly, J.: The sum of its parts: Visual part segmentation for inertial parameter identification of manipulated objects. arXiv preprint arXiv:2302.06685 (2023) 1", + "26. Nguyen, P.D.A., Ngo, T.D., Gan, C., Kalogerakis, E., Tran, A., Pham, C., Nguyen, K.: Open3dis: Open-vocabulary 3d instance segmentation with 2d mask guidance (2023) 4", + "27. Peng, S., Genova, K., Jiang, C., Tagliasacchi, A., Pollefeys, M., Funkhouser, T., et al.: Openscene: 3d scene understanding with open vocabularies. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 815-824 (2023) 4", + "28. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems 30 (2017) 10", + "29. Qian, G., Li, Y., Peng, H., Mai, J., Hammoud, H., Elhoseiny, M., Ghanem, B.: Pointnext: Revisiting pointnet++ with improved training and scaling strategies. 
Advances in Neural Information Processing Systems 35, 23192-23204 (2022) 4, 10", + "30. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International conference on machine learning. pp. 8748-8763. PMLR (2021) 4" + ], + "bbox": [ + 77, + 56, + 917, + 947 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 79, + 16, + 104, + 30 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "A. Thai et al.", + "bbox": [ + 157, + 14, + 292, + 30 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "31. Ramanathan, V., Kalia, A., Petrovic, V., Wen, Y., Zheng, B., Guo, B., Wang, R., Marquez, A., Kovvuri, R., Kadian, A., et al.: Paco: Parts and attributes of common objects. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7141-7151 (2023) 2, 11, 12", + "32. Sharma, G., Yin, K., Maji, S., Kalogerakis, E., Litany, O., Fidler, S.: Mvdecor: Multi-view dense correspondence learning for fine-grained 3d segmentation. In: European Conference on Computer Vision. pp. 550-567. Springer (2022) 3, 4, 8, 10, 11, 12, 13, 14", + "33. Singh, C., Murdoch, W.J., Yu, B.: Hierarchical interpretations for neural network predictions. arXiv preprint arXiv:1806.05337 (2018) 10", + "34. Sun, P., Chen, S., Zhu, C., Xiao, F., Luo, P., Xie, S., Yan, Z.: Going denser with open-vocabulary part segmentation. arXiv preprint arXiv:2305.11173 (2023) 5, 11, 12", + "35. Takmaz, A., Fedele, E., Sumner, R.W., Pollefeys, M., Tombari, F., Engelmann, F.: Openmask3d: Open-vocabulary 3d instance segmentation. arXiv preprint arXiv:2306.13631 (2023) 4, 11", + "36. Tang, L., Jia, M., Wang, Q., Phoo, C.P., Hariharan, B.: Emergent correspondence from image diffusion. arXiv preprint arXiv:2306.03881 (2023) 2, 3, 5, 6, 7", + "37. Varadarajan, K.M., Vincze, M.: Object part segmentation and classification in range images for grasping. In: 2011 15th International Conference on Advanced Robotics (ICAR). pp. 21-27. IEEE (2011) 1", + "38. Vu, T., Kim, K., Luu, T.M., Nguyen, T., Yoo, C.D.: Softgroup for 3d instance segmentation on point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2708-2717 (2022) 10", + "39. Wang, L., Li, X., Fang, Y.: Few-shot learning of part-specific probability space for 3d shape segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (June 2020) 4", + "40. Wang, R., Zhang, Y., Mao, J., Zhang, R., Cheng, C.Y., Wu, J.: Ikea-manual: Seeing shape assembly step by step. Advances in Neural Information Processing Systems 35, 28428-28440 (2022) 4", + "41. Xiang, F., Qin, Y., Mo, K., Xia, Y., Zhu, H., Liu, F., Liu, M., Jiang, H., Yuan, Y., Wang, H., et al.: Sapien: A simulated part-based interactive environment. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11097-11107 (2020) 9", + "42. Xu, M., Yin, X., Qiu, L., Liu, Y., Tong, X., Han, X.: Sampro3d: Locating sam prompts in 3d for zero-shot scene segmentation. arXiv preprint arXiv:2311.17707 (2023) 4, 11, 12", + "43. Xue, Y., Chen, N., Liu, J., Sun, W.: Zerops: High-quality cross-modal knowledge transfer for zero-shot 3d part segmentation (2023) 4", + "44. Yang, Y., Wu, X., He, T., Zhao, H., Liu, X.: Sam3d: Segment anything in 3d scenes. 
arXiv preprint arXiv:2306.03908 (2023) 4", + "45. Yi, L., Kim, V.G., Ceylan, D., Shen, I.C., Yan, M., Su, H., Lu, C., Huang, Q., Sheffer, A., Guibas, L.: A scalable active framework for region annotation in 3d shape collections. SIGGRAPH Asia (2016) 4", + "46. Yu, Q., Du, H., Liu, C., Yu, X.: When 3d bounding-box meets sam: Point cloud instance segmentation with weak-and-noisy supervision. ArXiv abs/2309.00828 (2023), https://api-semanticscholar.org/CorpusID:261530997 4", + "47. Zhang, J., Herrmann, C., Hur, J., Cabrera, L.P., Jampani, V., Sun, D., Yang, M.H.: A tale of two features: Stable diffusion complements dino for zero-shot semantic correspondence. arXiv preprint arXiv:2305.15347 (2023) 2, 5" + ], + "bbox": [ + 75, + 56, + 917, + 947 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "3-By-2", + "bbox": [ + 767, + 16, + 840, + 32 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 893, + 16, + 917, + 30 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "48. Zhao, L., Lu, J., Zhou, J.: Similarity-aware fusion network for 3d semantic segmentation. 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) pp. 1585-1592 (2021), https://apisemantic scholar.org/CorpusID:235732071 4", + "49. Zhao, N., Chua, T.S., Lee, G.H.: Few-shot 3d point cloud semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8873-8882 (2021) 10", + "50. Zhou, Y., Gu, J., Li, X., Liu, M., Fang, Y., Su, H.: Partslip++: Enhancing low-shot 3d part segmentation via multi-view instance segmentation and maximum likelihood estimation. arXiv preprint arXiv:2312.03015 (2023) 4, 10", + "51. Zhu, J., Zhang, Y., Guo, J., Liu, H., Liu, M., Liu, Y., Guo, Y.: Label transfer between images and 3d shapes via local correspondence encoding. Comput. Aided Geom. Des. 71(C), 255-266 (may 2019). https://doi.org/10.1016/j.cagd.2019.04.009, https://doi.org/10.1016/j.cagd.2019.04.009 5", + "52. Zhu, X., Zhang, R., He, B., Guo, Z., Zeng, Z., Qin, Z., Zhang, S., Gao, P.: Pointclip v2: Prompting clip and gpt for powerful 3d open-world learning. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 2639-2650 (2023) 2" + ], + "bbox": [ + 75, + 56, + 917, + 395 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 79, + 16, + 104, + 30 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "A. Thai et al.", + "bbox": [ + 159, + 14, + 292, + 30 + ], + "page_idx": 17 + } +] \ No newline at end of file diff --git a/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/4a9028d8-b05c-4422-ac23-0a7be9202087_model.json b/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/4a9028d8-b05c-4422-ac23-0a7be9202087_model.json new file mode 100644 index 0000000000000000000000000000000000000000..bbd65a7341ed7c2a5ca3a00454139041be51be1c --- /dev/null +++ b/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/4a9028d8-b05c-4422-ac23-0a7be9202087_model.json @@ -0,0 +1,2414 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.134, + 0.05, + 0.868, + 0.108 + ], + "angle": 0, + "content": "\\(3 \\times 2\\): 3D Object Part Segmentation by 2D Semantic Correspondences" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.14, + 0.911, + 0.18 + ], + "angle": 0, + "content": "Anh Thai\\(^{1,2}\\), Weiyao Wang\\(^{2}\\), Hao Tang\\(^{2}\\), Stefan Stojanov\\(^{1}\\), James M. 
Rehg\\(^{3}\\), and Matt Feiszli\\(^{2}\\)" + }, + { + "type": "text", + "bbox": [ + 0.33, + 0.195, + 0.668, + 0.214 + ], + "angle": 0, + "content": "1 Georgia Institute of Technology" + }, + { + "type": "text", + "bbox": [ + 0.411, + 0.214, + 0.586, + 0.231 + ], + "angle": 0, + "content": "2 Meta AI, FAIR" + }, + { + "type": "text", + "bbox": [ + 0.287, + 0.231, + 0.71, + 0.251 + ], + "angle": 0, + "content": "3 University of Illinois Urbana-Champaign" + }, + { + "type": "image", + "bbox": [ + 0.09, + 0.268, + 0.905, + 0.478 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.076, + 0.496, + 0.92, + 0.534 + ], + "angle": 0, + "content": "Fig. 1: We propose 3-By-2, a novel training-free method for low-shot 3D object part segmentation that achieves SOTA performance on both zero-shot and few-shot settings." + }, + { + "type": "text", + "bbox": [ + 0.143, + 0.545, + 0.857, + 0.814 + ], + "angle": 0, + "content": "Abstract. 3D object part segmentation is essential in computer vision applications. While substantial progress has been made in 2D object part segmentation, the 3D counterpart has received less attention, in part due to the scarcity of annotated 3D datasets, which are expensive to collect. In this work, we propose to leverage a few annotated 3D shapes or richly annotated 2D datasets to perform 3D object part segmentation. We present our novel approach, termed 3-By-2 that achieves SOTA performance on different benchmarks with various granularity levels. By using features from pretrained foundation models and exploiting semantic and geometric correspondences, we are able to overcome the challenges of limited 3D annotations. Our approach leverages available 2D labels, enabling effective 3D object part segmentation. Our method 3-By-2 can accommodate various part taxonomies and granularities, demonstrating part label transfer ability across different object categories. Project website: https://ngailapdi.github.io/projects/3by2/." + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.842, + 0.313, + 0.862 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.88, + 0.924, + 0.921 + ], + "angle": 0, + "content": "3D object part understanding is essential in various research fields and applications, such as robotics [20, 25, 37] and graphics [14]. Through our understanding" + }, + { + "type": "page_footnote", + "bbox": [ + 0.1, + 0.93, + 0.537, + 0.949 + ], + "angle": 0, + "content": "Work done as an intern at Meta AI (FAIR)." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.08, + 0.018, + 0.095, + 0.03 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.16, + 0.017, + 0.293, + 0.032 + ], + "angle": 0, + "content": "A. Thai et al." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.056, + 0.919, + 0.27 + ], + "angle": 0, + "content": "of the world, objects can be decomposed into parts based on diverse properties (e.g., geometry or affordance [8, 21]). However, these different decompositions do not always align with one another—the same object can be segmented into parts differently depending on the specific use case. For instance, a driver might perceive a car in terms of its functional components like the steering wheel, accelerator pedal, and brake pedal. Conversely, a manufacturing worker may view the car as an assembly of structural parts, such as the frame, bumper, and windshield. 
Further, various parts with similar functionalities or structures can be shared among different object classes (e.g., the term \"leg\" can apply to multiple furniture items). How can we design a 3D part segmentation system that has high performance across such different requirements and scenarios?" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.275, + 0.919, + 0.488 + ], + "angle": 0, + "content": "Recent works in 3D part segmentation have integrated language as an additional input [1, 19, 52] by leveraging vision-language models to prompt the segmentation. However, grounding visual parts using language is inherently ambiguous. This is because parts can be described using diverse phrases that may include synonyms, various levels of detail, and differences in terminology (structural vs functional), which presents challenges for these models [19]. In contrast, images capture rich information about object shapes, textures and spatial part relationships. These properties can directly be parsed and compared using visual similarities between objects despite differences in linguistic expression. Therefore, it is important to study the limits and potentials of reasoning about visual similarity for generalization across different objects and categories." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.493, + 0.919, + 0.629 + ], + "angle": 0, + "content": "In this work, we investigate the 3D part segmentation task from this different perspective and propose a novel method called 3-By-2. Since labeling 3D data is expensive, we design 3-by-2 to leverage existing extensively annotated 2D part segmentation datasets [10,31] or a few-labeled 3D shapes to perform object part segmentation without additional training or finetuning. Our method does not need any language input and can flexibly handle segmentation tasks at various levels of granularity." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.634, + 0.919, + 0.847 + ], + "angle": 0, + "content": "We build our method based on the observation that because objects are constructed from parts, and because various objects often share a common set of parts with similar visual structures, this should allow part label transfer from one object to another without any language description. Recent studies [36, 47] have demonstrated the strong 2D semantic correspondences encoded by features of image diffusion models that generalize across different domains (e.g. sketch vs real images). To label a query 3D object point cloud, we leverage these strong representations to perform 2D pixel correspondence-based label transfer from in-the-wild 2D datasets or 2D renders of a few labeled 3D objects. To the best of our knowledge, we are the first to use diffusion model features for semantic label transfer in the context of 3D part segmentation." 
+ }, + { + "type": "text", + "bbox": [ + 0.08, + 0.852, + 0.919, + 0.948 + ], + "angle": 0, + "content": "While it might seem that obtaining 2D part labels for multi-view renders of an object through label transfer and back-projection into 3D is intuitively straightforward, a high performance and efficient implementation requires careful consideration of the challenges of 3D part segmentation: 1) Precise determination of 3D object part boundaries, which is particularly challenging for unstructured" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.77, + 0.017, + 0.842, + 0.034 + ], + "angle": 0, + "content": "3-By-2" + }, + { + "type": "page_number", + "bbox": [ + 0.904, + 0.017, + 0.92, + 0.031 + ], + "angle": 0, + "content": "3" + }, + { + "type": "image", + "bbox": [ + 0.084, + 0.055, + 0.916, + 0.153 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.161, + 0.92, + 0.252 + ], + "angle": 0, + "content": "Fig. 2: Overview of our proposed method 3-By-2. (1) Render the input object in multiple camera viewpoints, (2) Perform 2D part segmentation on each view individually by leveraging 2D semantic correspondences and 2D class-agnostic segmentation model, (3) Aggregate the 2D predictions from multiple views using our proposed mask-consistency module, (4) Back-project the predictions to 3D using depth information." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.267, + 0.92, + 0.384 + ], + "angle": 0, + "content": "data like point clouds, and 2) Flexible adaptation to different levels of part granularity. To this end, we introduce three novel elements of our method: non-overlapping generation, mask-level label transfer and mask-consistency modules (see Fig. 2). These components work efficiently together to ensure precise 3D part segmentation masks and boundaries across a range of object categories and part levels (Fig. 1 and Tables 1, 2, 3)." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.385, + 0.92, + 0.502 + ], + "angle": 0, + "content": "Overall, 3-By-2 is a training-free method independent of language inputs, instead relying solely on the 2D labels provided by a 2D database. Unlike previous methods that require 3D segmentation priors like point-cloud clusters [19] or mesh surface information [1,32], our approach has only a single requirement: calibrated cameras for back-projection. This can be known during the rendering process or predicted using SfM approaches." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.502, + 0.922, + 0.736 + ], + "angle": 0, + "content": "We validate the performance of our approach with PartNet-Ensembled [19], a dataset tailored for language-input models, and PartNet [24], which is not tailored for language. These datasets exhibit multiple levels of granularity. Notably, unlike previous approaches that require category-specific fine-tuning for few-shot scenarios [19, 32], 3-By-2 achieves SOTA performance without any training or fine-tuning requirements in either a zero-shot or few-shot setting. Additionally, we identify that models with language inputs exhibit suboptimal performance with highly fine-grained part terminologies. This highlights the advantages of our approach, which effectively handles these fine-grained object parts. Furthermore, we conduct comprehensive ablation studies and demonstrate the transferability of parts across different object categories, which benefits the understanding of object part compositionality." 
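Step (4) of the pipeline in Fig. 2, lifting aggregated 2D labels into 3D, reduces to standard pinhole back-projection once rendered depth and calibrated cameras are available. The sketch below is a minimal single-view version with illustrative inputs; per-view point sets would then be merged across the K viewpoints.

```python
import numpy as np

def backproject_labels(depth, labels, K, cam2world):
    """Lift per-pixel part labels into labeled 3D world points.

    depth:     (H, W) depth map from the renderer (0 = background).
    labels:    (H, W) per-pixel part labels from the 2D segmenter.
    K:         (3, 3) camera intrinsics.
    cam2world: (4, 4) camera-to-world extrinsic matrix.
    """
    v, u = np.nonzero(depth > 0)            # foreground pixel coordinates
    z = depth[v, u]
    pix = np.stack([u * z, v * z, z])       # scaled homogeneous pixels
    cam = np.linalg.inv(K) @ pix            # (3, N) camera-frame points
    cam_h = np.vstack([cam, np.ones(cam.shape[1])])
    world = (cam2world @ cam_h)[:3].T       # (N, 3) world-frame points
    return world, labels[v, u]

# Toy usage: identity pose, synthetic depth patch.
K = np.array([[100.0, 0.0, 32.0], [0.0, 100.0, 32.0], [0.0, 0.0, 1.0]])
depth = np.zeros((64, 64))
depth[20:30, 20:30] = 2.0
pts, pt_labels = backproject_labels(depth, np.ones((64, 64), int), K, np.eye(4))
print(pts.shape, pt_labels.shape)
```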
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.737, + 0.555, + 0.755 + ], + "angle": 0, + "content": "In summary, our contributions are 4-fold:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.773, + 0.918, + 0.83 + ], + "angle": 0, + "content": "- A novel, training-free method, 3-By-2, that achieves SOTA performance on benchmarks with different levels of granularity for zero-shot and few-shot 3D object part segmentation." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.832, + 0.918, + 0.888 + ], + "angle": 0, + "content": "- The first to provide an effective approach for leveraging image diffusion model's features [36] to establish 2D semantic correspondences in the context of 3D part segmentation." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.891, + 0.918, + 0.948 + ], + "angle": 0, + "content": "- Novel non-overlapping mask generation, mask-level label transfer, and mask-consistency modules that effectively transfer part labels from 2D database and extrapolate them to 3D." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.773, + 0.918, + 0.948 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.08, + 0.018, + 0.095, + 0.031 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.16, + 0.017, + 0.294, + 0.032 + ], + "angle": 0, + "content": "A. Thai et al." + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.055, + 0.918, + 0.094 + ], + "angle": 0, + "content": "- Demonstrating the flexibility of 3-By-2 in accommodating various database settings and in generalizing between different object categories." + }, + { + "type": "title", + "bbox": [ + 0.081, + 0.126, + 0.329, + 0.148 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "title", + "bbox": [ + 0.081, + 0.169, + 0.416, + 0.189 + ], + "angle": 0, + "content": "2.1 3D Part Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.204, + 0.918, + 0.398 + ], + "angle": 0, + "content": "In contrast to its 2D counterpart, the progress in this field has been relatively limited, primarily due to the high cost associated with collecting and annotating 3D datasets. Currently, all of the available large-scale annotated 3D object part datasets are synthetic [18,24,40,45]. The most widely used benchmarks [24,45] are predominantly derived from objects within the ShapeNetCore [5] dataset. This problem has been tackled using architectures that take 3D representations [24,29] as inputs. These methods were trained in a supervised manner, requiring large-scale annotated data. More recent approaches have attempted to investigate data-efficient training scenarios where only a few 3D shapes are annotated [19,32,39,50]." + }, + { + "type": "title", + "bbox": [ + 0.081, + 0.427, + 0.858, + 0.447 + ], + "angle": 0, + "content": "2.2 Multi-view 2D-3D Segmentation Using Foundation Models" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.462, + 0.918, + 0.637 + ], + "angle": 0, + "content": "Although multi-view approaches have been widely utilized in the past for 3D segmentation [7,13,48], the rapid advancement of 2D foundation models [16,17] has encouraged more SOTA research aimed at leveraging these models to perform 3D segmentation in a multi-view fashion. CLIP [30] and GLIP [17] have been employed to integrate language information from multiple 2D views into 3D for open-vocabulary segmentation [1,19,27,35,50]. 
SAM [16], due to its ability to output per-pixel masks, has been used as an effective tool for multi-view 2D-3D segmentation, both on 3D structures like point clouds [35,42,44,46,50] or in NeRF-style [4]." + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.639, + 0.935, + 0.832 + ], + "angle": 0, + "content": "Scene Segmentation. Various combinations of foundation models have been explored for this task. While [35] leverages CLIP and SAM to support open-vocabulary 3D part segmentation, others use SAM with carefully designed prompts [4] or post-processing techniques [44]. Building upon these successes, concurrent works [12,26,42] seek to improve SAM utilization strategies. Our work differs by focusing on part segmentation, which requires finer granularity. This distinction in objectives directly influences the processing of SAM predictions, tailored to suit their specific characteristics. For example, while scene segmentation methods may disregard or merge masks covering parts of objects, part segmentation approaches might encourage splitting, depending on the desired level of detail." + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.833, + 0.918, + 0.948 + ], + "angle": 0, + "content": "Part Segmentation. PartSLIP [19] and SATR [1] were among the first to employ foundation models for this task, pioneering the use of GLIP for open-vocabulary segmentation. Concurrent works have seen the integration of SAM into their pipelines [15,43,50]. Zhou et al. [50] and Kim et al. [15] use SAM with GLIP-predicted bounding boxes, while Xue et al. [43] employ SAM with furthest point sampling for each view, extending predictions to 3D with GLIP labels. Our" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.77, + 0.016, + 0.84, + 0.032 + ], + "angle": 0, + "content": "3-By-2" + }, + { + "type": "page_number", + "bbox": [ + 0.904, + 0.017, + 0.918, + 0.031 + ], + "angle": 0, + "content": "5" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.056, + 0.918, + 0.153 + ], + "angle": 0, + "content": "approach shares with these works the use of SAM for 2D segmentation before 3D aggregation. In contrast, our method focuses solely on visual cues without language inputs, employing image diffusion model's features [36]. To improve SAM's accuracy, we introduce a novel non-overlapping mask generation module, eliminating the need for GLIP-generated bounding boxes." + }, + { + "type": "title", + "bbox": [ + 0.08, + 0.178, + 0.67, + 0.198 + ], + "angle": 0, + "content": "2.3 Part Label Transfer using Correspondences" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.208, + 0.918, + 0.344 + ], + "angle": 0, + "content": "Transferring labels from annotated datasets to non-annotated datasets has been considered recently in [34] for open-vocabulary 2D part segmentation and previously in [6, 51] for 3D part segmentation. While [34] used DINOv1 [3] feature representations for dense label transfer between related objects in the base classes and novel object classes, Zhu et al. [51] relied on classical SIFT [22] features for establishing correspondences in 2D images. Chen et al. [6], in contrast, train a network to regress the correspondences directly on the input point cloud." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.345, + 0.918, + 0.499 + ], + "angle": 0, + "content": "We share with these approaches the use of semantic correspondences to identify optimal candidates for label transfer. However, our primary objective sets us apart significantly from [34], as we focus on segmenting 3D objects. 
Compared to [51], we leverage class-agnostic segmentation models to avoid dense pixel/patch sampling. Furthermore, unlike [6], we do not require direct operations on 3D point clouds or any specific 3D representations. Additionally, we introduce a mask-consistency module for per mask label voting, rather than relying solely on small local patches." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.5, + 0.918, + 0.636 + ], + "angle": 0, + "content": "Semantic Correspondences from Foundation Models. Many vision foundation models have demonstrated an inherent capability to implicitly capture semantic correspondences across different instances within the same category (e.g., matching chair backs) and across diverse categories (e.g., aligning dog's legs with cat's legs) [2,11,36,47]. In this work, we leverage semantic correspondences established by [36] to transfer part labels from annotated 2D datasets to query 3D objects." + }, + { + "type": "title", + "bbox": [ + 0.08, + 0.661, + 0.241, + 0.683 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.699, + 0.918, + 0.855 + ], + "angle": 0, + "content": "Given a database \\(\\mathcal{D}\\) consisting of 2D part annotations, our goal is to segment each query object \\(q\\) into parts using the visual part vocabulary provided by \\(\\mathcal{D}\\). Note that \\(\\mathcal{D}\\) can either be gathered from 2D (image) part datasets or from renders of a few 3D objects captured at different view-points. Our method consists of three main steps (Fig. 2): (1) render a set of 2D RGB images \\(\\mathcal{I}q\\) of 3D object \\(q\\) from \\(K\\) distinct camera viewpoints; (2) perform 2D part segmentation on the rendered images; (3) aggregate image-level predictions through a mask-consistency aggregation module to obtain 3D predictions." + }, + { + "type": "title", + "bbox": [ + 0.08, + 0.88, + 0.416, + 0.9 + ], + "angle": 0, + "content": "3.1 2D Part Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.909, + 0.918, + 0.948 + ], + "angle": 0, + "content": "There are two primary approaches to tackle this task: (1) Top-down, using segmentation mechanisms such as SAM [16], or (2) Bottom-up, which involves la" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.079, + 0.017, + 0.095, + 0.03 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.16, + 0.016, + 0.295, + 0.032 + ], + "angle": 0, + "content": "A. Thai et al." + }, + { + "type": "image", + "bbox": [ + 0.1, + 0.059, + 0.885, + 0.179 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.182, + 0.921, + 0.289 + ], + "angle": 0, + "content": "Fig. 3: The process of pixel-level part label transferring. For each pixel \\( p \\) in the query image \\( I_{k} \\), we perform the following: (1) Extract the feature \\( f(p) \\), along with the feature grid for each image \\( I_{\\mathcal{D}} \\) in the database \\( \\mathcal{D} \\); (2) Measure cosine similarity between \\( f(p) \\) and the feature of each pixel within each feature grid, (3) Obtain the best match of \\( p \\) over \\( \\mathcal{D} \\) by determining the most similar pixel \\( p_{\\mathcal{D}} \\) over all images \\( I_{\\mathcal{D}} \\); (4) Assign the label of \\( p \\) is to be the label of \\( p_{\\mathcal{D}} \\)." 
+ }, + { + "type": "image", + "bbox": [ + 0.101, + 0.295, + 0.48, + 0.382 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.276, + 0.385, + 0.306, + 0.4 + ], + "angle": 0, + "content": "(a)" + }, + { + "type": "image", + "bbox": [ + 0.544, + 0.297, + 0.914, + 0.382 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.711, + 0.385, + 0.743, + 0.4 + ], + "angle": 0, + "content": "(b)" + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.408, + 0.921, + 0.514 + ], + "angle": 0, + "content": "Fig. 4: (a) Non-overlapping 2D Mask Proposal. We address the issue of overlapping masks produced by SAM. The masks are first sorted by their areas. Subsequently, the smaller masks are stacked on top of the larger ones. Non-overlapping masks are obtained by taking the visible segment of each mask. (b) Different mask sampling strategies for label transfer. Our strategy provides accurate, dense prediction with clear part boundaries." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.534, + 0.92, + 0.631 + ], + "angle": 0, + "content": "beling each pixel individually. While SAM produces high-quality 2D masks with sharp boundaries, it operates in a class-agnostic manner, often leading to high overlap between sub-parts, parts, and instances. Simply selecting the mask with the highest score may result in incorrect granularity and lacks the flexibility required for part segmentation." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.635, + 0.92, + 0.829 + ], + "angle": 0, + "content": "Conversely, doing label transfer for each pixel individually in the image is computationally impractical, particularly for part segmentation tasks where high resolution is preferred. Sparsely sampling and labeling pixels can result in under-segmented masks, particularly for smaller parts that are less likely to be sampled compared to larger parts (see Fig. 4b). Moreover, accurately determining part boundaries for individual pixels can be challenging, which may result in increased errors when extrapolating to 3D, particularly with unstructured 3D representations like point clouds. These issues raise the important question: how do we transfer part labels and preserve part boundaries without sacrificing computational resources?" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.832, + 0.922, + 0.95 + ], + "angle": 0, + "content": "To address this question, we propose a 2D segmentation method that combines the strengths of both approaches which consists of 3 novel components: (1) Single-pixel 2D label transfer using semantic correspondences derived from DIFT [36], (2) Non-overlapping 2D mask proposal module, which refines SAM's multi-granularity predicted masks into non-overlapping part masks, and (3) Mask-level label transfer by integrating (1) and (2)." 
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.77, + 0.017, + 0.842, + 0.034 + ], + "angle": 0, + "content": "3-By-2" + }, + { + "type": "page_number", + "bbox": [ + 0.904, + 0.017, + 0.92, + 0.031 + ], + "angle": 0, + "content": "7" + }, + { + "type": "header", + "bbox": [ + 0.115, + 0.054, + 0.185, + 0.065 + ], + "angle": 0, + "content": "Input 3D Object" + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.082, + 0.191, + 0.142 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.2, + 0.061, + 0.49, + 0.107 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.254, + 0.108, + 0.451, + 0.114 + ], + "angle": 0, + "content": "Mask label consistency between multiple views" + }, + { + "type": "image", + "bbox": [ + 0.201, + 0.114, + 0.49, + 0.159 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.541, + 0.066, + 0.92, + 0.16 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.174, + 0.502, + 0.283 + ], + "angle": 0, + "content": "Fig.5: Two approaches to aggregate 3D part labels from multiple 2D views. Aggregating 3D part labels from multiple 2D views through geometric correspondence can be achieved by either point or mask label consistency." + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.174, + 0.94, + 0.263 + ], + "angle": 0, + "content": "Fig.6: Effectiveness of mask label consistency. Enforcing consistency at the mask level can mitigate discrepancies at each individual point and contributes to smoother segmentation." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.298, + 0.922, + 0.59 + ], + "angle": 0, + "content": "Single-pixel 2D Label Transfer. At the core of our method is the 2D label transfer process. The goal is to transfer pixel labels from the annotated 2D database \\(\\mathcal{D}\\) to the query RGB image \\(I_{k} \\in \\mathcal{I}q\\): for a pixel \\(p\\) in the foreground object in \\(I_{k}\\), we aim to identify the best-matched pixel \\(p'\\) in each image \\(I_{\\mathcal{D}}\\) in the database \\(\\mathcal{D}\\) and assign initial label to \\(p\\) by \\(p'\\). To this end, we leverage the established semantic correspondence of DIFT [36]. While recent works have demonstrated the effectiveness of image diffusion models in extracting semantic correspondences, as evidenced by evaluations on datasets like SPair-71K [23], we are the first to leverage these features for transferring semantic labels in the context of 3D part segmentation. Specifically, \\(p' = \\arg \\max_{p' \\in I_{\\mathcal{D}}} \\cos(f(p), f(p'))\\) where \\(\\cos\\) and \\(f(x)\\) denotes the cosine similarity score and the feature representing pixel \\(x\\). The best pixel correspondence \\(p_{\\mathcal{D}}\\) of \\(p\\) over the entire database is obtained by taking the most similar match within all the images in the database. Formally, \\(p_{\\mathcal{D}} = \\arg \\max_{\\mathcal{D}} p'\\). The label of \\(p\\) is then assigned to be the label of \\(p_{\\mathcal{D}}\\) (see Fig. 3)." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.595, + 0.92, + 0.809 + ], + "angle": 0, + "content": "Coarse-to-fine correspondence search. Finding the nearest neighbor for a query pixel across the entire database can be prohibitively costly, especially for part segmentation which operates in high resolutions. 
We propose a coarse-to-fine strategy: using the coarse feature maps generated by DIFT [36], we first conduct the search at the coarse level to localize the region of the best match. We then extract the \\(3 \\times 3\\) window centered at this region (in feature space) for a fine search (see Fig. 7). This approach ensures that we compute per-pixel similarity scores only within the region of interest, rather than across the entire image, improving computational efficiency. For instance, when processing a pair of images with a resolution of \\(800 \\times 800\\), coarse-to-fine correspondence search achieves a speed improvement of approximately 2000 times in terms of wall clock time." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.813, + 0.92, + 0.95 + ], + "angle": 0, + "content": "Non-overlapping 2D Mask Proposal. We propose the use of class-agnostic 2D part mask proposal, specifically from SAM [16]. By assuming that each mask proposal corresponds to a subset of a part, we can then selectively sample pixels within each mask proposal for label transferring. The labels are subsequently propagated to each pixel of the 2D masks through a majority voting process based on the sampled pixels within the mask. To address the issue posed by the highly overlapping predictions from SAM's multi-granularity model, we intro" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.079, + 0.017, + 0.096, + 0.031 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.16, + 0.016, + 0.295, + 0.032 + ], + "angle": 0, + "content": "A. Thai et al." + }, + { + "type": "image", + "bbox": [ + 0.135, + 0.055, + 0.865, + 0.207 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.21, + 0.921, + 0.283 + ], + "angle": 0, + "content": "Fig. 7: Coase-to-fine correspondence search. We first conduct searching on a coarse level to identify the region of best match. We then extract the \\(3 \\times 3\\) window centered at this region in feature space for a fine search. This approach is approximately 2000 times faster in terms of wall time for large \\(N\\) (\\(800 \\times 800\\))." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.29, + 0.921, + 0.33 + ], + "angle": 0, + "content": "duce a non-overlapping 2D mask generation module. This module takes SAM masks as inputs and outputs a set of mutually exclusive 2D masks." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.331, + 0.922, + 0.429 + ], + "angle": 0, + "content": "We arrange the SAM output masks in descending order of mask area and stack smaller masks on top of larger ones. This ensures that if mask \\( A \\) is a subset of mask \\( B \\), stacking \\( A \\) on top of \\( B \\) results in non-overlapping masks, namely \\( A \\) and \\( B \\setminus A \\). Non-overlapping masks are finally obtained by taking the visible segments of each mask (see Fig. 4a)." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.43, + 0.922, + 0.507 + ], + "angle": 0, + "content": "2D Mask Label Assignment. After obtaining the non-overlapping masks, we sparsely sample pixels in each mask to transfer label. We then perform majority voting to assign the dominant label for each 2D mask, weighted by the confidence score (cosine similarity) of the best pixel correspondence matches." 
+ }, + { + "type": "title", + "bbox": [ + 0.076, + 0.541, + 0.618, + 0.562 + ], + "angle": 0, + "content": "3.2 Mask-consistency Aggregation Module" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.579, + 0.92, + 0.793 + ], + "angle": 0, + "content": "Given a set of 2D RGB images with part segmentation predictions, we aim to extrapolate these segmentation labels to 3D using geometric correspondences. Prior works [19,32] aggregate multi-view information for each 3D point or mesh triangle face through a weighted sum of multi-view 2D predictions. To fully maintain the high-quality part boundaries predicted by SAM in 2D, we choose to aggregate multi-view predictions for each 2D mask instead. This observation is based on the fact that part identities remain constant across multiple views (e.g., the seat in view 1 should be segmented as the seat in view 2, see Fig. 5). Intuitively, mask consistency can be seen as an additional constraint on point consistency, encouraging points within the same 2D mask to remain associated with the same masks in the 3D space" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.795, + 0.922, + 0.95 + ], + "angle": 0, + "content": "We present a novel mask-consistency aggregation module that takes a set of 2D part segmentation predictions for multiple views as input. Our approach involves constructing an undirected unweighted graph, denoted as \\( G:V\\to E \\), where each vertex corresponds to a 2D mask in a given view. The edges of the graph connect masks from different views that capture the projection of the same 3D points. We construct a set of mask correspondences for each vertex \\( v\\in V \\), \\( \\mathcal{M}_v = \\{v,u_1,u_2,\\dots u_N\\} \\) where an edge \\( e_i \\) connects \\( v \\) and \\( u_{i} \\). A mask \\( v \\) is defined as oversegmented when there exists at least 2 masks in \\( \\mathcal{M}_v \\) that" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.77, + 0.016, + 0.842, + 0.035 + ], + "angle": 0, + "content": "3-By-2" + }, + { + "type": "page_number", + "bbox": [ + 0.903, + 0.017, + 0.92, + 0.032 + ], + "angle": 0, + "content": "9" + }, + { + "type": "image", + "bbox": [ + 0.123, + 0.044, + 0.874, + 0.178 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.184, + 0.922, + 0.31 + ], + "angle": 0, + "content": "Fig. 8: Mask-consistency process. (1) Each vertex of \\( G \\) corresponds to a mask in a given image. The edge connecting each pair of vertices denotes that the pair contains the projection of the same 3D points. Mask consistency set \\( \\mathcal{M}_v \\) for each \\( v \\) is obtained via the first-order neighborhood of \\( v \\). (2) \\( v_1 \\) is detected as under-segmented since \\( \\mathcal{M}_{v_1} \\) consists of masks from the same view with different labels \\( (v_2, v_3) \\) and hence, is discarded. (3) Traverse \\( \\mathcal{M}_{v_i} \\) to obtain labels for \\( \\mathcal{M}_{v_i} \\). (4) Obtain label for each mask by majority voting. Here we show a simple example for visualization purpose." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.316, + 0.92, + 0.356 + ], + "angle": 0, + "content": "belong to the same image but are assigned with different labels. For instance, in Fig. 8, vertex \\( v_{1} \\) corresponds to an undersegmented mask. 
Formally," + }, + { + "type": "equation", + "bbox": [ + 0.238, + 0.371, + 0.92, + 0.394 + ], + "angle": 0, + "content": "\\[\n\\mathcal {S} _ {v} = \\left\\{u _ {i}, u _ {j} \\in I _ {k} \\text {a n d} l \\left(u _ {i}\\right) \\neq l \\left(u _ {j}\\right) \\mid u _ {i}, u _ {j} \\in \\mathcal {M} _ {v} \\right\\} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.408, + 0.92, + 0.543 + ], + "angle": 0, + "content": "where \\( l(x) \\) denotes the label of \\( x \\). We discard \\( v \\) if \\( |S_v| > \\epsilon \\). That is, if \\( v \\) is consistently determined as undersegmented across multiple views, we discard the contribution of \\( v \\) in the final label assignment. We then traverse the graph simultaneously from each vertex using breadth-first-search to accumulate the labels for each \\( \\mathcal{M}_v \\). Subsequently, we perform majority voting to assign labels to each \\( \\mathcal{M}_v \\). Finally, for each mask, we identify the most frequently assigned label as the final label." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.545, + 0.92, + 0.681 + ], + "angle": 0, + "content": "The simple intuition behind this approach is: if a part occasionally receives incorrect labels in some challenging views, employing majority voting within the mask correspondence set can calibrate these errors. Further, performing this on the mask level ensures that if two 2D points share the same mask label in the majority of the views, they will ultimately be assigned with the same final label. This approach calibrates potential discrepancies in individual point-wise aggregations (see Fig. 6)." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.712, + 0.312, + 0.734 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.755, + 0.922, + 0.95 + ], + "angle": 0, + "content": "In this section, we first report the performance of 3-By-2 against baselines on PartNet-Ensembled (PartNetE) [19] in Sec. 4.1 and on PartNet [24] with \"level-3\" annotation in Sec. 4.2. Note the distinction between these datasets since PartNetE consists of a distinct set of articulated objects from [41]. These datasets also exhibit different granularity of part annotations. While PartNetE consists of both basic parts like chair back and fine-grained parts like scissors screw, PartNet with \"level-3\" annotation contains all fine-grained parts such as \"back_frame_vertical_bar\". In Sec. 4.3, we conduct comprehensive ablation studies to verify the necessity of each components in 3-By-2. Our few-shot experiments refer to the setting where a few labeled 3D objects are available for" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.08, + 0.017, + 0.105, + 0.031 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.16, + 0.016, + 0.294, + 0.032 + ], + "angle": 0, + "content": "A. Thai et al." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.054, + 0.921, + 0.16 + ], + "angle": 0, + "content": "Table 1: Few-shot performance on PartNetE [19] dataset. The left columns show performance on the 17 categories that supervised methods [28,29,38] (first 3 rows) were trained on with additional 28K objects. The right columns show performance on the 28 categories with only 8 objects/category in the training set. [19,33,49,50] and ours (last 5 rows) only have access to 8 objects/category during training for all 45 categories. Please refer to the Supplement for the full table on all 45 categories." 
+ }, + { + "type": "table", + "bbox": [ + 0.084, + 0.165, + 0.911, + 0.301 + ], + "angle": 0, + "content": "
| Methods | Chair | Scissors | Laptop | Door | Microwave | Keyboard | Avg. (17) | Camera | USB | Stapler | Dispenser | Kettle | Eyegl. | Avg. (28) | Avg. (45) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| PointNext [29] | 0.918 | 0.573 | 0.325 | 0.438 | 0.405 | 0.450 | 0.591 | 0.332 | 0.679 | 0.886 | 0.260 | 0.451 | 0.881 | 0.457 | 0.502 |
| PointNet++ [28] | 0.847 | 0.500 | 0.554 | 0.457 | 0.436 | 0.745 | 0.533 | 0.065 | 0.524 | 0.516 | 0.121 | 0.209 | 0.762 | 0.250 | 0.365 |
| SoftGroup [38] | 0.883 | 0.760 | 0.184 | 0.531 | 0.383 | 0.589 | 0.505 | 0.236 | 0.441 | 0.801 | 0.189 | 0.574 | 0.724 | 0.313 | 0.384 |
| ACD [33] | 0.390 | 0.391 | 0.111 | 0.189 | 0.066 | 0.261 | 0.196 | 0.101 | 0.252 | 0.500 | 0.194 | 0.402 | 0.782 | 0.259 | 0.235 |
| Prototype [49] | 0.708 | 0.430 | 0.279 | 0.334 | 0.270 | 0.449 | 0.419 | 0.320 | 0.654 | 0.807 | 0.534 | 0.607 | 0.779 | 0.470 | 0.451 |
| PartSLIP [19] | 0.854 | 0.603 | 0.297 | 0.408 | 0.427 | 0.536 | 0.567 | 0.583 | 0.561 | 0.848 | 0.738 | 0.770 | 0.883 | 0.625 | 0.603 |
| PartSLIP++ [50] | 0.853 | 0.605 | 0.297 | 0.451 | 0.495 | 0.724 | 0.574 | 0.632 | 0.575 | 0.630 | 0.720 | 0.856 | 0.883 | 0.642 | 0.615 |
| 3-By-2 (ours) | 0.844 | 0.657 | 0.453 | 0.544 | 0.402 | 0.896 | 0.604 | 0.626 | 0.790 | 0.901 | 0.782 | 0.815 | 0.928 | 0.665 | 0.642 |
" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.322, + 0.918, + 0.361 + ], + "angle": 0, + "content": "each object category while there is no annotated 3D part labels in the zero-shot setting. In this setting, we leverage labels from the 2D domain instead." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.391, + 0.586, + 0.41 + ], + "angle": 0, + "content": "4.1 Performance on PartNet-Ensembled" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.424, + 0.921, + 0.54 + ], + "angle": 0, + "content": "Data & Metric. We use the dataset provided by Liu et al. [19] for both the few-shot and zero-shot settings. For each object in both few-shot and test sets, we render 20 RGB images from different views with resolution \\(800 \\times 800\\). We report mean IoU (mIoU) performance of all baselines using the evaluation protocol provided by [19] on the input point clouds. Specifically, the performance of a part is not considered if it does not exist in the queried object." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.542, + 0.92, + 0.753 + ], + "angle": 0, + "content": "Few-shot Baselines. We compare 3-By-2 against fully-supervised semantic segmentation [28, 29, 38], few-shot semantic segmentation [33, 49] and language-based [19, 50] methods. The fully supervised methods [28, 29, 38] were trained on 28K objects of 17 overlapping categories between PartNetE [19], in addition to the few-shot set consisting of 8 objects/category. The second group of baselines [19, 33, 49, 50] were only trained on the few-shot set. PartSLIP and PartSLIP++, a concurrent work, rely on large vision-language model (GLIP [17]) to guide the 2D part detection before extending to the 3D point cloud segmentation. We provide more detailed descriptions in the Supplement. We omit the evaluation of MvDeCor [32] on this benchmark since it requires ground-truth 3D meshes, whereas PartNetE only provides dense point clouds as inputs." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.755, + 0.919, + 0.909 + ], + "angle": 0, + "content": "Few-shot Setting. In this setting, 8 objects/category serve as the few-shot set. We evaluate on the entire test set of PartNetE [19]. For a fair comparison, we remove part labels in the test set that do not exist in the few-shot set. We present our few-shot results in Table 1. Compared to fully-supervised 3D methods, we outperform by \\(1 - 10\\%\\) mIoU on these categories. Additionally, we demonstrate a significant performance boost on the remaining 28 categories (21-41% mIoU). We further outperform PartSLIP and PartSLIP++ on both subsets, achieving \\(\\sim 3\\%\\) mIoU improvements overall." + }, + { + "type": "text", + "bbox": [ + 0.076, + 0.91, + 0.918, + 0.949 + ], + "angle": 0, + "content": "Performance on Real-world Scans. Please note that there is currently no publicly available real-world 3D part segmentation dataset for direct comparison. How-" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.77, + 0.017, + 0.84, + 0.033 + ], + "angle": 0, + "content": "3-By-2" + }, + { + "type": "page_number", + "bbox": [ + 0.894, + 0.017, + 0.917, + 0.031 + ], + "angle": 0, + "content": "11" + }, + { + "type": "table_caption", + "bbox": [ + 0.079, + 0.054, + 0.918, + 0.107 + ], + "angle": 0, + "content": "Table 2: Zero-shot performance on the subset of PartNetE [19] that overlaps with PACO [31]. Our method effectively leverages 2D in-the-wild part segmentation dataset to perform 3D part segmentation." 
+ }, + { + "type": "table", + "bbox": [ + 0.14, + 0.108, + 0.855, + 0.212 + ], + "angle": 0, + "content": "
| Methods | Kettle | Microwave | Scissors | F. Chair | Mouse | Bottle | Laptop | Clock | Remote | Lamp | Avg. (18) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| SAMPro3D [42] + OpenMask3D [35] | 0.026 | 0.001 | 0.118 | 0.437 | 0.019 | 0.103 | 0.017 | 0.007 | 0.084 | 0.074 | 0.146 |
| PartSLIP [19] | 0.208 | 0.166 | 0.218 | 0.917 | 0.270 | 0.763 | 0.270 | 0.267 | 0.115 | 0.371 | 0.341 |
| VLPart [34]-MC | 0.211 | 0.192 | 0.193 | 0.813 | 0.000 | 0.216 | 0.060 | 0.205 | 0.132 | 0.166 | 0.222 |
| 3-By-2 (ours) | 0.765 | 0.348 | 0.594 | 0.712 | 0.307 | 0.807 | 0.394 | 0.253 | 0.239 | 0.500 | 0.430 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.079, + 0.221, + 0.918, + 0.255 + ], + "angle": 0, + "content": "Table 3: Performance on PartNet dataset with \"level-3\" annotations in the few-shot setting. Bold and underline denote best and second best performance respectively." + }, + { + "type": "table", + "bbox": [ + 0.166, + 0.257, + 0.828, + 0.332 + ], + "angle": 0, + "content": "
| Methods | Bottle | Microwave | Display | Dishwasher | Faucet | Knife | Earphone | Clock | Bed | Trashcan | Avg. |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| MvDeCor [32] | 0.421 | 0.377 | 0.600 | 0.327 | 0.212 | 0.187 | 0.205 | 0.143 | 0.099 | 0.199 | 0.277 |
| PartSLIP [19] | 0.344 | 0.143 | 0.386 | 0.228 | 0.009 | 0.023 | 0.064 | 0.017 | 0.003 | 0.031 | 0.125 |
| 3-By-2 (ours) | 0.454 | 0.389 | 0.567 | 0.429 | 0.203 | 0.196 | 0.225 | 0.116 | 0.096 | 0.134 | 0.281 |
" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.353, + 0.918, + 0.411 + ], + "angle": 0, + "content": "ever, we demonstrate the robustness of our method using real-world objects, as shown in Fig. 1. These objects were originally introduced by Liu et al. [19] and captured using an iPhone12 camera." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.415, + 0.918, + 0.649 + ], + "angle": 0, + "content": "Zero-shot Baselines. We compare 3-By-2 with PartSLIP [19], VLPart [34]-MC and SAMPro3D [42] + OpenMask3D [35]. For PartSLIP, we prompt the pre-trained GLIP model with the language inputs without finetuning, following Liu et al. [19]. VLPart [34] is a SOTA 2D part segmentation method that was trained on a combination of various large-scale 2D part datasets. We replace our 2D part segmentation module with a pre-trained VLPart model, retaining the 3D mask-consistency aggregation module as 3-By-2, and term this baseline VLPart-MC. During inference, to guide VLPart effectively, we prompt the model with language inputs as in PartSLIP. SAMPro3D [42] is a SOTA zero-shot instance segmentation method for 3D scenes using SAM at its core. For semantic segmentation evaluation, we integrate SAMPro3D with OpenMask3D [35], an open-vocabulary 3D scene segmentation method." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.653, + 0.918, + 0.788 + ], + "angle": 0, + "content": "Zero-shot Setting. Since we do not have access to any labeled 3D objects in this setting, to effectively transfer part labels, we leverage PACO [31]. This dataset is a fine-grained and richly annotated 2D datasets consisting of objects from COCO-LVIS [9]. We crop and mask each annotated object using the provided object bounding box and segmentation mask to form the database. Further, we filter out small objects or objects with limited visibility, using the area of the object segmentation mask as a criterion." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.793, + 0.918, + 0.947 + ], + "angle": 0, + "content": "In Table 2 we show the performance of all baselines and 3-By-2 on the subset of PartNetE that overlaps with PACO dataset [31]. By leveraging the abundance and fine-grained of 2D in-the-wild part segmentation datasets, we achieve superior performance compared to all baselines (9-29% mIoU). We significantly outperform PartSLIP on challenging categories with small or thin parts (e.g. scissors and lamp by 28% and 13% mIoU respectively). These results highlight the effectiveness of 3-By-2 even when the database includes challenging real-world images with partial occlusion and truncation." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.08, + 0.017, + 0.106, + 0.032 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.16, + 0.016, + 0.295, + 0.032 + ], + "angle": 0, + "content": "A. Thai et al." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.056, + 0.923, + 0.232 + ], + "angle": 0, + "content": "Effectiveness of Our 2D Segmentation Module. We demonstrate the effectiveness of our 2D segmenter, leveraging SAM and DIFT, by showcasing its strong performance against VLPart [34], a SOTA 2D part segmentation method (see Table 2, last 2 rows). Note that VLPart was trained on PACO [31] among other 2D part datasets. Therefore, it is reasonable to anticipate that this method can effectively use knowledge from PACO to accurately segment the 18 overlapping categories between PartNetE and PACO. For both VLPart-MC and 3-By-2, we maintain the same 3D aggregation module. 
Our method significantly outperforms VLPart-MC, demonstrating the advantage of our proposed 2D segmentation module." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.233, + 0.924, + 0.37 + ], + "angle": 0, + "content": "Comparison to SOTA Scene Segmentation Approach. SAMPro3D [42] is a concurrent work with SOTA performance on zero-shot instance segmentation in 3D scene. This is a training-free model that effectively prompts SAM within the 2D domain using 3D point projections. As in Table 2, we outperform this baseline by a significant margin, highlighting the non-trivial nature of adapting scene segmentation methods for 3D part segmentation tasks, particularly those involving post-processing of 2D foundation models." + }, + { + "type": "title", + "bbox": [ + 0.076, + 0.403, + 0.543, + 0.423 + ], + "angle": 0, + "content": "4.2 Performance on Level-3 PartNet" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.441, + 0.923, + 0.596 + ], + "angle": 0, + "content": "In this experiment, we select 10 categories from PartNet [24] that come with fine-grained (\"level-3\") annotations. We randomly select 10 objects per category from the training set (following [32]) to form our few-shot set, and up to 50 objects per category from the test set for evaluation, ensuring overlap with ShapeNetCore.v2 [5]. Given that PartSLIP [19] employs point cloud RGB for superpoint generation, which serves as 3D priors, our decision to choose overlapping objects with ShapeNetCore.v2 is to preserve object texture information. We use the same few-shot and test set for all baselines." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.598, + 0.923, + 0.656 + ], + "angle": 0, + "content": "Data. As inputs to our approach, we render 15 overlapping views for each textured mesh using Blender cycle renderer with realistic lighting from HDRI environment maps." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.657, + 0.924, + 0.793 + ], + "angle": 0, + "content": "Baselines. The baselines are reproduced following the papers' recommended training procedure. Specifically, we pre-train MvDeCor [32] on the entire training set of the selected categories consisting of 86 views per non-textured object, with rendered RGB, depth and normal maps as inputs. We then fine-tune the segmentation heads for each individual object category in the few-shot set with 15 views per object. Note that the input for this stage also includes RGB, depth and normal maps." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.794, + 0.924, + 0.95 + ], + "angle": 0, + "content": "For PartSLIP [19], we derive the language prompt by traversing the part hierarchy and concatenating labels from each level along the path, spanning from root to leaf. For example, the path \"bottle/jug/handle\" is transformed into \"bottle jug handle\". This adaptation is due to the potential for different leaf nodes to share identical labels (e.g., bottle/normal_bottle/handle and bottle/jug/handle), as relying solely on the leaf node label could introduce confusion in predictions. We adopt PartSLIP's point cloud, image rendering and data processing pipeline with default parameters." 
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.77, + 0.017, + 0.84, + 0.033 + ], + "angle": 0, + "content": "3-By-2" + }, + { + "type": "page_number", + "bbox": [ + 0.894, + 0.017, + 0.919, + 0.031 + ], + "angle": 0, + "content": "13" + }, + { + "type": "table_caption", + "bbox": [ + 0.077, + 0.054, + 0.51, + 0.089 + ], + "angle": 0, + "content": "Table 4: Ablation of the non-overlapping mask generation module." + }, + { + "type": "table", + "bbox": [ + 0.085, + 0.092, + 0.499, + 0.142 + ], + "angle": 0, + "content": "
| 2D Mask Proposal | Scissors | Mouse | Suitcase | Bottle | Chair |
| --- | --- | --- | --- | --- | --- |
| SAM | 0.457 | 0.440 | 0.285 | 0.004 | 0.638 |
| Non-overlap | 0.675 | 0.684 | 0.813 | 0.810 | 0.844 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.522, + 0.056, + 0.938, + 0.09 + ], + "angle": 0, + "content": "Table 5: Ablation of our proposed mask-consistency component." + }, + { + "type": "table", + "bbox": [ + 0.53, + 0.092, + 0.93, + 0.141 + ], + "angle": 0, + "content": "
| 3D Label Aggregation | Scissors | Suitcase | Printer | Clock |
| --- | --- | --- | --- | --- |
| Point-Consistency | 0.619 | 0.579 | 0.009 | 0.363 |
| Mask-Consistency | 0.675 | 0.684 | 0.085 | 0.458 |
" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.165, + 0.923, + 0.359 + ], + "angle": 0, + "content": "Evaluation & Metric. We uniformly sample 300K points on the surface of each labeled ground truth mesh and employ nearest neighbor assignment to associate a ground-truth label with each point. This point set is used for evaluating all methods for a fair comparison and eliminating any randomness introduced by the point cloud sampling step. We use part mIoU on the sampled point set as the evaluation metric. We employ the standard mIoU calculation, which considers the performance of all parts in the vocabulary, even in cases where they may not exist in certain objects. Additionally, different from MvDeCor, we do not exclude the \"others\" label during evaluation based on ground-truth labels. For a fair comparison, we applied the same evaluation approach across all methods." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.36, + 0.924, + 0.496 + ], + "angle": 0, + "content": "Results. We show results in Table 3. Compared to PartSLIP [19], we outperformed on all categories by a significant margin (16% mIoU on average), demonstrating the challenges posed by fine-grained settings for GLIP [17]. While our performance is on par with MvDeCor [32], it is important to note that MvDeCor is both pretrained and finetuned on PartNet [24], using ground truth depth and normal maps as additional inputs. In contrast, our method requires no training on the target data distribution." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.526, + 0.334, + 0.545 + ], + "angle": 0, + "content": "4.3 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.56, + 0.923, + 0.695 + ], + "angle": 0, + "content": "Non-overlapping Mask Generation. In Table 4, we illustrate the effectiveness of our proposed non-overlapping mask generation module. The comparison involves evaluating the performance of our method with and without this module. In the case of the model without the non-overlapping mask generation module, we directly utilize the predicted SAM outputs for label transferring. The results indicate that our non-overlapping mask generation module is necessary for achieving an optimal performance." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.697, + 0.923, + 0.754 + ], + "angle": 0, + "content": "Mask-consistency Module. In Table 5, we demonstrate the effectiveness of our proposed mask-consistency component, which improves the final performance especially on objects with small parts." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.755, + 0.922, + 0.832 + ], + "angle": 0, + "content": "Properties of Database. In this section, we investigate two key questions: 1) Can 3-By-2 accurately segment the query object within a database containing multiple object categories? and 2) Is it possible to transfer parts with the same semantic meaning between different object categories?" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.833, + 0.924, + 0.95 + ], + "angle": 0, + "content": "Multi-category database. To address question 1, we perform experiments using databases containing 1, 2, and 8 categories respectively (see Table 6). Specifically, taking the query category as \"Kettle\", for the 2-category setting we construct a database consisting of \"Kettle, Kitchen Pot\". We selected these categories due to their shared semantic parts with \"Kettle\", which could potentially lead to confusion (e.g., kettle lid vs. kitchen pot lid). 
With 8-category setting, we add in" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.08, + 0.017, + 0.106, + 0.032 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.16, + 0.016, + 0.295, + 0.032 + ], + "angle": 0, + "content": "A. Thai et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.076, + 0.054, + 0.543, + 0.143 + ], + "angle": 0, + "content": "Table 6: Multi-category database experiment. Performance of Kettle in various database settings is reported with mIoU. Our method shows robustness in performance even when more categories are added in the database." + }, + { + "type": "table", + "bbox": [ + 0.145, + 0.16, + 0.472, + 0.232 + ], + "angle": 0, + "content": "
| Database | Lid | Handle | Spout | Avg. |
| --- | --- | --- | --- | --- |
| 1-category | 0.759 | 0.904 | 0.783 | 0.815 |
| 2-category | 0.703 | 0.820 | 0.748 | 0.757 |
| 8-category | 0.727 | 0.773 | 0.756 | 0.752 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.557, + 0.054, + 0.951, + 0.176 + ], + "angle": 0, + "content": "Table 7: Cross-category database experiment. We report the performance of 18 tables with wheels in Part-NetE. Results show that our method can transfer wheel annotations from Chair to correct the prediction on Table wheels." + }, + { + "type": "table", + "bbox": [ + 0.589, + 0.178, + 0.917, + 0.225 + ], + "angle": 0, + "content": "
| Database | Leg | Tabletop | Wheel | Avg. |
| --- | --- | --- | --- | --- |
| Table only | 0.586 | 0.647 | 0.000 | 0.411 |
| Chair & Table | 0.641 | 0.633 | 0.600 | 0.625 |
" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.238, + 0.919, + 0.373 + ], + "angle": 0, + "content": "categories that are completely different and do not share any parts with \"Kettle\" (e.g. \"Eyeglasses\"). In general, with more categories in the database, there is a slight decrease in the average performance. Notably, there are marginal differences between 2-category and 8-category (second and third rows), highlighting the ability of 3-By-2 in handling both diverse object taxonomy and part segmentation. This finding is particularly interesting since many prior works [19,32] require finetuning each category separately for few-shot evaluation." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.374, + 0.921, + 0.587 + ], + "angle": 0, + "content": "Cross-category database. Considering question 2, we note that the few-shot set of \"Table\" in PartNetE lacks objects with wheels as a part, whereas such objects are present in the test set. To address this, we incorporate the \"Chair\" category where the wheel part exists in the database. We evaluate on 18 tables in PartNetE test set with the \"wheel\" part annotated (see Table 7). Compared to the table only few-shot set, combining the database with \"Chair\" improves the performance on \"leg\" by \\(\\sim 6\\%\\) mIoU. The improvement in the \"leg\" part can be attributed to the inclusion of \"Chair\" in the database, which reduces the likelihood of the model incorrectly associating \"wheel\" with \"leg\" due to the absence of \"wheel\" in the few-shot set. Interestingly, the performance for \"wheel\" increases significantly, \\(+60\\%\\) mIoU through the label transfer from chair wheels." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.588, + 0.92, + 0.724 + ], + "angle": 0, + "content": "While the concept may seem intuitive, our findings shed new light on object part compositionality. Despite the diversity in appearances and shapes across various object categories, there exists a finite set of object parts that are shared among them. Recognizing the transferability of these parts is important for facilitating rapid learning of novel objects across a range of tasks. Further, our results show the ability to correct wrong predictions of our approach by transferring labels from another category. Please refer to the Sup. for additional studies." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.754, + 0.288, + 0.775 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.795, + 0.92, + 0.912 + ], + "angle": 0, + "content": "In this work, we propose 3-By-2, a novel, training-free method that achieves SOTA performance on benchmarks with diverse levels of part granularity without the need for language inputs, on both zero-shot and few-shot settings. We demonstrate the flexibility of 3-By-2 in transferring part labels between different object categories. We hope the development of 3-By-2 can encourage further exploration of visual similarities for this task." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.769, + 0.016, + 0.842, + 0.032 + ], + "angle": 0, + "content": "3-By-2" + }, + { + "type": "page_number", + "bbox": [ + 0.894, + 0.017, + 0.92, + 0.031 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.078, + 0.054, + 0.341, + 0.077 + ], + "angle": 0, + "content": "Acknowledgement" + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.095, + 0.733, + 0.114 + ], + "angle": 0, + "content": "This work was partly supported by NIH R01HD104624-01A1." 
+ }, + { + "type": "title", + "bbox": [ + 0.078, + 0.144, + 0.238, + 0.166 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.184, + 0.92, + 0.235 + ], + "angle": 0, + "content": "1. Abdelreheem, A., Skorokhodov, I., Ovsjanikov, M., Wonka, P.: Satr: Zero-shot semantic segmentation of 3d shapes. arXiv preprint arXiv:2304.04909 (2023) 2, 3, 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.236, + 0.92, + 0.273 + ], + "angle": 0, + "content": "2. Amir, S., Gandelsman, Y., Bagon, S., Dekel, T.: Deep vit features as dense visual descriptors. arXiv preprint arXiv:2112.05814 2(3), 4 (2021) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.089, + 0.274, + 0.921, + 0.342 + ], + "angle": 0, + "content": "3. Caron, M., Touvron, H., Misra, I., Jégou, H., Mairal, J., Bojanowski, P., Joulin, A.: Emerging properties in self-supervised vision transformers. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 9650-9660 (2021) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.344, + 0.92, + 0.398 + ], + "angle": 0, + "content": "4. Cen, J., Zhou, Z., Fang, J., Shen, W., Xie, L., Jiang, D., Zhang, X., Tian, Q., et al.: Segment anything in 3d with nerfs. Advances in Neural Information Processing Systems 36 (2024) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.398, + 0.918, + 0.451 + ], + "angle": 0, + "content": "5. Chang, A.X., Funkhouser, T., Guibas, L., Hanrahan, P., Huang, Q., Li, Z., Savarese, S., Savva, M., Song, S., Su, H., et al.: Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012 (2015) 4, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.452, + 0.92, + 0.522 + ], + "angle": 0, + "content": "6. Chen, N., Liu, L., Cui, Z., Chen, R., Ceylan, D., Tu, C., Wang, W.: Unsupervised learning of intrinsic structural representation points. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 9121-9130 (2020) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.523, + 0.92, + 0.576 + ], + "angle": 0, + "content": "7. Dai, A., Nießner, M.: 3dmv: Joint 3d-multi-view prediction for 3d semantic scene segmentation. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 452-468 (2018) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.577, + 0.918, + 0.629 + ], + "angle": 0, + "content": "8. Deng, S., Xu, X., Wu, C., Chen, K., Jia, K.: 3d affordancenet: A benchmark for visual object affordance understanding. In: proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 1778-1787 (2021) 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.63, + 0.918, + 0.683 + ], + "angle": 0, + "content": "9. Gupta, A., Dollar, P., Girshick, R.: Lvis: A dataset for large vocabulary instance segmentation. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 5356-5364 (2019) 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.683, + 0.918, + 0.735 + ], + "angle": 0, + "content": "10. He, J., Yang, S., Yang, S., Kortylewski, A., Yuan, X., Chen, J.N., Liu, S., Yang, C., Yu, Q., Yuille, A.: Partimagenet: A large, high-quality dataset of parts. In: European Conference on Computer Vision. pp. 128-145. Springer (2022) 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.736, + 0.918, + 0.789 + ], + "angle": 0, + "content": "11. 
Hedlin, E., Sharma, G., Mahajan, S., Isack, H., Kar, A., Tagliasacchi, A., Yi, K.M.: Unsupervised semantic correspondence using stable diffusion. arXiv preprint arXiv:2305.15581 (2023) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.789, + 0.918, + 0.842 + ], + "angle": 0, + "content": "12. Huang, R., Peng, S., Takmaz, A., Tombari, F., Pollefeys, M., Song, S., Huang, G., Engelmann, F.: Segment3d: Learning fine-grained class-agnostic 3d segmentation without manual labels. arXiv preprint arXiv:2312.17232 (2023) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.843, + 0.918, + 0.911 + ], + "angle": 0, + "content": "13. Jaritz, M., Gu, J., Su, H.: Multi-view pointnet for 3d scene understanding. 2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW) pp. 3995-4003 (2019), https://apisemanticscholar.org/CorpusID:203593088" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.913, + 0.918, + 0.949 + ], + "angle": 0, + "content": "14. Kalogerakis, E., Hertzmann, A., Singh, K.: Learning 3D Mesh Segmentation and Labeling. ACM Transactions on Graphics 29(3) (2010) 1" + }, + { + "type": "list", + "bbox": [ + 0.081, + 0.184, + 0.921, + 0.949 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.08, + 0.017, + 0.106, + 0.032 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.159, + 0.016, + 0.295, + 0.032 + ], + "angle": 0, + "content": "A. Thai et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.057, + 0.918, + 0.091 + ], + "angle": 0, + "content": "15. Kim, H., Sung, M.: Partstad: 2d-to-3d part segmentation task adaptation (2024) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.093, + 0.92, + 0.145 + ], + "angle": 0, + "content": "16. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., et al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023) 4, 5, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.145, + 0.918, + 0.215 + ], + "angle": 0, + "content": "17. Li, L.H., Zhang, P., Zhang, H., Yang, J., Li, C., Zhong, Y., Wang, L., Yuan, L., Zhang, L., Hwang, J.N., et al.: Grounded language-image pre-training. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 10965-10975 (2022) 4, 10, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.215, + 0.918, + 0.267 + ], + "angle": 0, + "content": "18. Li, Y., Upadhyay, U., Habib Slim, A.A., Arpit Prajapati, S.P., Wonka, P., Elhoseiny, M.: 3d compat: Composition of materials on parts of 3d things (eccv 2022). ECCV (2022) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.081, + 0.268, + 0.918, + 0.338 + ], + "angle": 0, + "content": "19. Liu, M., Zhu, Y., Cai, H., Han, S., Ling, Z., Porikli, F., Su, H.: Partslip: Low-shot part segmentation for 3d point clouds via pretrained image-language models. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21736-21746 (2023) 2, 3, 4, 8, 9, 10, 11, 12, 13, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.338, + 0.918, + 0.39 + ], + "angle": 0, + "content": "20. Liu, W., Mao, J., Hsu, J., Hermans, T., Garg, A., Wu, J.: Composable part-based manipulation. In: 7th Annual Conference on Robot Learning (2023), https://openreview.net/forum?id=o-K3HVUeEw1" + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.39, + 0.918, + 0.458 + ], + "angle": 0, + "content": "21. 
Liu, X., Xu, X., Rao, A., Gan, C., Yi, L.: Autogpart: Intermediate supervision search for generalizable 3d part segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11624-11634 (2022) 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.459, + 0.918, + 0.495 + ], + "angle": 0, + "content": "22. Lowe, D.G.: Distinctive image features from scale-invariant keypoints. International journal of computer vision 60, 91-110 (2004) 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.495, + 0.918, + 0.53 + ], + "angle": 0, + "content": "23. Min, J., Lee, J., Ponce, J., Cho, M.: Spair-71k: A large-scale benchmark for semantic correspondence. arXiv preprint arXiv:1908.10543 (2019) 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.529, + 0.918, + 0.599 + ], + "angle": 0, + "content": "24. Mo, K., Zhu, S., Chang, A.X., Yi, L., Tripathi, S., Guibas, L.J., Su, H.: Partnet: A large-scale benchmark for fine-grained and hierarchical part-level 3d object understanding. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 909-918 (2019) 3, 4, 9, 12, 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.599, + 0.918, + 0.651 + ], + "angle": 0, + "content": "25. Nadeau, P., Giamou, M., Kelly, J.: The sum of its parts: Visual part segmentation for inertial parameter identification of manipulated objects. arXiv preprint arXiv:2302.06685 (2023) 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.651, + 0.918, + 0.702 + ], + "angle": 0, + "content": "26. Nguyen, P.D.A., Ngo, T.D., Gan, C., Kalogerakis, E., Tran, A., Pham, C., Nguyen, K.: Open3dis: Open-vocabulary 3d instance segmentation with 2d mask guidance (2023) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.703, + 0.918, + 0.773 + ], + "angle": 0, + "content": "27. Peng, S., Genova, K., Jiang, C., Tagliasacchi, A., Pollefeys, M., Funkhouser, T., et al.: Openscene: 3d scene understanding with open vocabularies. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 815-824 (2023) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.773, + 0.918, + 0.825 + ], + "angle": 0, + "content": "28. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems 30 (2017) 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.825, + 0.918, + 0.878 + ], + "angle": 0, + "content": "29. Qian, G., Li, Y., Peng, H., Mai, J., Hammoud, H., Elhoseiny, M., Ghanem, B.: Pointnext: Revisiting pointnet++ with improved training and scaling strategies. Advances in Neural Information Processing Systems 35, 23192-23204 (2022) 4, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.08, + 0.878, + 0.918, + 0.949 + ], + "angle": 0, + "content": "30. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International conference on machine learning. pp. 8748-8763. 
PMLR (2021) 4" + }, + { + "type": "list", + "bbox": [ + 0.079, + 0.057, + 0.92, + 0.949 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.769, + 0.017, + 0.842, + 0.033 + ], + "angle": 0, + "content": "3-By-2" + }, + { + "type": "page_number", + "bbox": [ + 0.894, + 0.017, + 0.92, + 0.031 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.057, + 0.92, + 0.129 + ], + "angle": 0, + "content": "31. Ramanathan, V., Kalia, A., Petrovic, V., Wen, Y., Zheng, B., Guo, B., Wang, R., Marquez, A., Kovvuri, R., Kadian, A., et al.: Paco: Parts and attributes of common objects. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7141-7151 (2023) 2, 11, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.13, + 0.918, + 0.2 + ], + "angle": 0, + "content": "32. Sharma, G., Yin, K., Maji, S., Kalogerakis, E., Litany, O., Fidler, S.: Mvdecor: Multi-view dense correspondence learning for fine-grained 3d segmentation. In: European Conference on Computer Vision. pp. 550-567. Springer (2022) 3, 4, 8, 10, 11, 12, 13, 14" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.201, + 0.918, + 0.236 + ], + "angle": 0, + "content": "33. Singh, C., Murdoch, W.J., Yu, B.: Hierarchical interpretations for neural network predictions. arXiv preprint arXiv:1806.05337 (2018) 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.237, + 0.918, + 0.288 + ], + "angle": 0, + "content": "34. Sun, P., Chen, S., Zhu, C., Xiao, F., Luo, P., Xie, S., Yan, Z.: Going denser with open-vocabulary part segmentation. arXiv preprint arXiv:2305.11173 (2023) 5, 11, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.289, + 0.918, + 0.342 + ], + "angle": 0, + "content": "35. Takmaz, A., Fedele, E., Sumner, R.W., Pollefeys, M., Tombari, F., Engelmann, F.: Openmask3d: Open-vocabulary 3d instance segmentation. arXiv preprint arXiv:2306.13631 (2023) 4, 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.343, + 0.918, + 0.379 + ], + "angle": 0, + "content": "36. Tang, L., Jia, M., Wang, Q., Phoo, C.P., Hariharan, B.: Emergent correspondence from image diffusion. arXiv preprint arXiv:2306.03881 (2023) 2, 3, 5, 6, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.379, + 0.918, + 0.432 + ], + "angle": 0, + "content": "37. Varadarajan, K.M., Vincze, M.: Object part segmentation and classification in range images for grasping. In: 2011 15th International Conference on Advanced Robotics (ICAR). pp. 21-27. IEEE (2011) 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.433, + 0.918, + 0.486 + ], + "angle": 0, + "content": "38. Vu, T., Kim, K., Luu, T.M., Nguyen, T., Yoo, C.D.: Softgroup for 3d instance segmentation on point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2708-2717 (2022) 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.486, + 0.918, + 0.539 + ], + "angle": 0, + "content": "39. Wang, L., Li, X., Fang, Y.: Few-shot learning of part-specific probability space for 3d shape segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (June 2020) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.539, + 0.918, + 0.592 + ], + "angle": 0, + "content": "40. Wang, R., Zhang, Y., Mao, J., Zhang, R., Cheng, C.Y., Wu, J.: Ikea-manual: Seeing shape assembly step by step. 
Advances in Neural Information Processing Systems 35, 28428-28440 (2022) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.592, + 0.918, + 0.664 + ], + "angle": 0, + "content": "41. Xiang, F., Qin, Y., Mo, K., Xia, Y., Zhu, H., Liu, F., Liu, M., Jiang, H., Yuan, Y., Wang, H., et al.: Sapien: A simulated part-based interactive environment. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11097-11107 (2020) 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.664, + 0.918, + 0.717 + ], + "angle": 0, + "content": "42. Xu, M., Yin, X., Qiu, L., Liu, Y., Tong, X., Han, X.: Sampro3d: Locating sam prompts in 3d for zero-shot scene segmentation. arXiv preprint arXiv:2311.17707 (2023) 4, 11, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.717, + 0.918, + 0.753 + ], + "angle": 0, + "content": "43. Xue, Y., Chen, N., Liu, J., Sun, W.: Zerops: High-quality cross-modal knowledge transfer for zero-shot 3d part segmentation (2023) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.753, + 0.918, + 0.788 + ], + "angle": 0, + "content": "44. Yang, Y., Wu, X., He, T., Zhao, H., Liu, X.: Sam3d: Segment anything in 3d scenes. arXiv preprint arXiv:2306.03908 (2023) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.788, + 0.918, + 0.842 + ], + "angle": 0, + "content": "45. Yi, L., Kim, V.G., Ceylan, D., Shen, I.C., Yan, M., Su, H., Lu, C., Huang, Q., Sheffer, A., Guibas, L.: A scalable active framework for region annotation in 3d shape collections. SIGGRAPH Asia (2016) 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.842, + 0.918, + 0.895 + ], + "angle": 0, + "content": "46. Yu, Q., Du, H., Liu, C., Yu, X.: When 3d bounding-box meets sam: Point cloud instance segmentation with weak-and-noisy supervision. ArXiv abs/2309.00828 (2023), https://api-semanticscholar.org/CorpusID:261530997 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.895, + 0.918, + 0.949 + ], + "angle": 0, + "content": "47. Zhang, J., Herrmann, C., Hur, J., Cabrera, L.P., Jampani, V., Sun, D., Yang, M.H.: A tale of two features: Stable diffusion complements dino for zero-shot semantic correspondence. arXiv preprint arXiv:2305.15347 (2023) 2, 5" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.057, + 0.92, + 0.949 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.08, + 0.017, + 0.105, + 0.031 + ], + "angle": 0, + "content": "18" + }, + { + "type": "header", + "bbox": [ + 0.16, + 0.016, + 0.294, + 0.032 + ], + "angle": 0, + "content": "A. Thai et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.079, + 0.057, + 0.92, + 0.129 + ], + "angle": 0, + "content": "48. Zhao, L., Lu, J., Zhou, J.: Similarity-aware fusion network for 3d semantic segmentation. 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) pp. 1585-1592 (2021), https://apisemantic scholar.org/CorpusID:235732071 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.13, + 0.918, + 0.183 + ], + "angle": 0, + "content": "49. Zhao, N., Chua, T.S., Lee, G.H.: Few-shot 3d point cloud semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8873-8882 (2021) 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.183, + 0.918, + 0.235 + ], + "angle": 0, + "content": "50. Zhou, Y., Gu, J., Li, X., Liu, M., Fang, Y., Su, H.: Partslip++: Enhancing low-shot 3d part segmentation via multi-view instance segmentation and maximum likelihood estimation. 
arXiv preprint arXiv:2312.03015 (2023) 4, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.236, + 0.919, + 0.324 + ], + "angle": 0, + "content": "51. Zhu, J., Zhang, Y., Guo, J., Liu, H., Liu, M., Liu, Y., Guo, Y.: Label transfer between images and 3d shapes via local correspondence encoding. Comput. Aided Geom. Des. 71(C), 255-266 (may 2019). https://doi.org/10.1016/j.cagd.2019.04.009, https://doi.org/10.1016/j.cagd.2019.04.009 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.325, + 0.92, + 0.396 + ], + "angle": 0, + "content": "52. Zhu, X., Zhang, R., He, B., Guo, Z., Zeng, Z., Qin, Z., Zhang, S., Gao, P.: Pointclip v2: Prompting clip and gpt for powerful 3d open-world learning. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 2639-2650 (2023) 2" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.057, + 0.92, + 0.396 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/4a9028d8-b05c-4422-ac23-0a7be9202087_origin.pdf b/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/4a9028d8-b05c-4422-ac23-0a7be9202087_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..88e5e45c125e3823770d1be19b19a855672b3001 --- /dev/null +++ b/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/4a9028d8-b05c-4422-ac23-0a7be9202087_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:005f78979494ac2523e3649bc89ad767fda737ccaff4c3abeabe0c358f64ff96 +size 3216695 diff --git a/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/full.md b/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/full.md new file mode 100644 index 0000000000000000000000000000000000000000..f19d46a01bf1090b98ab1b8ab19c255e21d0efa6 --- /dev/null +++ b/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/full.md @@ -0,0 +1,290 @@ +# $3 \times 2$ : 3D Object Part Segmentation by 2D Semantic Correspondences + +Anh Thai $^{1,2}$ , Weiyao Wang $^{2}$ , Hao Tang $^{2}$ , Stefan Stojanov $^{1}$ , James M. Rehg $^{3}$ , and Matt Feiszli $^{2}$ + +1 Georgia Institute of Technology + +2 Meta AI, FAIR + +3 University of Illinois Urbana-Champaign + +![](images/cfcd26146e1a2e80dc84caa2831a7bb7f368e50237efbc29024997c17fd81f1b.jpg) +Fig. 1: We propose 3-By-2, a novel training-free method for low-shot 3D object part segmentation that achieves SOTA performance on both zero-shot and few-shot settings. + +Abstract. 3D object part segmentation is essential in computer vision applications. While substantial progress has been made in 2D object part segmentation, the 3D counterpart has received less attention, in part due to the scarcity of annotated 3D datasets, which are expensive to collect. In this work, we propose to leverage a few annotated 3D shapes or richly annotated 2D datasets to perform 3D object part segmentation. We present our novel approach, termed 3-By-2 that achieves SOTA performance on different benchmarks with various granularity levels. By using features from pretrained foundation models and exploiting semantic and geometric correspondences, we are able to overcome the challenges of limited 3D annotations. Our approach leverages available 2D labels, enabling effective 3D object part segmentation. Our method 3-By-2 can accommodate various part taxonomies and granularities, demonstrating part label transfer ability across different object categories. 
Project website: https://ngailapdi.github.io/projects/3by2/. + +# 1 Introduction + +3D object part understanding is essential in various research fields and applications, such as robotics [20, 25, 37] and graphics [14]. Through our understanding + +of the world, objects can be decomposed into parts based on diverse properties (e.g., geometry or affordance [8, 21]). However, these different decompositions do not always align with one another—the same object can be segmented into parts differently depending on the specific use case. For instance, a driver might perceive a car in terms of its functional components like the steering wheel, accelerator pedal, and brake pedal. Conversely, a manufacturing worker may view the car as an assembly of structural parts, such as the frame, bumper, and windshield. Further, various parts with similar functionalities or structures can be shared among different object classes (e.g., the term "leg" can apply to multiple furniture items). How can we design a 3D part segmentation system that has high performance across such different requirements and scenarios? + +Recent works in 3D part segmentation have integrated language as an additional input [1, 19, 52] by leveraging vision-language models to prompt the segmentation. However, grounding visual parts using language is inherently ambiguous. This is because parts can be described using diverse phrases that may include synonyms, various levels of detail, and differences in terminology (structural vs functional), which presents challenges for these models [19]. In contrast, images capture rich information about object shapes, textures and spatial part relationships. These properties can directly be parsed and compared using visual similarities between objects despite differences in linguistic expression. Therefore, it is important to study the limits and potentials of reasoning about visual similarity for generalization across different objects and categories. + +In this work, we investigate the 3D part segmentation task from this different perspective and propose a novel method called 3-By-2. Since labeling 3D data is expensive, we design 3-by-2 to leverage existing extensively annotated 2D part segmentation datasets [10,31] or a few-labeled 3D shapes to perform object part segmentation without additional training or finetuning. Our method does not need any language input and can flexibly handle segmentation tasks at various levels of granularity. + +We build our method based on the observation that because objects are constructed from parts, and because various objects often share a common set of parts with similar visual structures, this should allow part label transfer from one object to another without any language description. Recent studies [36, 47] have demonstrated the strong 2D semantic correspondences encoded by features of image diffusion models that generalize across different domains (e.g. sketch vs real images). To label a query 3D object point cloud, we leverage these strong representations to perform 2D pixel correspondence-based label transfer from in-the-wild 2D datasets or 2D renders of a few labeled 3D objects. To the best of our knowledge, we are the first to use diffusion model features for semantic label transfer in the context of 3D part segmentation. 
+ +While it might seem that obtaining 2D part labels for multi-view renders of an object through label transfer and back-projection into 3D is intuitively straightforward, a high performance and efficient implementation requires careful consideration of the challenges of 3D part segmentation: 1) Precise determination of 3D object part boundaries, which is particularly challenging for unstructured + +![](images/206dbe6a15b8c3e45c52762fb89e8f2882ccc9f4b2f5c9e661a279ea8635f83d.jpg) +Fig. 2: Overview of our proposed method 3-By-2. (1) Render the input object in multiple camera viewpoints, (2) Perform 2D part segmentation on each view individually by leveraging 2D semantic correspondences and 2D class-agnostic segmentation model, (3) Aggregate the 2D predictions from multiple views using our proposed mask-consistency module, (4) Back-project the predictions to 3D using depth information. + +data like point clouds, and 2) Flexible adaptation to different levels of part granularity. To this end, we introduce three novel elements of our method: non-overlapping generation, mask-level label transfer and mask-consistency modules (see Fig. 2). These components work efficiently together to ensure precise 3D part segmentation masks and boundaries across a range of object categories and part levels (Fig. 1 and Tables 1, 2, 3). + +Overall, 3-By-2 is a training-free method independent of language inputs, instead relying solely on the 2D labels provided by a 2D database. Unlike previous methods that require 3D segmentation priors like point-cloud clusters [19] or mesh surface information [1,32], our approach has only a single requirement: calibrated cameras for back-projection. This can be known during the rendering process or predicted using SfM approaches. + +We validate the performance of our approach with PartNet-Ensembled [19], a dataset tailored for language-input models, and PartNet [24], which is not tailored for language. These datasets exhibit multiple levels of granularity. Notably, unlike previous approaches that require category-specific fine-tuning for few-shot scenarios [19, 32], 3-By-2 achieves SOTA performance without any training or fine-tuning requirements in either a zero-shot or few-shot setting. Additionally, we identify that models with language inputs exhibit suboptimal performance with highly fine-grained part terminologies. This highlights the advantages of our approach, which effectively handles these fine-grained object parts. Furthermore, we conduct comprehensive ablation studies and demonstrate the transferability of parts across different object categories, which benefits the understanding of object part compositionality. + +In summary, our contributions are 4-fold: + +- A novel, training-free method, 3-By-2, that achieves SOTA performance on benchmarks with different levels of granularity for zero-shot and few-shot 3D object part segmentation. +- The first to provide an effective approach for leveraging image diffusion model's features [36] to establish 2D semantic correspondences in the context of 3D part segmentation. +- Novel non-overlapping mask generation, mask-level label transfer, and mask-consistency modules that effectively transfer part labels from 2D database and extrapolate them to 3D. + +- Demonstrating the flexibility of 3-By-2 in accommodating various database settings and in generalizing between different object categories. 
+ +# 2 Related Work + +# 2.1 3D Part Segmentation + +In contrast to its 2D counterpart, the progress in this field has been relatively limited, primarily due to the high cost associated with collecting and annotating 3D datasets. Currently, all of the available large-scale annotated 3D object part datasets are synthetic [18,24,40,45]. The most widely used benchmarks [24,45] are predominantly derived from objects within the ShapeNetCore [5] dataset. This problem has been tackled using architectures that take 3D representations [24,29] as inputs. These methods were trained in a supervised manner, requiring large-scale annotated data. More recent approaches have attempted to investigate data-efficient training scenarios where only a few 3D shapes are annotated [19,32,39,50]. + +# 2.2 Multi-view 2D-3D Segmentation Using Foundation Models + +Although multi-view approaches have been widely utilized in the past for 3D segmentation [7,13,48], the rapid advancement of 2D foundation models [16,17] has encouraged more SOTA research aimed at leveraging these models to perform 3D segmentation in a multi-view fashion. CLIP [30] and GLIP [17] have been employed to integrate language information from multiple 2D views into 3D for open-vocabulary segmentation [1,19,27,35,50]. SAM [16], due to its ability to output per-pixel masks, has been used as an effective tool for multi-view 2D-3D segmentation, both on 3D structures like point clouds [35,42,44,46,50] or in NeRF-style [4]. + +Scene Segmentation. Various combinations of foundation models have been explored for this task. While [35] leverages CLIP and SAM to support open-vocabulary 3D part segmentation, others use SAM with carefully designed prompts [4] or post-processing techniques [44]. Building upon these successes, concurrent works [12,26,42] seek to improve SAM utilization strategies. Our work differs by focusing on part segmentation, which requires finer granularity. This distinction in objectives directly influences the processing of SAM predictions, tailored to suit their specific characteristics. For example, while scene segmentation methods may disregard or merge masks covering parts of objects, part segmentation approaches might encourage splitting, depending on the desired level of detail. + +Part Segmentation. PartSLIP [19] and SATR [1] were among the first to employ foundation models for this task, pioneering the use of GLIP for open-vocabulary segmentation. Concurrent works have seen the integration of SAM into their pipelines [15,43,50]. Zhou et al. [50] and Kim et al. [15] use SAM with GLIP-predicted bounding boxes, while Xue et al. [43] employ SAM with furthest point sampling for each view, extending predictions to 3D with GLIP labels. Our + +approach shares with these works the use of SAM for 2D segmentation before 3D aggregation. In contrast, our method focuses solely on visual cues without language inputs, employing image diffusion model's features [36]. To improve SAM's accuracy, we introduce a novel non-overlapping mask generation module, eliminating the need for GLIP-generated bounding boxes. + +# 2.3 Part Label Transfer using Correspondences + +Transferring labels from annotated datasets to non-annotated datasets has been considered recently in [34] for open-vocabulary 2D part segmentation and previously in [6, 51] for 3D part segmentation. While [34] used DINOv1 [3] feature representations for dense label transfer between related objects in the base classes and novel object classes, Zhu et al. 
[51] relied on classical SIFT [22] features for establishing correspondences in 2D images. Chen et al. [6], in contrast, train a network to regress the correspondences directly on the input point cloud. + +We share with these approaches the use of semantic correspondences to identify optimal candidates for label transfer. However, our primary objective sets us apart significantly from [34], as we focus on segmenting 3D objects. Compared to [51], we leverage class-agnostic segmentation models to avoid dense pixel/patch sampling. Furthermore, unlike [6], we do not require direct operations on 3D point clouds or any specific 3D representations. Additionally, we introduce a mask-consistency module for per mask label voting, rather than relying solely on small local patches. + +Semantic Correspondences from Foundation Models. Many vision foundation models have demonstrated an inherent capability to implicitly capture semantic correspondences across different instances within the same category (e.g., matching chair backs) and across diverse categories (e.g., aligning dog's legs with cat's legs) [2,11,36,47]. In this work, we leverage semantic correspondences established by [36] to transfer part labels from annotated 2D datasets to query 3D objects. + +# 3 Method + +Given a database $\mathcal{D}$ consisting of 2D part annotations, our goal is to segment each query object $q$ into parts using the visual part vocabulary provided by $\mathcal{D}$ . Note that $\mathcal{D}$ can either be gathered from 2D (image) part datasets or from renders of a few 3D objects captured at different view-points. Our method consists of three main steps (Fig. 2): (1) render a set of 2D RGB images $\mathcal{I}q$ of 3D object $q$ from $K$ distinct camera viewpoints; (2) perform 2D part segmentation on the rendered images; (3) aggregate image-level predictions through a mask-consistency aggregation module to obtain 3D predictions. + +# 3.1 2D Part Segmentation + +There are two primary approaches to tackle this task: (1) Top-down, using segmentation mechanisms such as SAM [16], or (2) Bottom-up, which involves la + +![](images/e97d61a381c5ee17448db1eb3f59252539cc7b90918569145f2c2bc1daf3098b.jpg) +Fig. 3: The process of pixel-level part label transferring. For each pixel $p$ in the query image $I_{k}$ , we perform the following: (1) Extract the feature $f(p)$ , along with the feature grid for each image $I_{\mathcal{D}}$ in the database $\mathcal{D}$ ; (2) Measure cosine similarity between $f(p)$ and the feature of each pixel within each feature grid, (3) Obtain the best match of $p$ over $\mathcal{D}$ by determining the most similar pixel $p_{\mathcal{D}}$ over all images $I_{\mathcal{D}}$ ; (4) Assign the label of $p$ is to be the label of $p_{\mathcal{D}}$ . + +![](images/8a5d33ac48e0f1040cfad7f99a5f9d27919b1e194577f94e0c4c190dc4c0840a.jpg) +(a) + +![](images/3f62c6385a69e3e5d61fd0b77403fb45581133ec2efe76e9c4111693130faab8.jpg) +(b) +Fig. 4: (a) Non-overlapping 2D Mask Proposal. We address the issue of overlapping masks produced by SAM. The masks are first sorted by their areas. Subsequently, the smaller masks are stacked on top of the larger ones. Non-overlapping masks are obtained by taking the visible segment of each mask. (b) Different mask sampling strategies for label transfer. Our strategy provides accurate, dense prediction with clear part boundaries. + +beling each pixel individually. 
While SAM produces high-quality 2D masks with sharp boundaries, it operates in a class-agnostic manner, often leading to high overlap between sub-parts, parts, and instances. Simply selecting the mask with the highest score may result in incorrect granularity and lacks the flexibility required for part segmentation. + +Conversely, doing label transfer for each pixel individually in the image is computationally impractical, particularly for part segmentation tasks where high resolution is preferred. Sparsely sampling and labeling pixels can result in under-segmented masks, particularly for smaller parts that are less likely to be sampled compared to larger parts (see Fig. 4b). Moreover, accurately determining part boundaries for individual pixels can be challenging, which may result in increased errors when extrapolating to 3D, particularly with unstructured 3D representations like point clouds. These issues raise the important question: how do we transfer part labels and preserve part boundaries without sacrificing computational resources? + +To address this question, we propose a 2D segmentation method that combines the strengths of both approaches which consists of 3 novel components: (1) Single-pixel 2D label transfer using semantic correspondences derived from DIFT [36], (2) Non-overlapping 2D mask proposal module, which refines SAM's multi-granularity predicted masks into non-overlapping part masks, and (3) Mask-level label transfer by integrating (1) and (2). + +![](images/4e6332bb0c5ce2a514edb1b89e96aaafb0f172c60357b240c7623744e91f2126.jpg) +Fig.5: Two approaches to aggregate 3D part labels from multiple 2D views. Aggregating 3D part labels from multiple 2D views through geometric correspondence can be achieved by either point or mask label consistency. + +![](images/dea239a485f5790362d5a94a9cb7b3df903b1fb4e4ce9b1a9495e58d7d10c807.jpg) + +![](images/b65086ae59d580d41b24b0db1194cc8a82383dcc510ce4ca72fde3180a51c29e.jpg) +Mask label consistency between multiple views + +![](images/8b9f13c2b24dc9d116ff4abd011e4138bf2f10a61eac193fd4a50dcb2196ad78.jpg) +Fig.6: Effectiveness of mask label consistency. Enforcing consistency at the mask level can mitigate discrepancies at each individual point and contributes to smoother segmentation. + +Single-pixel 2D Label Transfer. At the core of our method is the 2D label transfer process. The goal is to transfer pixel labels from the annotated 2D database $\mathcal{D}$ to the query RGB image $I_{k} \in \mathcal{I}q$ : for a pixel $p$ in the foreground object in $I_{k}$ , we aim to identify the best-matched pixel $p'$ in each image $I_{\mathcal{D}}$ in the database $\mathcal{D}$ and assign initial label to $p$ by $p'$ . To this end, we leverage the established semantic correspondence of DIFT [36]. While recent works have demonstrated the effectiveness of image diffusion models in extracting semantic correspondences, as evidenced by evaluations on datasets like SPair-71K [23], we are the first to leverage these features for transferring semantic labels in the context of 3D part segmentation. Specifically, $p' = \arg \max_{p' \in I_{\mathcal{D}}} \cos(f(p), f(p'))$ where $\cos$ and $f(x)$ denotes the cosine similarity score and the feature representing pixel $x$ . The best pixel correspondence $p_{\mathcal{D}}$ of $p$ over the entire database is obtained by taking the most similar match within all the images in the database. Formally, $p_{\mathcal{D}} = \arg \max_{\mathcal{D}} p'$ . 
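This nearest-neighbour transfer can be sketched in a few lines of NumPy. The sketch below assumes that per-pixel feature grids (e.g., DIFT features) and the 2D part-label maps of the database images have already been computed; the function and variable names are illustrative and are not part of any released implementation.

```python
import numpy as np

def transfer_pixel_label(query_feat, db_feats, db_labels):
    """Return the part label of the database pixel most similar to one query pixel.

    query_feat: (C,) feature of the query pixel p.
    db_feats:   list of (H, W, C) per-pixel feature grids, one per database image.
    db_labels:  list of (H, W) integer part-label maps aligned with db_feats.
    """
    best_score, best_label = -np.inf, -1
    q = query_feat / (np.linalg.norm(query_feat) + 1e-8)
    for feats, labels in zip(db_feats, db_labels):
        flat = feats.reshape(-1, feats.shape[-1])
        flat = flat / (np.linalg.norm(flat, axis=1, keepdims=True) + 1e-8)
        sims = flat @ q                      # cosine similarity to every pixel p'
        idx = int(sims.argmax())             # best match within this database image
        if sims[idx] > best_score:           # keep the best match over the whole database
            best_score = float(sims[idx])
            best_label = int(labels.reshape(-1)[idx])
    return best_label, best_score
```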
The label of $p$ is then assigned to be the label of $p_{\mathcal{D}}$ (see Fig. 3). + +Coarse-to-fine correspondence search. Finding the nearest neighbor for a query pixel across the entire database can be prohibitively costly, especially for part segmentation which operates in high resolutions. We propose a coarse-to-fine strategy: using the coarse feature maps generated by DIFT [36], we first conduct the search at the coarse level to localize the region of the best match. We then extract the $3 \times 3$ window centered at this region (in feature space) for a fine search (see Fig. 7). This approach ensures that we compute per-pixel similarity scores only within the region of interest, rather than across the entire image, improving computational efficiency. For instance, when processing a pair of images with a resolution of $800 \times 800$ , coarse-to-fine correspondence search achieves a speed improvement of approximately 2000 times in terms of wall clock time. + +Non-overlapping 2D Mask Proposal. We propose the use of class-agnostic 2D part mask proposal, specifically from SAM [16]. By assuming that each mask proposal corresponds to a subset of a part, we can then selectively sample pixels within each mask proposal for label transferring. The labels are subsequently propagated to each pixel of the 2D masks through a majority voting process based on the sampled pixels within the mask. To address the issue posed by the highly overlapping predictions from SAM's multi-granularity model, we intro + +![](images/cab047ae98ccbf6f9876a4d6488f212bdc2cb48e5e983dabf86d9cc85aa54e7d.jpg) +Fig. 7: Coase-to-fine correspondence search. We first conduct searching on a coarse level to identify the region of best match. We then extract the $3 \times 3$ window centered at this region in feature space for a fine search. This approach is approximately 2000 times faster in terms of wall time for large $N$ ( $800 \times 800$ ). + +duce a non-overlapping 2D mask generation module. This module takes SAM masks as inputs and outputs a set of mutually exclusive 2D masks. + +We arrange the SAM output masks in descending order of mask area and stack smaller masks on top of larger ones. This ensures that if mask $A$ is a subset of mask $B$ , stacking $A$ on top of $B$ results in non-overlapping masks, namely $A$ and $B \setminus A$ . Non-overlapping masks are finally obtained by taking the visible segments of each mask (see Fig. 4a). + +2D Mask Label Assignment. After obtaining the non-overlapping masks, we sparsely sample pixels in each mask to transfer label. We then perform majority voting to assign the dominant label for each 2D mask, weighted by the confidence score (cosine similarity) of the best pixel correspondence matches. + +# 3.2 Mask-consistency Aggregation Module + +Given a set of 2D RGB images with part segmentation predictions, we aim to extrapolate these segmentation labels to 3D using geometric correspondences. Prior works [19,32] aggregate multi-view information for each 3D point or mesh triangle face through a weighted sum of multi-view 2D predictions. To fully maintain the high-quality part boundaries predicted by SAM in 2D, we choose to aggregate multi-view predictions for each 2D mask instead. This observation is based on the fact that part identities remain constant across multiple views (e.g., the seat in view 1 should be segmented as the seat in view 2, see Fig. 5). 
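These per-view predictions are produced by the Sec. 3.1 components described above. For concreteness, the non-overlapping mask proposal and the confidence-weighted mask label vote can be summarized in the following minimal sketch; input masks are assumed to be boolean arrays such as those produced by SAM, the sparse pixel sampling inside each mask is omitted, and all names are illustrative.

```python
import numpy as np

def non_overlapping_masks(masks):
    """Sort masks by area (largest first), stack smaller masks on top, and keep
    only the visible segment of each mask (Fig. 4a)."""
    order = sorted(range(len(masks)), key=lambda i: masks[i].sum(), reverse=True)
    canvas = np.full(masks[0].shape, -1, dtype=np.int64)  # index of the top-most mask per pixel
    for i in order:                                        # smaller masks overwrite larger ones
        canvas[masks[i]] = i
    return {i: canvas == i for i in order if (canvas == i).any()}

def assign_mask_label(sampled_labels, sampled_scores):
    """Confidence-weighted majority vote over pixels sampled inside one mask."""
    votes = {}
    for label, score in zip(sampled_labels, sampled_scores):
        votes[int(label)] = votes.get(int(label), 0.0) + float(score)
    return max(votes, key=votes.get)
```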
Intuitively, mask consistency can be seen as an additional constraint on point consistency, encouraging points within the same 2D mask to remain associated with the same masks in 3D space.

We present a novel mask-consistency aggregation module that takes a set of 2D part segmentation predictions for multiple views as input. Our approach involves constructing an undirected, unweighted graph $G = (V, E)$, where each vertex corresponds to a 2D mask in a given view. The edges of the graph connect masks from different views that capture the projection of the same 3D points. We construct a set of mask correspondences for each vertex $v\in V$, $\mathcal{M}_v = \{v,u_1,u_2,\dots,u_N\}$, where an edge $e_i$ connects $v$ and $u_{i}$. A mask $v$ is defined as undersegmented when there exist at least two masks in $\mathcal{M}_v$ that

![](images/1c9607d3b815359e95575fa7a7897f879f58857de511a2a43f1191a0afe5f972.jpg)
Fig. 8: Mask-consistency process. (1) Each vertex of $G$ corresponds to a mask in a given image. The edge connecting each pair of vertices denotes that the pair contains the projection of the same 3D points. The mask consistency set $\mathcal{M}_v$ for each $v$ is obtained via the first-order neighborhood of $v$. (2) $v_1$ is detected as under-segmented since $\mathcal{M}_{v_1}$ consists of masks from the same view with different labels $(v_2, v_3)$ and is hence discarded. (3) Traverse $\mathcal{M}_{v_i}$ to obtain labels for $\mathcal{M}_{v_i}$. (4) Obtain the label for each mask by majority voting. Here we show a simple example for visualization purposes.

belong to the same image but are assigned different labels. For instance, in Fig. 8, vertex $v_{1}$ corresponds to an undersegmented mask. Formally,

$$
\mathcal{S}_{v} = \left\{ (u_{i}, u_{j}) \mid u_{i}, u_{j} \in \mathcal{M}_{v},\ u_{i}, u_{j} \in I_{k} \text{ and } l(u_{i}) \neq l(u_{j}) \right\} \tag{1}
$$

where $l(x)$ denotes the label of $x$. We discard $v$ if $|\mathcal{S}_v| > \epsilon$. That is, if $v$ is consistently determined to be undersegmented across multiple views, we discard the contribution of $v$ in the final label assignment. We then traverse the graph simultaneously from each vertex using breadth-first search to accumulate the labels for each $\mathcal{M}_v$. Subsequently, we perform majority voting to assign labels to each $\mathcal{M}_v$. Finally, for each mask, we identify the most frequently assigned label as the final label.

The simple intuition behind this approach is: if a part occasionally receives incorrect labels in some challenging views, employing majority voting within the mask correspondence set can calibrate these errors. Further, performing this at the mask level ensures that if two 2D points share the same mask label in the majority of the views, they will ultimately be assigned the same final label. This approach calibrates potential discrepancies in individual point-wise aggregations (see Fig. 6).

# 4 Experiments

In this section, we first report the performance of 3-By-2 against baselines on PartNet-Ensembled (PartNetE) [19] in Sec. 4.1 and on PartNet [24] with "level-3" annotations in Sec. 4.2. Note the distinction between these datasets, since PartNetE consists of a distinct set of articulated objects from [41]. These datasets also exhibit different granularity of part annotations.
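Before presenting the results, a simplified sketch of the mask-consistency vote from Sec. 3.2 is given below. It folds the breadth-first accumulation into a direct vote over each correspondence set, omits how mask correspondences are built from depth back-projection, and uses illustrative names and an illustrative threshold.

```python
from collections import Counter

def mask_consistency_vote(nodes, neighbors, view_of, label, eps=1):
    """nodes: mask ids; neighbors[v]: corresponding masks in other views;
    view_of[v]: source view id; label[v]: initial 2D label from Sec. 3.1."""

    def undersegmented(v):
        # Pairs of corresponding masks that come from one and the same view
        # but carry different labels (cf. Eq. 1).
        nbrs = list(neighbors[v])
        pairs = sum(
            1
            for a in range(len(nbrs))
            for b in range(a + 1, len(nbrs))
            if view_of[nbrs[a]] == view_of[nbrs[b]] and label[nbrs[a]] != label[nbrs[b]]
        )
        return pairs > eps

    kept = {v for v in nodes if not undersegmented(v)}
    final = {}
    for v in kept:
        # Majority vote over the mask itself and its kept correspondences.
        votes = Counter([label[v]] + [label[u] for u in neighbors[v] if u in kept])
        final[v] = votes.most_common(1)[0][0]
    return final
```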
While PartNetE consists of both basic parts like chair back and fine-grained parts like scissors screw, PartNet with "level-3" annotation contains all fine-grained parts such as "back_frame_vertical_bar". In Sec. 4.3, we conduct comprehensive ablation studies to verify the necessity of each components in 3-By-2. Our few-shot experiments refer to the setting where a few labeled 3D objects are available for + +Table 1: Few-shot performance on PartNetE [19] dataset. The left columns show performance on the 17 categories that supervised methods [28,29,38] (first 3 rows) were trained on with additional 28K objects. The right columns show performance on the 28 categories with only 8 objects/category in the training set. [19,33,49,50] and ours (last 5 rows) only have access to 8 objects/category during training for all 45 categories. Please refer to the Supplement for the full table on all 45 categories. + +
| Methods | Chair | Scissors | Laptop | Door | Microwave | Keyboard | Avg. (17) | Camera | USB | Stapler | Dispenser | Kettle | Eyegl. | Avg. (28) | Avg. (45) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| PointNext [29] | 0.918 | 0.573 | 0.325 | 0.438 | 0.405 | 0.450 | 0.591 | 0.332 | 0.679 | 0.886 | 0.260 | 0.451 | 0.881 | 0.457 | 0.502 |
| PointNet++ [28] | 0.847 | 0.500 | 0.554 | 0.457 | 0.436 | 0.745 | 0.533 | 0.065 | 0.524 | 0.516 | 0.121 | 0.209 | 0.762 | 0.250 | 0.365 |
| SoftGroup [38] | 0.883 | 0.760 | 0.184 | 0.531 | 0.383 | 0.589 | 0.505 | 0.236 | 0.441 | 0.801 | 0.189 | 0.574 | 0.724 | 0.313 | 0.384 |
| ACD [33] | 0.390 | 0.391 | 0.111 | 0.189 | 0.066 | 0.261 | 0.196 | 0.101 | 0.252 | 0.500 | 0.194 | 0.402 | 0.782 | 0.259 | 0.235 |
| Prototype [49] | 0.708 | 0.430 | 0.279 | 0.334 | 0.270 | 0.449 | 0.419 | 0.320 | 0.654 | 0.807 | 0.534 | 0.607 | 0.779 | 0.470 | 0.451 |
| PartSLIP [19] | 0.854 | 0.603 | 0.297 | 0.408 | 0.427 | 0.536 | 0.567 | 0.583 | 0.561 | 0.848 | 0.738 | 0.770 | 0.883 | 0.625 | 0.603 |
| PartSLIP++ [50] | 0.853 | 0.605 | 0.297 | 0.451 | 0.495 | 0.724 | 0.574 | 0.632 | 0.575 | 0.630 | 0.720 | 0.856 | 0.883 | 0.642 | 0.615 |
| 3-By-2 (ours) | 0.844 | 0.657 | 0.453 | 0.544 | 0.402 | 0.896 | 0.604 | 0.626 | 0.790 | 0.901 | 0.782 | 0.815 | 0.928 | 0.665 | 0.642 |
+ +each object category while there is no annotated 3D part labels in the zero-shot setting. In this setting, we leverage labels from the 2D domain instead. + +# 4.1 Performance on PartNet-Ensembled + +Data & Metric. We use the dataset provided by Liu et al. [19] for both the few-shot and zero-shot settings. For each object in both few-shot and test sets, we render 20 RGB images from different views with resolution $800 \times 800$ . We report mean IoU (mIoU) performance of all baselines using the evaluation protocol provided by [19] on the input point clouds. Specifically, the performance of a part is not considered if it does not exist in the queried object. + +Few-shot Baselines. We compare 3-By-2 against fully-supervised semantic segmentation [28, 29, 38], few-shot semantic segmentation [33, 49] and language-based [19, 50] methods. The fully supervised methods [28, 29, 38] were trained on 28K objects of 17 overlapping categories between PartNetE [19], in addition to the few-shot set consisting of 8 objects/category. The second group of baselines [19, 33, 49, 50] were only trained on the few-shot set. PartSLIP and PartSLIP++, a concurrent work, rely on large vision-language model (GLIP [17]) to guide the 2D part detection before extending to the 3D point cloud segmentation. We provide more detailed descriptions in the Supplement. We omit the evaluation of MvDeCor [32] on this benchmark since it requires ground-truth 3D meshes, whereas PartNetE only provides dense point clouds as inputs. + +Few-shot Setting. In this setting, 8 objects/category serve as the few-shot set. We evaluate on the entire test set of PartNetE [19]. For a fair comparison, we remove part labels in the test set that do not exist in the few-shot set. We present our few-shot results in Table 1. Compared to fully-supervised 3D methods, we outperform by $1 - 10\%$ mIoU on these categories. Additionally, we demonstrate a significant performance boost on the remaining 28 categories (21-41% mIoU). We further outperform PartSLIP and PartSLIP++ on both subsets, achieving $\sim 3\%$ mIoU improvements overall. + +Performance on Real-world Scans. Please note that there is currently no publicly available real-world 3D part segmentation dataset for direct comparison. How- + +Table 2: Zero-shot performance on the subset of PartNetE [19] that overlaps with PACO [31]. Our method effectively leverages 2D in-the-wild part segmentation dataset to perform 3D part segmentation. + +
| Methods | Kettle | Microwave | Scissors | F. Chair | Mouse | Bottle | Laptop | Clock | Remote | Lamp | Avg. (18) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| SAMPro3D [42] + OpenMask3D [35] | 0.026 | 0.001 | 0.118 | 0.437 | 0.019 | 0.103 | 0.017 | 0.007 | 0.084 | 0.074 | 0.146 |
| PartSLIP [19] | 0.208 | 0.166 | 0.218 | 0.917 | 0.270 | 0.763 | 0.270 | 0.267 | 0.115 | 0.371 | 0.341 |
| VLPart [34]-MC | 0.211 | 0.192 | 0.193 | 0.813 | 0.000 | 0.216 | 0.060 | 0.205 | 0.132 | 0.166 | 0.222 |
| 3-By-2 (ours) | 0.765 | 0.348 | 0.594 | 0.712 | 0.307 | 0.807 | 0.394 | 0.253 | 0.239 | 0.500 | 0.430 |
+ +Table 3: Performance on PartNet dataset with "level-3" annotations in the few-shot setting. Bold and underline denote best and second best performance respectively. + +
| Methods | Bottle | Microwave | Display | Dishwasher | Faucet | Knife | Earphone | Clock | Bed | Trashcan | Avg. |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| MvDeCor [32] | 0.421 | 0.377 | 0.600 | 0.327 | 0.212 | 0.187 | 0.205 | 0.143 | 0.099 | 0.199 | 0.277 |
| PartSLIP [19] | 0.344 | 0.143 | 0.386 | 0.228 | 0.009 | 0.023 | 0.064 | 0.017 | 0.003 | 0.031 | 0.125 |
| 3-By-2 (ours) | 0.454 | 0.389 | 0.567 | 0.429 | 0.203 | 0.196 | 0.225 | 0.116 | 0.096 | 0.134 | 0.281 |
+ +ever, we demonstrate the robustness of our method using real-world objects, as shown in Fig. 1. These objects were originally introduced by Liu et al. [19] and captured using an iPhone12 camera. + +Zero-shot Baselines. We compare 3-By-2 with PartSLIP [19], VLPart [34]-MC and SAMPro3D [42] + OpenMask3D [35]. For PartSLIP, we prompt the pre-trained GLIP model with the language inputs without finetuning, following Liu et al. [19]. VLPart [34] is a SOTA 2D part segmentation method that was trained on a combination of various large-scale 2D part datasets. We replace our 2D part segmentation module with a pre-trained VLPart model, retaining the 3D mask-consistency aggregation module as 3-By-2, and term this baseline VLPart-MC. During inference, to guide VLPart effectively, we prompt the model with language inputs as in PartSLIP. SAMPro3D [42] is a SOTA zero-shot instance segmentation method for 3D scenes using SAM at its core. For semantic segmentation evaluation, we integrate SAMPro3D with OpenMask3D [35], an open-vocabulary 3D scene segmentation method. + +Zero-shot Setting. Since we do not have access to any labeled 3D objects in this setting, to effectively transfer part labels, we leverage PACO [31]. This dataset is a fine-grained and richly annotated 2D datasets consisting of objects from COCO-LVIS [9]. We crop and mask each annotated object using the provided object bounding box and segmentation mask to form the database. Further, we filter out small objects or objects with limited visibility, using the area of the object segmentation mask as a criterion. + +In Table 2 we show the performance of all baselines and 3-By-2 on the subset of PartNetE that overlaps with PACO dataset [31]. By leveraging the abundance and fine-grained of 2D in-the-wild part segmentation datasets, we achieve superior performance compared to all baselines (9-29% mIoU). We significantly outperform PartSLIP on challenging categories with small or thin parts (e.g. scissors and lamp by 28% and 13% mIoU respectively). These results highlight the effectiveness of 3-By-2 even when the database includes challenging real-world images with partial occlusion and truncation. + +Effectiveness of Our 2D Segmentation Module. We demonstrate the effectiveness of our 2D segmenter, leveraging SAM and DIFT, by showcasing its strong performance against VLPart [34], a SOTA 2D part segmentation method (see Table 2, last 2 rows). Note that VLPart was trained on PACO [31] among other 2D part datasets. Therefore, it is reasonable to anticipate that this method can effectively use knowledge from PACO to accurately segment the 18 overlapping categories between PartNetE and PACO. For both VLPart-MC and 3-By-2, we maintain the same 3D aggregation module. Our method significantly outperforms VLPart-MC, demonstrating the advantage of our proposed 2D segmentation module. + +Comparison to SOTA Scene Segmentation Approach. SAMPro3D [42] is a concurrent work with SOTA performance on zero-shot instance segmentation in 3D scene. This is a training-free model that effectively prompts SAM within the 2D domain using 3D point projections. As in Table 2, we outperform this baseline by a significant margin, highlighting the non-trivial nature of adapting scene segmentation methods for 3D part segmentation tasks, particularly those involving post-processing of 2D foundation models. + +# 4.2 Performance on Level-3 PartNet + +In this experiment, we select 10 categories from PartNet [24] that come with fine-grained ("level-3") annotations. 
We randomly select 10 objects per category from the training set (following [32]) to form our few-shot set, and up to 50 objects per category from the test set for evaluation, ensuring overlap with ShapeNetCore.v2 [5]. Given that PartSLIP [19] employs point cloud RGB for superpoint generation, which serves as 3D priors, our decision to choose overlapping objects with ShapeNetCore.v2 is to preserve object texture information. We use the same few-shot and test set for all baselines. + +Data. As inputs to our approach, we render 15 overlapping views for each textured mesh using Blender cycle renderer with realistic lighting from HDRI environment maps. + +Baselines. The baselines are reproduced following the papers' recommended training procedure. Specifically, we pre-train MvDeCor [32] on the entire training set of the selected categories consisting of 86 views per non-textured object, with rendered RGB, depth and normal maps as inputs. We then fine-tune the segmentation heads for each individual object category in the few-shot set with 15 views per object. Note that the input for this stage also includes RGB, depth and normal maps. + +For PartSLIP [19], we derive the language prompt by traversing the part hierarchy and concatenating labels from each level along the path, spanning from root to leaf. For example, the path "bottle/jug/handle" is transformed into "bottle jug handle". This adaptation is due to the potential for different leaf nodes to share identical labels (e.g., bottle/normal_bottle/handle and bottle/jug/handle), as relying solely on the leaf node label could introduce confusion in predictions. We adopt PartSLIP's point cloud, image rendering and data processing pipeline with default parameters. + +Table 4: Ablation of the non-overlapping mask generation module. + +
| 2D Mask Proposal | Scissors | Mouse | Suitcase | Bottle | Chair |
| --- | --- | --- | --- | --- | --- |
| SAM | 0.457 | 0.440 | 0.285 | 0.004 | 0.638 |
| Non-overlap | 0.675 | 0.684 | 0.813 | 0.810 | 0.844 |
+ +Table 5: Ablation of our proposed mask-consistency component. + +
| 3D Label Aggregation | Scissors | Suitcase | Printer | Clock |
| --- | --- | --- | --- | --- |
| Point-Consistency | 0.619 | 0.579 | 0.009 | 0.363 |
| Mask-Consistency | 0.675 | 0.684 | 0.085 | 0.458 |
+ +Evaluation & Metric. We uniformly sample 300K points on the surface of each labeled ground truth mesh and employ nearest neighbor assignment to associate a ground-truth label with each point. This point set is used for evaluating all methods for a fair comparison and eliminating any randomness introduced by the point cloud sampling step. We use part mIoU on the sampled point set as the evaluation metric. We employ the standard mIoU calculation, which considers the performance of all parts in the vocabulary, even in cases where they may not exist in certain objects. Additionally, different from MvDeCor, we do not exclude the "others" label during evaluation based on ground-truth labels. For a fair comparison, we applied the same evaluation approach across all methods. + +Results. We show results in Table 3. Compared to PartSLIP [19], we outperformed on all categories by a significant margin (16% mIoU on average), demonstrating the challenges posed by fine-grained settings for GLIP [17]. While our performance is on par with MvDeCor [32], it is important to note that MvDeCor is both pretrained and finetuned on PartNet [24], using ground truth depth and normal maps as additional inputs. In contrast, our method requires no training on the target data distribution. + +# 4.3 Ablation Study + +Non-overlapping Mask Generation. In Table 4, we illustrate the effectiveness of our proposed non-overlapping mask generation module. The comparison involves evaluating the performance of our method with and without this module. In the case of the model without the non-overlapping mask generation module, we directly utilize the predicted SAM outputs for label transferring. The results indicate that our non-overlapping mask generation module is necessary for achieving an optimal performance. + +Mask-consistency Module. In Table 5, we demonstrate the effectiveness of our proposed mask-consistency component, which improves the final performance especially on objects with small parts. + +Properties of Database. In this section, we investigate two key questions: 1) Can 3-By-2 accurately segment the query object within a database containing multiple object categories? and 2) Is it possible to transfer parts with the same semantic meaning between different object categories? + +Multi-category database. To address question 1, we perform experiments using databases containing 1, 2, and 8 categories respectively (see Table 6). Specifically, taking the query category as "Kettle", for the 2-category setting we construct a database consisting of "Kettle, Kitchen Pot". We selected these categories due to their shared semantic parts with "Kettle", which could potentially lead to confusion (e.g., kettle lid vs. kitchen pot lid). With 8-category setting, we add in + +Table 6: Multi-category database experiment. Performance of Kettle in various database settings is reported with mIoU. Our method shows robustness in performance even when more categories are added in the database. + +
| Database | Lid | Handle | Spout | Avg. |
| --- | --- | --- | --- | --- |
| 1-category | 0.759 | 0.904 | 0.783 | 0.815 |
| 2-category | 0.703 | 0.820 | 0.748 | 0.757 |
| 8-category | 0.727 | 0.773 | 0.756 | 0.752 |
+ 

Table 7: Cross-category database experiment. We report the performance of 18 tables with wheels in PartNetE. Results show that our method can transfer wheel annotations from Chair to correct the prediction on Table wheels. 

+
| Database | Leg | Tabletop | Wheel | Avg. |
| --- | --- | --- | --- | --- |
| Table only | 0.586 | 0.647 | 0.000 | 0.411 |
| Chair & Table | 0.641 | 0.633 | 0.600 | 0.625 |
+ +categories that are completely different and do not share any parts with "Kettle" (e.g. "Eyeglasses"). In general, with more categories in the database, there is a slight decrease in the average performance. Notably, there are marginal differences between 2-category and 8-category (second and third rows), highlighting the ability of 3-By-2 in handling both diverse object taxonomy and part segmentation. This finding is particularly interesting since many prior works [19,32] require finetuning each category separately for few-shot evaluation. + +Cross-category database. Considering question 2, we note that the few-shot set of "Table" in PartNetE lacks objects with wheels as a part, whereas such objects are present in the test set. To address this, we incorporate the "Chair" category where the wheel part exists in the database. We evaluate on 18 tables in PartNetE test set with the "wheel" part annotated (see Table 7). Compared to the table only few-shot set, combining the database with "Chair" improves the performance on "leg" by $\sim 6\%$ mIoU. The improvement in the "leg" part can be attributed to the inclusion of "Chair" in the database, which reduces the likelihood of the model incorrectly associating "wheel" with "leg" due to the absence of "wheel" in the few-shot set. Interestingly, the performance for "wheel" increases significantly, $+60\%$ mIoU through the label transfer from chair wheels. + +While the concept may seem intuitive, our findings shed new light on object part compositionality. Despite the diversity in appearances and shapes across various object categories, there exists a finite set of object parts that are shared among them. Recognizing the transferability of these parts is important for facilitating rapid learning of novel objects across a range of tasks. Further, our results show the ability to correct wrong predictions of our approach by transferring labels from another category. Please refer to the Sup. for additional studies. + +# 5 Conclusion + +In this work, we propose 3-By-2, a novel, training-free method that achieves SOTA performance on benchmarks with diverse levels of part granularity without the need for language inputs, on both zero-shot and few-shot settings. We demonstrate the flexibility of 3-By-2 in transferring part labels between different object categories. We hope the development of 3-By-2 can encourage further exploration of visual similarities for this task. + +# Acknowledgement + +This work was partly supported by NIH R01HD104624-01A1. + +# References + +1. Abdelreheem, A., Skorokhodov, I., Ovsjanikov, M., Wonka, P.: Satr: Zero-shot semantic segmentation of 3d shapes. arXiv preprint arXiv:2304.04909 (2023) 2, 3, 4 +2. Amir, S., Gandelsman, Y., Bagon, S., Dekel, T.: Deep vit features as dense visual descriptors. arXiv preprint arXiv:2112.05814 2(3), 4 (2021) 5 +3. Caron, M., Touvron, H., Misra, I., Jégou, H., Mairal, J., Bojanowski, P., Joulin, A.: Emerging properties in self-supervised vision transformers. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 9650-9660 (2021) 5 +4. Cen, J., Zhou, Z., Fang, J., Shen, W., Xie, L., Jiang, D., Zhang, X., Tian, Q., et al.: Segment anything in 3d with nerfs. Advances in Neural Information Processing Systems 36 (2024) 4 +5. Chang, A.X., Funkhouser, T., Guibas, L., Hanrahan, P., Huang, Q., Li, Z., Savarese, S., Savva, M., Song, S., Su, H., et al.: Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012 (2015) 4, 12 +6. 
Chen, N., Liu, L., Cui, Z., Chen, R., Ceylan, D., Tu, C., Wang, W.: Unsupervised learning of intrinsic structural representation points. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 9121-9130 (2020) 5 +7. Dai, A., Nießner, M.: 3dmv: Joint 3d-multi-view prediction for 3d semantic scene segmentation. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 452-468 (2018) 4 +8. Deng, S., Xu, X., Wu, C., Chen, K., Jia, K.: 3d affordancenet: A benchmark for visual object affordance understanding. In: proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 1778-1787 (2021) 2 +9. Gupta, A., Dollar, P., Girshick, R.: Lvis: A dataset for large vocabulary instance segmentation. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 5356-5364 (2019) 11 +10. He, J., Yang, S., Yang, S., Kortylewski, A., Yuan, X., Chen, J.N., Liu, S., Yang, C., Yu, Q., Yuille, A.: Partimagenet: A large, high-quality dataset of parts. In: European Conference on Computer Vision. pp. 128-145. Springer (2022) 2 +11. Hedlin, E., Sharma, G., Mahajan, S., Isack, H., Kar, A., Tagliasacchi, A., Yi, K.M.: Unsupervised semantic correspondence using stable diffusion. arXiv preprint arXiv:2305.15581 (2023) 5 +12. Huang, R., Peng, S., Takmaz, A., Tombari, F., Pollefeys, M., Song, S., Huang, G., Engelmann, F.: Segment3d: Learning fine-grained class-agnostic 3d segmentation without manual labels. arXiv preprint arXiv:2312.17232 (2023) 4 +13. Jaritz, M., Gu, J., Su, H.: Multi-view pointnet for 3d scene understanding. 2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW) pp. 3995-4003 (2019), https://apisemanticscholar.org/CorpusID:203593088 +14. Kalogerakis, E., Hertzmann, A., Singh, K.: Learning 3D Mesh Segmentation and Labeling. ACM Transactions on Graphics 29(3) (2010) 1 + +15. Kim, H., Sung, M.: Partstad: 2d-to-3d part segmentation task adaptation (2024) 4 +16. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., et al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023) 4, 5, 7 +17. Li, L.H., Zhang, P., Zhang, H., Yang, J., Li, C., Zhong, Y., Wang, L., Yuan, L., Zhang, L., Hwang, J.N., et al.: Grounded language-image pre-training. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 10965-10975 (2022) 4, 10, 13 +18. Li, Y., Upadhyay, U., Habib Slim, A.A., Arpit Prajapati, S.P., Wonka, P., Elhoseiny, M.: 3d compat: Composition of materials on parts of 3d things (eccv 2022). ECCV (2022) 4 +19. Liu, M., Zhu, Y., Cai, H., Han, S., Ling, Z., Porikli, F., Su, H.: Partslip: Low-shot part segmentation for 3d point clouds via pretrained image-language models. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21736-21746 (2023) 2, 3, 4, 8, 9, 10, 11, 12, 13, 14 +20. Liu, W., Mao, J., Hsu, J., Hermans, T., Garg, A., Wu, J.: Composable part-based manipulation. In: 7th Annual Conference on Robot Learning (2023), https://openreview.net/forum?id=o-K3HVUeEw1 +21. Liu, X., Xu, X., Rao, A., Gan, C., Yi, L.: Autogpart: Intermediate supervision search for generalizable 3d part segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11624-11634 (2022) 2 +22. Lowe, D.G.: Distinctive image features from scale-invariant keypoints. International journal of computer vision 60, 91-110 (2004) 5 +23. 
Min, J., Lee, J., Ponce, J., Cho, M.: Spair-71k: A large-scale benchmark for semantic correspondence. arXiv preprint arXiv:1908.10543 (2019) 7 +24. Mo, K., Zhu, S., Chang, A.X., Yi, L., Tripathi, S., Guibas, L.J., Su, H.: Partnet: A large-scale benchmark for fine-grained and hierarchical part-level 3d object understanding. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 909-918 (2019) 3, 4, 9, 12, 13 +25. Nadeau, P., Giamou, M., Kelly, J.: The sum of its parts: Visual part segmentation for inertial parameter identification of manipulated objects. arXiv preprint arXiv:2302.06685 (2023) 1 +26. Nguyen, P.D.A., Ngo, T.D., Gan, C., Kalogerakis, E., Tran, A., Pham, C., Nguyen, K.: Open3dis: Open-vocabulary 3d instance segmentation with 2d mask guidance (2023) 4 +27. Peng, S., Genova, K., Jiang, C., Tagliasacchi, A., Pollefeys, M., Funkhouser, T., et al.: Openscene: 3d scene understanding with open vocabularies. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 815-824 (2023) 4 +28. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems 30 (2017) 10 +29. Qian, G., Li, Y., Peng, H., Mai, J., Hammoud, H., Elhoseiny, M., Ghanem, B.: Pointnext: Revisiting pointnet++ with improved training and scaling strategies. Advances in Neural Information Processing Systems 35, 23192-23204 (2022) 4, 10 +30. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International conference on machine learning. pp. 8748-8763. PMLR (2021) 4 + +31. Ramanathan, V., Kalia, A., Petrovic, V., Wen, Y., Zheng, B., Guo, B., Wang, R., Marquez, A., Kovvuri, R., Kadian, A., et al.: Paco: Parts and attributes of common objects. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7141-7151 (2023) 2, 11, 12 +32. Sharma, G., Yin, K., Maji, S., Kalogerakis, E., Litany, O., Fidler, S.: Mvdecor: Multi-view dense correspondence learning for fine-grained 3d segmentation. In: European Conference on Computer Vision. pp. 550-567. Springer (2022) 3, 4, 8, 10, 11, 12, 13, 14 +33. Singh, C., Murdoch, W.J., Yu, B.: Hierarchical interpretations for neural network predictions. arXiv preprint arXiv:1806.05337 (2018) 10 +34. Sun, P., Chen, S., Zhu, C., Xiao, F., Luo, P., Xie, S., Yan, Z.: Going denser with open-vocabulary part segmentation. arXiv preprint arXiv:2305.11173 (2023) 5, 11, 12 +35. Takmaz, A., Fedele, E., Sumner, R.W., Pollefeys, M., Tombari, F., Engelmann, F.: Openmask3d: Open-vocabulary 3d instance segmentation. arXiv preprint arXiv:2306.13631 (2023) 4, 11 +36. Tang, L., Jia, M., Wang, Q., Phoo, C.P., Hariharan, B.: Emergent correspondence from image diffusion. arXiv preprint arXiv:2306.03881 (2023) 2, 3, 5, 6, 7 +37. Varadarajan, K.M., Vincze, M.: Object part segmentation and classification in range images for grasping. In: 2011 15th International Conference on Advanced Robotics (ICAR). pp. 21-27. IEEE (2011) 1 +38. Vu, T., Kim, K., Luu, T.M., Nguyen, T., Yoo, C.D.: Softgroup for 3d instance segmentation on point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2708-2717 (2022) 10 +39. Wang, L., Li, X., Fang, Y.: Few-shot learning of part-specific probability space for 3d shape segmentation. 
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (June 2020) 4 +40. Wang, R., Zhang, Y., Mao, J., Zhang, R., Cheng, C.Y., Wu, J.: Ikea-manual: Seeing shape assembly step by step. Advances in Neural Information Processing Systems 35, 28428-28440 (2022) 4 +41. Xiang, F., Qin, Y., Mo, K., Xia, Y., Zhu, H., Liu, F., Liu, M., Jiang, H., Yuan, Y., Wang, H., et al.: Sapien: A simulated part-based interactive environment. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11097-11107 (2020) 9 +42. Xu, M., Yin, X., Qiu, L., Liu, Y., Tong, X., Han, X.: Sampro3d: Locating sam prompts in 3d for zero-shot scene segmentation. arXiv preprint arXiv:2311.17707 (2023) 4, 11, 12 +43. Xue, Y., Chen, N., Liu, J., Sun, W.: Zerops: High-quality cross-modal knowledge transfer for zero-shot 3d part segmentation (2023) 4 +44. Yang, Y., Wu, X., He, T., Zhao, H., Liu, X.: Sam3d: Segment anything in 3d scenes. arXiv preprint arXiv:2306.03908 (2023) 4 +45. Yi, L., Kim, V.G., Ceylan, D., Shen, I.C., Yan, M., Su, H., Lu, C., Huang, Q., Sheffer, A., Guibas, L.: A scalable active framework for region annotation in 3d shape collections. SIGGRAPH Asia (2016) 4 +46. Yu, Q., Du, H., Liu, C., Yu, X.: When 3d bounding-box meets sam: Point cloud instance segmentation with weak-and-noisy supervision. ArXiv abs/2309.00828 (2023), https://api-semanticscholar.org/CorpusID:261530997 4 +47. Zhang, J., Herrmann, C., Hur, J., Cabrera, L.P., Jampani, V., Sun, D., Yang, M.H.: A tale of two features: Stable diffusion complements dino for zero-shot semantic correspondence. arXiv preprint arXiv:2305.15347 (2023) 2, 5 + +48. Zhao, L., Lu, J., Zhou, J.: Similarity-aware fusion network for 3d semantic segmentation. 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) pp. 1585-1592 (2021), https://apisemantic scholar.org/CorpusID:235732071 4 +49. Zhao, N., Chua, T.S., Lee, G.H.: Few-shot 3d point cloud semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8873-8882 (2021) 10 +50. Zhou, Y., Gu, J., Li, X., Liu, M., Fang, Y., Su, H.: Partslip++: Enhancing low-shot 3d part segmentation via multi-view instance segmentation and maximum likelihood estimation. arXiv preprint arXiv:2312.03015 (2023) 4, 10 +51. Zhu, J., Zhang, Y., Guo, J., Liu, H., Liu, M., Liu, Y., Guo, Y.: Label transfer between images and 3d shapes via local correspondence encoding. Comput. Aided Geom. Des. 71(C), 255-266 (may 2019). https://doi.org/10.1016/j.cagd.2019.04.009, https://doi.org/10.1016/j.cagd.2019.04.009 5 +52. Zhu, X., Zhang, R., He, B., Guo, Z., Zeng, Z., Qin, Z., Zhang, S., Gao, P.: Pointclip v2: Prompting clip and gpt for powerful 3d open-world learning. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 
2639-2650 (2023) 2 \ No newline at end of file diff --git a/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/images.zip b/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..d46bfedd40bbe57cac3bcc4a1a4e8079310c8a24 --- /dev/null +++ b/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e69b85d3b41c0de2a106817ba9424a82e28be3c7e342626d22d1d764adcb557 +size 350686 diff --git a/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/layout.json b/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..c38f9148caad75c0ea68cdebd80d3e94e8f6551a --- /dev/null +++ b/2024/3x2_ 3D Object Part Segmentation by 2D Semantic Correspondences/layout.json @@ -0,0 +1,9161 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 55, + 30, + 358, + 66 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 30, + 358, + 66 + ], + "spans": [ + { + "bbox": [ + 55, + 30, + 358, + 66 + ], + "type": "inline_equation", + "content": "3 \\times 2" + }, + { + "bbox": [ + 55, + 30, + 358, + 66 + ], + "type": "text", + "content": ": 3D Object Part Segmentation by 2D Semantic Correspondences" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 36, + 86, + 376, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 36, + 86, + 376, + 110 + ], + "spans": [ + { + "bbox": [ + 36, + 86, + 376, + 110 + ], + "type": "text", + "content": "Anh Thai" + }, + { + "bbox": [ + 36, + 86, + 376, + 110 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 36, + 86, + 376, + 110 + ], + "type": "text", + "content": ", Weiyao Wang" + }, + { + "bbox": [ + 36, + 86, + 376, + 110 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 36, + 86, + 376, + 110 + ], + "type": "text", + "content": ", Hao Tang" + }, + { + "bbox": [ + 36, + 86, + 376, + 110 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 36, + 86, + 376, + 110 + ], + "type": "text", + "content": ", Stefan Stojanov" + }, + { + "bbox": [ + 36, + 86, + 376, + 110 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 36, + 86, + 376, + 110 + ], + "type": "text", + "content": ", James M. 
Rehg" + }, + { + "bbox": [ + 36, + 86, + 376, + 110 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 36, + 86, + 376, + 110 + ], + "type": "text", + "content": ", and Matt Feiszli" + }, + { + "bbox": [ + 36, + 86, + 376, + 110 + ], + "type": "inline_equation", + "content": "^{2}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 136, + 119, + 275, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 119, + 275, + 131 + ], + "spans": [ + { + "bbox": [ + 136, + 119, + 275, + 131 + ], + "type": "text", + "content": "1 Georgia Institute of Technology" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 169, + 131, + 242, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 131, + 242, + 142 + ], + "spans": [ + { + "bbox": [ + 169, + 131, + 242, + 142 + ], + "type": "text", + "content": "2 Meta AI, FAIR" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 118, + 142, + 293, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 142, + 293, + 154 + ], + "spans": [ + { + "bbox": [ + 118, + 142, + 293, + 154 + ], + "type": "text", + "content": "3 University of Illinois Urbana-Champaign" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 37, + 164, + 373, + 293 + ], + "blocks": [ + { + "bbox": [ + 37, + 164, + 373, + 293 + ], + "lines": [ + { + "bbox": [ + 37, + 164, + 373, + 293 + ], + "spans": [ + { + "bbox": [ + 37, + 164, + 373, + 293 + ], + "type": "image", + "image_path": "cfcd26146e1a2e80dc84caa2831a7bb7f368e50237efbc29024997c17fd81f1b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 31, + 305, + 379, + 328 + ], + "lines": [ + { + "bbox": [ + 31, + 305, + 379, + 328 + ], + "spans": [ + { + "bbox": [ + 31, + 305, + 379, + 328 + ], + "type": "text", + "content": "Fig. 1: We propose 3-By-2, a novel training-free method for low-shot 3D object part segmentation that achieves SOTA performance on both zero-shot and few-shot settings." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 59, + 335, + 353, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 335, + 353, + 500 + ], + "spans": [ + { + "bbox": [ + 59, + 335, + 353, + 500 + ], + "type": "text", + "content": "Abstract. 3D object part segmentation is essential in computer vision applications. While substantial progress has been made in 2D object part segmentation, the 3D counterpart has received less attention, in part due to the scarcity of annotated 3D datasets, which are expensive to collect. In this work, we propose to leverage a few annotated 3D shapes or richly annotated 2D datasets to perform 3D object part segmentation. We present our novel approach, termed 3-By-2 that achieves SOTA performance on different benchmarks with various granularity levels. By using features from pretrained foundation models and exploiting semantic and geometric correspondences, we are able to overcome the challenges of limited 3D annotations. Our approach leverages available 2D labels, enabling effective 3D object part segmentation. Our method 3-By-2 can accommodate various part taxonomies and granularities, demonstrating part label transfer ability across different object categories. Project website: https://ngailapdi.github.io/projects/3by2/." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 32, + 517, + 129, + 530 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 517, + 129, + 530 + ], + "spans": [ + { + "bbox": [ + 32, + 517, + 129, + 530 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 31, + 541, + 381, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 31, + 541, + 381, + 566 + ], + "spans": [ + { + "bbox": [ + 31, + 541, + 381, + 566 + ], + "type": "text", + "content": "3D object part understanding is essential in various research fields and applications, such as robotics [20, 25, 37] and graphics [14]. Through our understanding" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 41, + 571, + 221, + 583 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 571, + 221, + 583 + ], + "spans": [ + { + "bbox": [ + 41, + 571, + 221, + 583 + ], + "type": "text", + "content": "Work done as an intern at Meta AI (FAIR)." + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 32, + 34, + 379, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 34, + 379, + 166 + ], + "spans": [ + { + "bbox": [ + 32, + 34, + 379, + 166 + ], + "type": "text", + "content": "of the world, objects can be decomposed into parts based on diverse properties (e.g., geometry or affordance [8, 21]). However, these different decompositions do not always align with one another—the same object can be segmented into parts differently depending on the specific use case. For instance, a driver might perceive a car in terms of its functional components like the steering wheel, accelerator pedal, and brake pedal. Conversely, a manufacturing worker may view the car as an assembly of structural parts, such as the frame, bumper, and windshield. Further, various parts with similar functionalities or structures can be shared among different object classes (e.g., the term \"leg\" can apply to multiple furniture items). How can we design a 3D part segmentation system that has high performance across such different requirements and scenarios?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 32, + 169, + 379, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 169, + 379, + 300 + ], + "spans": [ + { + "bbox": [ + 32, + 169, + 379, + 300 + ], + "type": "text", + "content": "Recent works in 3D part segmentation have integrated language as an additional input [1, 19, 52] by leveraging vision-language models to prompt the segmentation. However, grounding visual parts using language is inherently ambiguous. This is because parts can be described using diverse phrases that may include synonyms, various levels of detail, and differences in terminology (structural vs functional), which presents challenges for these models [19]. In contrast, images capture rich information about object shapes, textures and spatial part relationships. These properties can directly be parsed and compared using visual similarities between objects despite differences in linguistic expression. Therefore, it is important to study the limits and potentials of reasoning about visual similarity for generalization across different objects and categories." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 32, + 303, + 379, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 303, + 379, + 386 + ], + "spans": [ + { + "bbox": [ + 32, + 303, + 379, + 386 + ], + "type": "text", + "content": "In this work, we investigate the 3D part segmentation task from this different perspective and propose a novel method called 3-By-2. Since labeling 3D data is expensive, we design 3-by-2 to leverage existing extensively annotated 2D part segmentation datasets [10,31] or a few-labeled 3D shapes to perform object part segmentation without additional training or finetuning. Our method does not need any language input and can flexibly handle segmentation tasks at various levels of granularity." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 32, + 389, + 379, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 389, + 379, + 520 + ], + "spans": [ + { + "bbox": [ + 32, + 389, + 379, + 520 + ], + "type": "text", + "content": "We build our method based on the observation that because objects are constructed from parts, and because various objects often share a common set of parts with similar visual structures, this should allow part label transfer from one object to another without any language description. Recent studies [36, 47] have demonstrated the strong 2D semantic correspondences encoded by features of image diffusion models that generalize across different domains (e.g. sketch vs real images). To label a query 3D object point cloud, we leverage these strong representations to perform 2D pixel correspondence-based label transfer from in-the-wild 2D datasets or 2D renders of a few labeled 3D objects. To the best of our knowledge, we are the first to use diffusion model features for semantic label transfer in the context of 3D part segmentation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 33, + 523, + 379, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 523, + 379, + 583 + ], + "spans": [ + { + "bbox": [ + 33, + 523, + 379, + 583 + ], + "type": "text", + "content": "While it might seem that obtaining 2D part labels for multi-view renders of an object through label transfer and back-projection into 3D is intuitively straightforward, a high performance and efficient implementation requires careful consideration of the challenges of 3D part segmentation: 1) Precise determination of 3D object part boundaries, which is particularly challenging for unstructured" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 33, + 11, + 39, + 18 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 11, + 39, + 18 + ], + "spans": [ + { + "bbox": [ + 33, + 11, + 39, + 18 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 10, + 121, + 19 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 10, + 121, + 19 + ], + "spans": [ + { + "bbox": [ + 66, + 10, + 121, + 19 + ], + "type": "text", + "content": "A. Thai et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 34, + 33, + 378, + 94 + ], + "blocks": [ + { + "bbox": [ + 34, + 33, + 378, + 94 + ], + "lines": [ + { + "bbox": [ + 34, + 33, + 378, + 94 + ], + "spans": [ + { + "bbox": [ + 34, + 33, + 378, + 94 + ], + "type": "image", + "image_path": "206dbe6a15b8c3e45c52762fb89e8f2882ccc9f4b2f5c9e661a279ea8635f83d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 30, + 99, + 379, + 154 + ], + "lines": [ + { + "bbox": [ + 30, + 99, + 379, + 154 + ], + "spans": [ + { + "bbox": [ + 30, + 99, + 379, + 154 + ], + "type": "text", + "content": "Fig. 2: Overview of our proposed method 3-By-2. (1) Render the input object in multiple camera viewpoints, (2) Perform 2D part segmentation on each view individually by leveraging 2D semantic correspondences and 2D class-agnostic segmentation model, (3) Aggregate the 2D predictions from multiple views using our proposed mask-consistency module, (4) Back-project the predictions to 3D using depth information." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 30, + 164, + 379, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 164, + 379, + 236 + ], + "spans": [ + { + "bbox": [ + 30, + 164, + 379, + 236 + ], + "type": "text", + "content": "data like point clouds, and 2) Flexible adaptation to different levels of part granularity. To this end, we introduce three novel elements of our method: non-overlapping generation, mask-level label transfer and mask-consistency modules (see Fig. 2). These components work efficiently together to ensure precise 3D part segmentation masks and boundaries across a range of object categories and part levels (Fig. 1 and Tables 1, 2, 3)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 30, + 236, + 379, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 236, + 379, + 308 + ], + "spans": [ + { + "bbox": [ + 30, + 236, + 379, + 308 + ], + "type": "text", + "content": "Overall, 3-By-2 is a training-free method independent of language inputs, instead relying solely on the 2D labels provided by a 2D database. Unlike previous methods that require 3D segmentation priors like point-cloud clusters [19] or mesh surface information [1,32], our approach has only a single requirement: calibrated cameras for back-projection. This can be known during the rendering process or predicted using SfM approaches." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 30, + 308, + 380, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 308, + 380, + 452 + ], + "spans": [ + { + "bbox": [ + 30, + 308, + 380, + 452 + ], + "type": "text", + "content": "We validate the performance of our approach with PartNet-Ensembled [19], a dataset tailored for language-input models, and PartNet [24], which is not tailored for language. These datasets exhibit multiple levels of granularity. Notably, unlike previous approaches that require category-specific fine-tuning for few-shot scenarios [19, 32], 3-By-2 achieves SOTA performance without any training or fine-tuning requirements in either a zero-shot or few-shot setting. Additionally, we identify that models with language inputs exhibit suboptimal performance with highly fine-grained part terminologies. 
This highlights the advantages of our approach, which effectively handles these fine-grained object parts. Furthermore, we conduct comprehensive ablation studies and demonstrate the transferability of parts across different object categories, which benefits the understanding of object part compositionality." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 453, + 229, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 453, + 229, + 464 + ], + "spans": [ + { + "bbox": [ + 46, + 453, + 229, + 464 + ], + "type": "text", + "content": "In summary, our contributions are 4-fold:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 37, + 475, + 379, + 583 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 37, + 475, + 379, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 37, + 475, + 379, + 510 + ], + "spans": [ + { + "bbox": [ + 37, + 475, + 379, + 510 + ], + "type": "text", + "content": "- A novel, training-free method, 3-By-2, that achieves SOTA performance on benchmarks with different levels of granularity for zero-shot and few-shot 3D object part segmentation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 37, + 511, + 379, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 37, + 511, + 379, + 546 + ], + "spans": [ + { + "bbox": [ + 37, + 511, + 379, + 546 + ], + "type": "text", + "content": "- The first to provide an effective approach for leveraging image diffusion model's features [36] to establish 2D semantic correspondences in the context of 3D part segmentation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 37, + 547, + 379, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 37, + 547, + 379, + 583 + ], + "spans": [ + { + "bbox": [ + 37, + 547, + 379, + 583 + ], + "type": "text", + "content": "- Novel non-overlapping mask generation, mask-level label transfer, and mask-consistency modules that effectively transfer part labels from 2D database and extrapolate them to 3D." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 318, + 10, + 347, + 20 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 10, + 347, + 20 + ], + "spans": [ + { + "bbox": [ + 318, + 10, + 347, + 20 + ], + "type": "text", + "content": "3-By-2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 373, + 10, + 379, + 19 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 10, + 379, + 19 + ], + "spans": [ + { + "bbox": [ + 373, + 10, + 379, + 19 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 38, + 33, + 379, + 57 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 38, + 33, + 379, + 57 + ], + "spans": [ + { + "bbox": [ + 38, + 33, + 379, + 57 + ], + "type": "text", + "content": "- Demonstrating the flexibility of 3-By-2 in accommodating various database settings and in generalizing between different object categories." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 33, + 77, + 135, + 91 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 77, + 135, + 91 + ], + "spans": [ + { + "bbox": [ + 33, + 77, + 135, + 91 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 33, + 103, + 171, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 103, + 171, + 116 + ], + "spans": [ + { + "bbox": [ + 33, + 103, + 171, + 116 + ], + "type": "text", + "content": "2.1 3D Part Segmentation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 33, + 125, + 379, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 125, + 379, + 244 + ], + "spans": [ + { + "bbox": [ + 33, + 125, + 379, + 244 + ], + "type": "text", + "content": "In contrast to its 2D counterpart, the progress in this field has been relatively limited, primarily due to the high cost associated with collecting and annotating 3D datasets. Currently, all of the available large-scale annotated 3D object part datasets are synthetic [18,24,40,45]. The most widely used benchmarks [24,45] are predominantly derived from objects within the ShapeNetCore [5] dataset. This problem has been tackled using architectures that take 3D representations [24,29] as inputs. These methods were trained in a supervised manner, requiring large-scale annotated data. More recent approaches have attempted to investigate data-efficient training scenarios where only a few 3D shapes are annotated [19,32,39,50]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 33, + 262, + 354, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 262, + 354, + 274 + ], + "spans": [ + { + "bbox": [ + 33, + 262, + 354, + 274 + ], + "type": "text", + "content": "2.2 Multi-view 2D-3D Segmentation Using Foundation Models" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 33, + 284, + 379, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 284, + 379, + 391 + ], + "spans": [ + { + "bbox": [ + 33, + 284, + 379, + 391 + ], + "type": "text", + "content": "Although multi-view approaches have been widely utilized in the past for 3D segmentation [7,13,48], the rapid advancement of 2D foundation models [16,17] has encouraged more SOTA research aimed at leveraging these models to perform 3D segmentation in a multi-view fashion. CLIP [30] and GLIP [17] have been employed to integrate language information from multiple 2D views into 3D for open-vocabulary segmentation [1,19,27,35,50]. SAM [16], due to its ability to output per-pixel masks, has been used as an effective tool for multi-view 2D-3D segmentation, both on 3D structures like point clouds [35,42,44,46,50] or in NeRF-style [4]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 33, + 392, + 386, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 392, + 386, + 511 + ], + "spans": [ + { + "bbox": [ + 33, + 392, + 386, + 511 + ], + "type": "text", + "content": "Scene Segmentation. Various combinations of foundation models have been explored for this task. While [35] leverages CLIP and SAM to support open-vocabulary 3D part segmentation, others use SAM with carefully designed prompts [4] or post-processing techniques [44]. Building upon these successes, concurrent works [12,26,42] seek to improve SAM utilization strategies. Our work differs by focusing on part segmentation, which requires finer granularity. 
This distinction in objectives directly influences the processing of SAM predictions, tailored to suit their specific characteristics. For example, while scene segmentation methods may disregard or merge masks covering parts of objects, part segmentation approaches might encourage splitting, depending on the desired level of detail." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 33, + 512, + 379, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 512, + 379, + 583 + ], + "spans": [ + { + "bbox": [ + 33, + 512, + 379, + 583 + ], + "type": "text", + "content": "Part Segmentation. PartSLIP [19] and SATR [1] were among the first to employ foundation models for this task, pioneering the use of GLIP for open-vocabulary segmentation. Concurrent works have seen the integration of SAM into their pipelines [15,43,50]. Zhou et al. [50] and Kim et al. [15] use SAM with GLIP-predicted bounding boxes, while Xue et al. [43] employ SAM with furthest point sampling for each view, extending predictions to 3D with GLIP labels. Our" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 33, + 11, + 39, + 19 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 11, + 39, + 19 + ], + "spans": [ + { + "bbox": [ + 33, + 11, + 39, + 19 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 10, + 121, + 19 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 10, + 121, + 19 + ], + "spans": [ + { + "bbox": [ + 66, + 10, + 121, + 19 + ], + "type": "text", + "content": "A. Thai et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 32, + 34, + 379, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 34, + 379, + 94 + ], + "spans": [ + { + "bbox": [ + 32, + 34, + 379, + 94 + ], + "type": "text", + "content": "approach shares with these works the use of SAM for 2D segmentation before 3D aggregation. In contrast, our method focuses solely on visual cues without language inputs, employing image diffusion model's features [36]. To improve SAM's accuracy, we introduce a novel non-overlapping mask generation module, eliminating the need for GLIP-generated bounding boxes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 33, + 109, + 276, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 109, + 276, + 121 + ], + "spans": [ + { + "bbox": [ + 33, + 109, + 276, + 121 + ], + "type": "text", + "content": "2.3 Part Label Transfer using Correspondences" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 33, + 127, + 379, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 127, + 379, + 211 + ], + "spans": [ + { + "bbox": [ + 33, + 127, + 379, + 211 + ], + "type": "text", + "content": "Transferring labels from annotated datasets to non-annotated datasets has been considered recently in [34] for open-vocabulary 2D part segmentation and previously in [6, 51] for 3D part segmentation. While [34] used DINOv1 [3] feature representations for dense label transfer between related objects in the base classes and novel object classes, Zhu et al. [51] relied on classical SIFT [22] features for establishing correspondences in 2D images. Chen et al. [6], in contrast, train a network to regress the correspondences directly on the input point cloud." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 33, + 212, + 379, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 212, + 379, + 306 + ], + "spans": [ + { + "bbox": [ + 33, + 212, + 379, + 306 + ], + "type": "text", + "content": "We share with these approaches the use of semantic correspondences to identify optimal candidates for label transfer. However, our primary objective sets us apart significantly from [34], as we focus on segmenting 3D objects. Compared to [51], we leverage class-agnostic segmentation models to avoid dense pixel/patch sampling. Furthermore, unlike [6], we do not require direct operations on 3D point clouds or any specific 3D representations. Additionally, we introduce a mask-consistency module for per mask label voting, rather than relying solely on small local patches." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 33, + 307, + 379, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 307, + 379, + 391 + ], + "spans": [ + { + "bbox": [ + 33, + 307, + 379, + 391 + ], + "type": "text", + "content": "Semantic Correspondences from Foundation Models. Many vision foundation models have demonstrated an inherent capability to implicitly capture semantic correspondences across different instances within the same category (e.g., matching chair backs) and across diverse categories (e.g., aligning dog's legs with cat's legs) [2,11,36,47]. In this work, we leverage semantic correspondences established by [36] to transfer part labels from annotated 2D datasets to query 3D objects." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 33, + 406, + 99, + 420 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 406, + 99, + 420 + ], + "spans": [ + { + "bbox": [ + 33, + 406, + 99, + 420 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 33, + 429, + 379, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 429, + 379, + 525 + ], + "spans": [ + { + "bbox": [ + 33, + 429, + 379, + 525 + ], + "type": "text", + "content": "Given a database " + }, + { + "bbox": [ + 33, + 429, + 379, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 33, + 429, + 379, + 525 + ], + "type": "text", + "content": " consisting of 2D part annotations, our goal is to segment each query object " + }, + { + "bbox": [ + 33, + 429, + 379, + 525 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 33, + 429, + 379, + 525 + ], + "type": "text", + "content": " into parts using the visual part vocabulary provided by " + }, + { + "bbox": [ + 33, + 429, + 379, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 33, + 429, + 379, + 525 + ], + "type": "text", + "content": ". Note that " + }, + { + "bbox": [ + 33, + 429, + 379, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 33, + 429, + 379, + 525 + ], + "type": "text", + "content": " can either be gathered from 2D (image) part datasets or from renders of a few 3D objects captured at different view-points. Our method consists of three main steps (Fig. 
2): (1) render a set of 2D RGB images " + }, + { + "bbox": [ + 33, + 429, + 379, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{I}q" + }, + { + "bbox": [ + 33, + 429, + 379, + 525 + ], + "type": "text", + "content": " of 3D object " + }, + { + "bbox": [ + 33, + 429, + 379, + 525 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 33, + 429, + 379, + 525 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 33, + 429, + 379, + 525 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 33, + 429, + 379, + 525 + ], + "type": "text", + "content": " distinct camera viewpoints; (2) perform 2D part segmentation on the rendered images; (3) aggregate image-level predictions through a mask-consistency aggregation module to obtain 3D predictions." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 33, + 541, + 171, + 553 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 541, + 171, + 553 + ], + "spans": [ + { + "bbox": [ + 33, + 541, + 171, + 553 + ], + "type": "text", + "content": "3.1 2D Part Segmentation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 33, + 559, + 379, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 559, + 379, + 583 + ], + "spans": [ + { + "bbox": [ + 33, + 559, + 379, + 583 + ], + "type": "text", + "content": "There are two primary approaches to tackle this task: (1) Top-down, using segmentation mechanisms such as SAM [16], or (2) Bottom-up, which involves la" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 318, + 9, + 346, + 19 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 9, + 346, + 19 + ], + "spans": [ + { + "bbox": [ + 318, + 9, + 346, + 19 + ], + "type": "text", + "content": "3-By-2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 373, + 10, + 379, + 19 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 10, + 379, + 19 + ], + "spans": [ + { + "bbox": [ + 373, + 10, + 379, + 19 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 41, + 36, + 365, + 110 + ], + "blocks": [ + { + "bbox": [ + 41, + 36, + 365, + 110 + ], + "lines": [ + { + "bbox": [ + 41, + 36, + 365, + 110 + ], + "spans": [ + { + "bbox": [ + 41, + 36, + 365, + 110 + ], + "type": "image", + "image_path": "e97d61a381c5ee17448db1eb3f59252539cc7b90918569145f2c2bc1daf3098b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "lines": [ + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "spans": [ + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "text", + "content": "Fig. 3: The process of pixel-level part label transferring. 
For each pixel " + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "text", + "content": " in the query image " + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "inline_equation", + "content": "I_{k}" + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "text", + "content": ", we perform the following: (1) Extract the feature " + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "inline_equation", + "content": "f(p)" + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "text", + "content": ", along with the feature grid for each image " + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "inline_equation", + "content": "I_{\\mathcal{D}}" + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "text", + "content": " in the database " + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "text", + "content": "; (2) Measure cosine similarity between " + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "inline_equation", + "content": "f(p)" + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "text", + "content": " and the feature of each pixel within each feature grid, (3) Obtain the best match of " + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "text", + "content": " over " + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "text", + "content": " by determining the most similar pixel " + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "inline_equation", + "content": "p_{\\mathcal{D}}" + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "text", + "content": " over all images " + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "inline_equation", + "content": "I_{\\mathcal{D}}" + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "text", + "content": "; (4) Assign the label of " + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "text", + "content": " is to be the label of " + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "inline_equation", + "content": "p_{\\mathcal{D}}" + }, + { + "bbox": [ + 30, + 111, + 380, + 177 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 41, + 181, + 198, + 234 + ], + "blocks": [ + { + "bbox": [ + 41, + 181, + 198, + 234 + ], + "lines": [ + { + "bbox": [ + 41, + 181, + 198, + 234 + ], + "spans": [ + { + "bbox": [ + 41, + 181, + 198, + 234 + ], + "type": "image", + "image_path": "8a5d33ac48e0f1040cfad7f99a5f9d27919b1e194577f94e0c4c190dc4c0840a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 113, + 236, + 126, + 246 + ], + "lines": [ + { + "bbox": [ + 113, + 236, + 126, + 246 + ], + "spans": [ + { + "bbox": [ + 113, + 236, + 126, + 246 + ], + "type": "text", + "content": "(a)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 224, + 182, + 377, + 234 + ], + "blocks": [ + { + "bbox": [ + 224, + 182, + 377, + 234 + ], + "lines": [ + { + "bbox": [ + 224, + 182, + 377, + 234 + ], + "spans": [ + { + "bbox": [ + 224, + 182, + 377, + 234 + ], + "type": "image", + "image_path": "3f62c6385a69e3e5d61fd0b77403fb45581133ec2efe76e9c4111693130faab8.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 293, + 236, + 306, + 246 + ], + "lines": [ + { + "bbox": [ + 293, + 236, + 306, + 246 + ], + "spans": [ + { + "bbox": [ + 293, + 236, + 306, + 246 + ], + "type": "text", + "content": "(b)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 30, + 250, + 380, + 316 + ], + "lines": [ + { + "bbox": [ + 30, + 250, + 380, + 316 + ], + "spans": [ + { + "bbox": [ + 30, + 250, + 380, + 316 + ], + "type": "text", + "content": "Fig. 4: (a) Non-overlapping 2D Mask Proposal. We address the issue of overlapping masks produced by SAM. The masks are first sorted by their areas. Subsequently, the smaller masks are stacked on top of the larger ones. Non-overlapping masks are obtained by taking the visible segment of each mask. (b) Different mask sampling strategies for label transfer. Our strategy provides accurate, dense prediction with clear part boundaries." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 30, + 328, + 379, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 328, + 379, + 388 + ], + "spans": [ + { + "bbox": [ + 30, + 328, + 379, + 388 + ], + "type": "text", + "content": "beling each pixel individually. While SAM produces high-quality 2D masks with sharp boundaries, it operates in a class-agnostic manner, often leading to high overlap between sub-parts, parts, and instances. Simply selecting the mask with the highest score may result in incorrect granularity and lacks the flexibility required for part segmentation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 30, + 390, + 379, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 390, + 379, + 509 + ], + "spans": [ + { + "bbox": [ + 30, + 390, + 379, + 509 + ], + "type": "text", + "content": "Conversely, doing label transfer for each pixel individually in the image is computationally impractical, particularly for part segmentation tasks where high resolution is preferred. Sparsely sampling and labeling pixels can result in under-segmented masks, particularly for smaller parts that are less likely to be sampled compared to larger parts (see Fig. 4b). 
Moreover, accurately determining part boundaries for individual pixels can be challenging, which may result in increased errors when extrapolating to 3D, particularly with unstructured 3D representations like point clouds. These issues raise the important question: how do we transfer part labels and preserve part boundaries without sacrificing computational resources?" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 30, + 511, + 380, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 511, + 380, + 584 + ], + "spans": [ + { + "bbox": [ + 30, + 511, + 380, + 584 + ], + "type": "text", + "content": "To address this question, we propose a 2D segmentation method that combines the strengths of both approaches which consists of 3 novel components: (1) Single-pixel 2D label transfer using semantic correspondences derived from DIFT [36], (2) Non-overlapping 2D mask proposal module, which refines SAM's multi-granularity predicted masks into non-overlapping part masks, and (3) Mask-level label transfer by integrating (1) and (2)." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 32, + 10, + 39, + 18 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 10, + 39, + 18 + ], + "spans": [ + { + "bbox": [ + 32, + 10, + 39, + 18 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 9, + 121, + 19 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 9, + 121, + 19 + ], + "spans": [ + { + "bbox": [ + 66, + 9, + 121, + 19 + ], + "type": "text", + "content": "A. Thai et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 50, + 78, + 87 + ], + "blocks": [ + { + "bbox": [ + 48, + 50, + 78, + 87 + ], + "lines": [ + { + "bbox": [ + 48, + 50, + 78, + 87 + ], + "spans": [ + { + "bbox": [ + 48, + 50, + 78, + 87 + ], + "type": "image", + "image_path": "4e6332bb0c5ce2a514edb1b89e96aaafb0f172c60357b240c7623744e91f2126.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 30, + 107, + 207, + 174 + ], + "lines": [ + { + "bbox": [ + 30, + 107, + 207, + 174 + ], + "spans": [ + { + "bbox": [ + 30, + 107, + 207, + 174 + ], + "type": "text", + "content": "Fig.5: Two approaches to aggregate 3D part labels from multiple 2D views. Aggregating 3D part labels from multiple 2D views through geometric correspondence can be achieved by either point or mask label consistency." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 82, + 37, + 202, + 65 + ], + "blocks": [ + { + "bbox": [ + 82, + 37, + 202, + 65 + ], + "lines": [ + { + "bbox": [ + 82, + 37, + 202, + 65 + ], + "spans": [ + { + "bbox": [ + 82, + 37, + 202, + 65 + ], + "type": "image", + "image_path": "dea239a485f5790362d5a94a9cb7b3df903b1fb4e4ce9b1a9495e58d7d10c807.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 83, + 70, + 202, + 97 + ], + "blocks": [ + { + "bbox": [ + 104, + 66, + 186, + 70 + ], + "lines": [ + { + "bbox": [ + 104, + 66, + 186, + 70 + ], + "spans": [ + { + "bbox": [ + 104, + 66, + 186, + 70 + ], + "type": "text", + "content": "Mask label consistency between multiple views" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 83, + 70, + 202, + 97 + ], + "lines": [ + { + "bbox": [ + 83, + 70, + 202, + 97 + ], + "spans": [ + { + "bbox": [ + 83, + 70, + 202, + 97 + ], + "type": "image", + "image_path": "b65086ae59d580d41b24b0db1194cc8a82383dcc510ce4ca72fde3180a51c29e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 223, + 40, + 379, + 98 + ], + "blocks": [ + { + "bbox": [ + 223, + 40, + 379, + 98 + ], + "lines": [ + { + "bbox": [ + 223, + 40, + 379, + 98 + ], + "spans": [ + { + "bbox": [ + 223, + 40, + 379, + 98 + ], + "type": "image", + "image_path": "8b9f13c2b24dc9d116ff4abd011e4138bf2f10a61eac193fd4a50dcb2196ad78.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 211, + 107, + 388, + 161 + ], + "lines": [ + { + "bbox": [ + 211, + 107, + 388, + 161 + ], + "spans": [ + { + "bbox": [ + 211, + 107, + 388, + 161 + ], + "type": "text", + "content": "Fig.6: Effectiveness of mask label consistency. Enforcing consistency at the mask level can mitigate discrepancies at each individual point and contributes to smoother segmentation." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "spans": [ + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": "Single-pixel 2D Label Transfer. At the core of our method is the 2D label transfer process. 
The goal is to transfer pixel labels from the annotated 2D database " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": " to the query RGB image " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "I_{k} \\in \\mathcal{I}q" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": ": for a pixel " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": " in the foreground object in " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "I_{k}" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": ", we aim to identify the best-matched pixel " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "p'" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": " in each image " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "I_{\\mathcal{D}}" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": " in the database " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": " and assign initial label to " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "p'" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": ". To this end, we leverage the established semantic correspondence of DIFT [36]. While recent works have demonstrated the effectiveness of image diffusion models in extracting semantic correspondences, as evidenced by evaluations on datasets like SPair-71K [23], we are the first to leverage these features for transferring semantic labels in the context of 3D part segmentation. Specifically, " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "p' = \\arg \\max_{p' \\in I_{\\mathcal{D}}} \\cos(f(p), f(p'))" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "\\cos" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "f(x)" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": " denotes the cosine similarity score and the feature representing pixel " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": ". 
The best pixel correspondence " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "p_{\\mathcal{D}}" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": " over the entire database is obtained by taking the most similar match within all the images in the database. Formally, " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "p_{\\mathcal{D}} = \\arg \\max_{\\mathcal{D}} p'" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": ". The label of " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": " is then assigned to be the label of " + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "inline_equation", + "content": "p_{\\mathcal{D}}" + }, + { + "bbox": [ + 30, + 183, + 380, + 362 + ], + "type": "text", + "content": " (see Fig. 3)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 30, + 365, + 379, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 365, + 379, + 497 + ], + "spans": [ + { + "bbox": [ + 30, + 365, + 379, + 497 + ], + "type": "text", + "content": "Coarse-to-fine correspondence search. Finding the nearest neighbor for a query pixel across the entire database can be prohibitively costly, especially for part segmentation which operates in high resolutions. We propose a coarse-to-fine strategy: using the coarse feature maps generated by DIFT [36], we first conduct the search at the coarse level to localize the region of the best match. We then extract the " + }, + { + "bbox": [ + 30, + 365, + 379, + 497 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 30, + 365, + 379, + 497 + ], + "type": "text", + "content": " window centered at this region (in feature space) for a fine search (see Fig. 7). This approach ensures that we compute per-pixel similarity scores only within the region of interest, rather than across the entire image, improving computational efficiency. For instance, when processing a pair of images with a resolution of " + }, + { + "bbox": [ + 30, + 365, + 379, + 497 + ], + "type": "inline_equation", + "content": "800 \\times 800" + }, + { + "bbox": [ + 30, + 365, + 379, + 497 + ], + "type": "text", + "content": ", coarse-to-fine correspondence search achieves a speed improvement of approximately 2000 times in terms of wall clock time." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 30, + 499, + 379, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 499, + 379, + 584 + ], + "spans": [ + { + "bbox": [ + 30, + 499, + 379, + 584 + ], + "type": "text", + "content": "Non-overlapping 2D Mask Proposal. We propose the use of class-agnostic 2D part mask proposal, specifically from SAM [16]. By assuming that each mask proposal corresponds to a subset of a part, we can then selectively sample pixels within each mask proposal for label transferring. The labels are subsequently propagated to each pixel of the 2D masks through a majority voting process based on the sampled pixels within the mask. 
To address the issue posed by the highly overlapping predictions from SAM's multi-granularity model, we intro" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 318, + 10, + 347, + 20 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 10, + 347, + 20 + ], + "spans": [ + { + "bbox": [ + 318, + 10, + 347, + 20 + ], + "type": "text", + "content": "3-By-2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 373, + 10, + 379, + 19 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 10, + 379, + 19 + ], + "spans": [ + { + "bbox": [ + 373, + 10, + 379, + 19 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 33, + 76, + 39 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 33, + 76, + 39 + ], + "spans": [ + { + "bbox": [ + 47, + 33, + 76, + 39 + ], + "type": "text", + "content": "Input 3D Object" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 55, + 33, + 357, + 127 + ], + "blocks": [ + { + "bbox": [ + 55, + 33, + 357, + 127 + ], + "lines": [ + { + "bbox": [ + 55, + 33, + 357, + 127 + ], + "spans": [ + { + "bbox": [ + 55, + 33, + 357, + 127 + ], + "type": "image", + "image_path": "cab047ae98ccbf6f9876a4d6488f212bdc2cb48e5e983dabf86d9cc85aa54e7d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 30, + 129, + 380, + 174 + ], + "lines": [ + { + "bbox": [ + 30, + 129, + 380, + 174 + ], + "spans": [ + { + "bbox": [ + 30, + 129, + 380, + 174 + ], + "type": "text", + "content": "Fig. 7: Coase-to-fine correspondence search. We first conduct searching on a coarse level to identify the region of best match. We then extract the " + }, + { + "bbox": [ + 30, + 129, + 380, + 174 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 30, + 129, + 380, + 174 + ], + "type": "text", + "content": " window centered at this region in feature space for a fine search. This approach is approximately 2000 times faster in terms of wall time for large " + }, + { + "bbox": [ + 30, + 129, + 380, + 174 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 30, + 129, + 380, + 174 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 30, + 129, + 380, + 174 + ], + "type": "inline_equation", + "content": "800 \\times 800" + }, + { + "bbox": [ + 30, + 129, + 380, + 174 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 30, + 178, + 380, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 178, + 380, + 202 + ], + "spans": [ + { + "bbox": [ + 30, + 178, + 380, + 202 + ], + "type": "text", + "content": "duce a non-overlapping 2D mask generation module. This module takes SAM masks as inputs and outputs a set of mutually exclusive 2D masks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 30, + 203, + 380, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 203, + 380, + 263 + ], + "spans": [ + { + "bbox": [ + 30, + 203, + 380, + 263 + ], + "type": "text", + "content": "We arrange the SAM output masks in descending order of mask area and stack smaller masks on top of larger ones. 
This ensures that if mask " + }, + { + "bbox": [ + 30, + 203, + 380, + 263 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 30, + 203, + 380, + 263 + ], + "type": "text", + "content": " is a subset of mask " + }, + { + "bbox": [ + 30, + 203, + 380, + 263 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 30, + 203, + 380, + 263 + ], + "type": "text", + "content": ", stacking " + }, + { + "bbox": [ + 30, + 203, + 380, + 263 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 30, + 203, + 380, + 263 + ], + "type": "text", + "content": " on top of " + }, + { + "bbox": [ + 30, + 203, + 380, + 263 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 30, + 203, + 380, + 263 + ], + "type": "text", + "content": " results in non-overlapping masks, namely " + }, + { + "bbox": [ + 30, + 203, + 380, + 263 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 30, + 203, + 380, + 263 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 30, + 203, + 380, + 263 + ], + "type": "inline_equation", + "content": "B \\setminus A" + }, + { + "bbox": [ + 30, + 203, + 380, + 263 + ], + "type": "text", + "content": ". Non-overlapping masks are finally obtained by taking the visible segments of each mask (see Fig. 4a)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 30, + 264, + 380, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 264, + 380, + 311 + ], + "spans": [ + { + "bbox": [ + 30, + 264, + 380, + 311 + ], + "type": "text", + "content": "2D Mask Label Assignment. After obtaining the non-overlapping masks, we sparsely sample pixels in each mask to transfer label. We then perform majority voting to assign the dominant label for each 2D mask, weighted by the confidence score (cosine similarity) of the best pixel correspondence matches." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 31, + 332, + 255, + 345 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 31, + 332, + 255, + 345 + ], + "spans": [ + { + "bbox": [ + 31, + 332, + 255, + 345 + ], + "type": "text", + "content": "3.2 Mask-consistency Aggregation Module" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 30, + 356, + 379, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 30, + 356, + 379, + 487 + ], + "spans": [ + { + "bbox": [ + 30, + 356, + 379, + 487 + ], + "type": "text", + "content": "Given a set of 2D RGB images with part segmentation predictions, we aim to extrapolate these segmentation labels to 3D using geometric correspondences. Prior works [19,32] aggregate multi-view information for each 3D point or mesh triangle face through a weighted sum of multi-view 2D predictions. To fully maintain the high-quality part boundaries predicted by SAM in 2D, we choose to aggregate multi-view predictions for each 2D mask instead. This observation is based on the fact that part identities remain constant across multiple views (e.g., the seat in view 1 should be segmented as the seat in view 2, see Fig. 5). 
3.2 Mask-consistency Aggregation Module

Given a set of 2D RGB images with part segmentation predictions, we aim to extrapolate these segmentation labels to 3D using geometric correspondences. Prior works [19,32] aggregate multi-view information for each 3D point or mesh triangle face through a weighted sum of multi-view 2D predictions. To fully preserve the high-quality part boundaries predicted by SAM in 2D, we instead aggregate multi-view predictions for each 2D mask. This choice rests on the fact that part identities remain constant across views (e.g., the seat in view 1 should be segmented as the seat in view 2; see Fig. 5). Intuitively, mask consistency can be seen as an additional constraint on top of point consistency, encouraging points within the same 2D mask to remain associated with the same masks in 3D space.

We present a novel mask-consistency aggregation module that takes as input a set of 2D part segmentation predictions across multiple views. Our approach constructs an undirected, unweighted graph $G = (V, E)$ in which each vertex corresponds to a 2D mask in a given view, and edges connect masks from different views that capture the projection of the same 3D points. For each vertex $v \in V$ we construct a mask correspondence set $\mathcal{M}_v = \{v, u_1, u_2, \dots, u_N\}$, where an edge $e_i$ connects $v$ and $u_i$. A mask $v$ is flagged as undersegmented when at least two masks in $\mathcal{M}_v$ belong to the same image but are assigned different labels; for instance, in Fig. 8, vertex $v_1$ corresponds to an undersegmented mask. Formally,

$$\mathcal{S}_v = \{\,(u_i, u_j) \mid u_i, u_j \in \mathcal{M}_v,\ u_i, u_j \in I_k\ \text{and}\ l(u_i) \neq l(u_j)\,\} \tag{1}$$

where $l(x)$ denotes the label of $x$. We discard $v$ if $|\mathcal{S}_v| > \epsilon$; that is, if $v$ is consistently determined to be undersegmented across multiple views, its contribution to the final label assignment is dropped. We then traverse the graph simultaneously from each vertex using breadth-first search to accumulate the labels for each $\mathcal{M}_v$, perform majority voting to assign labels to each $\mathcal{M}_v$, and finally take the most frequently assigned label as the final label of each mask.

Fig. 8: Mask-consistency process. (1) Each vertex of $G$ corresponds to a mask in a given image; an edge between two vertices denotes that the pair contains the projection of the same 3D points, and the mask consistency set $\mathcal{M}_v$ of each $v$ is its first-order neighborhood. (2) $v_1$ is detected as undersegmented, since $\mathcal{M}_{v_1}$ contains masks from the same view with different labels $(v_2, v_3)$, and is hence discarded. (3) $\mathcal{M}_{v_i}$ is traversed to accumulate labels. (4) Each mask obtains its label by majority voting. A simple example is shown for visualization purposes.
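A minimal sketch of this aggregation, assuming integer mask ids and a precomputed correspondence graph (the adjacency construction, the value of $\epsilon$, and the tie-breaking rule are our assumptions, not the authors' implementation):

```python
from collections import Counter, deque

def aggregate_labels(adj, view_of, label_of, eps=0):
    """adj: mask_id -> set of corresponding mask_ids (edges of G);
    view_of: mask_id -> view index; label_of: mask_id -> initial 2D label."""

    def conflict_pairs(v):
        # Eq. (1): pairs in M_v that share an image but disagree on labels.
        m_v = [v] + sorted(adj[v])
        return sum(
            1
            for i in range(len(m_v))
            for j in range(i + 1, len(m_v))
            if view_of[m_v[i]] == view_of[m_v[j]]
            and label_of[m_v[i]] != label_of[m_v[j]]
        )

    # Drop masks that are consistently undersegmented (|S_v| > eps).
    valid = {v for v in adj if conflict_pairs(v) <= eps}

    final = {}
    for v in adj:
        votes = Counter()
        seen, queue = {v}, deque([v])
        while queue:                     # BFS from v, accumulating labels
            u = queue.popleft()
            if u in valid:
                votes[label_of[u]] += 1
            for w in adj[u]:
                if w not in seen:
                    seen.add(w)
                    queue.append(w)
        # Majority vote; fall back to the initial label on an empty vote.
        final[v] = votes.most_common(1)[0][0] if votes else label_of[v]
    return final
```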
The simple intuition behind this approach is that if a part occasionally receives incorrect labels in some challenging views, majority voting within the mask correspondence set can correct these errors. Further, operating at the mask level ensures that if two 2D points share the same mask label in the majority of views, they are ultimately assigned the same final label, which calibrates potential discrepancies of individual point-wise aggregation (see Fig. 6).

4 Experiments

In this section, we first report the performance of 3-By-2 against baselines on PartNet-Ensembled (PartNetE) [19] in Sec. 4.1 and on PartNet [24] with "level-3" annotations in Sec. 4.2. Note the distinction between these datasets: PartNetE consists of a distinct set of articulated objects drawn from [41], and the two exhibit different part-annotation granularities. While PartNetE contains both basic parts such as the chair back and fine-grained parts such as the scissors screw, PartNet with "level-3" annotations contains exclusively fine-grained parts such as "back_frame_vertical_bar". In Sec. 4.3, we conduct comprehensive ablation studies to verify the necessity of each component of 3-By-2. Our few-shot experiments refer to the setting where a few labeled 3D objects are available for each object category, whereas no annotated 3D part labels are available in the zero-shot setting; there, we leverage labels from the 2D domain instead.

Table 1: Few-shot performance on the PartNetE [19] dataset. The left columns show performance on the 17 categories that the supervised methods [28,29,38] (first three rows) were trained on with an additional 28K objects. The right columns show performance on the 28 categories with only 8 objects/category in the training set. [19,33,49,50] and ours (last five rows) only have access to 8 objects/category during training for all 45 categories. Please refer to the Supplement for the full table on all 45 categories.
| Methods | Chair | Scissors | Laptop | Door | Microwave | Keyboard | Avg. (17) | Camera | USB | Stapler | Dispenser | Kettle | Eyegl. | Avg. (28) | Avg. (45) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| PointNext [29] | 0.918 | 0.573 | 0.325 | 0.438 | 0.405 | 0.450 | 0.591 | 0.332 | 0.679 | 0.886 | 0.260 | 0.451 | 0.881 | 0.457 | 0.502 |
| PointNet++ [28] | 0.847 | 0.500 | 0.554 | 0.457 | 0.436 | 0.745 | 0.533 | 0.065 | 0.524 | 0.516 | 0.121 | 0.209 | 0.762 | 0.250 | 0.365 |
| SoftGroup [38] | 0.883 | 0.760 | 0.184 | 0.531 | 0.383 | 0.589 | 0.505 | 0.236 | 0.441 | 0.801 | 0.189 | 0.574 | 0.724 | 0.313 | 0.384 |
| ACD [33] | 0.390 | 0.391 | 0.111 | 0.189 | 0.066 | 0.261 | 0.196 | 0.101 | 0.252 | 0.500 | 0.194 | 0.402 | 0.782 | 0.259 | 0.235 |
| Prototype [49] | 0.708 | 0.430 | 0.279 | 0.334 | 0.270 | 0.449 | 0.419 | 0.320 | 0.654 | 0.807 | 0.534 | 0.607 | 0.779 | 0.470 | 0.451 |
| PartSLIP [19] | 0.854 | 0.603 | 0.297 | 0.408 | 0.427 | 0.536 | 0.567 | 0.583 | 0.561 | 0.848 | 0.738 | 0.770 | 0.883 | 0.625 | 0.603 |
| PartSLIP++ [50] | 0.853 | 0.605 | 0.297 | 0.451 | 0.495 | 0.724 | 0.574 | 0.632 | 0.575 | 0.630 | 0.720 | 0.856 | 0.883 | 0.642 | 0.615 |
| 3-By-2 (ours) | 0.844 | 0.657 | 0.453 | 0.544 | 0.402 | 0.896 | 0.604 | 0.626 | 0.790 | 0.901 | 0.782 | 0.815 | 0.928 | 0.665 | 0.642 |

4.1 Performance on PartNet-Ensembled

Data & Metric. We use the dataset provided by Liu et al. [19] for both the few-shot and zero-shot settings. For each object in the few-shot and test sets, we render 20 RGB images from different views at a resolution of $800 \times 800$. We report mean IoU (mIoU) for all baselines using the evaluation protocol of [19] on the input point clouds; specifically, a part is not considered if it does not exist in the queried object (see the sketch below).

Few-shot Baselines. We compare 3-By-2 against fully-supervised semantic segmentation [28,29,38], few-shot semantic segmentation [33,49], and language-based [19,50] methods. The fully supervised methods [28,29,38] were trained on 28K objects from the 17 categories overlapping with PartNetE [19], in addition to the few-shot set of 8 objects/category. The second group of baselines [19,33,49,50] was trained only on the few-shot set. PartSLIP and PartSLIP++, a concurrent work, rely on a large vision-language model (GLIP [17]) to guide 2D part detection before extending to 3D point cloud segmentation. We provide more detailed descriptions in the Supplement. We omit the evaluation of MvDeCor [32] on this benchmark since it requires ground-truth 3D meshes, whereas PartNetE only provides dense point clouds as inputs.

Few-shot Setting. In this setting, 8 objects/category serve as the few-shot set, and we evaluate on the entire test set of PartNetE [19]. For a fair comparison, we remove part labels in the test set that do not exist in the few-shot set. We present our few-shot results in Table 1.
Compared to fully-supervised 3D methods, we outperform by 1-10% mIoU on these categories. Additionally, we demonstrate a significant performance boost on the remaining 28 categories (21-41% mIoU). We further outperform PartSLIP and PartSLIP++ on both subsets, achieving ~3% mIoU improvement overall.

Performance on Real-world Scans. Note that there is currently no publicly available real-world 3D part segmentation dataset for direct comparison. However, we demonstrate the robustness of our method on real-world objects, as shown in Fig. 1; these objects were originally introduced by Liu et al. [19] and captured with an iPhone 12 camera.

Table 2: Zero-shot performance on the subset of PartNetE [19] that overlaps with PACO [31]. Our method effectively leverages 2D in-the-wild part segmentation datasets to perform 3D part segmentation.
| Methods | Kettle | Microwave | Scissors | F.-Chair | Mouse | Bottle | Laptop | Clock | Remote | Lamp | Avg. (18) |
|---|---|---|---|---|---|---|---|---|---|---|---|
| SAMPro3D [42] + OpenMask3D [35] | 0.026 | 0.001 | 0.118 | 0.437 | 0.019 | 0.103 | 0.017 | 0.007 | 0.084 | 0.074 | 0.146 |
| PartSLIP [19] | 0.208 | 0.166 | 0.218 | 0.917 | 0.270 | 0.763 | 0.270 | 0.267 | 0.115 | 0.371 | 0.341 |
| VLPart [34]-MC | 0.211 | 0.192 | 0.193 | 0.813 | 0.000 | 0.216 | 0.060 | 0.205 | 0.132 | 0.166 | 0.222 |
| 3-By-2 (ours) | 0.765 | 0.348 | 0.594 | 0.712 | 0.307 | 0.807 | 0.394 | 0.253 | 0.239 | 0.500 | 0.430 |

Table 3: Performance on the PartNet dataset with "level-3" annotations in the few-shot setting. Bold and underline denote best and second-best performance, respectively.
| Methods | Bottle | Microwave | Display | Dishwasher | Faucet | Knife | Earphone | Clock | Bed | Trashcan | Avg. |
|---|---|---|---|---|---|---|---|---|---|---|---|
| MvDeCor [32] | 0.421 | 0.377 | 0.600 | 0.327 | 0.212 | 0.187 | 0.205 | 0.143 | 0.099 | 0.199 | 0.277 |
| PartSLIP [19] | 0.344 | 0.143 | 0.386 | 0.228 | 0.009 | 0.023 | 0.064 | 0.017 | 0.003 | 0.031 | 0.125 |
| 3-By-2 (ours) | 0.454 | 0.389 | 0.567 | 0.429 | 0.203 | 0.196 | 0.225 | 0.116 | 0.096 | 0.134 | 0.281 |

Zero-shot Baselines. We compare 3-By-2 with PartSLIP [19], VLPart [34]-MC, and SAMPro3D [42] + OpenMask3D [35]. For PartSLIP, we prompt the pre-trained GLIP model with the language inputs without finetuning, following Liu et al. [19]. VLPart [34] is a SOTA 2D part segmentation method trained on a combination of various large-scale 2D part datasets. We replace our 2D part segmentation module with a pre-trained VLPart model, retain the same 3D mask-consistency aggregation module as 3-By-2, and term this baseline VLPart-MC. During inference, to guide VLPart effectively, we prompt the model with language inputs as in PartSLIP. SAMPro3D [42] is a SOTA zero-shot instance segmentation method for 3D scenes with SAM at its core; for semantic segmentation evaluation, we integrate SAMPro3D with OpenMask3D [35], an open-vocabulary 3D scene segmentation method.

Zero-shot Setting. Since we do not have access to any labeled 3D objects in this setting, we leverage PACO [31] to transfer part labels. PACO is a fine-grained, richly annotated 2D dataset consisting of objects from COCO-LVIS [9]. We crop and mask each annotated object using the provided object bounding box and segmentation mask to form the database, and we filter out small objects or objects with limited visibility, using the area of the object segmentation mask as the criterion (a sketch of this construction follows below).

In Table 2 we show the performance of all baselines and 3-By-2 on the subset of PartNetE that overlaps with PACO [31]. By leveraging the abundance and fine granularity of 2D in-the-wild part segmentation datasets, we achieve superior performance compared to all baselines (9-29% mIoU). We significantly outperform PartSLIP on challenging categories with small or thin parts (e.g., scissors and lamp, by 28% and 13% mIoU, respectively). These results highlight the effectiveness of 3-By-2 even when the database includes challenging real-world images with partial occlusion and truncation.
Effectiveness of Our 2D Segmentation Module. We demonstrate the effectiveness of our 2D segmenter, which leverages SAM and DIFT, by showcasing its strong performance against VLPart [34], a SOTA 2D part segmentation method (see Table 2, last two rows). Note that VLPart was trained on PACO [31] among other 2D part datasets, so it is reasonable to expect it to use knowledge from PACO to accurately segment the 18 categories shared between PartNetE and PACO. For both VLPart-MC and 3-By-2 we keep the same 3D aggregation module. Our method significantly outperforms VLPart-MC, demonstrating the advantage of our proposed 2D segmentation module.

Comparison to SOTA Scene Segmentation Approach. SAMPro3D [42] is a concurrent work with SOTA performance on zero-shot instance segmentation of 3D scenes. It is a training-free model that effectively prompts SAM in the 2D domain using 3D point projections. As shown in Table 2, we outperform this baseline by a significant margin, highlighting the non-trivial nature of adapting scene segmentation methods, particularly those post-processing 2D foundation models, to 3D part segmentation.

4.2 Performance on Level-3 PartNet

In this experiment, we select 10 categories from PartNet [24] that come with fine-grained ("level-3") annotations. We randomly select 10 objects per category from the training set (following [32]) to form our few-shot set, and up to 50 objects per category from the test set for evaluation, ensuring overlap with ShapeNetCore.v2 [5]. Given that PartSLIP [19] employs point cloud RGB for superpoint generation, which serves as a 3D prior, we choose objects overlapping with ShapeNetCore.v2 to preserve object texture information. We use the same few-shot and test sets for all baselines.
Data. As inputs to our approach, we render 15 overlapping views of each textured mesh using the Blender Cycles renderer with realistic lighting from HDRI environment maps.

Baselines. The baselines are reproduced following the papers' recommended training procedures. Specifically, we pre-train MvDeCor [32] on the entire training set of the selected categories, consisting of 86 views per non-textured object, with rendered RGB, depth, and normal maps as inputs. We then fine-tune the segmentation heads for each object category on the few-shot set with 15 views per object; the input at this stage also includes RGB, depth, and normal maps.

For PartSLIP [19], we derive the language prompt by traversing the part hierarchy and concatenating the labels of each level along the path from root to leaf; for example, the path "bottle/jug/handle" becomes "bottle jug handle". This adaptation is needed because different leaf nodes can share identical labels (e.g., bottle/normal_bottle/handle and bottle/jug/handle), and relying solely on the leaf label could introduce confusion in the predictions. We adopt PartSLIP's point cloud, image rendering, and data processing pipeline with default parameters.
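The prompt derivation itself is a small string transformation; a sketch of the mapping described above:

```python
def hierarchy_to_prompt(path):
    """Turn a part-hierarchy path into a language prompt,
    e.g. 'bottle/jug/handle' -> 'bottle jug handle'."""
    return " ".join(path.split("/"))

assert hierarchy_to_prompt("bottle/jug/handle") == "bottle jug handle"
```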
Table 4: Ablation of the non-overlapping mask generation module.

| 2D Mask Proposal | Scissors | Mouse | Suitcase | Bottle | Chair |
|---|---|---|---|---|---|
| SAM | 0.457 | 0.440 | 0.285 | 0.004 | 0.638 |
| Non-overlap | 0.675 | 0.684 | 0.813 | 0.810 | 0.844 |

Table 5: Ablation of our proposed mask-consistency component.
| 3D Label Aggregation | Scissors | Suitcase | Printer | Clock |
|---|---|---|---|---|
| Point-Consistency | 0.619 | 0.579 | 0.009 | 0.363 |
| Mask-Consistency | 0.675 | 0.684 | 0.085 | 0.458 |

Evaluation & Metric. We uniformly sample 300K points on the surface of each labeled ground-truth mesh and use nearest-neighbor assignment to associate a ground-truth label with each point (a code sketch of this step appears below, after the ablation paragraphs). The same point set is used to evaluate all methods, ensuring a fair comparison and eliminating any randomness introduced by the point cloud sampling step. We use part mIoU on the sampled point set as the evaluation metric, employing the standard mIoU calculation that considers all parts in the vocabulary, even those absent from a given object. Additionally, unlike MvDeCor, we do not exclude the "others" label during evaluation based on ground-truth labels; the same evaluation approach is applied to all methods.

Results. We show results in Table 3. We outperform PartSLIP [19] on all categories by a significant margin (16% mIoU on average), demonstrating the challenges that fine-grained settings pose for GLIP [17]. While our performance is on par with MvDeCor [32], note that MvDeCor is both pretrained and finetuned on PartNet [24] and uses ground-truth depth and normal maps as additional inputs, whereas our method requires no training on the target data distribution.

4.3 Ablation Study

Non-overlapping Mask Generation. Table 4 illustrates the effectiveness of our proposed non-overlapping mask generation module by comparing our method with and without it. Without the module, we directly use the predicted SAM outputs for label transfer. The results indicate that the non-overlapping mask generation module is necessary for achieving optimal performance.

Mask-consistency Module. In Table 5, we demonstrate the effectiveness of our proposed mask-consistency component, which improves the final performance especially on objects with small parts.
Properties of Database. We investigate two key questions: 1) can 3-By-2 accurately segment a query object with a database containing multiple object categories, and 2) is it possible to transfer parts with the same semantic meaning between different object categories?

Multi-category database. To address question 1, we perform experiments with databases containing 1, 2, and 8 categories, respectively (see Table 6). Taking "Kettle" as the query category, for the 2-category setting we construct a database of "Kettle, Kitchen Pot"; we selected these categories because they share semantic parts with "Kettle" that could potentially cause confusion (e.g., kettle lid vs. kitchen pot lid). For the 8-category setting, we add categories that are completely different and share no parts with "Kettle" (e.g., "Eyeglasses"). In general, adding more categories to the database slightly decreases the average performance, but the differences between the 2-category and 8-category settings (second and third rows) are marginal, highlighting the ability of 3-By-2 to handle both a diverse object taxonomy and part segmentation. This finding is particularly interesting since many prior works [19,32] require finetuning each category separately for few-shot evaluation.

Table 6: Multi-category database experiment. Performance on Kettle in various database settings, reported in mIoU. Our method remains robust even as more categories are added to the database.
| Database | Lid | Handle | Spout | Avg. |
|---|---|---|---|---|
| 1-category | 0.759 | 0.904 | 0.783 | 0.815 |
| 2-category | 0.703 | 0.820 | 0.748 | 0.757 |
| 8-category | 0.727 | 0.773 | 0.756 | 0.752 |

Table 7: Cross-category database experiment. We report performance on the 18 tables with wheels in PartNetE. The results show that our method can transfer wheel annotations from Chair to correct the predictions on Table wheels.
| Database | Leg | Tabletop | Wheel | Avg. |
|---|---|---|---|---|
| Table only | 0.586 | 0.647 | 0.000 | 0.411 |
| Chair & Table | 0.641 | 0.633 | 0.600 | 0.625 |

Cross-category database. Considering question 2, we note that the few-shot set of "Table" in PartNetE lacks objects with wheels, whereas such objects are present in the test set. To address this, we add the "Chair" category, where the wheel part exists, to the database, and we evaluate on the 18 tables in the PartNetE test set with an annotated "wheel" part (see Table 7). Compared to the table-only few-shot set, adding "Chair" to the database improves performance on "leg" by ~6% mIoU; with "Chair" present, the model is less likely to incorrectly associate "wheel" with "leg" due to the absence of "wheel" in the few-shot set. Interestingly, the performance on "wheel" increases dramatically, by +60% mIoU, through label transfer from chair wheels.

While the concept may seem intuitive, our findings shed new light on object part compositionality. Despite the diversity of appearances and shapes across object categories, a finite set of object parts is shared among them, and recognizing the transferability of these parts is important for facilitating rapid learning of novel objects across a range of tasks. Further, our results show that wrong predictions can be corrected by transferring labels from another category. Please refer to the Supplement for additional studies.
5 Conclusion

In this work, we propose 3-By-2, a novel, training-free method that achieves SOTA performance on benchmarks with diverse levels of part granularity, in both zero-shot and few-shot settings and without the need for language inputs. We demonstrate the flexibility of 3-By-2 in transferring part labels between different object categories, and we hope its development encourages further exploration of visual similarities for this task.

Acknowledgement

This work was partly supported by NIH R01HD104624-01A1.

References

1. Abdelreheem, A., Skorokhodov, I., Ovsjanikov, M., Wonka, P.: Satr: Zero-shot semantic segmentation of 3d shapes. arXiv preprint arXiv:2304.04909 (2023) 2, 3, 4
2. Amir, S., Gandelsman, Y., Bagon, S., Dekel, T.: Deep vit features as dense visual descriptors. arXiv preprint arXiv:2112.05814 2(3), 4 (2021) 5
3. Caron, M., Touvron, H., Misra, I., Jégou, H., Mairal, J., Bojanowski, P., Joulin, A.: Emerging properties in self-supervised vision transformers. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 9650-9660 (2021) 5
4. Cen, J., Zhou, Z., Fang, J., Shen, W., Xie, L., Jiang, D., Zhang, X., Tian, Q., et al.: Segment anything in 3d with nerfs. Advances in Neural Information Processing Systems 36 (2024) 4
5. Chang, A.X., Funkhouser, T., Guibas, L., Hanrahan, P., Huang, Q., Li, Z., Savarese, S., Savva, M., Song, S., Su, H., et al.: Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012 (2015) 4, 12
6. Chen, N., Liu, L., Cui, Z., Chen, R., Ceylan, D., Tu, C., Wang, W.: Unsupervised learning of intrinsic structural representation points. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9121-9130 (2020) 5
7. Dai, A., Nießner, M.: 3dmv: Joint 3d-multi-view prediction for 3d semantic scene segmentation. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 452-468 (2018) 4
8. Deng, S., Xu, X., Wu, C., Chen, K., Jia, K.: 3d affordancenet: A benchmark for visual object affordance understanding. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 1778-1787 (2021) 2
9. Gupta, A., Dollar, P., Girshick, R.: Lvis: A dataset for large vocabulary instance segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5356-5364 (2019) 11
10. He, J., Yang, S., Yang, S., Kortylewski, A., Yuan, X., Chen, J.N., Liu, S., Yang, C., Yu, Q., Yuille, A.: Partimagenet: A large, high-quality dataset of parts. In: European Conference on Computer Vision. pp. 128-145. Springer (2022) 2
11. Hedlin, E., Sharma, G., Mahajan, S., Isack, H., Kar, A., Tagliasacchi, A., Yi, K.M.: Unsupervised semantic correspondence using stable diffusion. arXiv preprint arXiv:2305.15581 (2023) 5
12. Huang, R., Peng, S., Takmaz, A., Tombari, F., Pollefeys, M., Song, S., Huang, G., Engelmann, F.: Segment3d: Learning fine-grained class-agnostic 3d segmentation without manual labels. arXiv preprint arXiv:2312.17232 (2023) 4
13. Jaritz, M., Gu, J., Su, H.: Multi-view pointnet for 3d scene understanding. In: 2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW). pp. 3995-4003 (2019), https://api.semanticscholar.org/CorpusID:203593088
14. Kalogerakis, E., Hertzmann, A., Singh, K.: Learning 3D Mesh Segmentation and Labeling. ACM Transactions on Graphics 29(3) (2010) 1
15. Kim, H., Sung, M.: Partstad: 2d-to-3d part segmentation task adaptation (2024) 4
16. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., et al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023) 4, 5, 7
17. Li, L.H., Zhang, P., Zhang, H., Yang, J., Li, C., Zhong, Y., Wang, L., Yuan, L., Zhang, L., Hwang, J.N., et al.: Grounded language-image pre-training. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 10965-10975 (2022) 4, 10, 13
18. Li, Y., Upadhyay, U., Slim, H., Abdelreheem, A., Prajapati, A., Pothigara, S., Wonka, P., Elhoseiny, M.: 3dcompat: Composition of materials on parts of 3d things. In: European Conference on Computer Vision (ECCV) (2022) 4
19. Liu, M., Zhu, Y., Cai, H., Han, S., Ling, Z., Porikli, F., Su, H.: Partslip: Low-shot part segmentation for 3d point clouds via pretrained image-language models. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21736-21746 (2023) 2, 3, 4, 8, 9, 10, 11, 12, 13, 14
20. Liu, W., Mao, J., Hsu, J., Hermans, T., Garg, A., Wu, J.: Composable part-based manipulation. In: 7th Annual Conference on Robot Learning (2023), https://openreview.net/forum?id=o-K3HVUeEw1
21. Liu, X., Xu, X., Rao, A., Gan, C., Yi, L.: Autogpart: Intermediate supervision search for generalizable 3d part segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11624-11634 (2022) 2
22. Lowe, D.G.: Distinctive image features from scale-invariant keypoints. International Journal of Computer Vision 60, 91-110 (2004) 5
23. Min, J., Lee, J., Ponce, J., Cho, M.: Spair-71k: A large-scale benchmark for semantic correspondence. arXiv preprint arXiv:1908.10543 (2019) 7
24. Mo, K., Zhu, S., Chang, A.X., Yi, L., Tripathi, S., Guibas, L.J., Su, H.: Partnet: A large-scale benchmark for fine-grained and hierarchical part-level 3d object understanding. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 909-918 (2019) 3, 4, 9, 12, 13
25. Nadeau, P., Giamou, M., Kelly, J.: The sum of its parts: Visual part segmentation for inertial parameter identification of manipulated objects. arXiv preprint arXiv:2302.06685 (2023) 1
26. Nguyen, P.D.A., Ngo, T.D., Gan, C., Kalogerakis, E., Tran, A., Pham, C., Nguyen, K.: Open3dis: Open-vocabulary 3d instance segmentation with 2d mask guidance (2023) 4
27. Peng, S., Genova, K., Jiang, C., Tagliasacchi, A., Pollefeys, M., Funkhouser, T., et al.: Openscene: 3d scene understanding with open vocabularies. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 815-824 (2023) 4
28. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in Neural Information Processing Systems 30 (2017) 10
29. Qian, G., Li, Y., Peng, H., Mai, J., Hammoud, H., Elhoseiny, M., Ghanem, B.: Pointnext: Revisiting pointnet++ with improved training and scaling strategies. Advances in Neural Information Processing Systems 35, 23192-23204 (2022) 4, 10
30. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning. pp. 8748-8763. PMLR (2021) 4
31. Ramanathan, V., Kalia, A., Petrovic, V., Wen, Y., Zheng, B., Guo, B., Wang, R., Marquez, A., Kovvuri, R., Kadian, A., et al.: Paco: Parts and attributes of common objects. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7141-7151 (2023) 2, 11, 12
32. Sharma, G., Yin, K., Maji, S., Kalogerakis, E., Litany, O., Fidler, S.: Mvdecor: Multi-view dense correspondence learning for fine-grained 3d segmentation. In: European Conference on Computer Vision. pp. 550-567. Springer (2022) 3, 4, 8, 10, 11, 12, 13, 14
33. Singh, C., Murdoch, W.J., Yu, B.: Hierarchical interpretations for neural network predictions. arXiv preprint arXiv:1806.05337 (2018) 10
34. Sun, P., Chen, S., Zhu, C., Xiao, F., Luo, P., Xie, S., Yan, Z.: Going denser with open-vocabulary part segmentation. arXiv preprint arXiv:2305.11173 (2023) 5, 11, 12
35. Takmaz, A., Fedele, E., Sumner, R.W., Pollefeys, M., Tombari, F., Engelmann, F.: Openmask3d: Open-vocabulary 3d instance segmentation. arXiv preprint arXiv:2306.13631 (2023) 4, 11
36. Tang, L., Jia, M., Wang, Q., Phoo, C.P., Hariharan, B.: Emergent correspondence from image diffusion. arXiv preprint arXiv:2306.03881 (2023) 2, 3, 5, 6, 7
37. Varadarajan, K.M., Vincze, M.: Object part segmentation and classification in range images for grasping. In: 2011 15th International Conference on Advanced Robotics (ICAR). pp. 21-27. IEEE (2011) 1
38. Vu, T., Kim, K., Luu, T.M., Nguyen, T., Yoo, C.D.: Softgroup for 3d instance segmentation on point clouds. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2708-2717 (2022) 10
39. Wang, L., Li, X., Fang, Y.: Few-shot learning of part-specific probability space for 3d shape segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (June 2020) 4
40. Wang, R., Zhang, Y., Mao, J., Zhang, R., Cheng, C.Y., Wu, J.: Ikea-manual: Seeing shape assembly step by step. Advances in Neural Information Processing Systems 35, 28428-28440 (2022) 4
41. Xiang, F., Qin, Y., Mo, K., Xia, Y., Zhu, H., Liu, F., Liu, M., Jiang, H., Yuan, Y., Wang, H., et al.: Sapien: A simulated part-based interactive environment. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp.
11097-11107 (2020) 9" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 32, + 408, + 379, + 440 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 408, + 379, + 440 + ], + "spans": [ + { + "bbox": [ + 32, + 408, + 379, + 440 + ], + "type": "text", + "content": "42. Xu, M., Yin, X., Qiu, L., Liu, Y., Tong, X., Han, X.: Sampro3d: Locating sam prompts in 3d for zero-shot scene segmentation. arXiv preprint arXiv:2311.17707 (2023) 4, 11, 12" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 32, + 440, + 379, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 440, + 379, + 463 + ], + "spans": [ + { + "bbox": [ + 32, + 440, + 379, + 463 + ], + "type": "text", + "content": "43. Xue, Y., Chen, N., Liu, J., Sun, W.: Zerops: High-quality cross-modal knowledge transfer for zero-shot 3d part segmentation (2023) 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 32, + 463, + 379, + 484 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 463, + 379, + 484 + ], + "spans": [ + { + "bbox": [ + 32, + 463, + 379, + 484 + ], + "type": "text", + "content": "44. Yang, Y., Wu, X., He, T., Zhao, H., Liu, X.: Sam3d: Segment anything in 3d scenes. arXiv preprint arXiv:2306.03908 (2023) 4" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 32, + 484, + 379, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 484, + 379, + 517 + ], + "spans": [ + { + "bbox": [ + 32, + 484, + 379, + 517 + ], + "type": "text", + "content": "45. Yi, L., Kim, V.G., Ceylan, D., Shen, I.C., Yan, M., Su, H., Lu, C., Huang, Q., Sheffer, A., Guibas, L.: A scalable active framework for region annotation in 3d shape collections. SIGGRAPH Asia (2016) 4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 32, + 517, + 379, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 517, + 379, + 550 + ], + "spans": [ + { + "bbox": [ + 32, + 517, + 379, + 550 + ], + "type": "text", + "content": "46. Yu, Q., Du, H., Liu, C., Yu, X.: When 3d bounding-box meets sam: Point cloud instance segmentation with weak-and-noisy supervision. ArXiv abs/2309.00828 (2023), https://api-semanticscholar.org/CorpusID:261530997 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 32, + 550, + 379, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 550, + 379, + 583 + ], + "spans": [ + { + "bbox": [ + 32, + 550, + 379, + 583 + ], + "type": "text", + "content": "47. Zhang, J., Herrmann, C., Hur, J., Cabrera, L.P., Jampani, V., Sun, D., Yang, M.H.: A tale of two features: Stable diffusion complements dino for zero-shot semantic correspondence. 
arXiv preprint arXiv:2305.15347 (2023) 2, 5" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 317, + 10, + 347, + 20 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 10, + 347, + 20 + ], + "spans": [ + { + "bbox": [ + 317, + 10, + 347, + 20 + ], + "type": "text", + "content": "3-By-2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 369, + 10, + 379, + 19 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 10, + 379, + 19 + ], + "spans": [ + { + "bbox": [ + 369, + 10, + 379, + 19 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 31, + 35, + 379, + 243 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 32, + 35, + 379, + 79 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 35, + 379, + 79 + ], + "spans": [ + { + "bbox": [ + 32, + 35, + 379, + 79 + ], + "type": "text", + "content": "48. Zhao, L., Lu, J., Zhou, J.: Similarity-aware fusion network for 3d semantic segmentation. 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) pp. 1585-1592 (2021), https://apisemantic scholar.org/CorpusID:235732071 4" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 31, + 79, + 379, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 31, + 79, + 379, + 112 + ], + "spans": [ + { + "bbox": [ + 31, + 79, + 379, + 112 + ], + "type": "text", + "content": "49. Zhao, N., Chua, T.S., Lee, G.H.: Few-shot 3d point cloud semantic segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8873-8882 (2021) 10" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 32, + 112, + 379, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 112, + 379, + 144 + ], + "spans": [ + { + "bbox": [ + 32, + 112, + 379, + 144 + ], + "type": "text", + "content": "50. Zhou, Y., Gu, J., Li, X., Liu, M., Fang, Y., Su, H.: Partslip++: Enhancing low-shot 3d part segmentation via multi-view instance segmentation and maximum likelihood estimation. arXiv preprint arXiv:2312.03015 (2023) 4, 10" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 32, + 145, + 379, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 145, + 379, + 199 + ], + "spans": [ + { + "bbox": [ + 32, + 145, + 379, + 199 + ], + "type": "text", + "content": "51. Zhu, J., Zhang, Y., Guo, J., Liu, H., Liu, M., Liu, Y., Guo, Y.: Label transfer between images and 3d shapes via local correspondence encoding. Comput. Aided Geom. Des. 71(C), 255-266 (may 2019). https://doi.org/10.1016/j.cagd.2019.04.009, https://doi.org/10.1016/j.cagd.2019.04.009 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 32, + 199, + 379, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 32, + 199, + 379, + 243 + ], + "spans": [ + { + "bbox": [ + 32, + 199, + 379, + 243 + ], + "type": "text", + "content": "52. Zhu, X., Zhang, R., He, B., Guo, Z., Zeng, Z., Qin, Z., Zhang, S., Gao, P.: Pointclip v2: Prompting clip and gpt for powerful 3d open-world learning. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 
2639-2650 (2023) 2" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 33, + 10, + 43, + 19 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 33, + 10, + 43, + 19 + ], + "spans": [ + { + "bbox": [ + 33, + 10, + 43, + 19 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 9, + 121, + 19 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 9, + 121, + 19 + ], + "spans": [ + { + "bbox": [ + 66, + 9, + 121, + 19 + ], + "type": "text", + "content": "A. Thai et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 413, + 615 + ], + "page_idx": 17 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/4D Contrastive Superflows are Dense 3D Representation Learners/3b016017-cefc-4a8b-a706-93b64616c878_content_list.json b/2024/4D Contrastive Superflows are Dense 3D Representation Learners/3b016017-cefc-4a8b-a706-93b64616c878_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..587431f75315c5caca74a20d3875d1d1fd032a83 --- /dev/null +++ b/2024/4D Contrastive Superflows are Dense 3D Representation Learners/3b016017-cefc-4a8b-a706-93b64616c878_content_list.json @@ -0,0 +1,2136 @@ +[ + { + "type": "text", + "text": "4D Contrastive Superflows are Dense 3D Representation Learners", + "text_level": 1, + "bbox": [ + 269, + 140, + 733, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiang Xu $^{1,\\star}$ , Lingdong Kong $^{2,3,*}$ , Hui Shuai $^{4}$ , Wenwei Zhang $^{2}$ , Liang Pan $^{2}$ , Kai Chen $^{2}$ , Ziwei Liu $^{5}$ , and Qingshan Liu $^{4,\\text{图}}$", + "bbox": [ + 274, + 210, + 727, + 244 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Nanjing University of Aeronautics and Astronautics", + "bbox": [ + 318, + 253, + 681, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2 Shanghai AI Laboratory", + "bbox": [ + 411, + 268, + 589, + 282 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{3}$ National University of Singapore", + "bbox": [ + 383, + 282, + 617, + 296 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{4}$ Nanjing University of Posts and Telecommunications", + "bbox": [ + 318, + 296, + 683, + 310 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "S-Lab, Nanyang Technological University", + "bbox": [ + 356, + 310, + 645, + 323 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. In the realm of autonomous driving, accurate 3D perception is the foundation. However, developing such models relies on extensive human annotations – a process that is both costly and labor-intensive. To address this challenge from a data representation learning perspective, we introduce SuperFlow, a novel framework designed to harness consecutive LiDAR-camera pairs for establishing spatiotemporal pretraining objectives. SuperFlow stands out by integrating two key designs: 1) a dense-to-sparse consistency regularization, which promotes insensitivity to point cloud density variations during feature learning, and 2) a flow-based contrastive learning module, carefully crafted to extract meaningful temporal cues from readily available sensor calibrations. To further boost learning efficiency, we incorporate a plug-and-play view consistency module that enhances the alignment of the knowledge distilled from camera views. 
Extensive comparative and ablation studies across 11 heterogeneous LiDAR datasets validate our effectiveness and superiority. Additionally, we observe several interesting emerging properties by scaling up the 2D and 3D backbones during pretraining, shedding light on the future research of 3D foundation models for LiDAR-based perception. Code is publicly available at https://github.com/Xiangxu-0103/SuperFlow.", + "bbox": [ + 259, + 354, + 743, + 619 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: LiDAR Segmentation $\\cdot$ 3D Data Pretraining $\\cdot$ Autonomous Driving $\\cdot$ Image-to-LiDAR Contrastive Learning $\\cdot$ Semantic Superpixels", + "bbox": [ + 259, + 631, + 743, + 660 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 681, + 375, + 698 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Driving perception is one of the most crucial components of an autonomous vehicle system. Recent advancements in sensing technologies, such as light detection and ranging (LiDAR) sensors and surrounding-view cameras, open up new possibilities for a holistic, accurate, and 3D-aware scene perception [3,9,79].", + "bbox": [ + 212, + 712, + 785, + 772 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Training a 3D perception model that can perform well in real-world scenarios often requires large-scale datasets and sufficient computing power [27,58]. Different from 2D, annotating 3D data is notably more expensive and labor-intensive,", + "bbox": [ + 212, + 772, + 785, + 818 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* X. Xu and L. Kong contributed equally to this work. ⌒ Corresponding author.", + "bbox": [ + 217, + 824, + 756, + 840 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "which hinders the scalability of existing 3D perception models [28,69,98,112]. Data representation learning serves as a potential solution to mitigate such a problem [6,76]. By designing suitable pretraining objectives, the models are anticipated to extract useful concepts from raw data, where such concepts can help improve models' performance on downstream tasks with fewer annotations [51].", + "bbox": [ + 212, + 146, + 787, + 223 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recently, Sautier et al. [82] proposed SLidR to distill knowledge from surrounding camera views - using a pretrained 2D backbone such as MoCo [14] and DINO [72] - to LiDAR point clouds, exhibiting promising 3D representation learning properties. The key to its success is the superpixel-driven contrastive objectives between cameras and LiDAR sensors. Subsequent works further extended this framework from various aspects, such as class balancing [66], hybrid-view distillation [110], semantic superpixels [11, 12, 61], and so on. While these methods showed improved performance over their baselines, there exist several issues that could undermine the data representation learning.", + "bbox": [ + 212, + 228, + 455, + 561 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/1198c7f3899ffac5b4f62f30fa3305ba0aaee00132c41cf2e3ab3b3bec78a1c5.jpg", + "image_caption": [ + "Fig.1: Performance overview of SuperFlow compared to state-of-the-art image-to-LiDAR pretraining methods, i.e., Seal [61], SLidR [82], and PPKT [63], on eleven LiDAR datasets. The scores of prior methods are normalized based on SuperFlow's scores. The larger the area coverage, the better the overall segmentation performance." 
+ ], + "image_footnote": [], + "bbox": [ + 465, + 236, + 785, + 448 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The first concern revolves around the inherent temporal dynamics of LiDAR data [4,8]. LiDAR point clouds are acquired sequentially, capturing the essence of motion within the scene. Traditional approaches [61,63,66,82,110] often overlook this temporal aspect, treating each snapshot as an isolated scan. However, this sequential nature holds a wealth of information that can significantly enrich the model's understanding of the 3D environment [71,96]. Utilizing these temporal cues can lead to more robust and context-aware 3D perception models, which is crucial for dynamic environments encountered in autonomous driving.", + "bbox": [ + 212, + 568, + 787, + 690 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Moreover, the varying density of LiDAR point clouds presents a unique challenge [45, 47, 94]. Due to the nature of LiDAR scanning and data acquisition, different areas within the same scene can have significantly different point densities, which can in turn affect the consistency of feature representation across the scene [2, 47, 108, 111]. Therefore, a model that can learn invariant features regardless of point cloud density tends to be effective for recognizing the structural and semantic information in the 3D space.", + "bbox": [ + 212, + 696, + 787, + 803 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In lieu of existing challenges, we propose a novel spatiotemporal contrastive learning dubbed SuperFlow to encourage effective cross-sensor knowledge dis", + "bbox": [ + 212, + 809, + 785, + 840 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 352, + 127 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "tillation. Our approach features three key components, all centered around the use of the off-the-shelf temporal cues inherent in the LiDAR acquisition process:", + "bbox": [ + 215, + 146, + 784, + 176 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- We first introduce a straightforward yet effective view consistency alignment that seamlessly generates semantic superpixels with language guidance, alleviating the \"self-conflict\" issues in existing works [61,66,82]. As opposed to the previous pipeline, our method also aligns the semantics across camera views in consecutive scenes, paving the way for more sophisticated designs.", + "bbox": [ + 225, + 181, + 785, + 257 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- To address the varying density of LiDAR point clouds, we present a dense-to-sparse regularization module that encourages consistency between features of dense and sparse point clouds. Dense points are obtained by concatenating multi-sweep LiDAR scans within a suitable time window and propagating the semantic superpixels from sparse to dense points. By leveraging dense point features to regularize sparse point features, the model promotes insensitivity to point cloud density variations.", + "bbox": [ + 225, + 257, + 785, + 361 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- To capture useful temporal cues from consecutive scans across different timestamps, we design a flow-based contrastive learning module. 
This module takes multiple LiDAR-camera pairs as input and excites strong consistency between temporally shifted representations. Analogous to existing image-to-LiDAR representation learning methods [61,66,82], we also incorporate useful spatial contrastive objectives into our framework, setting a unified pipeline that emphasizes holistic representation learning from both the structural 3D layouts and the temporal 4D information.", + "bbox": [ + 225, + 361, + 785, + 481 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The strong spatiotemporal consistency regularization in SuperFlow effectively forms a semantically rich landscape that enhances data representations. As illustrated in Fig. 1, our approach achieves appealing performance gains over state-of-the-art 3D pretraining methods across a diverse spectrum of downstream tasks. Meanwhile, we also target at scaling the capacity of both 2D and 3D backbones during pretraining, shedding light on the future development of more robust, unified, and ubiquitous 3D perception models.", + "bbox": [ + 215, + 488, + 784, + 593 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To summarize, this work incorporates key contributions listed as follows:", + "bbox": [ + 238, + 593, + 763, + 608 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- We present SuperFlow, a novel framework aimed to harness consecutive LiDAR-camera pairs for establishing spatiotemporal pretraining objectives.", + "bbox": [ + 225, + 614, + 784, + 643 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- Our framework incorporates novel designs including view consistency alignment, dense-to-sparse regularization, and flow-based contrastive learning, which better encourages data representation learning effects between camera and LiDAR sensors across consecutive scans.", + "bbox": [ + 225, + 643, + 784, + 700 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- Our approach sets a new state-of-the-art performance across 11 LiDAR datasets, exhibiting strong robustness and generalizability. We also reveal intriguing emergent properties as we scale up the 2D and 3D backbones, which could lay the foundation for scalable 3D perception.", + "bbox": [ + 225, + 700, + 784, + 762 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 215, + 782, + 387, + 797 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "LiDAR-based 3D Perception. The LiDAR sensor has been widely used in today's 3D perception systems, credited to its robust and structural sensing abl-", + "bbox": [ + 215, + 809, + 784, + 839 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "4D Contrastive Superflows are Dense 3D Representation Learners", + "bbox": [ + 292, + 114, + 732, + 128 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ities [4, 88, 92]. Due to the sparse and unordered nature of LiDAR point clouds, suitable rasterization strategies are needed to convert them into structural inputs [37, 93]. Popular choices include sparse voxels [18, 19, 33, 34, 90, 118], bird's eye view maps [10, 56, 111, 117], range view images [17, 21, 44, 68, 104, 107, 116], and multi-view fusion [18, 40, 60, 62, 77, 105, 106]. While witnessing record-breaking performances on standard benchmarks, existing approaches rely heavily on human annotations, which hinders scalability [27]. 
In response to this challenge, we resort to newly appeared 3D representation learning, hoping to leverage the rich collections of unlabeled LiDAR point clouds for more effective learning from LiDAR data. This could further enrich the efficacy of LiDAR-based perception.", + "bbox": [ + 212, + 146, + 787, + 297 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Data-Efficient 3D Perception. To better save annotation budgets, previous efforts seek 3D perception in a data-efficient manner [11, 12, 27, 40, 46, 49]. One line of research resorts to weak supervision, e.g., seeding points [36, 53, 86, 115], active prompts [38, 57, 100], and scribbles [94], for weakly-supervised LiDAR semantic segmentation. Another line of research seeks semi-supervised learning approaches [47, 52, 91] to better tackle efficient 3D scene perception and achieve promising results. In this work, different from the prior pursuits, we tackle efficient 3D perception from the data representation learning perspective. We establish several LiDAR-based data representation learning settings that seamlessly combine pretraining with weakly- and semi-supervised learning, further enhancing the scalability of 3D perception systems.", + "bbox": [ + 212, + 301, + 787, + 468 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3D Representation Learning. Analog to 2D representation learning strategies [13,15,30,31,103], prior works designed contrastive [35,70,81,101,108,113], masked modeling [32,50,95], and reconstruction [7,67] objectives for 3D pretraining. Most early 3D representation learning approaches use a single modality for pretraining, leaving room for further development. The off-the-shelf calibrations among different types of sensors provide a promising solution for building pretraining objectives [63]. Recently, SLidR [82] has made the first contribution toward multi-modal 3D representation learning between camera and LiDAR sensors. Subsequent works [66,74,110] extended this framework with more advanced designs. Seal [61] leverages powerful vision foundation models [42,109,119,120] to better assist the contrastive learning across sensors. Puy et al. [75,76] conducted a comprehensive study on the distillation recipe for better pretraining effects. While these approaches have exhibited better performance than their baselines, they overlooked the rich temporal cues across consecutive scans, which might lead to sub-opt pretraining performance. In this work, we construct dense 3D representation learning objectives using calibrated LiDAR sequences. Our approach encourages the consistency between features from sparse to dense inputs and features across timestamps, yielding superiority over existing endeavors.", + "bbox": [ + 212, + 472, + 787, + 744 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4D Representation Learning. Leveraging consecutive scans is promising in extracting temporal relations [2, 23, 33, 85]. For point cloud data pretraining, prior works [16, 64, 83, 84, 114] mainly focused on applying 4D cues on object- and human-centric point clouds, which are often small in scale. For large-scale automotive point clouds, STRL [39] learns spatiotemporal data invariance with different spatial augmentations in the point cloud sequence. TARL [71] and", + "bbox": [ + 212, + 750, + 787, + 840 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "X. 
Xu et al.", + "bbox": [ + 271, + 114, + 352, + 126 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "STSSL [96] encourage similarities of point clusters in two consecutive frames, where such clusters are obtained by ground removal and clustering algorithms, i.e., RANSAC [25], Patchwork [55], and HDBSCAN [24]. BEVContrast [81] shares a similar motivation but utilizes BEV maps for contrastive learning, which yields a more effective implementation. The \"one-fits-all\" clustering parameters, however, are often difficult to obtain, hindering existing works. Different from existing methods that use a single modality for 4D representation learning, we propose to leverage LiDAR-camera correspondences and semantic-rich superpixels to establish meaningful multi-modality 4D pretraining objectives.", + "bbox": [ + 212, + 146, + 782, + 282 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 SuperFlow", + "text_level": 1, + "bbox": [ + 215, + 306, + 354, + 324 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we first revisit the common setups of the camera-to-LiDAR distillation baseline (cf. Sec. 3.1). We then elaborate on the technical details of SuperFlow, encompassing a straightforward yet effective view consistency alignment (cf. Sec. 3.2), a dense-to-sparse consistency regularization (cf. Sec. 3.3), and a flow-based spatiotemporal contrastive learning (cf. Sec. 3.4). The overall pipeline of the proposed SuperFlow framework is depicted in Fig. 4.", + "bbox": [ + 212, + 340, + 784, + 431 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 Preliminaries", + "text_level": 1, + "bbox": [ + 215, + 454, + 372, + 467 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Problem Definition. Given a point cloud $\\mathcal{P}^t = \\{\\mathbf{p}_i^t, \\mathbf{f}_i^t | i = 1, \\dots, N\\}$ with $N$ points captured by a LiDAR sensor at time $t$ , where $\\mathbf{p}_i \\in \\mathbb{R}^3$ denotes the coordinate of the point and $\\mathbf{f}_i \\in \\mathbb{R}^C$ is the corresponding feature, we aim to transfer knowledge from $M$ surrounding camera images $\\mathcal{I}^t = \\{\\mathbf{I}_i^t | i = 1, \\dots, M\\}$ into the point cloud. Here, $\\mathbf{I}_i \\in \\mathbb{R}^{H \\times W \\times 3}$ represents an image with height $H$ and width $W$ . Prior works [61, 82] generate a set of class-agnostic superpixels $\\mathcal{X}_i = \\{\\mathbf{X}_i^j | j = 1, \\dots, V\\}$ for each image via the unsupervised SLIC algorithm [1] or the more recent vision foundation models (VFMs) [42, 119, 120], where $V$ denotes the total number of superpixels. Assuming that the point cloud $\\mathcal{P}^t$ and images $\\mathcal{I}^t$ are calibrated, the point cloud $\\mathbf{p}_i = (x_i, y_i, z_i)$ can be then projected to the image plane $(u_i, v_i)$ using the following sensor calibration parameters:", + "bbox": [ + 212, + 479, + 784, + 647 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n[ u _ {i}, v _ {i}, 1 ] ^ {\\mathrm {T}} = \\frac {1}{z _ {i}} \\times \\Gamma_ {K} \\times \\Gamma_ {c \\leftarrow l} \\times [ x _ {i}, y _ {i}, z _ {i} ] ^ {\\mathrm {T}}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 657, + 784, + 688 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\Gamma_K$ denotes the camera intrinsic matrix and $\\Gamma_{c\\leftarrow l}$ is the transformation matrix from LiDAR sensors to surrounding-view cameras. 
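To make the projection in Eq. (1) concrete, the following is a minimal NumPy sketch of this pinhole mapping. It assumes a 3x3 intrinsic matrix and a 4x4 homogeneous LiDAR-to-camera transform; the function and variable names are illustrative rather than taken from the released code, and a practical implementation would further discard points that project outside the image bounds.

```python
import numpy as np

def project_lidar_to_image(points_xyz, gamma_k, gamma_cam_from_lidar):
    # points_xyz:           (N, 3) LiDAR coordinates (x_i, y_i, z_i).
    # gamma_k:              (3, 3) camera intrinsic matrix Gamma_K.
    # gamma_cam_from_lidar: (4, 4) homogeneous transform Gamma_{c<-l}.
    n = points_xyz.shape[0]
    pts_h = np.concatenate([points_xyz, np.ones((n, 1))], axis=1)  # (N, 4)
    pts_cam = (gamma_cam_from_lidar @ pts_h.T).T[:, :3]            # camera frame
    in_front = pts_cam[:, 2] > 1e-6          # keep points with positive depth
    uv = (gamma_k @ pts_cam.T).T             # (N, 3) homogeneous pixel coords
    uv = uv[:, :2] / np.clip(uv[:, 2:3], 1e-6, None)  # divide by depth z_i
    return uv, in_front
```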
We also obtain a set of superpoints $\\mathcal{Y} = \\{\\mathbf{Y}^j | j = 1, \\dots, V\\}$ through this projection.", + "bbox": [ + 212, + 699, + 782, + 743 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Network Representations. Let $\\mathcal{F}_{\\theta_p}:\\mathbb{R}^{N\\times (3 + C)}\\to \\mathbb{R}^{N\\times D}$ be a 3D backbone with trainable parameters $\\theta_{p}$ , which takes LiDAR points as input and outputs $D$ -dimensional point features. Let $\\mathcal{G}_{\\theta_i}:\\mathbb{R}^{H\\times W\\times 3}\\to \\mathbb{R}^{\\frac{H}{S}\\times \\frac{W}{S}\\times E}$ be an image backbone with pretrained parameters $\\theta_{i}$ that takes images as input and outputs $E$ -dimensional image features with stride $S$ . Let $\\mathcal{H}_{\\omega_p}:\\mathbb{R}^{N\\times D}\\to \\mathbb{R}^{N\\times L}$ and $\\mathcal{H}_{\\omega_i}:\\mathbb{R}^{\\frac{H}{S}\\times \\frac{W}{S}\\times E}\\to \\mathbb{R}^{H\\times W\\times L}$ be linear heads with trainable parameters $\\omega_{p}$ and $\\omega_{i}$ ,", + "bbox": [ + 212, + 744, + 784, + 840 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "4D Contrastive Superflows are Dense 3D Representation Learners", + "bbox": [ + 292, + 114, + 732, + 128 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/d6f78ecf544015a58501fc084c9b78db5f5dbc080295b048e04ffc6452db34c1.jpg", + "image_caption": [ + "(a) Heuristic" + ], + "image_footnote": [], + "bbox": [ + 220, + 143, + 395, + 271 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/a0575d5b2a37ee97764d9318e67aea6b2e6ebae92c7921259b2f9e2bb23d7ba0.jpg", + "image_caption": [ + "(b) Class Agnostic" + ], + "image_footnote": [], + "bbox": [ + 413, + 143, + 589, + 271 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/50c03ec25032693f317237d850c56bf109c008111e4ce182b074ba20ba1cd76b.jpg", + "image_caption": [ + "(c) View Consistent", + "Fig. 2: Comparisons of different superpixels. (a) Class-agnostic superpixels generated by the unsupervised SLIC [1] algorithm. (b) Class-agnostic semantic superpixels generated by vision foundation models (VFMs) [109, 119, 120]. (c) View-consistent semantic superpixels generated by our view consistency alignment module." + ], + "image_footnote": [], + "bbox": [ + 606, + 143, + 776, + 271 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "which project backbone features to $L$ -dimensional features with $\\ell_2$ -normalization and upsample image features to $H\\times W$ with bilinear interpolation.", + "bbox": [ + 214, + 377, + 784, + 407 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Pretraining Objective. The overall objective of image-to-LiDAR representation learning [82] is to transfer knowledge from the trained image backbone $\\mathcal{G}_{\\theta_i}$ to the 3D backbone $\\mathcal{F}_{\\theta_p}$ . The superpixels $\\mathcal{X}_i$ generated offline, serve as an intermediate to effectively guide the knowledge transfer process.", + "bbox": [ + 214, + 407, + 784, + 468 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 View Consistency Alignment", + "text_level": 1, + "bbox": [ + 215, + 493, + 500, + 508 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Motivation. The class-agnostic superpixels $\\mathcal{X}_i$ used in prior works [61,66,82] are typically instance-level and do not consider their actual categories. 
As discussed in [66], instance-level superpixels can lead to \"self-conflict\" problems, which undermines the effectiveness of pretraining.", + "bbox": [ + 214, + 521, + 784, + 580 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Superpixel Comparisons. Fig. 2 compares superpixels generated via the unsupervised SLIC [1] and VFMs. SLIC [1] tends to over-segment objects, causing semantic conflicts. VFMs generate superpixels through a panoptic segmentation head, which can still lead to \"self-conflict\" in three conditions (see Fig. 2b): ① when the same object appears in different camera views, leading to different parts of the same object being treated as negative samples; ② when objects of the same category within the same camera view are treated as negative samples; ③ when objects across different camera views are treated as negative samples even if they share the same label.", + "bbox": [ + 214, + 583, + 785, + 717 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Semantic-Related Superpixels Generation. To address these issues, we propose generating semantic-related superpixels to ensure consistency across camera views. Contrastive Vision-Language Pre-training (CLIP) [78] has shown great generalization in few-shot learning. Building on existing VFMs [42,119,120], we employ CLIP's text encoder and fine-tune the last layer of the segmentation head from VFMs with predefined text prompts. This allows the segmentation head to generate language-guided semantic categories for each pixel, which we leverage as superpixels. As shown in Fig. 2c, we unify superpixels across camera", + "bbox": [ + 214, + 719, + 785, + 839 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 352, + 126 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "views based on semantic category, alleviating the \"self-conflict\" problem in prior image-to-LiDAR contrastive learning pipelines.", + "bbox": [ + 214, + 146, + 782, + 176 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3 D2S: Dense-to-Sparse Consistency Regularization", + "text_level": 1, + "bbox": [ + 214, + 205, + 669, + 222 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Motivation. LiDAR points are sparse and often incomplete, significantly restricting the efficacy of the cross-sensor feature representation learning process. In this work, we propose to tackle this challenge by combining multiple LiDAR scans within a suitable time window to create a dense point cloud, which is then used to encourage consistency with the sparse point cloud.", + "bbox": [ + 212, + 239, + 782, + 314 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Point Cloud Concatenation. 
Specifically, given a keyframe point cloud $\\mathcal{P}^t$ captured at time $t$ and a set of sweep point clouds $\\{\\mathcal{P}^s | s = 1, \\dots, T\\}$ captured at previous times $s$ , we first transform the coordinate $(x^s, y^s, z^s)$ of the sweep point cloud $\\mathcal{P}^s$ to the coordinate systems of $\\mathcal{P}^t$ , as they share different systems due to the vehicle's movement:", + "bbox": [ + 214, + 316, + 452, + 481 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\left[ \\tilde {x} ^ {s}, \\tilde {y} ^ {s}, \\tilde {z} ^ {s} \\right] ^ {\\mathrm {T}} = \\Gamma_ {t \\leftarrow s} \\times \\left[ x ^ {s}, y ^ {s}, z ^ {s} \\right] ^ {\\mathrm {T}}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 494, + 457, + 527 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\Gamma_{t\\leftarrow s}$ denotes the transformation matrix from the sweep point cloud at time $s$ to the keyframe point cloud at time $t$ . We then concatenate the transformed sweep points $\\{\\tilde{\\mathcal{P}}^s |s =$", + "bbox": [ + 214, + 529, + 454, + 619 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b52e547912a4ed8aa7f1a04c2e0540c4daaa084ad61bafd7bf4f8239a0d546ef.jpg", + "image_caption": [ + "Fig.3: Dense-to-sparse (D2S) consistency regularization module. Dense point clouds are obtained by combining multiple point clouds captured at different times. A D2S regularization is formulated by encouraging the consistency between dense features and sparse features." + ], + "image_footnote": [], + "bbox": [ + 467, + 324, + 785, + 513 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "$1, \\ldots, T\\}$ with $\\mathcal{P}^t$ to obtain a dense point cloud $\\mathcal{P}^d$ . As shown in Fig. 3, $\\mathcal{P}^d$ fuses temporal information from consecutive point clouds, resulting in a dense and semantically rich representation for feature learning.", + "bbox": [ + 214, + 618, + 782, + 662 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Dense Superpoints. Meanwhile, we generate sets of superpoints $\\mathcal{Y}^d$ and $\\mathcal{Y}^t$ for $\\mathcal{P}^d$ and $\\mathcal{P}^t$ , respectively, using superpixels $\\mathcal{X}^t$ . Both $\\mathcal{P}^t$ and $\\mathcal{P}^d$ are fed into the weight-shared 3D network $\\mathcal{F}_{\\theta_p}$ and $\\mathcal{H}_{\\omega_p}$ for feature extraction. The output features are grouped via average pooling based on the superpoint indices to obtain superpoint features $\\mathbf{Q}^d$ and $\\mathbf{Q}^t$ , where $\\mathbf{Q}^d \\in \\mathbb{R}^{V \\times L}$ and $\\mathbf{Q}^d \\in \\mathbb{R}^{V \\times L}$ . 
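As an illustration, the multi-sweep aggregation of Eq. (2) and the superpoint average pooling described above can be sketched in PyTorch as follows. The tensor shapes, function names, and the `superpoint_ids` index (mapping every point to one of the V superpoints) are assumptions made for this sketch; in practice, the 4x4 transforms come from the dataset's ego-pose and sensor calibration metadata.

```python
import torch

def aggregate_sweeps(keyframe_xyz, sweep_xyz_list, transforms_to_keyframe):
    # keyframe_xyz: (N, 3); sweep_xyz_list: list of (N_s, 3) past sweeps;
    # transforms_to_keyframe: list of (4, 4) matrices Gamma_{t<-s} in Eq. (2).
    dense = [keyframe_xyz]
    for xyz, tf in zip(sweep_xyz_list, transforms_to_keyframe):
        ones = torch.ones(xyz.shape[0], 1, dtype=xyz.dtype)
        xyz_h = torch.cat([xyz, ones], dim=1)       # homogeneous coordinates
        dense.append((xyz_h @ tf.T)[:, :3])         # move into keyframe system
    return torch.cat(dense, dim=0)                  # dense point cloud P^d

def pool_superpoint_features(point_feats, superpoint_ids, num_superpoints):
    # Average-pool per-point features (N, L) into superpoint features (V, L);
    # superpoint_ids is an (N,) long tensor assigning each point a superpoint.
    dim = point_feats.shape[1]
    sums = torch.zeros(num_superpoints, dim, dtype=point_feats.dtype)
    sums.index_add_(0, superpoint_ids, point_feats)
    counts = torch.zeros(num_superpoints, dtype=point_feats.dtype)
    counts.index_add_(0, superpoint_ids,
                      torch.ones_like(superpoint_ids, dtype=point_feats.dtype))
    return sums / counts.clamp(min=1).unsqueeze(1)
```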
We expect $\\mathbf{Q}^d$ and $\\mathbf{Q}^t$ to share similar features, leading to the following D2S loss:", + "bbox": [ + 214, + 664, + 784, + 756 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {d} 2 \\mathrm {s}} = \\frac {1}{V} \\sum_ {i = 1} ^ {V} \\left(1 - < \\mathbf {q} _ {i} ^ {t}, \\mathbf {q} _ {i} ^ {d} >\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 387, + 768, + 784, + 811 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $< \\cdot, \\cdot >$ denotes the scalar product to measure the similarity of features.", + "bbox": [ + 214, + 824, + 777, + 839 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "4D Contrastive Superflows are Dense 3D Representation Learners", + "bbox": [ + 292, + 114, + 732, + 128 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 114, + 784, + 126 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/22d2b1936af646bca2a681273d837a052b79ea967189ee36a0423cb3177ac22a.jpg", + "image_caption": [ + "Fig. 4: Flow-based contrastive learning (FCL) pipeline. FCL takes multiple LiDAR-camera pairs from consecutive scans as input. Based on temporally aligned semantic superpixel and superpoints, two contrastive learning objectives are formulated: 1) spatial contrastive learning between each LiDAR-camera pair $(\\mathcal{L}_{\\mathrm{sc}})$ , and 2) temporal contrastive learning among consecutive LiDAR point clouds across scenes $(\\mathcal{L}_{\\mathrm{tc}})$ ." + ], + "image_footnote": [], + "bbox": [ + 222, + 143, + 782, + 383 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.4 FCL: Flow-Based Contrastive Learning", + "text_level": 1, + "bbox": [ + 214, + 483, + 584, + 500 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Motivation. LiDAR point clouds are acquired sequentially, embedding rich dynamic scene information across consecutive timestamps. Prior works [61, 66, 82] primarily focused on single LiDAR scans, overlooking the consistency of moving objects across scenes. To address these limitations, we propose flow-based contrastive learning (FCL) across sequential LiDAR scenes to encourage spatiotemporal consistency.", + "bbox": [ + 212, + 508, + 782, + 598 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Spatial Contrastive Learning. Our framework, depicted in Fig. 4, takes three LiDAR-camera pairs from different timestamps within a suitable time window as input, i.e., $\\{(\\mathcal{P}^t,\\mathcal{I}^t),(\\mathcal{P}^{t + \\Delta t},\\mathcal{I}^{t + \\Delta t}),(\\mathcal{P}^{t - \\Delta t},\\mathcal{I}^{t - \\Delta t})\\}$ , where timestamp $t$ denotes the current scene and $\\Delta t$ is the timespan. Following previous works [61,82], we first distill knowledge from the 2D network into the 3D network for each scene separately. Taking $(\\mathcal{P}^t,\\mathcal{I}^t)$ as an example, $\\mathcal{P}^t$ and $\\mathcal{I}^t$ are fed into the 3D and 2D networks to extract per-point and image features. The output features are then grouped via average pooling based on superpoints $\\mathcal{Y}^t$ and superpixels $\\mathcal{X}^t$ to obtain superpoint features $\\mathbf{Q}^t$ and superpixel features $\\mathbf{K}^t$ . A spatial contrastive loss is formulated to constrain 3D representation via pretrained 2D prior knowledge. 
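A minimal PyTorch-style sketch of such a paired contrastive objective is given below; the precise form used here appears in Eq. (4) that follows, and applying the same function to superpoint features from neighboring timestamps yields the temporal term of Eq. (5). The temperature value, function name, and the assumption of L2-normalized inputs are illustrative choices for this sketch.

```python
import torch
import torch.nn.functional as F

def paired_contrastive_loss(q, k, tau=0.07):
    # q: (V, L) superpoint features; k: (V, L) superpixel features (or
    # temporally shifted superpoint features). Both are L2-normalized and
    # row i of q is the positive match of row i of k.
    logits = q @ k.t() / tau                  # (V, V) scaled similarities
    targets = torch.arange(q.shape[0], device=q.device)
    # Cross-entropy with diagonal targets contrasts each matched pair
    # against all other pairs, i.e., an InfoNCE-style objective.
    return F.cross_entropy(logits, targets)
```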
This process is formulated as follows:", + "bbox": [ + 212, + 599, + 784, + 763 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {s c}} = - \\frac {1}{V} \\sum_ {i = 1} ^ {V} \\log \\left[ \\frac {e ^ {(< \\mathbf {q} _ {i} , \\mathbf {k} _ {i} > / \\tau)}}{\\sum_ {j \\neq i} e ^ {(< \\mathbf {q} _ {i} , \\mathbf {k} _ {j} > / \\tau)} + e (< \\mathbf {q} _ {i} , \\mathbf {k} _ {i} > / \\tau)} \\right], \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 300, + 773, + 784, + 816 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\tau > 0$ is a temperature that controls the smoothness of distillation.", + "bbox": [ + 214, + 824, + 743, + 839 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 352, + 126 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Flow-Based Contrastive Learning. The spatial contrastive learning objective between images and point clouds, as depicted in Eq. (4), fails to ensure that moving objects share similar attributes across different scenes. To maintain consistency across scenes, a temporal consistency loss is introduced among superpoint features across different scenes. For the point clouds $\\mathcal{P}^t$ and $\\mathcal{P}^{t + \\Delta t}$ , the corresponding superpoint features $\\mathbf{Q}^t$ and $\\mathbf{Q}^{t + \\Delta t}$ are obtained via their superpoints. The temporal contrastive loss operates on $\\mathbf{Q}^t$ and $\\mathbf{Q}^{t + \\Delta t}$ :", + "bbox": [ + 212, + 146, + 782, + 252 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {t c}} ^ {t \\leftarrow t + \\Delta t} = - \\frac {1}{V} \\sum_ {i = 1} ^ {V} \\log \\left[ \\frac {e ^ {(< \\mathbf {q} _ {i} ^ {t} , \\mathbf {q} _ {i} ^ {t + \\Delta t} > / \\tau)}}{\\sum_ {j \\neq i} e ^ {(< \\mathbf {q} _ {i} ^ {t} , \\mathbf {q} _ {j} ^ {t + \\Delta t} > / \\tau)} + e ^ {(< \\mathbf {q} _ {i} ^ {t} , \\mathbf {q} _ {i} ^ {t + \\Delta t} > / \\tau)}} \\right]. (5)\n$$\n", + "text_format": "latex", + "bbox": [ + 261, + 258, + 785, + 306 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The same function is also applied between $\\mathbf{Q}^t$ and $\\mathbf{Q}^{t - \\Delta t}$ . This approach enables point features at time $t$ to extract more context-aware information across scenes.", + "bbox": [ + 214, + 311, + 782, + 340 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 214, + 359, + 374, + 378 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.1 Settings", + "text_level": 1, + "bbox": [ + 214, + 388, + 330, + 402 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Data. We follow the seminar works SLidR [82] and Seal [61] when preparing the datasets. A total of eleven datasets are used in our experiments, including $^1 nuScenes$ [26], $^2 SemanticKITTI$ [5], $^3 Waymo$ Open [89], $^4 ScribbleKITTI$ [94], $^5 RELLIS-3D$ [41], $^6 SemanticPOSS$ [73], $^7 SemanticSTF$ [99], $^8 SynLiDAR$ [97], $^9 DAPS-3D$ [43], $^{10}$ Synth4D [80], and $^{11}$ Robo3D [45]. Due to space limits, kindly refer to the Appendix and [61, 82] for additional details about these datasets.", + "bbox": [ + 212, + 407, + 784, + 498 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Implementation Details. 
SuperFlow is implemented using the MMDetection3D [20] and OpenPCSeg [59] codebases. Consistent with prior works [61,82], we employ MinkUNet [19] as the 3D backbone and DINOv2 [72] (with ViT backbones [22]) as the 2D backbone, distilling from three variants: small (S), base (B), and large (L). Following Seal [61], OpenSeeD [109] is used to generate semantic superpixels. The framework is pretrained end-to-end on 600 scenes from nuScenes [26], then linear probed and fine-tuned on nuScenes [26] according to the data splits in SLidR [82]. The domain generalization study adheres to the same configurations as Seal [61] for the other ten datasets. Both the baselines and SuperFlow are pretrained using eight GPUs for 50 epochs, while linear probing and downstream fine-tuning experiments use four GPUs for 100 epochs, all utilizing the AdamW optimizer [65] and OneCycle scheduler [87]. Due to space limits, kindly refer to the Appendix for additional implementation details.", + "bbox": [ + 212, + 500, + 782, + 694 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Evaluation Protocols. Following conventions, we report the Intersection-over-Union (IoU) on each semantic class and mean IoU (mIoU) over all classes for downstream tasks. For 3D robustness evaluations, we follow Robo3D [45] and report the mean Corruption Error (mCE) and mean Resilience Rate (mRR).", + "bbox": [ + 212, + 696, + 782, + 756 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.2 Comparative Study", + "text_level": 1, + "bbox": [ + 214, + 773, + 423, + 789 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Linear Probing. We start by investigating the pretraining quality via linear probing. For this setup, we initialize the 3D backbone $\\mathcal{F}_{\\theta_p}$ with pretrained parameters and fine-tune only the added-on segmentation head. As shown in Tab. 1,", + "bbox": [ + 212, + 794, + 782, + 839 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "4D Contrastive Superflows are Dense 3D Representation Learners", + "bbox": [ + 292, + 114, + 732, + 128 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/c75525b860ff1e4e730f59e5ef2ab19f40a15480d7638b3b25517c3e56a791e1.jpg", + "table_caption": [ + "Table 1: Comparisons of state-of-the-art pretraining methods pretrained on nuScenes [26] and fine-tuned on SemanticKITTI [5] and Waymo Open [89] with specified data portions, respectively. All methods use MinkUNet [19] as the 3D semantic segmentation backbone. LP denotes linear probing with a frozen backbone. All scores are given in percentage (\\%). Best scores in each configuration are shaded with colors." + ], + "table_footnote": [], + "table_body": "
Method | Venue | Distill | nuScenes LP | nuScenes 1% | nuScenes 5% | nuScenes 10% | nuScenes 25% | nuScenes Full | KITTI 1% | Waymo 1%
Random | - | - | 8.10 | 30.30 | 47.84 | 56.15 | 65.48 | 74.66 | 39.50 | 39.41
PointContrast [101] | ECCV'20 | None | 21.90 | 32.50 | - | - | - | - | 41.10 | -
DepthContrast [113] | ICCV'21 | None | 22.10 | 31.70 | - | - | - | - | 41.50 | -
ALSO [7] | CVPR'23 | None | - | 37.70 | - | 59.40 | - | 72.00 | - | -
BEVContrast [81] | 3DV'24 | None | - | 38.30 | - | 59.60 | - | 72.30 | - | -
PPKT [63] | arXiv'21 | ResNet | 35.90 | 37.80 | 53.74 | 60.25 | 67.14 | 74.52 | 44.00 | 47.60
SLidR [82] | CVPR'22 | ResNet | 38.80 | 38.30 | 52.49 | 59.84 | 66.91 | 74.79 | 44.60 | 47.12
ST-SLidR [66] | CVPR'23 | ResNet | 40.48 | 40.75 | 54.69 | 60.75 | 67.70 | 75.14 | 44.72 | 44.93
TriCC [74] | CVPR'23 | ResNet | 38.00 | 41.20 | 54.10 | 60.40 | 67.60 | 75.60 | 45.90 | -
Seal [61] | NeurIPS'23 | ResNet | 44.95 | 45.84 | 55.64 | 62.97 | 68.41 | 75.60 | 46.63 | 49.34
HVDistill [110] | IJCV'24 | ResNet | 39.50 | 42.70 | 56.60 | 62.90 | 69.30 | 76.60 | 49.70 | -
PPKT [63] | arXiv'21 | ViT-S | 38.60 | 40.60 | 52.06 | 59.99 | 65.76 | 73.97 | 43.25 | 47.44
SLidR [82] | CVPR'22 | ViT-S | 44.70 | 41.16 | 53.65 | 61.47 | 66.71 | 74.20 | 44.67 | 47.57
Seal [61] | NeurIPS'23 | ViT-S | 45.16 | 44.27 | 55.13 | 62.46 | 67.64 | 75.58 | 46.51 | 48.67
SuperFlow | Ours | ViT-S | 46.44 | 47.81 | 59.44 | 64.47 | 69.20 | 76.54 | 47.97 | 49.94
PPKT [63] | arXiv'21 | ViT-B | 39.95 | 40.91 | 53.21 | 60.87 | 66.22 | 74.07 | 44.09 | 47.57
SLidR [82] | CVPR'22 | ViT-B | 45.35 | 41.64 | 55.83 | 62.68 | 67.61 | 74.98 | 45.50 | 48.32
Seal [61] | NeurIPS'23 | ViT-B | 46.59 | 45.98 | 57.15 | 62.79 | 68.18 | 75.41 | 47.24 | 48.91
SuperFlow | Ours | ViT-B | 47.66 | 48.09 | 59.66 | 64.52 | 69.79 | 76.57 | 48.40 | 50.20
PPKT [63] | arXiv'21 | ViT-L | 41.57 | 42.05 | 55.75 | 61.26 | 66.88 | 74.33 | 45.87 | 47.82
SLidR [82] | CVPR'22 | ViT-L | 45.70 | 42.77 | 57.45 | 63.20 | 68.13 | 75.51 | 47.01 | 48.60
Seal [61] | NeurIPS'23 | ViT-L | 46.81 | 46.27 | 58.14 | 63.27 | 68.67 | 75.66 | 47.55 | 50.02
SuperFlow | Ours | ViT-L | 48.01 | 49.95 | 60.72 | 65.09 | 70.01 | 77.19 | 49.07 | 50.67
", + "bbox": [ + 220, + 219, + 792, + 522 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "SuperFlow consistently outperforms state-of-the-art methods under diverse configurations. We attribute this to the use of temporal consistency learning, which captures the structurally rich temporal cues across consecutive scenes and enhances the semantic representation learning of the 3D backbone. We also observe improved performance with larger 2D networks (i.e., from ViT-S to ViT-L), revealing a promising direction of achieving higher quality 3D pretraining.", + "bbox": [ + 215, + 549, + 785, + 640 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Downstream Fine-Tuning. It is known that data representation learning can mitigate the need for large-scale human annotations. Our study systematically compares SuperFlow with prior works on three popular datasets, including nuScenes [26], SemanticKITTI [5], and Waymo Open [89], under limited annotations for few-shot fine-tuning. From Tab. 1, we observe that SuperFlow achieves promising performance gains among three datasets across all fine-tuning tasks. We also use the pretrained 3D backbone as initialization for the fully-supervised learning study on nuScenes [26]. As can be seen from Tab. 1, models pretrained via representation learning consistently outperform the random initialization counterparts, highlighting the efficacy of conducting data pretraining. We also find that distillations from larger 2D networks show consistent improvements.", + "bbox": [ + 215, + 642, + 787, + 808 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Cross-Domain Generalization. To verify the strong generalizability of SuperFlow, we conduct a comprehensive study using seven diverse LiDAR datasets and", + "bbox": [ + 215, + 809, + 785, + 839 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 352, + 127 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/99212ac06ff53c7622e60563a0cf6f05dabbeb103da805ef130aac6e3606a40d.jpg", + "table_caption": [ + "Table 2: Domain generalization study of different pretraining methods pretrained on the nuScenes [26] dataset and fine-tuned on other seven heterogeneous 3D semantic segmentation datasets with specified data portions, respectively. All scores are given in percentage (\\%). Best scores in each configuration are shaded with colors." + ], + "table_footnote": [], + "table_body": "
MethodSciKITTIRellis-3DSemPOSSSemSTFSynLiDARDAPS-3DSynth4D
 | 1% | 10% | 1% | 10% | Half | Full | Half | Full | 1% | 10% | Half | Full | 1% | 10%
Random | 23.81 | 47.60 | 38.46 | 53.60 | 46.26 | 54.12 | 48.03 | 48.15 | 19.89 | 44.74 | 74.32 | 79.38 | 20.22 | 66.87
PPKT [63] | 36.50 | 51.67 | 49.71 | 54.33 | 50.18 | 56.00 | 50.92 | 54.69 | 37.57 | 46.48 | 78.90 | 84.00 | 61.10 | 62.41
SLidR [82] | 39.60 | 50.45 | 49.75 | 54.57 | 51.56 | 55.36 | 52.01 | 54.35 | 42.05 | 47.84 | 81.00 | 85.40 | 63.10 | 62.67
Seal [61] | 40.64 | 52.77 | 51.09 | 55.03 | 53.26 | 56.89 | 53.46 | 55.36 | 43.58 | 49.26 | 81.88 | 85.90 | 64.50 | 66.96
SuperFlow | 42.70 | 54.00 | 52.83 | 55.71 | 54.41 | 57.33 | 54.72 | 56.57 | 44.85 | 51.38 | 82.43 | 86.21 | 65.31 | 69.43
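The 1%, 10%, and Half columns in Tab. 2 denote the fraction of annotated training scans used during fine-tuning. A common way to realize such splits is uniform subsampling over the ordered scan list; the helper below is a minimal sketch under that assumption, not the official split generator of any of these benchmarks.

```python
def make_portion_split(scan_files: list, portion: float) -> list:
    """Uniformly subsample a fraction of annotated scans (e.g., 0.01 for 1%)."""
    assert 0.0 < portion <= 1.0
    step = max(1, round(1.0 / portion))
    return scan_files[::step]

scans = [f"scan_{i:04d}.bin" for i in range(1000)]
print(len(make_portion_split(scans, 0.01)))  # -> 10, i.e., 1% of 1000 scans
```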
", + "bbox": [ + 220, + 205, + 785, + 299 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/433d3674cae395d6a29464e75ddd8e068e1d2d9bb9cb9febc736b81435671f39.jpg", + "table_caption": [ + "Table 3: Out-of-distribution 3D robustness study of state-of-the-art pretraining methods under corruption and sensor failure scenarios in the nuScenes- $C$ dataset from the Robo3D benchmark [45]. Full denotes fine-tuning with full labels. LP denotes linear probing with a frozen backbone. All mCE $(\\downarrow)$ , mRR $(\\uparrow)$ , and mIoU $(\\uparrow)$ scores are given in percentage $(\\%)$ . Best scores in each configuration are shaded with colors." + ], + "table_footnote": [], + "table_body": "
# | Initial | Backbone | mCE | mRR | Fog | Rain | Snow | Blur | Beam | Cross | Echo | Sensor | Avg
Full | Random | MinkU-18 | 115.61 | 70.85 | 53.90 | 71.10 | 48.22 | 51.85 | 62.21 | 37.73 | 57.47 | 38.97 | 52.68
Full | SuperFlow | MinkU-18 | 109.00 | 75.66 | 54.95 | 72.79 | 49.56 | 57.68 | 62.82 | 42.45 | 59.61 | 41.77 | 55.21
Full | Random | MinkU-34 | 112.20 | 72.57 | 62.96 | 70.65 | 55.48 | 51.71 | 62.01 | 31.56 | 59.64 | 39.41 | 54.18
Full | PPKT [63] | MinkU-34 | 105.64 | 75.87 | 64.01 | 72.18 | 59.08 | 57.17 | 63.88 | 36.34 | 60.59 | 39.57 | 56.60
Full | SLidR [82] | MinkU-34 | 106.08 | 75.99 | 65.41 | 72.31 | 56.01 | 56.07 | 62.87 | 41.94 | 61.16 | 38.90 | 56.83
Full | Seal [61] | MinkU-34 | 92.63 | 83.08 | 72.66 | 74.31 | 66.22 | 66.14 | 65.96 | 57.44 | 59.87 | 39.85 | 62.81
Full | SuperFlow | MinkU-34 | 91.67 | 83.17 | 70.32 | 75.77 | 65.41 | 61.05 | 68.09 | 60.02 | 58.36 | 50.41 | 63.68
Full | Random | MinkU-50 | 113.76 | 72.81 | 49.95 | 71.16 | 45.36 | 55.55 | 62.84 | 36.94 | 59.12 | 43.15 | 53.01
Full | SuperFlow | MinkU-50 | 107.35 | 74.02 | 54.36 | 73.08 | 50.07 | 56.92 | 64.05 | 38.10 | 62.02 | 47.02 | 55.70
Full | Random | MinkU-101 | 109.10 | 74.07 | 50.45 | 73.02 | 48.85 | 58.48 | 64.18 | 43.86 | 59.82 | 41.47 | 55.02
Full | SuperFlow | MinkU-101 | 96.44 | 78.57 | 56.92 | 76.29 | 54.70 | 59.35 | 71.89 | 55.13 | 60.27 | 51.60 | 60.77
LP | PPKT [63] | MinkU-34 | 183.44 | 78.15 | 30.65 | 35.42 | 28.12 | 29.21 | 32.82 | 19.52 | 28.01 | 20.71 | 28.06
LP | SLidR [82] | MinkU-34 | 179.38 | 77.18 | 34.88 | 38.09 | 32.64 | 26.44 | 33.73 | 20.81 | 31.54 | 21.44 | 29.95
LP | Seal [61] | MinkU-34 | 166.18 | 75.38 | 37.33 | 42.77 | 29.93 | 37.73 | 40.32 | 20.31 | 37.73 | 24.94 | 33.88
LP | SuperFlow | MinkU-34 | 161.78 | 75.52 | 37.59 | 43.42 | 37.60 | 39.57 | 41.40 | 23.64 | 38.03 | 26.69 | 35.99
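For reading the two leftmost metric columns: in Robo3D [45], mCE averages per-corruption error rates normalized by a reference model's errors (lower is better), and mRR averages the ratio of corrupted mIoU to clean mIoU (higher is better). The sketch below recomputes both from the two MinkU-18 rows above; note that the benchmark normalizes mCE against a fixed baseline model, so pairing these particular rows is only illustrative, and the clean mIoU here is back-solved from the row's mRR rather than taken from this table.

```python
def mce(scores: dict, ref_scores: dict) -> float:
    """Mean Corruption Error: per-corruption errors normalized by a reference."""
    ces = [(100.0 - scores[c]) / (100.0 - ref_scores[c]) for c in scores]
    return 100.0 * sum(ces) / len(ces)

def mrr(scores: dict, clean_miou: float) -> float:
    """Mean Resilience Rate: corrupted mIoU relative to clean mIoU."""
    return 100.0 * sum(scores.values()) / (len(scores) * clean_miou)

corruptions = ["fog", "rain", "snow", "blur", "beam", "cross", "echo", "sensor"]
superflow = dict(zip(corruptions, [54.95, 72.79, 49.56, 57.68, 62.82, 42.45, 59.61, 41.77]))
random_init = dict(zip(corruptions, [53.90, 71.10, 48.22, 51.85, 62.21, 37.73, 57.47, 38.97]))
clean = 72.97  # approximate clean-set mIoU, back-solved from the tabulated mRR
print(f"mRR: {mrr(superflow, clean):.2f}")  # ~75.66, matching the row above
# differs from the tabulated 109.00 because Robo3D uses a fixed reference model:
print(f"mCE vs. this random row: {mce(superflow, random_init):.2f}")
```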
", + "bbox": [ + 220, + 383, + 787, + 585 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "show results in Tab. 2. It is worth noting that these datasets are collected under different acquisition and annotation conditions, including adverse weather, weak annotations, synthetic collection, and dynamic objects. For all fourteen domain generalization fine-tuning tasks, SuperFlow exhibits superior performance over the prior arts [61,63,82]. This study strongly verifies the effectiveness of the proposed flow-based contrastive learning for image-to-LiDAR data representation.", + "bbox": [ + 212, + 611, + 787, + 700 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Out-of-Distribution Robustness. The robustness of 3D perception models against unprecedented conditions directly correlates with the model's applicability to real-world applications [29, 48, 54, 102]. We compare our SuperFlow with prior models in the nuScenes- $C$ dataset from the Robo3D benchmark [45] and show results in Tab. 3. We observe that models pretrained using SuperFlow exhibit improved robustness over the random initialization counterparts. Besides, we find that 3D networks with different capacities often pose diverse robustness.", + "bbox": [ + 212, + 703, + 787, + 809 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Quantitative Assessments. We visualize the prediction results fine-tuned on nuScenes [26], SemanticKITTI [5] and Waymo Open [89], compared with random", + "bbox": [ + 212, + 809, + 787, + 840 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "4D Contrastive Superflows are Dense 3D Representation Learners", + "bbox": [ + 294, + 114, + 732, + 128 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 114, + 782, + 126 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/80c858bd9940bcdce9361f73b8d3a1dce32044c247d62574983c2a163418f2c0.jpg", + "table_caption": [ + "Table 4: Ablation study of SuperFlow using different # of sweeps. All methods use ViT-B [72] for distillation. All scores are given in percentage (%). Baseline results are shaded with colors." + ], + "table_footnote": [], + "table_body": "
# Sweeps | nuScenes LP | nuScenes 1% | KITTI 1% | Waymo 1%
1× Sweeps | 47.41 | 47.52 | 48.14 | 49.31
2× Sweeps (baseline) | 47.66 | 48.09 | 48.40 | 50.20
5× Sweeps | 47.23 | 48.00 | 47.94 | 49.14
7× Sweeps | 46.03 | 47.98 | 46.83 | 47.97
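The sweep counts above control how many consecutive LiDAR scans are merged into the dense point cloud used for dense-to-sparse regularization. Below is a minimal sketch of such multi-sweep aggregation, assuming hypothetical 4x4 ego-motion matrices that map each sweep into the keyframe coordinate system; it is not the paper's data pipeline.

```python
import numpy as np

def densify(sweeps: list, poses: list) -> np.ndarray:
    """Concatenate (N_i, 3) sweeps after mapping each into the keyframe
    coordinate system with its 4x4 rigid transform."""
    merged = []
    for pts, pose in zip(sweeps, poses):
        homo = np.hstack([pts, np.ones((len(pts), 1))])  # (N, 4) homogeneous
        merged.append((homo @ pose.T)[:, :3])
    return np.concatenate(merged, axis=0)

# toy example: two sweeps, the second shifted by 1 m along x
s0, s1 = np.random.rand(100, 3), np.random.rand(120, 3)
T0, T1 = np.eye(4), np.eye(4)
T1[0, 3] = 1.0
print(densify([s0, s1], [T0, T1]).shape)  # -> (220, 3)
```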
", + "bbox": [ + 218, + 220, + 473, + 297 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/21db9c2125c93b3217ec0829a9990d5e1da45242cf0abf42ca3ba2322fc09b22.jpg", + "table_caption": [ + "Table 5: Ablation study of SuperFlow on network capacity (# params) of 3D backbones. All methods use ViT-B [72] for distillation. All scores are given in percentage $(\\%)$ . Baseline results are shaded with colors." + ], + "table_footnote": [], + "table_body": "
Backbone | Layer | nuScenes LP | nuScenes 1% | KITTI 1% | Waymo 1%
MinkUNet | 18 | 47.20 | 47.70 | 48.04 | 49.24
MinkUNet | 34 (baseline) | 47.66 | 48.09 | 48.40 | 50.20
MinkUNet | 50 | 54.11 | 52.86 | 49.22 | 51.20
MinkUNet | 101 | 52.56 | 51.19 | 48.51 | 50.01
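The caption of Tab. 5 refers to network capacity in # params, which is usually measured directly on the instantiated model. A generic PyTorch helper for this (applicable to any nn.Module; the toy stand-in below is not the paper's MinkUNet definition):

```python
import torch.nn as nn

def count_params(model: nn.Module) -> int:
    """Total number of trainable parameters."""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

toy_backbone = nn.Sequential(nn.Linear(4, 64), nn.ReLU(), nn.Linear(64, 17))
print(f"{count_params(toy_backbone) / 1e6:.4f} M parameters")
```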
", + "bbox": [ + 491, + 220, + 782, + 297 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/f4be9d88cc20bc6d06a3a363feb9e31082ded5b164315a623c796c9ade2ef37a.jpg", + "image_caption": [ + "Fig. 5: Qualitative assessments of state-of-the-art pretraining methods pretrained on nuScenes [26] and fine-tuned on nuScenes [26], SemanticKITTI [5], and Waymo Open [89], with $1\\%$ annotations. The error maps show the correct and incorrect predictions in gray and red, respectively. Best viewed in colors and zoomed-in for details." + ], + "image_footnote": [], + "bbox": [ + 220, + 311, + 782, + 566 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "initialization, SLiDR [82], and Seal [61]. As shown in Fig. 5, Superflow performs well, especially on backgrounds, i.e., \"road\" and \"sidewalk\" in complex scenarios.", + "bbox": [ + 212, + 652, + 787, + 683 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.3 Ablation Study", + "text_level": 1, + "bbox": [ + 215, + 707, + 388, + 722 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In this section, we are tailored to understand the efficacy of each design in our SuperFlow framework. Unless otherwise specified, we adopt MinkUNet-34 [19] and ViT-B [72] as the 3D and 2D backbones, respectively, throughout this study. 3D Network Capacity. Existing 3D backbones are relatively small in scale compared to their 2D counterparts. We study the scale of the 3D network and the results are shown in Tab. 5. We observe improved performance as the network capacity scales up, except for MinkUNet-101 [19]. We conjecture that this is due", + "bbox": [ + 212, + 733, + 787, + 840 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 352, + 127 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/9947106944d7c65ab4ef03b16593ed7bd79bae48d2b6b2af6e7d37116af6bdc5.jpg", + "table_caption": [ + "Table 6: Ablation study of each component in SuperFlow. All variants use a MinkUNet-34 [19] as the 3D backbone and ViT-B [72] for distillation. VC: View consistency. D2S: Dense-to-sparse regularization. FCL: Flow-based contrastive learning. All scores are given in percentage $(\\%)$ ." + ], + "table_footnote": [], + "table_body": "
# | VC | D2S | FCL | nuScenes LP | nuScenes 1% | KITTI 1% | Waymo 1%
- | Random | - | - | 8.10 | 30.30 | 39.50 | 39.41
(a) | ✗ | ✗ | ✗ | 44.65 | 44.47 | 46.65 | 47.77
(b) | ✓ | ✗ | ✗ | 45.57 | 45.21 | 46.87 | 48.01
(c) | ✓ | ✓ | ✗ | 46.17 | 46.91 | 47.26 | 49.01
(d) | ✓ | ✗ | ✓ | 47.24 | 47.67 | 48.21 | 49.80
(e) | ✓ | ✓ | ✓ | 47.66 | 48.09 | 48.40 | 50.20
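FCL, like the spatial objective it extends, rests on contrastive learning between pooled superpoint and superpixel features. As a rough illustration only (not the paper's exact Eq. (5)), an InfoNCE-style loss over matched feature pairs can be written as follows, where row i of both tensors is assumed to describe the same region:

```python
import torch
import torch.nn.functional as F

def info_nce(point_feats: torch.Tensor, pixel_feats: torch.Tensor,
             temperature: float = 0.07) -> torch.Tensor:
    """InfoNCE over matched (superpoint, superpixel) feature pairs."""
    p = F.normalize(point_feats, dim=1)
    q = F.normalize(pixel_feats, dim=1)
    logits = p @ q.t() / temperature      # (M, M) scaled cosine similarities
    targets = torch.arange(len(p))        # positives lie on the diagonal
    return F.cross_entropy(logits, targets)

loss = info_nce(torch.randn(32, 64), torch.randn(32, 64))
print(loss.item())
```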
", + "bbox": [ + 218, + 247, + 509, + 353 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/1580a61da61b3b5b4e4ceb003be4eabb9a3e84525f5348187f647808093c2701.jpg", + "table_caption": [ + "Table 7: Ablation study on spatiotemporal consistency. All variants use a MinkUNet-34 [19] as the 3D backbone and ViT-B [72] for distillation. 0 denotes current timestamp. 0.5s corresponds to a $20\\mathrm{Hz}$ timespan. All scores are given in percentage $(\\%)$ ." + ], + "table_footnote": [], + "table_body": "
Timespan | nuScenes LP | nuScenes 1% | KITTI 1% | Waymo 1%
Single-Frame | 46.17 | 46.91 | 47.26 | 49.01
0, -0.5s | 46.39 | 47.08 | 47.99 | 49.78
-0.5s, 0, +0.5s | 47.66 | 48.09 | 48.40 | 50.20
-1.0s, 0, +1.0s | 47.60 | 47.99 | 48.43 | 50.18
-1.5s, 0, +1.5s | 46.43 | 48.27 | 48.34 | 49.93
-2.0s, 0, +2.0s | 46.20 | 48.49 | 48.18 | 50.01
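The timespans above translate directly into sweep-index offsets given the sensor rate: at the 20 Hz LiDAR rate, ±0.5 s corresponds to ±10 sweeps around the current frame. A trivial, illustrative helper:

```python
def sweep_offsets(timespans_s, lidar_hz=20.0):
    """Convert temporal offsets in seconds into integer sweep-index offsets."""
    return [round(t * lidar_hz) for t in timespans_s]

print(sweep_offsets([-0.5, 0.0, 0.5]))  # -> [-10, 0, 10]
```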
", + "bbox": [ + 526, + 247, + 784, + 352 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "to the fact that models with limited parameters are less effective in capturing patterns during representation learning, and, conversely, models with a large set of trainable parameters tend to be difficult to converge.", + "bbox": [ + 212, + 371, + 784, + 416 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Representation Density. The consistency regularization between sparse and dense point clouds encourages useful representation learning. To analyze the degree of regularization, we investigate various point cloud densities and show the results in Tab. 4. We observe that a suitable point cloud density can improve the model's ability to feature representation. When the density of point clouds is too dense, the motion of objects is obvious in the scene. However, we generate superpoints of the dense points based on superpixels captured at the time of sparse points. The displacement difference of dynamic objects makes the projection misalignment. A trade-off selection would be two or three sweeps.", + "bbox": [ + 212, + 417, + 785, + 551 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Temporal Consistency. The ability to capture semantically coherent temporal cues is crucial in our SuperFlow framework. In Eq. (5), we operate temporal contrastive learning on superpoints features across scenes. As shown in Tab. 7, we observe that temporal contrastive learning achieves better results compared to single-frame methods. We also compare the impact of frames used to capture temporal cues. When we use 3 frames, it acquires more context-aware information than 2 frames and achieves better results. Finally, we study the impact of the timespan between frames. The performance will drop with a longer timespan. We conjecture that scenes with short timespans have more consistency, while long timespans tend to have more uncertain factors.", + "bbox": [ + 212, + 551, + 787, + 703 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Component Analysis. In Tab. 6, we analyze each component in the SuperFlow framework, including view consistency, dense-to-sparse regularization, and flow-based contrastive learning. The baseline is SLiDR [82] with VFMs-based superpixels. View consistency brings slight improvements among the popular datasets with a few annotations. D2S distills dense features into sparse features and it brings about $1\\%$ mIoU gains. FCL extracts temporal cues via temporal contrastive learning and it significantly leads to about $2.0\\%$ mIoU gains.", + "bbox": [ + 212, + 704, + 787, + 809 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Visual Inspections. Similarity maps presented in Fig. 6 denote the segmentation ability of our pretrained model. 
The query points include \"car\", \"man-", + "bbox": [ + 212, + 809, + 785, + 839 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "4D Contrastive Superflows are Dense 3D Representation Learners", + "bbox": [ + 292, + 114, + 732, + 128 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/12bc1af94125515fe8051a70711c3ff20bdc4863433c7582ef4dccbea1b86ce2.jpg", + "image_caption": [ + "(a) \"car\" (3D)" + ], + "image_footnote": [], + "bbox": [ + 218, + 143, + 395, + 212 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/fb0e28d4d6c13aaa80ccc3373c14b356ca27a471b46636f037b077d7ae892728.jpg", + "image_caption": [ + "(b) \"manmade\" (3D)" + ], + "image_footnote": [], + "bbox": [ + 411, + 143, + 589, + 212 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/b6f43375c411a39ae209a34edb715b422dbf5ebe2cad8e5d5edf47e96649d7ef.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 606, + 143, + 782, + 212 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/6f4855312f348390127186ae934af7372a4890336ae6ec836de8ddf26685b32e.jpg", + "image_caption": [ + "(d) \"car\" (2D)" + ], + "image_footnote": [], + "bbox": [ + 220, + 226, + 395, + 294 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/3fdf1159b556ec7132d4c18c87d23b73b04cbab10c84428d9280dcf0a065ed4c.jpg", + "image_caption": [ + "(e) \"manmade\" (2D)" + ], + "image_footnote": [], + "bbox": [ + 413, + 226, + 589, + 294 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/d78f6d33c4c9b14ebe6bb966101a2e602206ca40d62d1279e38e533a6ff4556c.jpg", + "image_caption": [ + "(c) \"sidewalk\" (3D)" + ], + "image_footnote": [], + "bbox": [ + 606, + 226, + 782, + 294 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/06f1520cfe6c3acf8a78f2f5cfbf805329812e90e08b8d457ef2ed6e61b6fa2b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 220, + 308, + 395, + 377 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/a46f1f5da693229841ddb3ecf854bdafedbeb44368d7508aec878ebda3e629ed.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 413, + 308, + 589, + 377 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/2f515a9dfd96c708f7d7559bf86b7a5e6f0fe21524266fe90d9d7e78185e290b.jpg", + "image_caption": [ + "(f) \"sidewalk\" (2D)", + "(i) \"terrain\" (3D)" + ], + "image_footnote": [], + "bbox": [ + 606, + 308, + 782, + 377 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/a24a5d71cc50cab9fe2a8266fbc4ea2ca49f68c1fbc4d0faa5952b776f76e50c.jpg", + "image_caption": [ + "(g) \"vegetation\" (3D)", + "(j) \"vegetation\" (2D)", + "Fig. 6: Cosine similarity between features of a query point (red dot) and: 1) features of other points projected in the image (the 1st and 3rd rows); and 2) features of an image with the same scene (the 2nd and 4th rows). The color goes from red to blue denoting low and high similarity scores, respectively. Best viewed in color." 
+ ], + "image_footnote": [], + "bbox": [ + 220, + 390, + 395, + 458 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/9d0bb47d746a504c47701d6a3948d9f767d00fce5330b9c770273c0e227a548a.jpg", + "image_caption": [ + "(h) \"driveable surface\" (3D)", + "(k) \"driveable surface\" (2D)" + ], + "image_footnote": [], + "bbox": [ + 413, + 390, + 589, + 458 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/09c89e52a9980fd253975c6cc38438de5e90b29a9cd3e1aaef25e6cf8c64cf1b.jpg", + "image_caption": [ + "(l) \"terrain\" (2D)" + ], + "image_footnote": [], + "bbox": [ + 606, + 390, + 782, + 458 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "made\", \"sidewalk\", \"vegetation\", \"driveable surface\", and \"terrain\". SuperFlow shows strong semantic discriminative ability without fine-tuning. We conjecture that this stems from three aspects: 1) View-consistent superpixels enable the network to learn semantic representations; 2) Dense-to-sparse regularization helps the network learn features under varying point densities; 3) Temporal contrastive learning extracts semantic cues across scenes.", + "bbox": [ + 212, + 560, + 787, + 651 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 215, + 672, + 359, + 690 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this work, we presented SuperFlow to tackle the challenging task of 3D data representation learning. Motivated by the sequential nature of LiDAR acquisitions, we proposed three novel designs to better encourage spatiotemporal consistency, encompassing view consistency alignment, dense-to-sparse regularization, and flow-based contrastive learning. Extensive experiments across 11 diverse LiDAR datasets showed that SuperFlow consistently outperforms prior approaches in linear probing, downstream fine-tuning, and robustness probing. Our study on scaling up 2D and 3D network capacities reveals insightful findings. We hope this work could shed light on future designs of powerful 3D foundation models.", + "bbox": [ + 212, + 703, + 787, + 840 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 352, + 126 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgements. This work was supported by the Scientific and Technological Innovation 2030 - \"New Generation Artificial Intelligence\" Major Project (No. 2021ZD0112200), the Joint Funds of the National Natural Science Foundation of China (No. U21B2044), the Key Research and Development Program of Jiangsu Province (No. BE2023016-3), and the Talent Research Start-up Foundation of Nanjing University of Posts and Telecommunications (No. NY223172). This work was also supported by the Ministry of Education, Singapore, under its MOE AcRF Tier 2 (MOET2EP20221-0012), NTU NAP, and under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding Initiative, as well as cash and in-kind contribution from the industry partner(s).", + "bbox": [ + 212, + 146, + 787, + 297 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 217, + 324, + 321, + 340 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. 
Achanta, R., Shaji, A., Smith, K., Lucchi, A., Fua, P., Susstrunk, S.: Slic superpixels compared to state-of-the-art superpixel methods. IEEE Transactions on Pattern Analysis and Machine Intelligence 34(11), 2274-2282 (2012)", + "2. Aygun, M., Osep, A., Weber, M., Maximov, M., Stachniss, C., Behley, J., Leal-Taixe, L.: 4d panoptic lidar segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5527-5537 (2021)", + "3. Badue, C., Guidolini, R., Carneiro, R.V., Azevedo, P., Cardoso, V.B., Forechi, A., Jesus, L., Berriel, R., Paixão, T.M., Mutz, F., de Paula Veronese, L., Oliveira-Santos, T., Souza, A.F.D.: Self-driving cars: A survey. Expert Systems with Applications 165, 113816 (2021)", + "4. Behley, J., Garbade, M., Milioto, A., Quenzel, J., Behnke, S., Gall, J., Stachniss, C.: Towards 3d lidar-based semantic scene understanding of 3d point cloud sequences: The semanticicketti dataset. International Journal of Robotics Research 40, 959-96 (2021)", + "5. Behley, J., Garbade, M., Milioto, A., Quenzel, J., Behnke, S., Stachniss, C., Gall, J.: Semantickitti: A dataset for semantic scene understanding of lidar sequences. In: IEEE/CVF International Conference on Computer Vision. pp. 9297-9307 (2019)", + "6. Bengio, Y., Courville, A., Vincent, P.: Representation learning: A review and new perspectives. IEEE Transactions on Pattern Analysis and Machine Intelligence 35(8), 1798-1828 (2013)", + "7. Boulch, A., Sautier, C., Michele, B., Puy, G., Marlet, R.: Also: Automotive lidar self-supervision by occupancy estimation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13455-13465 (2023)", + "8. Caesar, H., Bankiti, V., Lang, A.H., Vora, S., Liong, V.E., Xu, Q., Krishnan, A., Pan, Y., Baldan, G., Beijbom, O.: nuscenes: A multimodal dataset for autonomous driving. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11621-11631 (2020)", + "9. Cao, A.Q., Dai, A., de Charette, R.: Pasco: Urban 3d panoptic scene completion with uncertainty awareness. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 14554-14564 (2024)", + "10. Chen, Q., Vora, S., Beijbom, O.: Polarstream: Streaming lidar object detection and segmentation with polar pillars. In: Advances in Neural Information Processing Systems. vol. 34 (2021)" + ], + "bbox": [ + 225, + 359, + 785, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "4D Contrastive Superflows are Dense 3D Representation Learners", + "bbox": [ + 292, + 114, + 732, + 128 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "11. Chen, R., Liu, Y., Kong, L., Chen, N., Zhu, X., Ma, Y., Liu, T., Wang, W.: Towards label-free scene understanding by vision foundation models. In: Advances in Neural Information Processing Systems. vol. 36 (2023)", + "12. Chen, R., Liu, Y., Kong, L., Zhu, X., Ma, Y., Li, Y., Hou, Y., Qiao, Y., Wang, W.: Clip2scene: Towards label-efficient 3d scene understanding by clip. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7020-7030 (2023)", + "13. Chen, T., Kornblith, S., Norouzi, M., Hinton, G.: A simple framework for contrastive learning of visual representations. In: International Conference on Machine Learning. pp. 1597-1607 (2020)", + "14. Chen, X., Fan, H., Girshick, R., He, K.: Improved baselines with momentum contrastive learning. 
arXiv preprint arXiv:2003.04297 (2020)", + "15. Chen, X., Xie, S., He, K.: An empirical study of training self-supervised vision transformers. In: IEEE/CVF International Conference on Computer Vision. pp. 9640-9649 (2021)", + "16. Chen, Y., Nießner, M., Dai, A.: 4dcontrast: Contrastive learning with dynamic correspondences for 3d scene understanding. In: European Conference on Computer Vision. pp. 543-560 (2022)", + "17. Cheng, H., Han, X., Xiao, G.: Cenet: Toward concise and efficient lidar semantic segmentation for autonomous driving. In: IEEE International Conference on Multimedia and Expo. pp. 1-6 (2022)", + "18. Cheng, R., Razani, R., Taghavi, E., Li, E., Liu, B.: Af2-s3net: Attentive feature fusion with adaptive feature selection for sparse semantic segmentation network. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 12547-12556 (2021)", + "19. Choy, C., Gwak, J., Savarese, S.: 4d spatio-temporal convnets: Minkowski convolutional neural networks. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3075-3084 (2019)", + "20. Contributors, M.: MMDetection3D: OpenMMLab next-generation platform for general 3D object detection. https://github.com/open-mmlab/mmdetection3d (2020)", + "21. Cortinhal, T., Tzelepis, G., Aksoy, E.E.: Salsanext: Fast, uncertainty-aware semantic segmentation of lidar point clouds. In: International Symposium on Visual Computing. pp. 207-222 (2020)", + "22. Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., Uszkoreit, J., Houlsby, N.: An image is worth 16x16 words: Transformers for image recognition at scale. In: International Conference on Learning Representations (2020)", + "23. Duerr, F., Pfaller, M., Weigel, H., Beyerer, J.: Lidar-based recurrent 3d semantic segmentation with temporal memory alignment. In: International Conference on 3D Vision. pp. 781-790 (2020)", + "24. Ester, M., Kriegel, H.P., Sander, J., Xu, X.: A density-based algorithm for discovering clusters in large spatial databases with noise. In: ACM SIGKDD Conference on Knowledge Discovery and Data Mining. pp. 226-231 (1996)", + "25. Fischler, M.A., Bolles, R.C.: Random sample consensus: A paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM 24(6), 381-395 (1981)", + "26. Fong, W.K., Mohan, R., Hurtado, J.V., Zhou, L., Caesar, H., Beijbom, O., Valada, A.: Panoptic nuscenes: A large-scale benchmark for lidar panoptic segmentation and tracking. IEEE Robotics and Automation Letters 7, 3795-3802 (2022)" + ], + "bbox": [ + 225, + 146, + 784, + 839 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 351, + 126 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "27. Gao, B., Pan, Y., Li, C., Geng, S., Zhao, H.: Are we hungry for 3d lidar data for semantic segmentation? a survey of datasets and methods. IEEE Transactions on Intelligent Transportation Systems 23(7), 6063-6081 (2021)", + "28. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? the kitti vision benchmark suite. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3354-3361 (2012)", + "29. 
Hao, X., Wei, M., Yang, Y., Zhao, H., Zhang, H., Zhou, Y., Wang, Q., Li, W., Kong, L., Zhang, J.: Is your hd map constructor reliable under sensor corruptions? arXiv preprint arXiv:2406.12214 (2024)", + "30. He, K., Chen, X., Xie, S., Li, Y., Dólár, P., Girshick, R.: Masked autoencoders are scalable vision learners. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 16000-16009 (2022)", + "31. He, K., Fan, H., Wu, Y., Xie, S., Girshick, R.: Momentum contrast for unsupervised visual representation learning. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9729-9738 (2020)", + "32. Hess, G., Jaxing, J., Svensson, E., Hagerman, D., Petersson, C., Svensson, L.: Masked autoencoders for self-supervised learning on automotive point clouds. arXiv preprint arXiv:2207.00531 (2022)", + "33. Hong, F., Kong, L., Zhou, H., Zhu, X., Li, H., Liu, Z.: Unified 3d and 4d panoptic segmentation via dynamic shifting networks. IEEE Transactions on Pattern Analysis and Machine Intelligence 46(5), 3480-3495 (2024)", + "34. Hong, F., Zhou, H., Zhu, X., Li, H., Liu, Z.: Lidar-based panoptic segmentation via dynamic shifting network. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13090-13099 (2021)", + "35. Hou, J., Graham, B., Nießner, M., Xie, S.: Exploring data-efficient 3d scene understanding with contrastive scene contexts. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15587-15597 (2021)", + "36. Hu, Q., Yang, B., Fang, G., Guo, Y., Leonardis, A., Trigoni, N., Markham, A.: Sqn: Weakly-supervised semantic segmentation of large-scale 3d point clouds. In: European Conference on Computer Vision. pp. 600-619 (2022)", + "37. Hu, Q., Yang, B., Khalid, S., Xiao, W., Trigoni, N., Markham, A.: Towards semantic segmentation of urban-scale 3d point clouds: A dataset, benchmarks and challenges. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4977-4987 (2021)", + "38. Hu, Z., Bai, X., Zhang, R., Wang, X., Sun, G., Fu, H., Tai, C.L.: Lidal: Interframe uncertainty based active learning for 3d lidar semantic segmentation. In: European Conference on Computer Vision. pp. 248-265 (2022)", + "39. Huang, S., Xie, Y., Zhu, S.C., Zhu, Y.: Spatio-temporal self-supervised representation learning for 3d point clouds. In: IEEE/CVF International Conference on Computer Vision. pp. 6535-6545 (2021)", + "40. Jaritz, M., Vu, T.H., de Charette, R., Wirbel, E., Pérez, P.: xmuda: Cross-modal unsupervised domain adaptation for 3d semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 12605-12614 (2020)", + "41. Jiang, P., Osteen, P., Wigness, M., Saripallig, S.: Rellis-3d dataset: Data, benchmarks and analysis. In: IEEE International Conference on Robotics and Automation. pp. 1110–1116 (2021)", + "42. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., Dollar, P., Girshick, R.: Segment anything. In: IEEE/CVF International Conference on Computer Vision. pp. 4015-4026 (2023)" + ], + "bbox": [ + 225, + 146, + 784, + 839 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "4D Contrastive Superflows are Dense 3D Representation Learners", + "bbox": [ + 292, + 114, + 732, + 128 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "43. 
Klokov, A., Pak, D.U., Khorin, A., Yudin, D., Kochiev, L., Luchinskiy, V., Bezuglyj, V.: Daps3d: Domain adaptive projective segmentation of 3d lidar point clouds. IEEE Access 11, 79341-79356 (2023)", + "44. Kong, L., Liu, Y., Chen, R., Ma, Y., Zhu, X., Li, Y., Hou, Y., Qiao, Y., Liu, Z.: Rethinking range view representation for lidar segmentation. In: IEEE/CVF International Conference on Computer Vision. pp. 228-240 (2023)", + "45. Kong, L., Liu, Y., Li, X., Chen, R., Zhang, W., Ren, J., Pan, L., Chen, K., Liu, Z.: Robo3d: Towards robust and reliable 3d perception against corruptions. In: IEEE/CVF International Conference on Computer Vision. pp. 19994-20006 (2023)", + "46. Kong, L., Quader, N., Liong, V.E.: Conda: Unsupervised domain adaptation for lidar segmentation via regularized domain concatenation. In: IEEE International Conference on Robotics and Automation. pp. 9338-9345 (2023)", + "47. Kong, L., Ren, J., Pan, L., Liu, Z.: Lasermix for semi-supervised lidar semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21705-21715 (2023)", + "48. Kong, L., Xie, S., Hu, H., Ng, L.X., Cottereau, B.R., Ooi, W.T.: Robodepth: Robust out-of-distribution depth estimation under corruptions. In: Advances in Neural Information Processing Systems. vol. 36 (2023)", + "49. Kong, L., Xu, X., Ren, J., Zhang, W., Pan, L., Chen, K., Ooi, W.T., Liu, Z.: Multi-modal data-efficient 3d scene understanding for autonomous driving. arXiv preprint arXiv:2405.05258 (2024)", + "50. Krispel, G., Schinagl, D., Fruhwirth-Reisinger, C., Possegger, H., Bischof, H.: Maeli: Masked autoencoder for large-scale lidar point clouds. In: IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 3383-3392 (2024)", + "51. Le-Khac, P.H., Healy, G., Smeaton, A.F.: Contrastive representation learning: A framework and review. IEEE Transactions on Pattern Analysis and Machine Intelligence 8, 193907-193934 (2020)", + "52. Li, L., Shum, H.P., Breckon, T.P.: Less is more: Reducing task and model complexity for 3d point cloud semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9361-9371 (2023)", + "53. Li, R., de Charette, R., Cao, A.Q.: Coarse3d: Class-prototypes for contrastive learning in weakly-supervised 3d point cloud segmentation. In: British Machine Vision Conference (2022)", + "54. Li, Y., Kong, L., Hu, H., Xu, X., Huang, X.: Optimizing lidar placements for robust driving perception in adverse conditions. arXiv preprint arXiv:2403.17009 (2024)", + "55. Lim, H., Oh, M., Myung, H.: Patchwork: Concentric zone-based region-wise ground segmentation with ground likelihood estimation using a 3d lidar sensor. IEEE Robotics and Automation Letters 6(4), 6458-6465 (2021)", + "56. Liong, V.E., Nguyen, T.N.T., Widjaja, S., Sharma, D., Chong, Z.J.: Amvnet: Assertion-based multi-view fusion network for lidar semantic segmentation. arXiv preprint arXiv:2012.04934 (2020)", + "57. Liu, M., Zhou, Y., Qi, C.R., Gong, B., Su, H., Anguelov, D.: Less: Label-efficient semantic segmentation for lidar point clouds. In: European Conference on Computer Vision. pp. 70-89 (2022)", + "58. Liu, M., Yurtsever, E., Zhou, X., Fossaert, J., Cui, Y., Zagar, B.L., Knoll., A.C.: A survey on autonomous driving datasets: Data statistic, annotation, and outlook. 
arXiv preprint arXiv:2401.01454 (2024)" + ], + "bbox": [ + 225, + 146, + 784, + 839 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 352, + 126 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "59. Liu, Y., Bai, Y., Kong, L., Chen, R., Hou, Y., Shi, B., Li, Y.: Pcseg: An open source point cloud segmentation codebase. https://github.com/PJLab-ADG/PCSeg (2023)", + "60. Liu, Y., Chen, R., Li, X., Kong, L., Yang, Y., Xia, Z., Bai, Y., Zhu, X., Ma, Y., Li, Y., Qiao, Y., Hou, Y.: Uniseg: A unified multi-modal lidar segmentation network and the openpcseg codebase. In: IEEE/CVF International Conference on Computer Vision. pp. 21662-21673 (2023)", + "61. Liu, Y., Kong, L., Cen, J., Chen, R., Zhang, W., Pan, L., Chen, K., Liu, Z.: Segment any point cloud sequences by distilling vision foundation models. In: Advances in Neural Information Processing Systems. vol. 36 (2023)", + "62. Liu, Y., Kong, L., Wu, X., Chen, R., Li, X., Pan, L., Liu, Z., Ma, Y.: Multi-space alignments towards universal lidar segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 14648-14661 (2024)", + "63. Liu, Y.C., Huang, Y.K., Chiang, H.Y., Su, H.T., Liu, Z.Y., Chen, C.T., Tseng, C.Y., Hsu, W.H.: Learning from 2d: Contrastive pixel-to-point knowledge transfer for 3d pretraining. arXiv preprint arXiv:2104.04687 (2021)", + "64. Liu, Y., Chen, J., Zhang, Z., Huang, J., Yi, L.: Leaf: Learning frames for 4d point cloud sequence understanding. In: IEEE/CVF International Conference on Computer Vision. pp. 604-613 (2023)", + "65. Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: International Conference on Learning Representations (2018)", + "66. Mahmoud, A., Hu, J.S., Kuai, T., Harakeh, A., Paull, L., Waslander, S.L.: Self-supervised image-to-point distillation via semantically tolerant contrastive loss. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7102-7110 (2023)", + "67. Michele, B., Boulch, A., Puy, G., Vu, T.H., Marlet, R., Courty, N.: Saluda: Surface-based automotive lidar unsupervised domain adaptation. arXiv preprint arXiv:2304.03251 (2023)", + "68. Milioto, A., Vizzo, I., Behley, J., Stachniss, C.: Rangenet++: Fast and accurate lidar semantic segmentation. In: IEEE/RSJ International Conference on Intelligent Robots and Systems. pp. 4213-4220 (2019)", + "69. Muhammad, K., Ullah, A., Lloret, J., Ser, J.D., de Albuquerque, V.H.C.: Deep learning for safe autonomous driving: Current challenges and future directions. IEEE Transactions on Intelligent Transportation Systems 22(7), 4316-4336 (2020)", + "70. Nunes, L., Marcuzzi, R., Chen, X., Behley, J., Stachniss, C.: Segcontrast: 3d point cloud feature representation learning through self-supervised segment discrimination. IEEE Robotics and Automation Letters 7(2), 2116-2123 (2022)", + "71. Nunes, L., Wiesmann, L., Marcuzzi, R., Chen, X., Behley, J., Stachniss, C.: Temporal consistent 3d lidar representation learning for semantic perception in autonomous driving. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5217-5228 (2023)", + "72. 
Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., Assran, M., Ballas, N., Galuba, W., Howes, R., Huang, P.Y., Li, S.W., Misra, I., Rabbat, M., Sharma, V., Synnaeve, G., Xu, H., Jegou, H., Mairal, J., Labatut, P., Joulin, A., Bojanowski, P.: Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193 (2023)", + "73. Pan, Y., Gao, B., Mei, J., Geng, S., Li, C., Zhao, H.: Semanticposs: A point cloud dataset with large quantity of dynamic instances. In: IEEE Intelligent Vehicles Symposium. pp. 687-693 (2020)" + ], + "bbox": [ + 225, + 146, + 784, + 839 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "4D Contrastive Superflows are Dense 3D Representation Learners", + "bbox": [ + 294, + 114, + 732, + 128 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "74. Pang, B., Xia, H., Lu, C.: Unsupervised 3d point cloud representation learning by triangle constrained contrast for autonomous driving. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5229-5239 (2023)", + "75. Puy, G., Gidaris, S., Boulch, A., Simeoni, O., Sautier, C., Pérez, P., Bursuc, A., Marlet, R.: Revisiting the distillation of image representations into point clouds for autonomous driving. arXiv preprint arXiv:2310.17504 (2023)", + "76. Puy, G., Gidaris, S., Boulch, A., Simeoni, O., Sautier, C., Pérez, P., Bursuc, A., Marlet, R.: Three pillars improving vision foundation model distillation for lidar. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21519-21529 (2024)", + "77. Qiu, H., Yu, B., Tao, D.: Gfnet: Geometric flow network for 3d point cloud semantic segmentation. Transactions on Machine Learning Research (2022)", + "78. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International conference on machine learning. pp. 8748-8763. PMLR (2021)", + "79. Rizzoli, G., Barbato, F., Zanuttigh, P.: Multimodal semantic segmentation in autonomous driving: A review of current approaches and future perspectives. Technologies 10(4) (2022)", + "80. Saltori, C., Krivosheev, E., Lathuilière, S., Sebe, N., Galasso, F., Fiameni, G., Ricci, E., Poiesi, F.: Gipso: Geometrically informed propagation for online adaptation in 3d lidar segmentation. In: European Conference on Computer Vision. pp. 567-585 (2022)", + "81. Sautier, C., Puy, G., Boulch, A., Marlet, R., Lepetit, V.: Bevcontrast: Self-supervision in bev space for automotive lidar point clouds. arXiv preprint arXiv:2310.17281 (2023)", + "82. Sautier, C., Puy, G., Gidaris, S., Boulch, A., Bursuc, A., Marlet, R.: Image-to-lidar self-supervised distillation for autonomous driving data. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9891-9901 (2022)", + "83. Shen, Z., Sheng, X., Fan, H., Wang, L., Guo, Y., Liu, Q., Wen, H., Zhou, X.: Masked spatio-temporal structure prediction for self-supervised learning on point cloud videos. In: IEEE/CVF International Conference on Computer Vision. pp. 16580-16589 (2023)", + "84. Sheng, X., Shen, Z., Xiao, G., Wang, L., Guo, Y., Fan, H.: Point contrastive prediction with semantic clustering for self-supervised learning on point cloud videos. 
In: IEEE/CVF International Conference on Computer Vision. pp. 16515-16524 (2023)", + "85. Shi, H., Lin, G., Wang, H., Hung, T.Y., Wang, Z.: Spsequencenet: Semantic segmentation network on 4d point clouds. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4574-4583 (2020)", + "86. Shi, H., Wei, J., Li, R., Liu, F., Lin, G.: Weakly supervised segmentation on outdoor 4d point clouds with temporal matching and spatial graph propagation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11840-11849 (2022)", + "87. Smith, L.N., Topin, N.: Super-convergence: Very fast training of neural networks using large learning rates. arXiv preprint arXiv:1708.07120 (2017)", + "88. Sun, J., Xu, X., Kong, L., Liu, Y., Li, L., Zhu, C., Zhang, J., Xiao, Z., Chen, R., Wang, T., Zhang, W., Chen, K., Qing, C.: An empirical study of training state-of-the-art lidar segmentation models. arXiv preprint arXiv:2405.14870 (2024)" + ], + "bbox": [ + 223, + 147, + 785, + 840 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 351, + 126 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "89. Sun, P., Kretzschmar, H., Dotiwalla, X., Chouard, A., Patnaik, V., Tsui, P., Guo, J., Zhou, Y., Chai, Y., Caine, B., Vasudevan, V., Han, W., Ngiam, J., Zhao, H., Timofeev, A., Ettinger, S., Krivokon, M., Gao, A., Joshi, A., Zhang, Y., Shlens, J., Chen, Z., Anguelov, D.: Scalability in perception for autonomous driving: Waymo open dataset. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2446-2454 (2020)", + "90. Tang, H., Liu, Z., Zhao, S., Lin, Y., Lin, J., Wang, H., Han, S.: Searching efficient 3d architectures with sparse point-voxel convolution. In: European Conference on Computer Vision. pp. 685-702 (2020)", + "91. Tarvainen, A., Valpola, H.: Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. In: Advances in Neural Information Processing Systems. vol. 30 (2017)", + "92. Triess, L.T., Dreissig, M., Rist, C.B., Zollner, J.M.: A survey on deep domain adaptation for lidar perception. In: IEEE Intelligent Vehicles Symposium Workshops. pp. 350-357 (2021)", + "93. Uecker, M., Fleck, T., Pflugfelder, M., Zöllner, J.M.: Analyzing deep learning representations of point clouds for real-time in-vehicle lidar perception. arXiv preprint arXiv:2210.14612 (2022)", + "94. Unal, O., Dai, D., Gool, L.V.: Scribble-supervised lidar semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2697-2707 (2022)", + "95. Wei, W., Nejadasl, F.K., Gevers, T., Oswald, M.R.: T-mae: Temporal masked autoencoders for point cloud representation learning. arXiv preprint arXiv:2312.10217 (2023)", + "96. Wu, Y., Zhang, T., Ke, W., Süssstrunk, S., Salzmann, M.: Spatiotemporal self-supervised learning for point clouds in the wild. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5251-5260 (2023)", + "97. Xiao, A., Huang, J., Guan, D., Zhan, F., Lu, S.: Transfer learning from synthetic to real lidar point cloud for semantic segmentation. In: AAAI Conference on Artificial Intelligence. pp. 2795-2803 (2022)", + "98. Xiao, A., Huang, J., Guan, D., Zhang, X., Lu, S., Shao, L.: Unsupervised point cloud representation learning with deep neural networks: A survey. 
IEEE Transactions on Pattern Analysis and Machine Intelligence 45(9), 11321-11339 (2023)", + "99. Xiao, A., Huang, J., Xuan, W., Ren, R., Liu, K., Guan, D., Saddik, A.E., Lu, S., Xing, E.: 3d semantic segmentation in the wild: Learning generalized models for adverse-condition point clouds. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9382-9392 (2023)", + "100. Xie, B., Li, S., Guo, Q., Liu, C.H., Cheng, X.: Annotator: A generic active learning baseline for lidar semantic segmentation. In: Advances in Neural Information Processing Systems. vol. 36 (2023)", + "101. Xie, S., Gu, J., Guo, D., Qi, C.R., Guibas, L., Litany, O.: Pointcontrast: Unsupervised pre-training for 3d point cloud understanding. In: European Conference on Computer Vision. pp. 574-591 (2020)", + "102. Xie, S., Kong, L., Zhang, W., Ren, J., Pan, L., Chen, K., Liu, Z.: Benchmarking and improving bird's eye view perception robustness in autonomous driving. arXiv preprint arXiv:2405.17426 (2024)", + "103. Xie, Z., Zhang, Z., Cao, Y., Lin, Y., Bao, J., Yao, Z., Dai, Q., Hu, H.: Simmim: A simple framework for masked image modeling. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9653-9663 (2022)" + ], + "bbox": [ + 218, + 146, + 785, + 839 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "4D Contrastive Superflows are Dense 3D Representation Learners", + "bbox": [ + 294, + 114, + 732, + 128 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 767, + 116, + 782, + 126 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "104. Xu, C., Wu, B., Wang, Z., Zhan, W., Vajda, P., Keutzer, K., Tomizuka, M.: Squeezesegv3: Spatially-adaptive convolution for efficient point-cloud segmentation. In: European Conference on Computer Vision. pp. 1-19 (2020)", + "105. Xu, J., Zhang, R., Dou, J., Zhu, Y., Sun, J., Pu, S.: Rpvnet: A deep and efficient range-point-voxel fusion network for lidar point cloud segmentation. In: IEEE/CVF International Conference on Computer Vision. pp. 16024-16033 (2021)", + "106. Xu, W., Li, X., Ni, P., Guang, X., Luo, H., Zhao, X.: Multi-view fusion driven 3d point cloud semantic segmentation based on hierarchical transformer. IEEE Sensors Journal 23(24), 31461-31470 (2023)", + "107. Xu, X., Kong, L., Shuai, H., Liu, Q.: Frnet: Frustum-range networks for scalable lidar segmentation. arXiv preprint arXiv:2312.04484 (2023)", + "108. Yin, J., Zhou, D., Zhang, L., Fang, J., Xu, C.Z., Shen, J., Wang, W.: Proposal contrast: Unsupervised pre-training for lidar-based 3d object detection. In: European Conference on Computer Vision. pp. 17-33 (2022)", + "109. Zhang, H., Li, F., Zou, X., Liu, S., Li, C., Gao, J., Yang, J., Zhang, L.: A simple framework for open-vocabulary segmentation and detection. In: IEEE/CVF International Conference on Computer Vision. pp. 1020-1031 (2023)", + "110. Zhang, S., Deng, J., Bai, L., Li, H., Ouyang, W., Zhang, Y.: Hvdistill: Transferring knowledge from images to point clouds via unsupervised hybrid-view distillation. International Journal of Computer Vision pp. 1-15 (2024)", + "111. Zhang, Y., Zhou, Z., David, P., Yue, X., Xi, Z., Gong, B., Foroosh, H.: Polarnet: An improved grid representation for online lidar point clouds semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9601-9610 (2020)", + "112. 
Zhang, Y., Hou, J., Yuan, Y.: A comprehensive study of the robustness for lidar-based 3d object detectors against adversarial attacks. International Journal of Computer Vision pp. 1-33 (2023)", + "113. Zhang, Z., Girdhar, R., Joulin, A., Misra, I.: Self-supervised pretraining of 3d features on any point-cloud. In: IEEE/CVF International Conference on Computer Vision. pp. 10252-10263 (2021)", + "114. Zhang, Z., Dong, Y., Liu, Y., Yi, L.: Complete-to-partial 4d distillation for self-supervised point cloud sequence representation learning. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 17661-17670 (2023)", + "115. Zhang, Z., Yang, B., Wang, B., Li, B.: Growsp: Unsupervised semantic segmentation of 3d point clouds. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 17619-17629 (2023)", + "116. Zhao, Y., Bai, L., Huang, X.: Fidnet: Lidar point cloud semantic segmentation with fully interpolation decoding. In: IEEE/RSJ International Conference on Intelligent Robots and Systems. pp. 4453-4458 (2021)", + "117. Zhou, Z., Zhang, Y., Foroosh, H.: Panoptic-polarnet: Proposal-free lidar point cloud panoptic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13194-13203 (2021)", + "118. Zhu, X., Zhou, H., Wang, T., Hong, F., Ma, Y., Li, W., Li, H., Lin, D.: Cylindrical and asymmetrical 3d convolution networks for lidar segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9939-9948 (2021)", + "119. Zou, X., Dou, Z.Y., Yang, J., Gan, Z., Li, L., Li, C., Dai, X., Behl, H., Wang, J., Yuan, L., Peng, N., Wang, L., Lee, Y.J., Gao, J.: Generalized decoding for pixel, image, and language. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15116-15127 (2023)" + ], + "bbox": [ + 215, + 146, + 785, + 839 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "X. Xu et al.", + "bbox": [ + 271, + 114, + 351, + 126 + ], + "page_idx": 21 + }, + { + "type": "ref_text", + "text": "120. Zou, X., Yang, J., Zhang, H., Li, F., Li, L., Gao, J., Lee, Y.J.: Segment everything everywhere all at once. In: Advances in Neural Information Processing Systems. vol. 
36 (2023)", + "bbox": [ + 217, + 146, + 787, + 189 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "4D Contrastive Superflows are Dense 3D Representation Learners", + "bbox": [ + 292, + 114, + 730, + 128 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 22 + } +] \ No newline at end of file diff --git a/2024/4D Contrastive Superflows are Dense 3D Representation Learners/3b016017-cefc-4a8b-a706-93b64616c878_model.json b/2024/4D Contrastive Superflows are Dense 3D Representation Learners/3b016017-cefc-4a8b-a706-93b64616c878_model.json new file mode 100644 index 0000000000000000000000000000000000000000..b7a44e09790b5fcd5c11d900c754d42220fe08f4 --- /dev/null +++ b/2024/4D Contrastive Superflows are Dense 3D Representation Learners/3b016017-cefc-4a8b-a706-93b64616c878_model.json @@ -0,0 +1,3535 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.27, + 0.141, + 0.734, + 0.187 + ], + "angle": 0, + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + }, + { + "type": "text", + "bbox": [ + 0.275, + 0.212, + 0.728, + 0.245 + ], + "angle": 0, + "content": "Xiang Xu\\(^{1,\\star}\\), Lingdong Kong\\(^{2,3,*}\\), Hui Shuai\\(^{4}\\), Wenwei Zhang\\(^{2}\\), Liang Pan\\(^{2}\\), Kai Chen\\(^{2}\\), Ziwei Liu\\(^{5}\\), and Qingshan Liu\\(^{4,\\text{图}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.32, + 0.255, + 0.682, + 0.269 + ], + "angle": 0, + "content": "1 Nanjing University of Aeronautics and Astronautics" + }, + { + "type": "text", + "bbox": [ + 0.412, + 0.269, + 0.59, + 0.283 + ], + "angle": 0, + "content": "2 Shanghai AI Laboratory" + }, + { + "type": "text", + "bbox": [ + 0.385, + 0.283, + 0.619, + 0.297 + ], + "angle": 0, + "content": "\\(^{3}\\) National University of Singapore" + }, + { + "type": "text", + "bbox": [ + 0.319, + 0.297, + 0.684, + 0.311 + ], + "angle": 0, + "content": "\\(^{4}\\) Nanjing University of Posts and Telecommunications" + }, + { + "type": "text", + "bbox": [ + 0.357, + 0.311, + 0.646, + 0.324 + ], + "angle": 0, + "content": "S-Lab, Nanyang Technological University" + }, + { + "type": "text", + "bbox": [ + 0.261, + 0.356, + 0.744, + 0.621 + ], + "angle": 0, + "content": "Abstract. In the realm of autonomous driving, accurate 3D perception is the foundation. However, developing such models relies on extensive human annotations – a process that is both costly and labor-intensive. To address this challenge from a data representation learning perspective, we introduce SuperFlow, a novel framework designed to harness consecutive LiDAR-camera pairs for establishing spatiotemporal pretraining objectives. SuperFlow stands out by integrating two key designs: 1) a dense-to-sparse consistency regularization, which promotes insensitivity to point cloud density variations during feature learning, and 2) a flow-based contrastive learning module, carefully crafted to extract meaningful temporal cues from readily available sensor calibrations. To further boost learning efficiency, we incorporate a plug-and-play view consistency module that enhances the alignment of the knowledge distilled from camera views. Extensive comparative and ablation studies across 11 heterogeneous LiDAR datasets validate our effectiveness and superiority. Additionally, we observe several interesting emerging properties by scaling up the 2D and 3D backbones during pretraining, shedding light on the future research of 3D foundation models for LiDAR-based perception. 
Code is publicly available at https://github.com/Xiangxu-0103/SuperFlow." + }, + { + "type": "text", + "bbox": [ + 0.261, + 0.632, + 0.744, + 0.661 + ], + "angle": 0, + "content": "Keywords: LiDAR Segmentation \\(\\cdot\\) 3D Data Pretraining \\(\\cdot\\) Autonomous Driving \\(\\cdot\\) Image-to-LiDAR Contrastive Learning \\(\\cdot\\) Semantic Superpixels" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.683, + 0.377, + 0.699 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.713, + 0.787, + 0.773 + ], + "angle": 0, + "content": "Driving perception is one of the most crucial components of an autonomous vehicle system. Recent advancements in sensing technologies, such as light detection and ranging (LiDAR) sensors and surrounding-view cameras, open up new possibilities for a holistic, accurate, and 3D-aware scene perception [3,9,79]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.773, + 0.787, + 0.819 + ], + "angle": 0, + "content": "Training a 3D perception model that can perform well in real-world scenarios often requires large-scale datasets and sufficient computing power [27,58]. Different from 2D, annotating 3D data is notably more expensive and labor-intensive," + }, + { + "type": "page_footnote", + "bbox": [ + 0.218, + 0.825, + 0.758, + 0.841 + ], + "angle": 0, + "content": "* X. Xu and L. Kong contributed equally to this work. ⌒ Corresponding author." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.353, + 0.128 + ], + "angle": 0, + "content": "X. Xu et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.224 + ], + "angle": 0, + "content": "which hinders the scalability of existing 3D perception models [28,69,98,112]. Data representation learning serves as a potential solution to mitigate such a problem [6,76]. By designing suitable pretraining objectives, the models are anticipated to extract useful concepts from raw data, where such concepts can help improve models' performance on downstream tasks with fewer annotations [51]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.229, + 0.456, + 0.562 + ], + "angle": 0, + "content": "Recently, Sautier et al. [82] proposed SLidR to distill knowledge from surrounding camera views - using a pretrained 2D backbone such as MoCo [14] and DINO [72] - to LiDAR point clouds, exhibiting promising 3D representation learning properties. The key to its success is the superpixel-driven contrastive objectives between cameras and LiDAR sensors. Subsequent works further extended this framework from various aspects, such as class balancing [66], hybrid-view distillation [110], semantic superpixels [11, 12, 61], and so on. While these methods showed improved performance over their baselines, there exist several issues that could undermine the data representation learning." + }, + { + "type": "image", + "bbox": [ + 0.467, + 0.237, + 0.787, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.464, + 0.455, + 0.788, + 0.553 + ], + "angle": 0, + "content": "Fig.1: Performance overview of SuperFlow compared to state-of-the-art image-to-LiDAR pretraining methods, i.e., Seal [61], SLidR [82], and PPKT [63], on eleven LiDAR datasets. The scores of prior methods are normalized based on SuperFlow's scores. The larger the area coverage, the better the overall segmentation performance." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.569, + 0.788, + 0.691 + ], + "angle": 0, + "content": "The first concern revolves around the inherent temporal dynamics of LiDAR data [4,8]. LiDAR point clouds are acquired sequentially, capturing the essence of motion within the scene. Traditional approaches [61,63,66,82,110] often overlook this temporal aspect, treating each snapshot as an isolated scan. However, this sequential nature holds a wealth of information that can significantly enrich the model's understanding of the 3D environment [71,96]. Utilizing these temporal cues can lead to more robust and context-aware 3D perception models, which is crucial for dynamic environments encountered in autonomous driving." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.697, + 0.788, + 0.804 + ], + "angle": 0, + "content": "Moreover, the varying density of LiDAR point clouds presents a unique challenge [45, 47, 94]. Due to the nature of LiDAR scanning and data acquisition, different areas within the same scene can have significantly different point densities, which can in turn affect the consistency of feature representation across the scene [2, 47, 108, 111]. Therefore, a model that can learn invariant features regardless of point cloud density tends to be effective for recognizing the structural and semantic information in the 3D space." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.787, + 0.842 + ], + "angle": 0, + "content": "In lieu of existing challenges, we propose a novel spatiotemporal contrastive learning dubbed SuperFlow to encourage effective cross-sensor knowledge dis" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.294, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.147, + 0.785, + 0.178 + ], + "angle": 0, + "content": "tillation. Our approach features three key components, all centered around the use of the off-the-shelf temporal cues inherent in the LiDAR acquisition process:" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.183, + 0.786, + 0.258 + ], + "angle": 0, + "content": "- We first introduce a straightforward yet effective view consistency alignment that seamlessly generates semantic superpixels with language guidance, alleviating the \"self-conflict\" issues in existing works [61,66,82]. As opposed to the previous pipeline, our method also aligns the semantics across camera views in consecutive scenes, paving the way for more sophisticated designs." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.258, + 0.787, + 0.362 + ], + "angle": 0, + "content": "- To address the varying density of LiDAR point clouds, we present a dense-to-sparse regularization module that encourages consistency between features of dense and sparse point clouds. Dense points are obtained by concatenating multi-sweep LiDAR scans within a suitable time window and propagating the semantic superpixels from sparse to dense points. By leveraging dense point features to regularize sparse point features, the model promotes insensitivity to point cloud density variations." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.362, + 0.786, + 0.482 + ], + "angle": 0, + "content": "- To capture useful temporal cues from consecutive scans across different timestamps, we design a flow-based contrastive learning module. 
This module takes multiple LiDAR-camera pairs as input and enforces strong consistency between temporally shifted representations. Analogous to existing image-to-LiDAR representation learning methods [61,66,82], we also incorporate useful spatial contrastive objectives into our framework, setting a unified pipeline that emphasizes holistic representation learning from both the structural 3D layouts and the temporal 4D information." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.489, + 0.785, + 0.594 + ], + "angle": 0, + "content": "The strong spatiotemporal consistency regularization in SuperFlow effectively forms a semantically rich landscape that enhances data representations. As illustrated in Fig. 1, our approach achieves appealing performance gains over state-of-the-art 3D pretraining methods across a diverse spectrum of downstream tasks. Meanwhile, we also target scaling up the capacity of both 2D and 3D backbones during pretraining, shedding light on the future development of more robust, unified, and ubiquitous 3D perception models." + }, + { + "type": "text", + "bbox": [ + 0.239, + 0.594, + 0.764, + 0.609 + ], + "angle": 0, + "content": "To summarize, this work incorporates key contributions listed as follows:" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.615, + 0.785, + 0.644 + ], + "angle": 0, + "content": "- We present SuperFlow, a novel framework aimed at harnessing consecutive LiDAR-camera pairs for establishing spatiotemporal pretraining objectives." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.644, + 0.785, + 0.702 + ], + "angle": 0, + "content": "- Our framework incorporates novel designs including view consistency alignment, dense-to-sparse regularization, and flow-based contrastive learning, which better encourage representation learning between camera and LiDAR sensors across consecutive scans." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.702, + 0.785, + 0.763 + ], + "angle": 0, + "content": "- Our approach sets new state-of-the-art performance across 11 LiDAR datasets, exhibiting strong robustness and generalizability. We also reveal intriguing emergent properties as we scale up the 2D and 3D backbones, which could lay the foundation for scalable 3D perception." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.783, + 0.388, + 0.799 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.81, + 0.785, + 0.84 + ], + "angle": 0, + "content": "LiDAR-based 3D Perception. The LiDAR sensor has been widely used in today's 3D perception systems, credited to its robust and structural sensing abil-
In response to this challenge, we resort to newly appeared 3D representation learning, hoping to leverage the rich collections of unlabeled LiDAR point clouds for more effective learning from LiDAR data. This could further enrich the efficacy of LiDAR-based perception." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.303, + 0.788, + 0.469 + ], + "angle": 0, + "content": "Data-Efficient 3D Perception. To better save annotation budgets, previous efforts seek 3D perception in a data-efficient manner [11, 12, 27, 40, 46, 49]. One line of research resorts to weak supervision, e.g., seeding points [36, 53, 86, 115], active prompts [38, 57, 100], and scribbles [94], for weakly-supervised LiDAR semantic segmentation. Another line of research seeks semi-supervised learning approaches [47, 52, 91] to better tackle efficient 3D scene perception and achieve promising results. In this work, different from the prior pursuits, we tackle efficient 3D perception from the data representation learning perspective. We establish several LiDAR-based data representation learning settings that seamlessly combine pretraining with weakly- and semi-supervised learning, further enhancing the scalability of 3D perception systems." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.473, + 0.788, + 0.746 + ], + "angle": 0, + "content": "3D Representation Learning. Analog to 2D representation learning strategies [13,15,30,31,103], prior works designed contrastive [35,70,81,101,108,113], masked modeling [32,50,95], and reconstruction [7,67] objectives for 3D pretraining. Most early 3D representation learning approaches use a single modality for pretraining, leaving room for further development. The off-the-shelf calibrations among different types of sensors provide a promising solution for building pretraining objectives [63]. Recently, SLidR [82] has made the first contribution toward multi-modal 3D representation learning between camera and LiDAR sensors. Subsequent works [66,74,110] extended this framework with more advanced designs. Seal [61] leverages powerful vision foundation models [42,109,119,120] to better assist the contrastive learning across sensors. Puy et al. [75,76] conducted a comprehensive study on the distillation recipe for better pretraining effects. While these approaches have exhibited better performance than their baselines, they overlooked the rich temporal cues across consecutive scans, which might lead to sub-opt pretraining performance. In this work, we construct dense 3D representation learning objectives using calibrated LiDAR sequences. Our approach encourages the consistency between features from sparse to dense inputs and features across timestamps, yielding superiority over existing endeavors." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.788, + 0.841 + ], + "angle": 0, + "content": "4D Representation Learning. Leveraging consecutive scans is promising in extracting temporal relations [2, 23, 33, 85]. For point cloud data pretraining, prior works [16, 64, 83, 84, 114] mainly focused on applying 4D cues on object- and human-centric point clouds, which are often small in scale. For large-scale automotive point clouds, STRL [39] learns spatiotemporal data invariance with different spatial augmentations in the point cloud sequence. 
TARL [71] and" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.294, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.283 + ], + "angle": 0, + "content": "STSSL [96] encourage similarities of point clusters in two consecutive frames, where such clusters are obtained by ground removal and clustering algorithms, i.e., RANSAC [25], Patchwork [55], and HDBSCAN [24]. BEVContrast [81] shares a similar motivation but utilizes BEV maps for contrastive learning, which yields a more effective implementation. The \"one-fits-all\" clustering parameters, however, are often difficult to obtain, hindering existing works. Different from existing methods that use a single modality for 4D representation learning, we propose to leverage LiDAR-camera correspondences and semantic-rich superpixels to establish meaningful multi-modality 4D pretraining objectives." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.308, + 0.356, + 0.325 + ], + "angle": 0, + "content": "3 SuperFlow" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.341, + 0.785, + 0.432 + ], + "angle": 0, + "content": "In this section, we first revisit the common setups of the camera-to-LiDAR distillation baseline (cf. Sec. 3.1). We then elaborate on the technical details of SuperFlow, encompassing a straightforward yet effective view consistency alignment (cf. Sec. 3.2), a dense-to-sparse consistency regularization (cf. Sec. 3.3), and a flow-based spatiotemporal contrastive learning (cf. Sec. 3.4). The overall pipeline of the proposed SuperFlow framework is depicted in Fig. 4." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.455, + 0.373, + 0.468 + ], + "angle": 0, + "content": "3.1 Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.481, + 0.785, + 0.648 + ], + "angle": 0, + "content": "Problem Definition. Given a point cloud \\(\\mathcal{P}^t = \\{\\mathbf{p}_i^t, \\mathbf{f}_i^t | i = 1, \\dots, N\\}\\) with \\(N\\) points captured by a LiDAR sensor at time \\(t\\), where \\(\\mathbf{p}_i \\in \\mathbb{R}^3\\) denotes the coordinate of the point and \\(\\mathbf{f}_i \\in \\mathbb{R}^C\\) is the corresponding feature, we aim to transfer knowledge from \\(M\\) surrounding camera images \\(\\mathcal{I}^t = \\{\\mathbf{I}_i^t | i = 1, \\dots, M\\}\\) into the point cloud. Here, \\(\\mathbf{I}_i \\in \\mathbb{R}^{H \\times W \\times 3}\\) represents an image with height \\(H\\) and width \\(W\\). Prior works [61, 82] generate a set of class-agnostic superpixels \\(\\mathcal{X}_i = \\{\\mathbf{X}_i^j | j = 1, \\dots, V\\}\\) for each image via the unsupervised SLIC algorithm [1] or the more recent vision foundation models (VFMs) [42, 119, 120], where \\(V\\) denotes the total number of superpixels. 
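For concreteness, the offline superpixel-generation step just described can be sketched in a few lines. This is a minimal illustration assuming scikit-image's SLIC implementation; the segment count is illustrative rather than the paper's setting, and the VFM-based alternative would replace this call with a panoptic segmentation head.

```python
# Minimal sketch of offline class-agnostic superpixel generation with SLIC
# (assumes scikit-image; n_segments is illustrative, not the paper's setting).
import numpy as np
from skimage.segmentation import slic

def generate_superpixels(image: np.ndarray, n_segments: int = 150) -> np.ndarray:
    """Return an (H, W) integer map assigning each pixel to one superpixel."""
    # SLIC clusters pixels by color and spatial proximity; labels run 0..V-1.
    return slic(image, n_segments=n_segments, compactness=10, start_label=0)
```

In SLidR-style pipelines, such label maps are precomputed for every camera image and cached, so that superpixel pooling during pretraining costs only an indexed average.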
Assuming that the point cloud \(\mathcal{P}^t\) and images \(\mathcal{I}^t\) are calibrated, each point \(\mathbf{p}_i = (x_i, y_i, z_i)\) can then be projected to the image plane \((u_i, v_i)\) using the following sensor calibration parameters:" + }, + { + "type": "equation", + "bbox": [ + 0.348, + 0.659, + 0.785, + 0.689 + ], + "angle": 0, + "content": "\\[\n[u_i, v_i, 1]^{\\mathrm{T}} = \\frac{1}{z_i} \\times \\Gamma_{K} \\times \\Gamma_{c \\leftarrow l} \\times [x_i, y_i, z_i]^{\\mathrm{T}}, \\tag{1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.7, + 0.784, + 0.744 + ], + "angle": 0, + "content": "where \(\Gamma_K\) denotes the camera intrinsic matrix and \(\Gamma_{c\leftarrow l}\) is the transformation matrix from LiDAR sensors to surrounding-view cameras. We also obtain a set of superpoints \(\mathcal{Y} = \{\mathbf{Y}^j | j = 1, \dots, V\}\) through this projection." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.745, + 0.785, + 0.842 + ], + "angle": 0, + "content": "Network Representations. Let \(\mathcal{F}_{\theta_p}:\mathbb{R}^{N\times (3 + C)}\to \mathbb{R}^{N\times D}\) be a 3D backbone with trainable parameters \(\theta_{p}\), which takes LiDAR points as input and outputs \(D\)-dimensional point features. Let \(\mathcal{G}_{\theta_i}:\mathbb{R}^{H\times W\times 3}\to \mathbb{R}^{\frac{H}{S}\times \frac{W}{S}\times E}\) be an image backbone with pretrained parameters \(\theta_{i}\) that takes images as input and outputs \(E\)-dimensional image features with stride \(S\). Let \(\mathcal{H}_{\omega_p}:\mathbb{R}^{N\times D}\to \mathbb{R}^{N\times L}\) and \(\mathcal{H}_{\omega_i}:\mathbb{R}^{\frac{H}{S}\times \frac{W}{S}\times E}\to \mathbb{R}^{H\times W\times L}\) be linear heads with trainable parameters \(\omega_{p}\) and \(\omega_{i}\)," + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.353, + 0.127 + ], + "angle": 0, + "content": "X. Xu et al." + }, + { + "type": "image", + "bbox": [ + 0.221, + 0.144, + 0.396, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.271, + 0.274, + 0.348, + 0.286 + ], + "angle": 0, + "content": "(a) Heuristic" + }, + { + "type": "image", + "bbox": [ + 0.414, + 0.144, + 0.59, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.446, + 0.274, + 0.558, + 0.286 + ], + "angle": 0, + "content": "(b) Class Agnostic" + }, + { + "type": "image", + "bbox": [ + 0.607, + 0.144, + 0.777, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.637, + 0.274, + 0.754, + 0.286 + ], + "angle": 0, + "content": "(c) View Consistent" + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.298, + 0.787, + 0.354 + ], + "angle": 0, + "content": "Fig. 2: Comparisons of different superpixels. (a) Class-agnostic superpixels generated by the unsupervised SLIC [1] algorithm. (b) Class-agnostic semantic superpixels generated by vision foundation models (VFMs) [109, 119, 120]. (c) View-consistent semantic superpixels generated by our view consistency alignment module." 
+ }, + { + "type": "text", + "bbox": [ + 0.215, + 0.378, + 0.785, + 0.408 + ], + "angle": 0, + "content": "which project backbone features to \(L\)-dimensional features with \(\ell_2\)-normalization and upsample image features to \(H\times W\) with bilinear interpolation." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.408, + 0.785, + 0.469 + ], + "angle": 0, + "content": "Pretraining Objective. The overall objective of image-to-LiDAR representation learning [82] is to transfer knowledge from the trained image backbone \(\mathcal{G}_{\theta_i}\) to the 3D backbone \(\mathcal{F}_{\theta_p}\). The superpixels \(\mathcal{X}_i\), generated offline, serve as an intermediary to effectively guide the knowledge transfer process." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.494, + 0.501, + 0.51 + ], + "angle": 0, + "content": "3.2 View Consistency Alignment" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.522, + 0.785, + 0.581 + ], + "angle": 0, + "content": "Motivation. The class-agnostic superpixels \(\mathcal{X}_i\) used in prior works [61,66,82] are typically instance-level and do not consider their actual categories. As discussed in [66], instance-level superpixels can lead to \"self-conflict\" problems, which undermine the effectiveness of pretraining." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.584, + 0.787, + 0.718 + ], + "angle": 0, + "content": "Superpixel Comparisons. Fig. 2 compares superpixels generated via the unsupervised SLIC [1] and VFMs. SLIC [1] tends to over-segment objects, causing semantic conflicts. VFMs generate superpixels through a panoptic segmentation head, which can still lead to \"self-conflict\" in three conditions (see Fig. 2b): ① when the same object appears in different camera views, leading to different parts of the same object being treated as negative samples; ② when objects of the same category within the same camera view are treated as negative samples; ③ when objects across different camera views are treated as negative samples even if they share the same label." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.72, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Semantic-Related Superpixels Generation. To address these issues, we propose generating semantic-related superpixels to ensure consistency across camera views. Contrastive Language-Image Pre-training (CLIP) [78] has shown great generalization in few-shot learning. Building on existing VFMs [42,119,120], we employ CLIP's text encoder and fine-tune the last layer of the segmentation head from VFMs with predefined text prompts. This allows the segmentation head to generate language-guided semantic categories for each pixel, which we leverage as superpixels. As shown in Fig. 2c, we unify superpixels across camera" + }, + { + "type": "header", + "bbox": [ + 0.294, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.147, + 0.784, + 0.178 + ], + "angle": 0, + "content": "views based on semantic category, alleviating the \"self-conflict\" problem in prior image-to-LiDAR contrastive learning pipelines." 
+ }, + { + "type": "title", + "bbox": [ + 0.215, + 0.207, + 0.67, + 0.223 + ], + "angle": 0, + "content": "3.3 D2S: Dense-to-Sparse Consistency Regularization" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.24, + 0.784, + 0.315 + ], + "angle": 0, + "content": "Motivation. LiDAR points are sparse and often incomplete, significantly restricting the efficacy of the cross-sensor feature representation learning process. In this work, we propose to tackle this challenge by combining multiple LiDAR scans within a suitable time window to create a dense point cloud, which is then used to encourage consistency with the sparse point cloud." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.317, + 0.453, + 0.482 + ], + "angle": 0, + "content": "Point Cloud Concatenation. Specifically, given a keyframe point cloud \(\mathcal{P}^t\) captured at time \(t\) and a set of sweep point clouds \(\{\mathcal{P}^s | s = 1, \dots, T\}\) captured at previous times \(s\), we first transform the coordinate \((x^s, y^s, z^s)\) of the sweep point cloud \(\mathcal{P}^s\) to the coordinate system of \(\mathcal{P}^t\), as they lie in different coordinate systems due to the vehicle's movement:" + }, + { + "type": "equation", + "bbox": [ + 0.216, + 0.496, + 0.458, + 0.529 + ], + "angle": 0, + "content": "\\[\n[\\tilde{x}^{s}, \\tilde{y}^{s}, \\tilde{z}^{s}]^{\\mathrm{T}} = \\Gamma_{t \\leftarrow s} \\times [x^{s}, y^{s}, z^{s}]^{\\mathrm{T}}, \\tag{2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.53, + 0.455, + 0.62 + ], + "angle": 0, + "content": "where \(\Gamma_{t\leftarrow s}\) denotes the transformation matrix from the sweep point cloud at time \(s\) to the keyframe point cloud at time \(t\). We then concatenate the transformed sweep points \(\{\tilde{\mathcal{P}}^s |s =\)" + }, + { + "type": "image", + "bbox": [ + 0.468, + 0.325, + 0.787, + 0.515 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.464, + 0.521, + 0.788, + 0.605 + ], + "angle": 0, + "content": "Fig. 3: Dense-to-sparse (D2S) consistency regularization module. Dense point clouds are obtained by combining multiple point clouds captured at different times. A D2S regularization is formulated by encouraging the consistency between dense features and sparse features." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.619, + 0.784, + 0.664 + ], + "angle": 0, + "content": "\(1, \ldots, T\}\) with \(\mathcal{P}^t\) to obtain a dense point cloud \(\mathcal{P}^d\). As shown in Fig. 3, \(\mathcal{P}^d\) fuses temporal information from consecutive point clouds, resulting in a dense and semantically rich representation for feature learning." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.665, + 0.785, + 0.757 + ], + "angle": 0, + "content": "Dense Superpoints. Meanwhile, we generate sets of superpoints \(\mathcal{Y}^d\) and \(\mathcal{Y}^t\) for \(\mathcal{P}^d\) and \(\mathcal{P}^t\), respectively, using superpixels \(\mathcal{X}^t\). Both \(\mathcal{P}^t\) and \(\mathcal{P}^d\) are fed into the weight-shared 3D network \(\mathcal{F}_{\theta_p}\) and head \(\mathcal{H}_{\omega_p}\) for feature extraction. The output features are grouped via average pooling based on the superpoint indices to obtain superpoint features \(\mathbf{Q}^d\) and \(\mathbf{Q}^t\), where \(\mathbf{Q}^d \in \mathbb{R}^{V \times L}\) and \(\mathbf{Q}^t \in \mathbb{R}^{V \times L}\). 
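The densification step above reduces to a homogeneous-coordinate transform per sweep followed by concatenation. Below is a minimal NumPy sketch of Eq. (2), where the 4x4 matrices `T_t_from_s` are hypothetical stand-ins for \(\Gamma_{t \leftarrow s}\); variable names are illustrative, not taken from the released code.

```python
# Minimal sketch of Eq. (2) plus concatenation (assumes NumPy and 4x4
# homogeneous keyframe<-sweep transforms; names are illustrative only).
import numpy as np

def densify(keyframe_xyz: np.ndarray, sweeps_xyz: list[np.ndarray],
            T_t_from_s: list[np.ndarray]) -> np.ndarray:
    """Align each (N_s, 3) sweep into the keyframe system and concatenate."""
    aligned = [keyframe_xyz]
    for xyz, T in zip(sweeps_xyz, T_t_from_s):
        homo = np.concatenate([xyz, np.ones((len(xyz), 1))], axis=1)  # (N_s, 4)
        aligned.append((homo @ T.T)[:, :3])  # apply Gamma_{t<-s} to each point
    return np.concatenate(aligned, axis=0)  # dense point cloud P^d
```

The superpixel labels of the keyframe are then propagated from the sparse points to this dense cloud, so that both share the same superpoint partition for pooling.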
We expect \(\mathbf{Q}^d\) and \(\mathbf{Q}^t\) to share similar features, leading to the following D2S loss:" + }, + { + "type": "equation", + "bbox": [ + 0.388, + 0.77, + 0.785, + 0.812 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\mathrm{d2s}} = \\frac{1}{V} \\sum_{i=1}^{V} \\left(1 - \\langle \\mathbf{q}_{i}^{t}, \\mathbf{q}_{i}^{d} \\rangle\\right), \\tag{3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.825, + 0.779, + 0.84 + ], + "angle": 0, + "content": "where \(\langle \cdot, \cdot \rangle\) denotes the scalar product used to measure feature similarity." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.353, + 0.127 + ], + "angle": 0, + "content": "X. Xu et al." + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.145, + 0.784, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.393, + 0.788, + 0.464 + ], + "angle": 0, + "content": "Fig. 4: Flow-based contrastive learning (FCL) pipeline. FCL takes multiple LiDAR-camera pairs from consecutive scans as input. Based on temporally aligned semantic superpixels and superpoints, two contrastive learning objectives are formulated: 1) spatial contrastive learning between each LiDAR-camera pair \((\mathcal{L}_{\mathrm{sc}})\), and 2) temporal contrastive learning among consecutive LiDAR point clouds across scenes \((\mathcal{L}_{\mathrm{tc}})\)." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.484, + 0.586, + 0.5 + ], + "angle": 0, + "content": "3.4 FCL: Flow-Based Contrastive Learning" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.509, + 0.784, + 0.599 + ], + "angle": 0, + "content": "Motivation. LiDAR point clouds are acquired sequentially, embedding rich dynamic scene information across consecutive timestamps. Prior works [61, 66, 82] primarily focused on single LiDAR scans, overlooking the consistency of moving objects across scenes. To address these limitations, we propose flow-based contrastive learning (FCL) across sequential LiDAR scenes to encourage spatiotemporal consistency." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.6, + 0.785, + 0.765 + ], + "angle": 0, + "content": "Spatial Contrastive Learning. Our framework, depicted in Fig. 4, takes three LiDAR-camera pairs from different timestamps within a suitable time window as input, i.e., \(\{(\mathcal{P}^t,\mathcal{I}^t),(\mathcal{P}^{t + \Delta t},\mathcal{I}^{t + \Delta t}),(\mathcal{P}^{t - \Delta t},\mathcal{I}^{t - \Delta t})\}\), where timestamp \(t\) denotes the current scene and \(\Delta t\) is the timespan. Following previous works [61,82], we first distill knowledge from the 2D network into the 3D network for each scene separately. Taking \((\mathcal{P}^t,\mathcal{I}^t)\) as an example, \(\mathcal{P}^t\) and \(\mathcal{I}^t\) are fed into the 3D and 2D networks to extract per-point and image features. The output features are then grouped via average pooling based on superpoints \(\mathcal{Y}^t\) and superpixels \(\mathcal{X}^t\) to obtain superpoint features \(\mathbf{Q}^t\) and superpixel features \(\mathbf{K}^t\). A spatial contrastive loss is formulated to constrain the 3D representations with pretrained 2D prior knowledge. 
This process is formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.302, + 0.775, + 0.785, + 0.817 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\mathrm{sc}} = -\\frac{1}{V} \\sum_{i=1}^{V} \\log \\left[ \\frac{e^{(\\langle \\mathbf{q}_{i}, \\mathbf{k}_{i} \\rangle / \\tau)}}{\\sum_{j \\neq i} e^{(\\langle \\mathbf{q}_{i}, \\mathbf{k}_{j} \\rangle / \\tau)} + e^{(\\langle \\mathbf{q}_{i}, \\mathbf{k}_{i} \\rangle / \\tau)}} \\right], \\tag{4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.825, + 0.744, + 0.84 + ], + "angle": 0, + "content": "where \(\tau > 0\) is a temperature that controls the smoothness of distillation." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.294, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.253 + ], + "angle": 0, + "content": "Flow-Based Contrastive Learning. The spatial contrastive learning objective between images and point clouds, as depicted in Eq. (4), fails to ensure that moving objects share similar attributes across different scenes. To maintain consistency across scenes, a temporal consistency loss is introduced among superpoint features across different scenes. For the point clouds \(\mathcal{P}^t\) and \(\mathcal{P}^{t + \Delta t}\), the corresponding superpoint features \(\mathbf{Q}^t\) and \(\mathbf{Q}^{t + \Delta t}\) are obtained via their superpoints. The temporal contrastive loss operates on \(\mathbf{Q}^t\) and \(\mathbf{Q}^{t + \Delta t}\):" + }, + { + "type": "equation", + "bbox": [ + 0.263, + 0.259, + 0.786, + 0.307 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\mathrm{tc}}^{t \\leftarrow t + \\Delta t} = -\\frac{1}{V} \\sum_{i=1}^{V} \\log \\left[ \\frac{e^{(\\langle \\mathbf{q}_{i}^{t}, \\mathbf{q}_{i}^{t + \\Delta t} \\rangle / \\tau)}}{\\sum_{j \\neq i} e^{(\\langle \\mathbf{q}_{i}^{t}, \\mathbf{q}_{j}^{t + \\Delta t} \\rangle / \\tau)} + e^{(\\langle \\mathbf{q}_{i}^{t}, \\mathbf{q}_{i}^{t + \\Delta t} \\rangle / \\tau)}} \\right]. \\tag{5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.312, + 0.784, + 0.342 + ], + "angle": 0, + "content": "The same function is also applied between \(\mathbf{Q}^t\) and \(\mathbf{Q}^{t - \Delta t}\). This approach enables point features at time \(t\) to extract more context-aware information across scenes." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.361, + 0.375, + 0.379 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.389, + 0.331, + 0.404 + ], + "angle": 0, + "content": "4.1 Settings" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.409, + 0.785, + 0.499 + ], + "angle": 0, + "content": "Data. We follow the seminal works SLidR [82] and Seal [61] when preparing the datasets. A total of eleven datasets are used in our experiments, including \(^{1}\)nuScenes [26], \(^{2}\)SemanticKITTI [5], \(^{3}\)Waymo Open [89], \(^{4}\)ScribbleKITTI [94], \(^{5}\)RELLIS-3D [41], \(^{6}\)SemanticPOSS [73], \(^{7}\)SemanticSTF [99], \(^{8}\)SynLiDAR [97], \(^{9}\)DAPS-3D [43], \(^{10}\)Synth4D [80], and \(^{11}\)Robo3D [45]. Due to space limits, kindly refer to the Appendix and [61, 82] for additional details about these datasets." 
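Before turning to the experiments, the objectives in Eqs. (3)-(5) can be summarized in a short PyTorch-style sketch over pooled, \(\ell_2\)-normalized superpoint/superpixel features of shape (V, L). This is a simplified reading of the formulas, not the released implementation, and the temperature value is illustrative.

```python
# Sketch of the D2S (Eq. 3), spatial (Eq. 4), and temporal (Eq. 5) objectives.
# Inputs are l2-normalized superpoint/superpixel features of shape (V, L);
# names and the default temperature are illustrative, not from the codebase.
import torch
import torch.nn.functional as F

def d2s_loss(q_t: torch.Tensor, q_d: torch.Tensor) -> torch.Tensor:
    # Eq. (3): 1 - <q_i^t, q_i^d>, averaged over the V superpoints.
    return (1.0 - (q_t * q_d).sum(dim=1)).mean()

def info_nce(q: torch.Tensor, k: torch.Tensor, tau: float = 0.07) -> torch.Tensor:
    # Eqs. (4)/(5): matched pairs (q_i, k_i) are positives and all k_j (j != i)
    # are negatives; cross_entropy with diagonal targets implements the
    # log-softmax over scalar products divided by the temperature tau.
    logits = q @ k.t() / tau                        # (V, V) similarity matrix
    targets = torch.arange(q.size(0), device=q.device)
    return F.cross_entropy(logits, targets)
```

The same `info_nce` routine serves both the spatial term (superpoints vs. superpixels of one scene, Eq. (4)) and the temporal term (superpoints at \(t\) vs. those at \(t \pm \Delta t\), Eq. (5)), which is what makes the unified spatiotemporal pipeline compact.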
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.5, + 0.784, + 0.695 + ], + "angle": 0, + "content": "Implementation Details. SuperFlow is implemented using the MMDetection3D [20] and OpenPCSeg [59] codebases. Consistent with prior works [61,82], we employ MinkUNet [19] as the 3D backbone and DINOv2 [72] (with ViT backbones [22]) as the 2D backbone, distilling from three variants: small (S), base (B), and large (L). Following Seal [61], OpenSeeD [109] is used to generate semantic superpixels. The framework is pretrained end-to-end on 600 scenes from nuScenes [26], then linearly probed and fine-tuned on nuScenes [26] according to the data splits in SLidR [82]. The domain generalization study adheres to the same configurations as Seal [61] for the other ten datasets. Both the baselines and SuperFlow are pretrained using eight GPUs for 50 epochs, while linear probing and downstream fine-tuning experiments use four GPUs for 100 epochs, all utilizing the AdamW optimizer [65] and OneCycle scheduler [87]. Due to space limits, kindly refer to the Appendix for additional implementation details." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.697, + 0.784, + 0.757 + ], + "angle": 0, + "content": "Evaluation Protocols. Following conventions, we report the Intersection-over-Union (IoU) on each semantic class and mean IoU (mIoU) over all classes for downstream tasks. For 3D robustness evaluations, we follow Robo3D [45] and report the mean Corruption Error (mCE) and mean Resilience Rate (mRR)." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.774, + 0.424, + 0.79 + ], + "angle": 0, + "content": "4.2 Comparative Study" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.784, + 0.84 + ], + "angle": 0, + "content": "Linear Probing. We start by investigating the pretraining quality via linear probing. For this setup, we initialize the 3D backbone \(\mathcal{F}_{\theta_p}\) with pretrained parameters and fine-tune only the appended segmentation head. As shown in Tab. 1," + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.353, + 0.128 + ], + "angle": 0, + "content": "X. Xu et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.217, + 0.145, + 0.788, + 0.215 + ], + "angle": 0, + "content": "Table 1: Comparisons of state-of-the-art pretraining methods pretrained on nuScenes [26] and fine-tuned on SemanticKITTI [5] and Waymo Open [89] with specified data portions, respectively. All methods use MinkUNet [19] as the 3D semantic segmentation backbone. LP denotes linear probing with a frozen backbone. All scores are given in percentage (\%). Best scores in each configuration are shaded with colors." + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.22, + 0.794, + 0.523 + ], + "angle": 0, + "content": "
Method | Venue | Distill | nuScenes LP | nuScenes 1% | nuScenes 5% | nuScenes 10% | nuScenes 25% | nuScenes Full | KITTI 1% | Waymo 1%
Random | - | - | 8.10 | 30.30 | 47.84 | 56.15 | 65.48 | 74.66 | 39.50 | 39.41
PointContrast [101] | ECCV'20 | None | 21.90 | 32.50 | - | - | - | - | 41.10 | -
DepthContrast [113] | ICCV'21 | None | 22.10 | 31.70 | - | - | - | - | 41.50 | -
ALSO [7] | CVPR'23 | None | - | 37.70 | - | 59.40 | - | 72.00 | - | -
BEVContrast [81] | 3DV'24 | None | - | 38.30 | - | 59.60 | - | 72.30 | - | -
PPKT [63] | arXiv'21 | ResNet | 35.90 | 37.80 | 53.74 | 60.25 | 67.14 | 74.52 | 44.00 | 47.60
SLidR [82] | CVPR'22 | ResNet | 38.80 | 38.30 | 52.49 | 59.84 | 66.91 | 74.79 | 44.60 | 47.12
ST-SLidR [66] | CVPR'23 | ResNet | 40.48 | 40.75 | 54.69 | 60.75 | 67.70 | 75.14 | 44.72 | 44.93
TriCC [74] | CVPR'23 | ResNet | 38.00 | 41.20 | 54.10 | 60.40 | 67.60 | 75.60 | 45.90 | -
Seal [61] | NeurIPS'23 | ResNet | 44.95 | 45.84 | 55.64 | 62.97 | 68.41 | 75.60 | 46.63 | 49.34
HVDistill [110] | IJCV'24 | ResNet | 39.50 | 42.70 | 56.60 | 62.90 | 69.30 | 76.60 | 49.70 | -
PPKT [63] | arXiv'21 | ViT-S | 38.60 | 40.60 | 52.06 | 59.99 | 65.76 | 73.97 | 43.25 | 47.44
SLidR [82] | CVPR'22 | ViT-S | 44.70 | 41.16 | 53.65 | 61.47 | 66.71 | 74.20 | 44.67 | 47.57
Seal [61] | NeurIPS'23 | ViT-S | 45.16 | 44.27 | 55.13 | 62.46 | 67.64 | 75.58 | 46.51 | 48.67
SuperFlow | Ours | ViT-S | 46.44 | 47.81 | 59.44 | 64.47 | 69.20 | 76.54 | 47.97 | 49.94
PPKT [63] | arXiv'21 | ViT-B | 39.95 | 40.91 | 53.21 | 60.87 | 66.22 | 74.07 | 44.09 | 47.57
SLidR [82] | CVPR'22 | ViT-B | 45.35 | 41.64 | 55.83 | 62.68 | 67.61 | 74.98 | 45.50 | 48.32
Seal [61] | NeurIPS'23 | ViT-B | 46.59 | 45.98 | 57.15 | 62.79 | 68.18 | 75.41 | 47.24 | 48.91
SuperFlow | Ours | ViT-B | 47.66 | 48.09 | 59.66 | 64.52 | 69.79 | 76.57 | 48.40 | 50.20
PPKT [63] | arXiv'21 | ViT-L | 41.57 | 42.05 | 55.75 | 61.26 | 66.88 | 74.33 | 45.87 | 47.82
SLidR [82] | CVPR'22 | ViT-L | 45.70 | 42.77 | 57.45 | 63.20 | 68.13 | 75.51 | 47.01 | 48.60
Seal [61] | NeurIPS'23 | ViT-L | 46.81 | 46.27 | 58.14 | 63.27 | 68.67 | 75.66 | 47.55 | 50.02
SuperFlow | Ours | ViT-L | 48.01 | 49.95 | 60.72 | 65.09 | 70.01 | 77.19 | 49.07 | 50.67
" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.55, + 0.787, + 0.641 + ], + "angle": 0, + "content": "SuperFlow consistently outperforms state-of-the-art methods under diverse configurations. We attribute this to the use of temporal consistency learning, which captures the structurally rich temporal cues across consecutive scenes and enhances the semantic representation learning of the 3D backbone. We also observe improved performance with larger 2D networks (i.e., from ViT-S to ViT-L), revealing a promising direction of achieving higher quality 3D pretraining." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.643, + 0.788, + 0.809 + ], + "angle": 0, + "content": "Downstream Fine-Tuning. It is known that data representation learning can mitigate the need for large-scale human annotations. Our study systematically compares SuperFlow with prior works on three popular datasets, including nuScenes [26], SemanticKITTI [5], and Waymo Open [89], under limited annotations for few-shot fine-tuning. From Tab. 1, we observe that SuperFlow achieves promising performance gains among three datasets across all fine-tuning tasks. We also use the pretrained 3D backbone as initialization for the fully-supervised learning study on nuScenes [26]. As can be seen from Tab. 1, models pretrained via representation learning consistently outperform the random initialization counterparts, highlighting the efficacy of conducting data pretraining. We also find that distillations from larger 2D networks show consistent improvements." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.81, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Cross-Domain Generalization. To verify the strong generalizability of SuperFlow, we conduct a comprehensive study using seven diverse LiDAR datasets and" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.295, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.145, + 0.788, + 0.201 + ], + "angle": 0, + "content": "Table 2: Domain generalization study of different pretraining methods pretrained on the nuScenes [26] dataset and fine-tuned on other seven heterogeneous 3D semantic segmentation datasets with specified data portions, respectively. All scores are given in percentage (\\%). Best scores in each configuration are shaded with colors." + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.207, + 0.787, + 0.3 + ], + "angle": 0, + "content": "
Method | ScrKITTI 1% | ScrKITTI 10% | Rellis-3D 1% | Rellis-3D 10% | SemPOSS Half | SemPOSS Full | SemSTF Half | SemSTF Full | SynLiDAR 1% | SynLiDAR 10% | DAPS-3D Half | DAPS-3D Full | Synth4D 1% | Synth4D 10%
Random | 23.81 | 47.60 | 38.46 | 53.60 | 46.26 | 54.12 | 48.03 | 48.15 | 19.89 | 44.74 | 74.32 | 79.38 | 20.22 | 66.87
PPKT [63] | 36.50 | 51.67 | 49.71 | 54.33 | 50.18 | 56.00 | 50.92 | 54.69 | 37.57 | 46.48 | 78.90 | 84.00 | 61.10 | 62.41
SLidR [82] | 39.60 | 50.45 | 49.75 | 54.57 | 51.56 | 55.36 | 52.01 | 54.35 | 42.05 | 47.84 | 81.00 | 85.40 | 63.10 | 62.67
Seal [61] | 40.64 | 52.77 | 51.09 | 55.03 | 53.26 | 56.89 | 53.46 | 55.36 | 43.58 | 49.26 | 81.88 | 85.90 | 64.50 | 66.96
SuperFlow | 42.70 | 54.00 | 52.83 | 55.71 | 54.41 | 57.33 | 54.72 | 56.57 | 44.85 | 51.38 | 82.43 | 86.21 | 65.31 | 69.43
" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.309, + 0.788, + 0.381 + ], + "angle": 0, + "content": "Table 3: Out-of-distribution 3D robustness study of state-of-the-art pretraining methods under corruption and sensor failure scenarios in the nuScenes- \\(C\\) dataset from the Robo3D benchmark [45]. Full denotes fine-tuning with full labels. LP denotes linear probing with a frozen backbone. All mCE \\((\\downarrow)\\), mRR \\((\\uparrow)\\), and mIoU \\((\\uparrow)\\) scores are given in percentage \\((\\%)\\). Best scores in each configuration are shaded with colors." + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.385, + 0.788, + 0.587 + ], + "angle": 0, + "content": "
# | Initial | Backbone | mCE | mRR | Fog | Rain | Snow | Blur | Beam | Cross | Echo | Sensor | Avg
Full | Random | MinkU-18 | 115.61 | 70.85 | 53.90 | 71.10 | 48.22 | 51.85 | 62.21 | 37.73 | 57.47 | 38.97 | 52.68
Full | SuperFlow | MinkU-18 | 109.00 | 75.66 | 54.95 | 72.79 | 49.56 | 57.68 | 62.82 | 42.45 | 59.61 | 41.77 | 55.21
Full | Random | MinkU-34 | 112.20 | 72.57 | 62.96 | 70.65 | 55.48 | 51.71 | 62.01 | 31.56 | 59.64 | 39.41 | 54.18
Full | PPKT [63] | MinkU-34 | 105.64 | 75.87 | 64.01 | 72.18 | 59.08 | 57.17 | 63.88 | 36.34 | 60.59 | 39.57 | 56.60
Full | SLidR [82] | MinkU-34 | 106.08 | 75.99 | 65.41 | 72.31 | 56.01 | 56.07 | 62.87 | 41.94 | 61.16 | 38.90 | 56.83
Full | Seal [61] | MinkU-34 | 92.63 | 83.08 | 72.66 | 74.31 | 66.22 | 66.14 | 65.96 | 57.44 | 59.87 | 39.85 | 62.81
Full | SuperFlow | MinkU-34 | 91.67 | 83.17 | 70.32 | 75.77 | 65.41 | 61.05 | 68.09 | 60.02 | 58.36 | 50.41 | 63.68
Full | Random | MinkU-50 | 113.76 | 72.81 | 49.95 | 71.16 | 45.36 | 55.55 | 62.84 | 36.94 | 59.12 | 43.15 | 53.01
Full | SuperFlow | MinkU-50 | 107.35 | 74.02 | 54.36 | 73.08 | 50.07 | 56.92 | 64.05 | 38.10 | 62.02 | 47.02 | 55.70
Full | Random | MinkU-101 | 109.10 | 74.07 | 50.45 | 73.02 | 48.85 | 58.48 | 64.18 | 43.86 | 59.82 | 41.47 | 55.02
Full | SuperFlow | MinkU-101 | 96.44 | 78.57 | 56.92 | 76.29 | 54.70 | 59.35 | 71.89 | 55.13 | 60.27 | 51.60 | 60.77
LP | PPKT [63] | MinkU-34 | 183.44 | 78.15 | 30.65 | 35.42 | 28.12 | 29.21 | 32.82 | 19.52 | 28.01 | 20.71 | 28.06
LP | SLidR [82] | MinkU-34 | 179.38 | 77.18 | 34.88 | 38.09 | 32.64 | 26.44 | 33.73 | 20.81 | 31.54 | 21.44 | 29.95
LP | Seal [61] | MinkU-34 | 166.18 | 75.38 | 37.33 | 42.77 | 29.93 | 37.73 | 40.32 | 20.31 | 37.73 | 24.94 | 33.88
LP | SuperFlow | MinkU-34 | 161.78 | 75.52 | 37.59 | 43.42 | 37.60 | 39.57 | 41.40 | 23.64 | 38.03 | 26.69 | 35.99
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.612, + 0.788, + 0.702 + ], + "angle": 0, + "content": "show results in Tab. 2. It is worth noting that these datasets are collected under different acquisition and annotation conditions, including adverse weather, weak annotations, synthetic collection, and dynamic objects. For all fourteen domain generalization fine-tuning tasks, SuperFlow exhibits superior performance over the prior arts [61,63,82]. This study strongly verifies the effectiveness of the proposed flow-based contrastive learning for image-to-LiDAR data representation." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.704, + 0.788, + 0.81 + ], + "angle": 0, + "content": "Out-of-Distribution Robustness. The robustness of 3D perception models against unprecedented conditions directly correlates with the model's applicability to real-world applications [29, 48, 54, 102]. We compare our SuperFlow with prior models in the nuScenes- \\(C\\) dataset from the Robo3D benchmark [45] and show results in Tab. 3. We observe that models pretrained using SuperFlow exhibit improved robustness over the random initialization counterparts. Besides, we find that 3D networks with different capacities often pose diverse robustness." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Quantitative Assessments. We visualize the prediction results fine-tuned on nuScenes [26], SemanticKITTI [5] and Waymo Open [89], compared with random" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.353, + 0.128 + ], + "angle": 0, + "content": "X. Xu et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.477, + 0.214 + ], + "angle": 0, + "content": "Table 4: Ablation study of SuperFlow using different # of sweeps. All methods use ViT-B [72] for distillation. All scores are given in percentage (%). Baseline results are shaded with colors." + }, + { + "type": "table", + "bbox": [ + 0.22, + 0.221, + 0.474, + 0.299 + ], + "angle": 0, + "content": "
# Sweeps | nuScenes LP | nuScenes 1% | KITTI 1% | Waymo 1%
1× Sweeps | 47.41 | 47.52 | 48.14 | 49.31
2× Sweeps | 47.66 | 48.09 | 48.40 | 50.20
5× Sweeps | 47.23 | 48.00 | 47.94 | 49.14
7× Sweeps | 46.03 | 47.98 | 46.83 | 47.97
" + }, + { + "type": "table_caption", + "bbox": [ + 0.49, + 0.145, + 0.784, + 0.215 + ], + "angle": 0, + "content": "Table 5: Ablation study of SuperFlow on network capacity (# params) of 3D backbones. All methods use ViT-B [72] for distillation. All scores are given in percentage \\((\\%)\\). Baseline results are shaded with colors." + }, + { + "type": "table", + "bbox": [ + 0.493, + 0.221, + 0.783, + 0.299 + ], + "angle": 0, + "content": "
Backbone | Layer | nuScenes LP | nuScenes 1% | KITTI 1% | Waymo 1%
MinkUNet | 18 | 47.20 | 47.70 | 48.04 | 49.24
MinkUNet | 34 | 47.66 | 48.09 | 48.40 | 50.20
MinkUNet | 50 | 54.11 | 52.86 | 49.22 | 51.20
MinkUNet | 101 | 52.56 | 51.19 | 48.51 | 50.01
" + }, + { + "type": "image", + "bbox": [ + 0.221, + 0.313, + 0.784, + 0.567 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.574, + 0.788, + 0.63 + ], + "angle": 0, + "content": "Fig. 5: Qualitative assessments of state-of-the-art pretraining methods pretrained on nuScenes [26] and fine-tuned on nuScenes [26], SemanticKITTI [5], and Waymo Open [89], with \\(1\\%\\) annotations. The error maps show the correct and incorrect predictions in gray and red, respectively. Best viewed in colors and zoomed-in for details." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.653, + 0.788, + 0.684 + ], + "angle": 0, + "content": "initialization, SLiDR [82], and Seal [61]. As shown in Fig. 5, Superflow performs well, especially on backgrounds, i.e., \"road\" and \"sidewalk\" in complex scenarios." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.708, + 0.39, + 0.723 + ], + "angle": 0, + "content": "4.3 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.734, + 0.788, + 0.842 + ], + "angle": 0, + "content": "In this section, we are tailored to understand the efficacy of each design in our SuperFlow framework. Unless otherwise specified, we adopt MinkUNet-34 [19] and ViT-B [72] as the 3D and 2D backbones, respectively, throughout this study. 3D Network Capacity. Existing 3D backbones are relatively small in scale compared to their 2D counterparts. We study the scale of the 3D network and the results are shown in Tab. 5. We observe improved performance as the network capacity scales up, except for MinkUNet-101 [19]. We conjecture that this is due" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.294, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.507, + 0.243 + ], + "angle": 0, + "content": "Table 6: Ablation study of each component in SuperFlow. All variants use a MinkUNet-34 [19] as the 3D backbone and ViT-B [72] for distillation. VC: View consistency. D2S: Dense-to-sparse regularization. FCL: Flow-based contrastive learning. All scores are given in percentage \\((\\%)\\)." + }, + { + "type": "table", + "bbox": [ + 0.22, + 0.248, + 0.511, + 0.354 + ], + "angle": 0, + "content": "
# | VC | D2S | FCL | nuScenes LP | nuScenes 1% | KITTI 1% | Waymo 1%
- | Random | - | - | 8.10 | 30.30 | 39.50 | 39.41
(a) | ✗ | ✗ | ✗ | 44.65 | 44.47 | 46.65 | 47.77
(b) | ✓ | ✗ | ✗ | 45.57 | 45.21 | 46.87 | 48.01
(c) | ✓ | ✓ | ✗ | 46.17 | 46.91 | 47.26 | 49.01
(d) | ✓ | ✗ | ✓ | 47.24 | 47.67 | 48.21 | 49.80
(e) | ✓ | ✓ | ✓ | 47.66 | 48.09 | 48.40 | 50.20
" + }, + { + "type": "table_caption", + "bbox": [ + 0.523, + 0.145, + 0.788, + 0.243 + ], + "angle": 0, + "content": "Table 7: Ablation study on spatiotemporal consistency. All variants use a MinkUNet-34 [19] as the 3D backbone and ViT-B [72] for distillation. 0 denotes current timestamp. 0.5s corresponds to a \\(20\\mathrm{Hz}\\) timespan. All scores are given in percentage \\((\\%)\\)." + }, + { + "type": "table", + "bbox": [ + 0.527, + 0.248, + 0.785, + 0.353 + ], + "angle": 0, + "content": "
Timespan | nuScenes LP | nuScenes 1% | KITTI 1% | Waymo 1%
Single-Frame | 46.17 | 46.91 | 47.26 | 49.01
0, -0.5s | 46.39 | 47.08 | 47.99 | 49.78
-0.5s, 0, +0.5s | 47.66 | 48.09 | 48.40 | 50.20
-1.0s, 0, +1.0s | 47.60 | 47.99 | 48.43 | 50.18
-1.5s, 0, +1.5s | 46.43 | 48.27 | 48.34 | 49.93
-2.0s, 0, +2.0s | 46.20 | 48.49 | 48.18 | 50.01
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.372, + 0.785, + 0.417 + ], + "angle": 0, + "content": "to the fact that models with limited parameters are less effective in capturing patterns during representation learning, and, conversely, models with a large set of trainable parameters tend to be difficult to converge." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.418, + 0.787, + 0.553 + ], + "angle": 0, + "content": "Representation Density. The consistency regularization between sparse and dense point clouds encourages useful representation learning. To analyze the degree of regularization, we investigate various point cloud densities and show the results in Tab. 4. We observe that a suitable point cloud density can improve the model's ability to feature representation. When the density of point clouds is too dense, the motion of objects is obvious in the scene. However, we generate superpoints of the dense points based on superpixels captured at the time of sparse points. The displacement difference of dynamic objects makes the projection misalignment. A trade-off selection would be two or three sweeps." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.553, + 0.788, + 0.704 + ], + "angle": 0, + "content": "Temporal Consistency. The ability to capture semantically coherent temporal cues is crucial in our SuperFlow framework. In Eq. (5), we operate temporal contrastive learning on superpoints features across scenes. As shown in Tab. 7, we observe that temporal contrastive learning achieves better results compared to single-frame methods. We also compare the impact of frames used to capture temporal cues. When we use 3 frames, it acquires more context-aware information than 2 frames and achieves better results. Finally, we study the impact of the timespan between frames. The performance will drop with a longer timespan. We conjecture that scenes with short timespans have more consistency, while long timespans tend to have more uncertain factors." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.705, + 0.788, + 0.81 + ], + "angle": 0, + "content": "Component Analysis. In Tab. 6, we analyze each component in the SuperFlow framework, including view consistency, dense-to-sparse regularization, and flow-based contrastive learning. The baseline is SLiDR [82] with VFMs-based superpixels. View consistency brings slight improvements among the popular datasets with a few annotations. D2S distills dense features into sparse features and it brings about \\(1\\%\\) mIoU gains. FCL extracts temporal cues via temporal contrastive learning and it significantly leads to about \\(2.0\\%\\) mIoU gains." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Visual Inspections. Similarity maps presented in Fig. 6 denote the segmentation ability of our pretrained model. The query points include \"car\", \"man-" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.353, + 0.127 + ], + "angle": 0, + "content": "X. Xu et al." 
+ }, + { + "type": "image", + "bbox": [ + 0.22, + 0.144, + 0.397, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.268, + 0.215, + 0.351, + 0.226 + ], + "angle": 0, + "content": "(a) \"car\" (3D)" + }, + { + "type": "image", + "bbox": [ + 0.413, + 0.144, + 0.59, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.441, + 0.215, + 0.562, + 0.226 + ], + "angle": 0, + "content": "(b) \"manmade\" (3D)" + }, + { + "type": "image", + "bbox": [ + 0.607, + 0.144, + 0.783, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.64, + 0.215, + 0.751, + 0.226 + ], + "angle": 0, + "content": "(c) \"sidewalk\" (3D)" + }, + { + "type": "image", + "bbox": [ + 0.221, + 0.227, + 0.397, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.267, + 0.297, + 0.351, + 0.308 + ], + "angle": 0, + "content": "(d) \"car\" (2D)" + }, + { + "type": "image", + "bbox": [ + 0.414, + 0.227, + 0.59, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.442, + 0.297, + 0.561, + 0.308 + ], + "angle": 0, + "content": "(e) \"manmade\" (2D)" + }, + { + "type": "image", + "bbox": [ + 0.607, + 0.227, + 0.783, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.64, + 0.297, + 0.751, + 0.308 + ], + "angle": 0, + "content": "(f) \"sidewalk\" (2D)" + }, + { + "type": "image", + "bbox": [ + 0.221, + 0.309, + 0.397, + 0.378 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.248, + 0.379, + 0.371, + 0.391 + ], + "angle": 0, + "content": "(g) \"vegetation\" (3D)" + }, + { + "type": "image", + "bbox": [ + 0.414, + 0.309, + 0.59, + 0.378 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.421, + 0.379, + 0.583, + 0.391 + ], + "angle": 0, + "content": "(h) \"driveable surface\" (3D)" + }, + { + "type": "image", + "bbox": [ + 0.607, + 0.309, + 0.783, + 0.378 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.645, + 0.379, + 0.746, + 0.39 + ], + "angle": 0, + "content": "(i) \"terrain\" (3D)" + }, + { + "type": "image", + "bbox": [ + 0.221, + 0.391, + 0.397, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.248, + 0.461, + 0.37, + 0.473 + ], + "angle": 0, + "content": "(j) \"vegetation\" (2D)" + }, + { + "type": "image", + "bbox": [ + 0.414, + 0.391, + 0.59, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.421, + 0.461, + 0.583, + 0.473 + ], + "angle": 0, + "content": "(k) \"driveable surface\" (2D)" + }, + { + "type": "image", + "bbox": [ + 0.607, + 0.391, + 0.783, + 0.459 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.645, + 0.461, + 0.746, + 0.473 + ], + "angle": 0, + "content": "(l) \"terrain\" (2D)" + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.484, + 0.788, + 0.541 + ], + "angle": 0, + "content": "Fig. 6: Cosine similarity between features of a query point (red dot) and: 1) features of other points projected in the image (the 1st and 3rd rows); and 2) features of an image with the same scene (the 2nd and 4th rows). The color goes from red to blue denoting low and high similarity scores, respectively. Best viewed in color." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.561, + 0.788, + 0.652 + ], + "angle": 0, + "content": "made\", \"sidewalk\", \"vegetation\", \"driveable surface\", and \"terrain\". SuperFlow shows strong semantic discriminative ability without fine-tuning. We conjecture that this stems from three aspects: 1) view-consistent superpixels enable the network to learn semantic representations; 2) dense-to-sparse regularization helps the network learn features that are robust to varying point densities; 3) temporal contrastive learning extracts semantic cues across scenes." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.674, + 0.36, + 0.691 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.704, + 0.789, + 0.841 + ], + "angle": 0, + "content": "In this work, we presented SuperFlow to tackle the challenging task of 3D data representation learning. Motivated by the sequential nature of LiDAR acquisitions, we proposed three novel designs to better encourage spatiotemporal consistency, encompassing view consistency alignment, dense-to-sparse regularization, and flow-based contrastive learning. Extensive experiments across 11 diverse LiDAR datasets showed that SuperFlow consistently outperforms prior approaches in linear probing, downstream fine-tuning, and robustness probing. Our study on scaling up 2D and 3D network capacities reveals insightful findings. We hope this work could shed light on future designs of powerful 3D foundation models." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.294, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.299 + ], + "angle": 0, + "content": "Acknowledgements. This work was supported by the Scientific and Technological Innovation 2030 - \"New Generation Artificial Intelligence\" Major Project (No. 2021ZD0112200), the Joint Funds of the National Natural Science Foundation of China (No. U21B2044), the Key Research and Development Program of Jiangsu Province (No. BE2023016-3), and the Talent Research Start-up Foundation of Nanjing University of Posts and Telecommunications (No. NY223172). This work was also supported by the Ministry of Education, Singapore, under its MOE AcRF Tier 2 (MOET2EP20221-0012), NTU NAP, and under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding Initiative, as well as cash and in-kind contribution from the industry partner(s)." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.325, + 0.323, + 0.341 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.231, + 0.361, + 0.787, + 0.404 + ], + "angle": 0, + "content": "1. Achanta, R., Shaji, A., Smith, K., Lucchi, A., Fua, P., Susstrunk, S.: SLIC superpixels compared to state-of-the-art superpixel methods. IEEE Transactions on Pattern Analysis and Machine Intelligence 34(11), 2274-2282 (2012)" + }, + { + "type": "ref_text", + "bbox": [ + 0.231, + 0.405, + 0.787, + 0.445 + ], + "angle": 0, + "content": "2. Aygun, M., Osep, A., Weber, M., Maximov, M., Stachniss, C., Behley, J., Leal-Taixe, L.: 4d panoptic lidar segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5527-5537 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.231, + 0.447, + 0.787, + 0.502 + ], + "angle": 0, + "content": "3. 
Badue, C., Guidolini, R., Carneiro, R.V., Azevedo, P., Cardoso, V.B., Forechi, A., Jesus, L., Berriel, R., Paixão, T.M., Mutz, F., de Paula Veronese, L., Oliveira-Santos, T., Souza, A.F.D.: Self-driving cars: A survey. Expert Systems with Applications 165, 113816 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.231, + 0.504, + 0.787, + 0.558 + ], + "angle": 0, + "content": "4. Behley, J., Garbade, M., Milioto, A., Quenzel, J., Behnke, S., Gall, J., Stachniss, C.: Towards 3d lidar-based semantic scene understanding of 3d point cloud sequences: The semantickitti dataset. International Journal of Robotics Research 40, 959-967 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.231, + 0.559, + 0.787, + 0.613 + ], + "angle": 0, + "content": "5. Behley, J., Garbade, M., Milioto, A., Quenzel, J., Behnke, S., Stachniss, C., Gall, J.: Semantickitti: A dataset for semantic scene understanding of lidar sequences. In: IEEE/CVF International Conference on Computer Vision. pp. 9297-9307 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.231, + 0.615, + 0.787, + 0.657 + ], + "angle": 0, + "content": "6. Bengio, Y., Courville, A., Vincent, P.: Representation learning: A review and new perspectives. IEEE Transactions on Pattern Analysis and Machine Intelligence 35(8), 1798-1828 (2013)" + }, + { + "type": "ref_text", + "bbox": [ + 0.231, + 0.658, + 0.787, + 0.699 + ], + "angle": 0, + "content": "7. Boulch, A., Sautier, C., Michele, B., Puy, G., Marlet, R.: Also: Automotive lidar self-supervision by occupancy estimation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13455-13465 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.231, + 0.7, + 0.787, + 0.755 + ], + "angle": 0, + "content": "8. Caesar, H., Bankiti, V., Lang, A.H., Vora, S., Liong, V.E., Xu, Q., Krishnan, A., Pan, Y., Baldan, G., Beijbom, O.: nuscenes: A multimodal dataset for autonomous driving. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11621-11631 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.231, + 0.757, + 0.787, + 0.798 + ], + "angle": 0, + "content": "9. Cao, A.Q., Dai, A., de Charette, R.: Pasco: Urban 3d panoptic scene completion with uncertainty awareness. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 14554-14564 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.799, + 0.787, + 0.84 + ], + "angle": 0, + "content": "10. Chen, Q., Vora, S., Beijbom, O.: Polarstream: Streaming lidar object detection and segmentation with polar pillars. In: Advances in Neural Information Processing Systems. vol. 34 (2021)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.361, + 0.787, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.352, + 0.127 + ], + "angle": 0, + "content": "X. Xu et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.147, + 0.785, + 0.189 + ], + "angle": 0, + "content": "11. Chen, R., Liu, Y., Kong, L., Chen, N., Zhu, X., Ma, Y., Liu, T., Wang, W.: Towards label-free scene understanding by vision foundation models. In: Advances in Neural Information Processing Systems. vol. 36 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.19, + 0.785, + 0.233 + ], + "angle": 0, + "content": "12. 
Chen, R., Liu, Y., Kong, L., Zhu, X., Ma, Y., Li, Y., Hou, Y., Qiao, Y., Wang, W.: Clip2scene: Towards label-efficient 3d scene understanding by clip. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7020-7030 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.234, + 0.785, + 0.274 + ], + "angle": 0, + "content": "13. Chen, T., Kornblith, S., Norouzi, M., Hinton, G.: A simple framework for contrastive learning of visual representations. In: International Conference on Machine Learning. pp. 1597-1607 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.276, + 0.785, + 0.303 + ], + "angle": 0, + "content": "14. Chen, X., Fan, H., Girshick, R., He, K.: Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.304, + 0.785, + 0.346 + ], + "angle": 0, + "content": "15. Chen, X., Xie, S., He, K.: An empirical study of training self-supervised vision transformers. In: IEEE/CVF International Conference on Computer Vision. pp. 9640-9649 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.346, + 0.785, + 0.388 + ], + "angle": 0, + "content": "16. Chen, Y., Nießner, M., Dai, A.: 4dcontrast: Contrastive learning with dynamic correspondences for 3d scene understanding. In: European Conference on Computer Vision. pp. 543-560 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.389, + 0.785, + 0.431 + ], + "angle": 0, + "content": "17. Cheng, H., Han, X., Xiao, G.: Cenet: Toward concise and efficient lidar semantic segmentation for autonomous driving. In: IEEE International Conference on Multimedia and Expo. pp. 1-6 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.432, + 0.785, + 0.487 + ], + "angle": 0, + "content": "18. Cheng, R., Razani, R., Taghavi, E., Li, E., Liu, B.: Af2-s3net: Attentive feature fusion with adaptive feature selection for sparse semantic segmentation network. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 12547-12556 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.488, + 0.785, + 0.529 + ], + "angle": 0, + "content": "19. Choy, C., Gwak, J., Savarese, S.: 4d spatio-temporal convnets: Minkowski convolutional neural networks. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3075-3084 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.53, + 0.785, + 0.572 + ], + "angle": 0, + "content": "20. Contributors, M.: MMDetection3D: OpenMMLab next-generation platform for general 3D object detection. https://github.com/open-mmlab/mmdetection3d (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.573, + 0.785, + 0.614 + ], + "angle": 0, + "content": "21. Cortinhal, T., Tzelepis, G., Aksoy, E.E.: Salsanext: Fast, uncertainty-aware semantic segmentation of lidar point clouds. In: International Symposium on Visual Computing. pp. 207-222 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.615, + 0.785, + 0.671 + ], + "angle": 0, + "content": "22. Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., Uszkoreit, J., Houlsby, N.: An image is worth 16x16 words: Transformers for image recognition at scale. In: International Conference on Learning Representations (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.671, + 0.785, + 0.713 + ], + "angle": 0, + "content": "23. 
Duerr, F., Pfaller, M., Weigel, H., Beyerer, J.: Lidar-based recurrent 3d semantic segmentation with temporal memory alignment. In: International Conference on 3D Vision. pp. 781-790 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.714, + 0.785, + 0.755 + ], + "angle": 0, + "content": "24. Ester, M., Kriegel, H.P., Sander, J., Xu, X.: A density-based algorithm for discovering clusters in large spatial databases with noise. In: ACM SIGKDD Conference on Knowledge Discovery and Data Mining. pp. 226-231 (1996)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.757, + 0.785, + 0.798 + ], + "angle": 0, + "content": "25. Fischler, M.A., Bolles, R.C.: Random sample consensus: A paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM 24(6), 381-395 (1981)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.799, + 0.785, + 0.84 + ], + "angle": 0, + "content": "26. Fong, W.K., Mohan, R., Hurtado, J.V., Zhou, L., Caesar, H., Beijbom, O., Valada, A.: Panoptic nuscenes: A large-scale benchmark for lidar panoptic segmentation and tracking. IEEE Robotics and Automation Letters 7, 3795-3802 (2022)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.147, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.294, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.147, + 0.785, + 0.189 + ], + "angle": 0, + "content": "27. Gao, B., Pan, Y., Li, C., Geng, S., Zhao, H.: Are we hungry for 3d lidar data for semantic segmentation? a survey of datasets and methods. IEEE Transactions on Intelligent Transportation Systems 23(7), 6063-6081 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.19, + 0.785, + 0.231 + ], + "angle": 0, + "content": "28. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? the kitti vision benchmark suite. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3354-3361 (2012)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.231, + 0.785, + 0.272 + ], + "angle": 0, + "content": "29. Hao, X., Wei, M., Yang, Y., Zhao, H., Zhang, H., Zhou, Y., Wang, Q., Li, W., Kong, L., Zhang, J.: Is your hd map constructor reliable under sensor corruptions? arXiv preprint arXiv:2406.12214 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.273, + 0.785, + 0.314 + ], + "angle": 0, + "content": "30. He, K., Chen, X., Xie, S., Li, Y., Dólár, P., Girshick, R.: Masked autoencoders are scalable vision learners. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 16000-16009 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.315, + 0.785, + 0.355 + ], + "angle": 0, + "content": "31. He, K., Fan, H., Wu, Y., Xie, S., Girshick, R.: Momentum contrast for unsupervised visual representation learning. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9729-9738 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.356, + 0.785, + 0.397 + ], + "angle": 0, + "content": "32. Hess, G., Jaxing, J., Svensson, E., Hagerman, D., Petersson, C., Svensson, L.: Masked autoencoders for self-supervised learning on automotive point clouds. 
arXiv preprint arXiv:2207.00531 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.398, + 0.785, + 0.438 + ], + "angle": 0, + "content": "33. Hong, F., Kong, L., Zhou, H., Zhu, X., Li, H., Liu, Z.: Unified 3d and 4d panoptic segmentation via dynamic shifting networks. IEEE Transactions on Pattern Analysis and Machine Intelligence 46(5), 3480-3495 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.439, + 0.785, + 0.481 + ], + "angle": 0, + "content": "34. Hong, F., Zhou, H., Zhu, X., Li, H., Liu, Z.: Lidar-based panoptic segmentation via dynamic shifting network. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13090-13099 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.481, + 0.785, + 0.522 + ], + "angle": 0, + "content": "35. Hou, J., Graham, B., Nießner, M., Xie, S.: Exploring data-efficient 3d scene understanding with contrastive scene contexts. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15587-15597 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.522, + 0.785, + 0.563 + ], + "angle": 0, + "content": "36. Hu, Q., Yang, B., Fang, G., Guo, Y., Leonardis, A., Trigoni, N., Markham, A.: Sqn: Weakly-supervised semantic segmentation of large-scale 3d point clouds. In: European Conference on Computer Vision. pp. 600-619 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.564, + 0.785, + 0.618 + ], + "angle": 0, + "content": "37. Hu, Q., Yang, B., Khalid, S., Xiao, W., Trigoni, N., Markham, A.: Towards semantic segmentation of urban-scale 3d point clouds: A dataset, benchmarks and challenges. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4977-4987 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.619, + 0.785, + 0.66 + ], + "angle": 0, + "content": "38. Hu, Z., Bai, X., Zhang, R., Wang, X., Sun, G., Fu, H., Tai, C.L.: Lidal: Interframe uncertainty based active learning for 3d lidar semantic segmentation. In: European Conference on Computer Vision. pp. 248-265 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.66, + 0.785, + 0.702 + ], + "angle": 0, + "content": "39. Huang, S., Xie, Y., Zhu, S.C., Zhu, Y.: Spatio-temporal self-supervised representation learning for 3d point clouds. In: IEEE/CVF International Conference on Computer Vision. pp. 6535-6545 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.702, + 0.785, + 0.743 + ], + "angle": 0, + "content": "40. Jaritz, M., Vu, T.H., de Charette, R., Wirbel, E., Pérez, P.: xmuda: Cross-modal unsupervised domain adaptation for 3d semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 12605-12614 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.744, + 0.785, + 0.785 + ], + "angle": 0, + "content": "41. Jiang, P., Osteen, P., Wigness, M., Saripallig, S.: Rellis-3d dataset: Data, benchmarks and analysis. In: IEEE International Conference on Robotics and Automation. pp. 1110–1116 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.785, + 0.785, + 0.84 + ], + "angle": 0, + "content": "42. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., Dollar, P., Girshick, R.: Segment anything. In: IEEE/CVF International Conference on Computer Vision. pp. 
4015-4026 (2023)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.147, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "18" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.353, + 0.127 + ], + "angle": 0, + "content": "X. Xu et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.147, + 0.785, + 0.189 + ], + "angle": 0, + "content": "43. Klokov, A., Pak, D.U., Khorin, A., Yudin, D., Kochiev, L., Luchinskiy, V., Bezuglyj, V.: Daps3d: Domain adaptive projective segmentation of 3d lidar point clouds. IEEE Access 11, 79341-79356 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.19, + 0.785, + 0.233 + ], + "angle": 0, + "content": "44. Kong, L., Liu, Y., Chen, R., Ma, Y., Zhu, X., Li, Y., Hou, Y., Qiao, Y., Liu, Z.: Rethinking range view representation for lidar segmentation. In: IEEE/CVF International Conference on Computer Vision. pp. 228-240 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.234, + 0.785, + 0.288 + ], + "angle": 0, + "content": "45. Kong, L., Liu, Y., Li, X., Chen, R., Zhang, W., Ren, J., Pan, L., Chen, K., Liu, Z.: Robo3d: Towards robust and reliable 3d perception against corruptions. In: IEEE/CVF International Conference on Computer Vision. pp. 19994-20006 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.289, + 0.785, + 0.331 + ], + "angle": 0, + "content": "46. Kong, L., Quader, N., Liong, V.E.: Conda: Unsupervised domain adaptation for lidar segmentation via regularized domain concatenation. In: IEEE International Conference on Robotics and Automation. pp. 9338-9345 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.332, + 0.785, + 0.374 + ], + "angle": 0, + "content": "47. Kong, L., Ren, J., Pan, L., Liu, Z.: Lasermix for semi-supervised lidar semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21705-21715 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.375, + 0.785, + 0.416 + ], + "angle": 0, + "content": "48. Kong, L., Xie, S., Hu, H., Ng, L.X., Cottereau, B.R., Ooi, W.T.: Robodepth: Robust out-of-distribution depth estimation under corruptions. In: Advances in Neural Information Processing Systems. vol. 36 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.417, + 0.785, + 0.459 + ], + "angle": 0, + "content": "49. Kong, L., Xu, X., Ren, J., Zhang, W., Pan, L., Chen, K., Ooi, W.T., Liu, Z.: Multi-modal data-efficient 3d scene understanding for autonomous driving. arXiv preprint arXiv:2405.05258 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.459, + 0.785, + 0.5 + ], + "angle": 0, + "content": "50. Krispel, G., Schinagl, D., Fruhwirth-Reisinger, C., Possegger, H., Bischof, H.: Maeli: Masked autoencoder for large-scale lidar point clouds. In: IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 3383-3392 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.502, + 0.785, + 0.543 + ], + "angle": 0, + "content": "51. Le-Khac, P.H., Healy, G., Smeaton, A.F.: Contrastive representation learning: A framework and review. IEEE Transactions on Pattern Analysis and Machine Intelligence 8, 193907-193934 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.544, + 0.785, + 0.586 + ], + "angle": 0, + "content": "52. Li, L., Shum, H.P., Breckon, T.P.: Less is more: Reducing task and model complexity for 3d point cloud semantic segmentation. 
In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9361-9371 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.587, + 0.785, + 0.628 + ], + "angle": 0, + "content": "53. Li, R., de Charette, R., Cao, A.Q.: Coarse3d: Class-prototypes for contrastive learning in weakly-supervised 3d point cloud segmentation. In: British Machine Vision Conference (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.629, + 0.785, + 0.67 + ], + "angle": 0, + "content": "54. Li, Y., Kong, L., Hu, H., Xu, X., Huang, X.: Optimizing lidar placements for robust driving perception in adverse conditions. arXiv preprint arXiv:2403.17009 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.671, + 0.785, + 0.713 + ], + "angle": 0, + "content": "55. Lim, H., Oh, M., Myung, H.: Patchwork: Concentric zone-based region-wise ground segmentation with ground likelihood estimation using a 3d lidar sensor. IEEE Robotics and Automation Letters 6(4), 6458-6465 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.714, + 0.785, + 0.755 + ], + "angle": 0, + "content": "56. Liong, V.E., Nguyen, T.N.T., Widjaja, S., Sharma, D., Chong, Z.J.: Amvnet: Assertion-based multi-view fusion network for lidar semantic segmentation. arXiv preprint arXiv:2012.04934 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.757, + 0.785, + 0.798 + ], + "angle": 0, + "content": "57. Liu, M., Zhou, Y., Qi, C.R., Gong, B., Su, H., Anguelov, D.: Less: Label-efficient semantic segmentation for lidar point clouds. In: European Conference on Computer Vision. pp. 70-89 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.799, + 0.785, + 0.84 + ], + "angle": 0, + "content": "58. Liu, M., Yurtsever, E., Zhou, X., Fossaert, J., Cui, Y., Zagar, B.L., Knoll., A.C.: A survey on autonomous driving datasets: Data statistic, annotation, and outlook. arXiv preprint arXiv:2401.01454 (2024)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.147, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.295, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "19" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.147, + 0.785, + 0.189 + ], + "angle": 0, + "content": "59. Liu, Y., Bai, Y., Kong, L., Chen, R., Hou, Y., Shi, B., Li, Y.: Pcseg: An open source point cloud segmentation codebase. https://github.com/PJLab-ADG/PCSeg (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.189, + 0.785, + 0.243 + ], + "angle": 0, + "content": "60. Liu, Y., Chen, R., Li, X., Kong, L., Yang, Y., Xia, Z., Bai, Y., Zhu, X., Ma, Y., Li, Y., Qiao, Y., Hou, Y.: Uniseg: A unified multi-modal lidar segmentation network and the openpcseg codebase. In: IEEE/CVF International Conference on Computer Vision. pp. 21662-21673 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.243, + 0.785, + 0.284 + ], + "angle": 0, + "content": "61. Liu, Y., Kong, L., Cen, J., Chen, R., Zhang, W., Pan, L., Chen, K., Liu, Z.: Segment any point cloud sequences by distilling vision foundation models. In: Advances in Neural Information Processing Systems. vol. 36 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.284, + 0.785, + 0.325 + ], + "angle": 0, + "content": "62. 
Liu, Y., Kong, L., Wu, X., Chen, R., Li, X., Pan, L., Liu, Z., Ma, Y.: Multi-space alignments towards universal lidar segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 14648-14661 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.325, + 0.785, + 0.365 + ], + "angle": 0, + "content": "63. Liu, Y.C., Huang, Y.K., Chiang, H.Y., Su, H.T., Liu, Z.Y., Chen, C.T., Tseng, C.Y., Hsu, W.H.: Learning from 2d: Contrastive pixel-to-point knowledge transfer for 3d pretraining. arXiv preprint arXiv:2104.04687 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.365, + 0.785, + 0.405 + ], + "angle": 0, + "content": "64. Liu, Y., Chen, J., Zhang, Z., Huang, J., Yi, L.: Leaf: Learning frames for 4d point cloud sequence understanding. In: IEEE/CVF International Conference on Computer Vision. pp. 604-613 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.405, + 0.785, + 0.432 + ], + "angle": 0, + "content": "65. Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: International Conference on Learning Representations (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.432, + 0.785, + 0.487 + ], + "angle": 0, + "content": "66. Mahmoud, A., Hu, J.S., Kuai, T., Harakeh, A., Paull, L., Waslander, S.L.: Self-supervised image-to-point distillation via semantically tolerant contrastive loss. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7102-7110 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.487, + 0.785, + 0.527 + ], + "angle": 0, + "content": "67. Michele, B., Boulch, A., Puy, G., Vu, T.H., Marlet, R., Courty, N.: Saluda: Surface-based automotive lidar unsupervised domain adaptation. arXiv preprint arXiv:2304.03251 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.527, + 0.785, + 0.568 + ], + "angle": 0, + "content": "68. Milioto, A., Vizzo, I., Behley, J., Stachniss, C.: Rangenet++: Fast and accurate lidar semantic segmentation. In: IEEE/RSJ International Conference on Intelligent Robots and Systems. pp. 4213-4220 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.568, + 0.785, + 0.621 + ], + "angle": 0, + "content": "69. Muhammad, K., Ullah, A., Lloret, J., Ser, J.D., de Albuquerque, V.H.C.: Deep learning for safe autonomous driving: Current challenges and future directions. IEEE Transactions on Intelligent Transportation Systems 22(7), 4316-4336 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.622, + 0.785, + 0.662 + ], + "angle": 0, + "content": "70. Nunes, L., Marcuzzi, R., Chen, X., Behley, J., Stachniss, C.: Segcontrast: 3d point cloud feature representation learning through self-supervised segment discrimination. IEEE Robotics and Automation Letters 7(2), 2116-2123 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.662, + 0.785, + 0.717 + ], + "angle": 0, + "content": "71. Nunes, L., Wiesmann, L., Marcuzzi, R., Chen, X., Behley, J., Stachniss, C.: Temporal consistent 3d lidar representation learning for semantic perception in autonomous driving. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5217-5228 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.717, + 0.785, + 0.799 + ], + "angle": 0, + "content": "72. 
Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., Assran, M., Ballas, N., Galuba, W., Howes, R., Huang, P.Y., Li, S.W., Misra, I., Rabbat, M., Sharma, V., Synnaeve, G., Xu, H., Jegou, H., Mairal, J., Labatut, P., Joulin, A., Bojanowski, P.: Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.799, + 0.785, + 0.84 + ], + "angle": 0, + "content": "73. Pan, Y., Gao, B., Mei, J., Geng, S., Li, C., Zhao, H.: Semanticposs: A point cloud dataset with large quantity of dynamic instances. In: IEEE Intelligent Vehicles Symposium. pp. 687-693 (2020)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.147, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "20" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.352, + 0.127 + ], + "angle": 0, + "content": "X. Xu et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.225, + 0.148, + 0.786, + 0.189 + ], + "angle": 0, + "content": "74. Pang, B., Xia, H., Lu, C.: Unsupervised 3d point cloud representation learning by triangle constrained contrast for autonomous driving. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5229-5239 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.191, + 0.787, + 0.233 + ], + "angle": 0, + "content": "75. Puy, G., Gidaris, S., Boulch, A., Simeoni, O., Sautier, C., Pérez, P., Bursuc, A., Marlet, R.: Revisiting the distillation of image representations into point clouds for autonomous driving. arXiv preprint arXiv:2310.17504 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.234, + 0.786, + 0.288 + ], + "angle": 0, + "content": "76. Puy, G., Gidaris, S., Boulch, A., Simeoni, O., Sautier, C., Pérez, P., Bursuc, A., Marlet, R.: Three pillars improving vision foundation model distillation for lidar. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21519-21529 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.29, + 0.786, + 0.318 + ], + "angle": 0, + "content": "77. Qiu, H., Yu, B., Tao, D.: Gfnet: Geometric flow network for 3d point cloud semantic segmentation. Transactions on Machine Learning Research (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.32, + 0.786, + 0.374 + ], + "angle": 0, + "content": "78. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International conference on machine learning. pp. 8748-8763. PMLR (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.376, + 0.786, + 0.416 + ], + "angle": 0, + "content": "79. Rizzoli, G., Barbato, F., Zanuttigh, P.: Multimodal semantic segmentation in autonomous driving: A review of current approaches and future perspectives. Technologies 10(4) (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.418, + 0.786, + 0.472 + ], + "angle": 0, + "content": "80. Saltori, C., Krivosheev, E., Lathuilière, S., Sebe, N., Galasso, F., Fiameni, G., Ricci, E., Poiesi, F.: Gipso: Geometrically informed propagation for online adaptation in 3d lidar segmentation. In: European Conference on Computer Vision. pp. 
567-585 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.474, + 0.786, + 0.515 + ], + "angle": 0, + "content": "81. Sautier, C., Puy, G., Boulch, A., Marlet, R., Lepetit, V.: Bevcontrast: Self-supervision in bev space for automotive lidar point clouds. arXiv preprint arXiv:2310.17281 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.517, + 0.786, + 0.558 + ], + "angle": 0, + "content": "82. Sautier, C., Puy, G., Gidaris, S., Boulch, A., Bursuc, A., Marlet, R.: Image-to-lidar self-supervised distillation for autonomous driving data. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9891-9901 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.559, + 0.786, + 0.613 + ], + "angle": 0, + "content": "83. Shen, Z., Sheng, X., Fan, H., Wang, L., Guo, Y., Liu, Q., Wen, H., Zhou, X.: Masked spatio-temporal structure prediction for self-supervised learning on point cloud videos. In: IEEE/CVF International Conference on Computer Vision. pp. 16580-16589 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.615, + 0.786, + 0.67 + ], + "angle": 0, + "content": "84. Sheng, X., Shen, Z., Xiao, G., Wang, L., Guo, Y., Fan, H.: Point contrastive prediction with semantic clustering for self-supervised learning on point cloud videos. In: IEEE/CVF International Conference on Computer Vision. pp. 16515-16524 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.672, + 0.786, + 0.713 + ], + "angle": 0, + "content": "85. Shi, H., Lin, G., Wang, H., Hung, T.Y., Wang, Z.: Spsequencenet: Semantic segmentation network on 4d point clouds. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4574-4583 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.715, + 0.786, + 0.769 + ], + "angle": 0, + "content": "86. Shi, H., Wei, J., Li, R., Liu, F., Lin, G.: Weakly supervised segmentation on outdoor 4d point clouds with temporal matching and spatial graph propagation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11840-11849 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.77, + 0.786, + 0.798 + ], + "angle": 0, + "content": "87. Smith, L.N., Topin, N.: Super-convergence: Very fast training of neural networks using large learning rates. arXiv preprint arXiv:1708.07120 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.799, + 0.786, + 0.841 + ], + "angle": 0, + "content": "88. Sun, J., Xu, X., Kong, L., Liu, Y., Li, L., Zhu, C., Zhang, J., Xiao, Z., Chen, R., Wang, T., Zhang, W., Chen, K., Qing, C.: An empirical study of training state-of-the-art lidar segmentation models. arXiv preprint arXiv:2405.14870 (2024)" + }, + { + "type": "list", + "bbox": [ + 0.225, + 0.148, + 0.787, + 0.841 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.295, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.117, + 0.784, + 0.127 + ], + "angle": 0, + "content": "21" + }, + { + "type": "ref_text", + "bbox": [ + 0.225, + 0.147, + 0.786, + 0.232 + ], + "angle": 0, + "content": "89. Sun, P., Kretzschmar, H., Dotiwalla, X., Chouard, A., Patnaik, V., Tsui, P., Guo, J., Zhou, Y., Chai, Y., Caine, B., Vasudevan, V., Han, W., Ngiam, J., Zhao, H., Timofeev, A., Ettinger, S., Krivokon, M., Gao, A., Joshi, A., Zhang, Y., Shlens, J., Chen, Z., Anguelov, D.: Scalability in perception for autonomous driving: Waymo open dataset. 
In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2446-2454 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.225, + 0.233, + 0.787, + 0.274 + ], + "angle": 0, + "content": "90. Tang, H., Liu, Z., Zhao, S., Lin, Y., Lin, J., Wang, H., Han, S.: Searching efficient 3d architectures with sparse point-voxel convolution. In: European Conference on Computer Vision. pp. 685-702 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.225, + 0.275, + 0.787, + 0.317 + ], + "angle": 0, + "content": "91. Tarvainen, A., Valpola, H.: Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. In: Advances in Neural Information Processing Systems. vol. 30 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.318, + 0.787, + 0.358 + ], + "angle": 0, + "content": "92. Triess, L.T., Dreissig, M., Rist, C.B., Zollner, J.M.: A survey on deep domain adaptation for lidar perception. In: IEEE Intelligent Vehicles Symposium Workshops. pp. 350-357 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.36, + 0.787, + 0.402 + ], + "angle": 0, + "content": "93. Uecker, M., Fleck, T., Pflugfelder, M., Zöllner, J.M.: Analyzing deep learning representations of point clouds for real-time in-vehicle lidar perception. arXiv preprint arXiv:2210.14612 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.403, + 0.787, + 0.444 + ], + "angle": 0, + "content": "94. Unal, O., Dai, D., Gool, L.V.: Scribble-supervised lidar semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2697-2707 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.445, + 0.787, + 0.487 + ], + "angle": 0, + "content": "95. Wei, W., Nejadasl, F.K., Gevers, T., Oswald, M.R.: T-mae: Temporal masked autoencoders for point cloud representation learning. arXiv preprint arXiv:2312.10217 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.488, + 0.787, + 0.529 + ], + "angle": 0, + "content": "96. Wu, Y., Zhang, T., Ke, W., Süssstrunk, S., Salzmann, M.: Spatiotemporal self-supervised learning for point clouds in the wild. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5251-5260 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.531, + 0.787, + 0.572 + ], + "angle": 0, + "content": "97. Xiao, A., Huang, J., Guan, D., Zhan, F., Lu, S.: Transfer learning from synthetic to real lidar point cloud for semantic segmentation. In: AAAI Conference on Artificial Intelligence. pp. 2795-2803 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.573, + 0.787, + 0.614 + ], + "angle": 0, + "content": "98. Xiao, A., Huang, J., Guan, D., Zhang, X., Lu, S., Shao, L.: Unsupervised point cloud representation learning with deep neural networks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence 45(9), 11321-11339 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.615, + 0.787, + 0.67 + ], + "angle": 0, + "content": "99. Xiao, A., Huang, J., Xuan, W., Ren, R., Liu, K., Guan, D., Saddik, A.E., Lu, S., Xing, E.: 3d semantic segmentation in the wild: Learning generalized models for adverse-condition point clouds. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9382-9392 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.22, + 0.671, + 0.786, + 0.712 + ], + "angle": 0, + "content": "100. 
Xie, B., Li, S., Guo, Q., Liu, C.H., Cheng, X.: Annotator: A generic active learning baseline for lidar semantic segmentation. In: Advances in Neural Information Processing Systems. vol. 36 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.22, + 0.714, + 0.786, + 0.755 + ], + "angle": 0, + "content": "101. Xie, S., Gu, J., Guo, D., Qi, C.R., Guibas, L., Litany, O.: Pointcontrast: Unsupervised pre-training for 3d point cloud understanding. In: European Conference on Computer Vision. pp. 574-591 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.22, + 0.757, + 0.786, + 0.798 + ], + "angle": 0, + "content": "102. Xie, S., Kong, L., Zhang, W., Ren, J., Pan, L., Chen, K., Liu, Z.: Benchmarking and improving bird's eye view perception robustness in autonomous driving. arXiv preprint arXiv:2405.17426 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.22, + 0.799, + 0.786, + 0.84 + ], + "angle": 0, + "content": "103. Xie, Z., Zhang, Z., Cao, Y., Lin, Y., Bao, J., Yao, Z., Dai, Q., Hu, H.: Simmim: A simple framework for masked image modeling. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9653-9663 (2022)" + }, + { + "type": "list", + "bbox": [ + 0.22, + 0.147, + 0.787, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "22" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.352, + 0.127 + ], + "angle": 0, + "content": "X. Xu et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.147, + 0.785, + 0.189 + ], + "angle": 0, + "content": "104. Xu, C., Wu, B., Wang, Z., Zhan, W., Vajda, P., Keutzer, K., Tomizuka, M.: Squeezesegv3: Spatially-adaptive convolution for efficient point-cloud segmentation. In: European Conference on Computer Vision. pp. 1-19 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.19, + 0.787, + 0.244 + ], + "angle": 0, + "content": "105. Xu, J., Zhang, R., Dou, J., Zhu, Y., Sun, J., Pu, S.: Rpvnet: A deep and efficient range-point-voxel fusion network for lidar point cloud segmentation. In: IEEE/CVF International Conference on Computer Vision. pp. 16024-16033 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.245, + 0.786, + 0.286 + ], + "angle": 0, + "content": "106. Xu, W., Li, X., Ni, P., Guang, X., Luo, H., Zhao, X.: Multi-view fusion driven 3d point cloud semantic segmentation based on hierarchical transformer. IEEE Sensors Journal 23(24), 31461-31470 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.287, + 0.786, + 0.314 + ], + "angle": 0, + "content": "107. Xu, X., Kong, L., Shuai, H., Liu, Q.: Frnet: Frustum-range networks for scalable lidar segmentation. arXiv preprint arXiv:2312.04484 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.315, + 0.786, + 0.357 + ], + "angle": 0, + "content": "108. Yin, J., Zhou, D., Zhang, L., Fang, J., Xu, C.Z., Shen, J., Wang, W.: Proposal contrast: Unsupervised pre-training for lidar-based 3d object detection. In: European Conference on Computer Vision. pp. 17-33 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.357, + 0.786, + 0.398 + ], + "angle": 0, + "content": "109. Zhang, H., Li, F., Zou, X., Liu, S., Li, C., Gao, J., Yang, J., Zhang, L.: A simple framework for open-vocabulary segmentation and detection. In: IEEE/CVF International Conference on Computer Vision. pp. 1020-1031 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.399, + 0.786, + 0.439 + ], + "angle": 0, + "content": "110. 
Zhang, S., Deng, J., Bai, L., Li, H., Ouyang, W., Zhang, Y.: Hvdistill: Transferring knowledge from images to point clouds via unsupervised hybrid-view distillation. International Journal of Computer Vision pp. 1-15 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.439, + 0.786, + 0.495 + ], + "angle": 0, + "content": "111. Zhang, Y., Zhou, Z., David, P., Yue, X., Xi, Z., Gong, B., Foroosh, H.: Polarnet: An improved grid representation for online lidar point clouds semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9601-9610 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.495, + 0.787, + 0.536 + ], + "angle": 0, + "content": "112. Zhang, Y., Hou, J., Yuan, Y.: A comprehensive study of the robustness for lidar-based 3d object detectors against adversarial attacks. International Journal of Computer Vision pp. 1-33 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.536, + 0.786, + 0.577 + ], + "angle": 0, + "content": "113. Zhang, Z., Girdhar, R., Joulin, A., Misra, I.: Self-supervised pretraining of 3d features on any point-cloud. In: IEEE/CVF International Conference on Computer Vision. pp. 10252-10263 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.577, + 0.786, + 0.619 + ], + "angle": 0, + "content": "114. Zhang, Z., Dong, Y., Liu, Y., Yi, L.: Complete-to-partial 4d distillation for self-supervised point cloud sequence representation learning. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 17661-17670 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.62, + 0.786, + 0.66 + ], + "angle": 0, + "content": "115. Zhang, Z., Yang, B., Wang, B., Li, B.: Growsp: Unsupervised semantic segmentation of 3d point clouds. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 17619-17629 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.66, + 0.786, + 0.702 + ], + "angle": 0, + "content": "116. Zhao, Y., Bai, L., Huang, X.: Fidnet: Lidar point cloud semantic segmentation with fully interpolation decoding. In: IEEE/RSJ International Conference on Intelligent Robots and Systems. pp. 4453-4458 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.702, + 0.786, + 0.743 + ], + "angle": 0, + "content": "117. Zhou, Z., Zhang, Y., Foroosh, H.: Panoptic-polarnet: Proposal-free lidar point cloud panoptic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13194-13203 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.744, + 0.786, + 0.785 + ], + "angle": 0, + "content": "118. Zhu, X., Zhou, H., Wang, T., Hong, F., Ma, Y., Li, W., Li, H., Lin, D.: Cylindrical and asymmetrical 3d convolution networks for lidar segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9939-9948 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.785, + 0.786, + 0.84 + ], + "angle": 0, + "content": "119. Zou, X., Dou, Z.Y., Yang, J., Gan, Z., Li, L., Li, C., Dai, X., Behl, H., Wang, J., Yuan, L., Peng, N., Wang, L., Lee, Y.J., Gao, J.: Generalized decoding for pixel, image, and language. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
15116-15127 (2023)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.294, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "23" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.147, + 0.788, + 0.19 + ], + "angle": 0, + "content": "120. Zou, X., Yang, J., Zhang, H., Li, F., Li, L., Gao, J., Lee, Y.J.: Segment everything everywhere all at once. In: Advances in Neural Information Processing Systems. vol. 36 (2023)" + } + ] +] \ No newline at end of file diff --git a/2024/4D Contrastive Superflows are Dense 3D Representation Learners/3b016017-cefc-4a8b-a706-93b64616c878_origin.pdf b/2024/4D Contrastive Superflows are Dense 3D Representation Learners/3b016017-cefc-4a8b-a706-93b64616c878_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..da4cec6f991ff31b900fe10bff5629536ce38145 --- /dev/null +++ b/2024/4D Contrastive Superflows are Dense 3D Representation Learners/3b016017-cefc-4a8b-a706-93b64616c878_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8361045bf8d7a1462f3bea2dd2e84ad365778ebc9c748d5a6b25dce6d4644386 +size 4758484 diff --git a/2024/4D Contrastive Superflows are Dense 3D Representation Learners/full.md b/2024/4D Contrastive Superflows are Dense 3D Representation Learners/full.md new file mode 100644 index 0000000000000000000000000000000000000000..fba88e16a12edb26af00fbbd5db12f9b800369b2 --- /dev/null +++ b/2024/4D Contrastive Superflows are Dense 3D Representation Learners/full.md @@ -0,0 +1,405 @@ +# 4D Contrastive Superflows are Dense 3D Representation Learners + +Xiang Xu $^{1,\star}$ , Lingdong Kong $^{2,3,*}$ , Hui Shuai $^{4}$ , Wenwei Zhang $^{2}$ , Liang Pan $^{2}$ , Kai Chen $^{2}$ , Ziwei Liu $^{5}$ , and Qingshan Liu $^{4,\text{图}}$ + +1 Nanjing University of Aeronautics and Astronautics + +2 Shanghai AI Laboratory + +$^{3}$ National University of Singapore + +$^{4}$ Nanjing University of Posts and Telecommunications + +S-Lab, Nanyang Technological University + +Abstract. In the realm of autonomous driving, accurate 3D perception is the foundation. However, developing such models relies on extensive human annotations – a process that is both costly and labor-intensive. To address this challenge from a data representation learning perspective, we introduce SuperFlow, a novel framework designed to harness consecutive LiDAR-camera pairs for establishing spatiotemporal pretraining objectives. SuperFlow stands out by integrating two key designs: 1) a dense-to-sparse consistency regularization, which promotes insensitivity to point cloud density variations during feature learning, and 2) a flow-based contrastive learning module, carefully crafted to extract meaningful temporal cues from readily available sensor calibrations. To further boost learning efficiency, we incorporate a plug-and-play view consistency module that enhances the alignment of the knowledge distilled from camera views. Extensive comparative and ablation studies across 11 heterogeneous LiDAR datasets validate our effectiveness and superiority. 
Additionally, we observe several interesting emerging properties by scaling up the 2D and 3D backbones during pretraining, shedding light on the future research of 3D foundation models for LiDAR-based perception. Code is publicly available at https://github.com/Xiangxu-0103/SuperFlow. + +Keywords: LiDAR Segmentation $\cdot$ 3D Data Pretraining $\cdot$ Autonomous Driving $\cdot$ Image-to-LiDAR Contrastive Learning $\cdot$ Semantic Superpixels + +# 1 Introduction + +Driving perception is one of the most crucial components of an autonomous vehicle system. Recent advancements in sensing technologies, such as light detection and ranging (LiDAR) sensors and surrounding-view cameras, open up new possibilities for a holistic, accurate, and 3D-aware scene perception [3,9,79]. + +Training a 3D perception model that can perform well in real-world scenarios often requires large-scale datasets and sufficient computing power [27,58]. Different from 2D, annotating 3D data is notably more expensive and labor-intensive, + +which hinders the scalability of existing 3D perception models [28,69,98,112]. Data representation learning serves as a potential solution to mitigate such a problem [6,76]. By designing suitable pretraining objectives, the models are anticipated to extract useful concepts from raw data, where such concepts can help improve models' performance on downstream tasks with fewer annotations [51]. + +Recently, Sautier et al. [82] proposed SLidR to distill knowledge from surrounding camera views - using a pretrained 2D backbone such as MoCo [14] and DINO [72] - to LiDAR point clouds, exhibiting promising 3D representation learning properties. The key to its success is the superpixel-driven contrastive objectives between cameras and LiDAR sensors. Subsequent works further extended this framework from various aspects, such as class balancing [66], hybrid-view distillation [110], semantic superpixels [11, 12, 61], and so on. While these methods showed improved performance over their baselines, there exist several issues that could undermine the data representation learning. + +![](images/1198c7f3899ffac5b4f62f30fa3305ba0aaee00132c41cf2e3ab3b3bec78a1c5.jpg) +Fig.1: Performance overview of SuperFlow compared to state-of-the-art image-to-LiDAR pretraining methods, i.e., Seal [61], SLidR [82], and PPKT [63], on eleven LiDAR datasets. The scores of prior methods are normalized based on SuperFlow's scores. The larger the area coverage, the better the overall segmentation performance. + +The first concern revolves around the inherent temporal dynamics of LiDAR data [4,8]. LiDAR point clouds are acquired sequentially, capturing the essence of motion within the scene. Traditional approaches [61,63,66,82,110] often overlook this temporal aspect, treating each snapshot as an isolated scan. However, this sequential nature holds a wealth of information that can significantly enrich the model's understanding of the 3D environment [71,96]. Utilizing these temporal cues can lead to more robust and context-aware 3D perception models, which is crucial for dynamic environments encountered in autonomous driving. + +Moreover, the varying density of LiDAR point clouds presents a unique challenge [45, 47, 94]. Due to the nature of LiDAR scanning and data acquisition, different areas within the same scene can have significantly different point densities, which can in turn affect the consistency of feature representation across the scene [2, 47, 108, 111]. 
Therefore, a model that can learn invariant features regardless of point cloud density tends to be effective for recognizing the structural and semantic information in the 3D space. + +In lieu of existing challenges, we propose a novel spatiotemporal contrastive learning dubbed SuperFlow to encourage effective cross-sensor knowledge dis + +tillation. Our approach features three key components, all centered around the use of the off-the-shelf temporal cues inherent in the LiDAR acquisition process: + +- We first introduce a straightforward yet effective view consistency alignment that seamlessly generates semantic superpixels with language guidance, alleviating the "self-conflict" issues in existing works [61,66,82]. As opposed to the previous pipeline, our method also aligns the semantics across camera views in consecutive scenes, paving the way for more sophisticated designs. + +- To address the varying density of LiDAR point clouds, we present a dense-to-sparse regularization module that encourages consistency between features of dense and sparse point clouds. Dense points are obtained by concatenating multi-sweep LiDAR scans within a suitable time window and propagating the semantic superpixels from sparse to dense points. By leveraging dense point features to regularize sparse point features, the model promotes insensitivity to point cloud density variations. + +- To capture useful temporal cues from consecutive scans across different timestamps, we design a flow-based contrastive learning module. This module takes multiple LiDAR-camera pairs as input and excites strong consistency between temporally shifted representations. Analogous to existing image-to-LiDAR representation learning methods [61,66,82], we also incorporate useful spatial contrastive objectives into our framework, setting a unified pipeline that emphasizes holistic representation learning from both the structural 3D layouts and the temporal 4D information. + +The strong spatiotemporal consistency regularization in SuperFlow effectively forms a semantically rich landscape that enhances data representations. As illustrated in Fig. 1, our approach achieves appealing performance gains over state-of-the-art 3D pretraining methods across a diverse spectrum of downstream tasks. Meanwhile, we also target at scaling the capacity of both 2D and 3D backbones during pretraining, shedding light on the future development of more robust, unified, and ubiquitous 3D perception models. + +To summarize, this work incorporates key contributions listed as follows: + +- We present SuperFlow, a novel framework aimed to harness consecutive LiDAR-camera pairs for establishing spatiotemporal pretraining objectives. + +- Our framework incorporates novel designs including view consistency alignment, dense-to-sparse regularization, and flow-based contrastive learning, which better encourages data representation learning effects between camera and LiDAR sensors across consecutive scans. + +- Our approach sets a new state-of-the-art performance across 11 LiDAR datasets, exhibiting strong robustness and generalizability. We also reveal intriguing emergent properties as we scale up the 2D and 3D backbones, which could lay the foundation for scalable 3D perception. + +# 2 Related Work + +LiDAR-based 3D Perception. The LiDAR sensor has been widely used in today's 3D perception systems, credited to its robust and structural sensing abl- + +ities [4, 88, 92]. 
Due to the sparse and unordered nature of LiDAR point clouds, suitable rasterization strategies are needed to convert them into structural inputs [37, 93]. Popular choices include sparse voxels [18, 19, 33, 34, 90, 118], bird's eye view maps [10, 56, 111, 117], range view images [17, 21, 44, 68, 104, 107, 116], and multi-view fusion [18, 40, 60, 62, 77, 105, 106]. While witnessing record-breaking performances on standard benchmarks, existing approaches rely heavily on human annotations, which hinders scalability [27]. In response to this challenge, we resort to newly appeared 3D representation learning, hoping to leverage the rich collections of unlabeled LiDAR point clouds for more effective learning from LiDAR data. This could further enrich the efficacy of LiDAR-based perception. + +Data-Efficient 3D Perception. To better save annotation budgets, previous efforts seek 3D perception in a data-efficient manner [11, 12, 27, 40, 46, 49]. One line of research resorts to weak supervision, e.g., seeding points [36, 53, 86, 115], active prompts [38, 57, 100], and scribbles [94], for weakly-supervised LiDAR semantic segmentation. Another line of research seeks semi-supervised learning approaches [47, 52, 91] to better tackle efficient 3D scene perception and achieve promising results. In this work, different from the prior pursuits, we tackle efficient 3D perception from the data representation learning perspective. We establish several LiDAR-based data representation learning settings that seamlessly combine pretraining with weakly- and semi-supervised learning, further enhancing the scalability of 3D perception systems. + +3D Representation Learning. Analog to 2D representation learning strategies [13,15,30,31,103], prior works designed contrastive [35,70,81,101,108,113], masked modeling [32,50,95], and reconstruction [7,67] objectives for 3D pretraining. Most early 3D representation learning approaches use a single modality for pretraining, leaving room for further development. The off-the-shelf calibrations among different types of sensors provide a promising solution for building pretraining objectives [63]. Recently, SLidR [82] has made the first contribution toward multi-modal 3D representation learning between camera and LiDAR sensors. Subsequent works [66,74,110] extended this framework with more advanced designs. Seal [61] leverages powerful vision foundation models [42,109,119,120] to better assist the contrastive learning across sensors. Puy et al. [75,76] conducted a comprehensive study on the distillation recipe for better pretraining effects. While these approaches have exhibited better performance than their baselines, they overlooked the rich temporal cues across consecutive scans, which might lead to sub-opt pretraining performance. In this work, we construct dense 3D representation learning objectives using calibrated LiDAR sequences. Our approach encourages the consistency between features from sparse to dense inputs and features across timestamps, yielding superiority over existing endeavors. + +4D Representation Learning. Leveraging consecutive scans is promising in extracting temporal relations [2, 23, 33, 85]. For point cloud data pretraining, prior works [16, 64, 83, 84, 114] mainly focused on applying 4D cues on object- and human-centric point clouds, which are often small in scale. For large-scale automotive point clouds, STRL [39] learns spatiotemporal data invariance with different spatial augmentations in the point cloud sequence. 
TARL [71] and + +STSSL [96] encourage similarities of point clusters in two consecutive frames, where such clusters are obtained by ground removal and clustering algorithms, i.e., RANSAC [25], Patchwork [55], and HDBSCAN [24]. BEVContrast [81] shares a similar motivation but utilizes BEV maps for contrastive learning, which yields a more effective implementation. The "one-fits-all" clustering parameters, however, are often difficult to obtain, hindering existing works. Different from existing methods that use a single modality for 4D representation learning, we propose to leverage LiDAR-camera correspondences and semantic-rich superpixels to establish meaningful multi-modality 4D pretraining objectives. + +# 3 SuperFlow + +In this section, we first revisit the common setups of the camera-to-LiDAR distillation baseline (cf. Sec. 3.1). We then elaborate on the technical details of SuperFlow, encompassing a straightforward yet effective view consistency alignment (cf. Sec. 3.2), a dense-to-sparse consistency regularization (cf. Sec. 3.3), and a flow-based spatiotemporal contrastive learning (cf. Sec. 3.4). The overall pipeline of the proposed SuperFlow framework is depicted in Fig. 4. + +# 3.1 Preliminaries + +Problem Definition. Given a point cloud $\mathcal{P}^t = \{\mathbf{p}_i^t, \mathbf{f}_i^t | i = 1, \dots, N\}$ with $N$ points captured by a LiDAR sensor at time $t$ , where $\mathbf{p}_i \in \mathbb{R}^3$ denotes the coordinate of the point and $\mathbf{f}_i \in \mathbb{R}^C$ is the corresponding feature, we aim to transfer knowledge from $M$ surrounding camera images $\mathcal{I}^t = \{\mathbf{I}_i^t | i = 1, \dots, M\}$ into the point cloud. Here, $\mathbf{I}_i \in \mathbb{R}^{H \times W \times 3}$ represents an image with height $H$ and width $W$ . Prior works [61, 82] generate a set of class-agnostic superpixels $\mathcal{X}_i = \{\mathbf{X}_i^j | j = 1, \dots, V\}$ for each image via the unsupervised SLIC algorithm [1] or the more recent vision foundation models (VFMs) [42, 119, 120], where $V$ denotes the total number of superpixels. Assuming that the point cloud $\mathcal{P}^t$ and images $\mathcal{I}^t$ are calibrated, the point cloud $\mathbf{p}_i = (x_i, y_i, z_i)$ can be then projected to the image plane $(u_i, v_i)$ using the following sensor calibration parameters: + +$$ +[ u _ {i}, v _ {i}, 1 ] ^ {\mathrm {T}} = \frac {1}{z _ {i}} \times \Gamma_ {K} \times \Gamma_ {c \leftarrow l} \times [ x _ {i}, y _ {i}, z _ {i} ] ^ {\mathrm {T}}, \tag {1} +$$ + +where $\Gamma_K$ denotes the camera intrinsic matrix and $\Gamma_{c\leftarrow l}$ is the transformation matrix from LiDAR sensors to surrounding-view cameras. We also obtain a set of superpoints $\mathcal{Y} = \{\mathbf{Y}^j | j = 1, \dots, V\}$ through this projection. + +Network Representations. Let $\mathcal{F}_{\theta_p}:\mathbb{R}^{N\times (3 + C)}\to \mathbb{R}^{N\times D}$ be a 3D backbone with trainable parameters $\theta_{p}$ , which takes LiDAR points as input and outputs $D$ -dimensional point features. Let $\mathcal{G}_{\theta_i}:\mathbb{R}^{H\times W\times 3}\to \mathbb{R}^{\frac{H}{S}\times \frac{W}{S}\times E}$ be an image backbone with pretrained parameters $\theta_{i}$ that takes images as input and outputs $E$ -dimensional image features with stride $S$ . 
Let $\mathcal{H}_{\omega_p}:\mathbb{R}^{N\times D}\to \mathbb{R}^{N\times L}$ and $\mathcal{H}_{\omega_i}:\mathbb{R}^{\frac{H}{S}\times \frac{W}{S}\times E}\to \mathbb{R}^{H\times W\times L}$ be linear heads with trainable parameters $\omega_{p}$ and $\omega_{i}$ , + +![](images/d6f78ecf544015a58501fc084c9b78db5f5dbc080295b048e04ffc6452db34c1.jpg) +(a) Heuristic + +![](images/a0575d5b2a37ee97764d9318e67aea6b2e6ebae92c7921259b2f9e2bb23d7ba0.jpg) +(b) Class Agnostic + +![](images/50c03ec25032693f317237d850c56bf109c008111e4ce182b074ba20ba1cd76b.jpg) +(c) View Consistent +Fig. 2: Comparisons of different superpixels. (a) Class-agnostic superpixels generated by the unsupervised SLIC [1] algorithm. (b) Class-agnostic semantic superpixels generated by vision foundation models (VFMs) [109, 119, 120]. (c) View-consistent semantic superpixels generated by our view consistency alignment module. + +which project backbone features to $L$ -dimensional features with $\ell_2$ -normalization and upsample image features to $H\times W$ with bilinear interpolation. + +Pretraining Objective. The overall objective of image-to-LiDAR representation learning [82] is to transfer knowledge from the trained image backbone $\mathcal{G}_{\theta_i}$ to the 3D backbone $\mathcal{F}_{\theta_p}$ . The superpixels $\mathcal{X}_i$ generated offline, serve as an intermediate to effectively guide the knowledge transfer process. + +# 3.2 View Consistency Alignment + +Motivation. The class-agnostic superpixels $\mathcal{X}_i$ used in prior works [61,66,82] are typically instance-level and do not consider their actual categories. As discussed in [66], instance-level superpixels can lead to "self-conflict" problems, which undermines the effectiveness of pretraining. + +Superpixel Comparisons. Fig. 2 compares superpixels generated via the unsupervised SLIC [1] and VFMs. SLIC [1] tends to over-segment objects, causing semantic conflicts. VFMs generate superpixels through a panoptic segmentation head, which can still lead to "self-conflict" in three conditions (see Fig. 2b): ① when the same object appears in different camera views, leading to different parts of the same object being treated as negative samples; ② when objects of the same category within the same camera view are treated as negative samples; ③ when objects across different camera views are treated as negative samples even if they share the same label. + +Semantic-Related Superpixels Generation. To address these issues, we propose generating semantic-related superpixels to ensure consistency across camera views. Contrastive Vision-Language Pre-training (CLIP) [78] has shown great generalization in few-shot learning. Building on existing VFMs [42,119,120], we employ CLIP's text encoder and fine-tune the last layer of the segmentation head from VFMs with predefined text prompts. This allows the segmentation head to generate language-guided semantic categories for each pixel, which we leverage as superpixels. As shown in Fig. 2c, we unify superpixels across camera + +views based on semantic category, alleviating the "self-conflict" problem in prior image-to-LiDAR contrastive learning pipelines. + +# 3.3 D2S: Dense-to-Sparse Consistency Regularization + +Motivation. LiDAR points are sparse and often incomplete, significantly restricting the efficacy of the cross-sensor feature representation learning process. 
In this work, we propose to tackle this challenge by combining multiple LiDAR scans within a suitable time window to create a dense point cloud, which is then used to encourage consistency with the sparse point cloud.

Point Cloud Concatenation. Specifically, given a keyframe point cloud $\mathcal{P}^t$ captured at time $t$ and a set of sweep point clouds $\{\mathcal{P}^s | s = 1, \dots, T\}$ captured at previous times $s$, we first transform the coordinates $(x^s, y^s, z^s)$ of the sweep point cloud $\mathcal{P}^s$ into the coordinate system of $\mathcal{P}^t$, since the two scans are recorded in different coordinate systems due to the vehicle's movement:

$$
[\tilde{x}^{s}, \tilde{y}^{s}, \tilde{z}^{s}]^{\mathrm{T}} = \Gamma_{t \leftarrow s} \times [x^{s}, y^{s}, z^{s}]^{\mathrm{T}}, \tag{2}
$$

where $\Gamma_{t \leftarrow s}$ denotes the transformation matrix from the sweep point cloud at time $s$ to the keyframe point cloud at time $t$. We then concatenate the transformed sweep points $\{\tilde{\mathcal{P}}^s | s = 1, \dots, T\}$ with $\mathcal{P}^t$ to obtain a dense point cloud $\mathcal{P}^d$. As shown in Fig. 3, $\mathcal{P}^d$ fuses temporal information from consecutive point clouds, resulting in a dense and semantically rich representation for feature learning.

![](images/b52e547912a4ed8aa7f1a04c2e0540c4daaa084ad61bafd7bf4f8239a0d546ef.jpg)
Fig. 3: Dense-to-sparse (D2S) consistency regularization module. Dense point clouds are obtained by combining multiple point clouds captured at different times. A D2S regularization is formulated by encouraging the consistency between dense features and sparse features.

Dense Superpoints. Meanwhile, we generate sets of superpoints $\mathcal{Y}^d$ and $\mathcal{Y}^t$ for $\mathcal{P}^d$ and $\mathcal{P}^t$, respectively, using the superpixels $\mathcal{X}^t$. Both $\mathcal{P}^t$ and $\mathcal{P}^d$ are fed into the weight-shared 3D network $\mathcal{F}_{\theta_p}$ and head $\mathcal{H}_{\omega_p}$ for feature extraction. The output features are grouped via average pooling based on the superpoint indices to obtain superpoint features $\mathbf{Q}^d \in \mathbb{R}^{V \times L}$ and $\mathbf{Q}^t \in \mathbb{R}^{V \times L}$. We expect $\mathbf{Q}^d$ and $\mathbf{Q}^t$ to share similar features, leading to the following D2S loss:

$$
\mathcal{L}_{\mathrm{d2s}} = \frac{1}{V} \sum_{i=1}^{V} \left(1 - <\mathbf{q}_{i}^{t}, \mathbf{q}_{i}^{d}>\right), \tag{3}
$$

where $<\cdot, \cdot>$ denotes the scalar product that measures the similarity of features.

![](images/22d2b1936af646bca2a681273d837a052b79ea967189ee36a0423cb3177ac22a.jpg)
Fig. 4: Flow-based contrastive learning (FCL) pipeline. FCL takes multiple LiDAR-camera pairs from consecutive scans as input. Based on temporally aligned semantic superpixels and superpoints, two contrastive learning objectives are formulated: 1) spatial contrastive learning between each LiDAR-camera pair $(\mathcal{L}_{\mathrm{sc}})$, and 2) temporal contrastive learning among consecutive LiDAR point clouds across scenes $(\mathcal{L}_{\mathrm{tc}})$.

# 3.4 FCL: Flow-Based Contrastive Learning

Motivation. LiDAR point clouds are acquired sequentially, embedding rich dynamic scene information across consecutive timestamps. Prior works [61, 66, 82] primarily focused on single LiDAR scans, overlooking the consistency of moving objects across scenes.
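Before detailing FCL, a minimal sketch of the D2S module in Eqs. (2)–(3) is given below. It assumes that points without a visible superpixel have already been filtered out and that the pooled features are re-normalized (our choice, not stated in the text); all names are illustrative.

```python
import torch
import torch.nn.functional as F

def build_dense_cloud(keyframe_xyz, sweep_xyz_list, T_key_from_sweep_list):
    """Eq. (2): map every sweep into the keyframe coordinate system and concatenate."""
    clouds = [keyframe_xyz]
    for xyz, T in zip(sweep_xyz_list, T_key_from_sweep_list):   # T: (4, 4) ego-motion transform
        xyz_h = torch.cat([xyz, torch.ones_like(xyz[:, :1])], dim=1)
        clouds.append((T @ xyz_h.T).T[:, :3])
    return torch.cat(clouds, dim=0)                              # dense cloud P^d

def pool_superpoints(point_feats, superpoint_ids, num_superpoints):
    """Average-pool per-point features (N, L) into superpoint features (V, L)."""
    pooled = point_feats.new_zeros(num_superpoints, point_feats.shape[1])
    counts = point_feats.new_zeros(num_superpoints, 1)
    pooled.index_add_(0, superpoint_ids, point_feats)
    counts.index_add_(0, superpoint_ids, torch.ones_like(point_feats[:, :1]))
    return F.normalize(pooled / counts.clamp(min=1.0), dim=1)    # re-normalization is an assumption

def d2s_loss(q_sparse, q_dense):
    """Eq. (3): penalize dissimilarity between sparse and dense superpoint features."""
    return (1.0 - (q_sparse * q_dense).sum(dim=1)).mean()
```

Since the dense branch shares weights with the sparse branch, the D2S term only requires the two pooled feature sets and introduces no additional parameters.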
![](images/22d2b1936af646bca2a681273d837a052b79ea967189ee36a0423cb3177ac22a.jpg)
Fig. 4: Flow-based contrastive learning (FCL) pipeline. FCL takes multiple LiDAR-camera pairs from consecutive scans as input. Based on temporally aligned semantic superpixels and superpoints, two contrastive learning objectives are formulated: 1) spatial contrastive learning between each LiDAR-camera pair ($\mathcal{L}_{\mathrm{sc}}$), and 2) temporal contrastive learning among consecutive LiDAR point clouds across scenes ($\mathcal{L}_{\mathrm{tc}}$).

# 3.4 FCL: Flow-Based Contrastive Learning

Motivation. LiDAR point clouds are acquired sequentially, embedding rich dynamic scene information across consecutive timestamps. Prior works [61, 66, 82] primarily focused on single LiDAR scans, overlooking the consistency of moving objects across scenes. To address this limitation, we propose flow-based contrastive learning (FCL) across sequential LiDAR scenes to encourage spatiotemporal consistency.

Spatial Contrastive Learning. Our framework, depicted in Fig. 4, takes three LiDAR-camera pairs from different timestamps within a suitable time window as input, i.e., $\{(\mathcal{P}^t,\mathcal{I}^t),(\mathcal{P}^{t + \Delta t},\mathcal{I}^{t + \Delta t}),(\mathcal{P}^{t - \Delta t},\mathcal{I}^{t - \Delta t})\}$, where timestamp $t$ denotes the current scene and $\Delta t$ is the timespan. Following previous works [61,82], we first distill knowledge from the 2D network into the 3D network for each scene separately. Taking $(\mathcal{P}^t,\mathcal{I}^t)$ as an example, $\mathcal{P}^t$ and $\mathcal{I}^t$ are fed into the 3D and 2D networks to extract per-point and image features. The output features are then grouped via average pooling based on the superpoints $\mathcal{Y}^t$ and superpixels $\mathcal{X}^t$ to obtain superpoint features $\mathbf{Q}^t$ and superpixel features $\mathbf{K}^t$. A spatial contrastive loss is formulated to constrain the 3D representations with pretrained 2D prior knowledge. This process is formulated as follows:

$$
\mathcal{L}_{\mathrm{sc}} = -\frac{1}{V} \sum_{i=1}^{V} \log \left[\frac{e^{\langle \mathbf{q}_{i}, \mathbf{k}_{i} \rangle / \tau}}{\sum_{j \neq i} e^{\langle \mathbf{q}_{i}, \mathbf{k}_{j} \rangle / \tau} + e^{\langle \mathbf{q}_{i}, \mathbf{k}_{i} \rangle / \tau}}\right], \tag{4}
$$

where $\tau > 0$ is a temperature parameter that controls the smoothness of the distillation.

Flow-Based Contrastive Learning. The spatial contrastive learning objective between images and point clouds, as depicted in Eq. (4), fails to ensure that moving objects share similar attributes across different scenes. To maintain consistency across scenes, a temporal consistency loss is introduced among superpoint features from different scenes. For the point clouds $\mathcal{P}^t$ and $\mathcal{P}^{t + \Delta t}$, the corresponding superpoint features $\mathbf{Q}^t$ and $\mathbf{Q}^{t + \Delta t}$ are obtained via their superpoints. The temporal contrastive loss operates on $\mathbf{Q}^t$ and $\mathbf{Q}^{t + \Delta t}$:

$$
\mathcal{L}_{\mathrm{tc}}^{t \leftarrow t + \Delta t} = -\frac{1}{V} \sum_{i=1}^{V} \log \left[\frac{e^{\langle \mathbf{q}_{i}^{t}, \mathbf{q}_{i}^{t + \Delta t} \rangle / \tau}}{\sum_{j \neq i} e^{\langle \mathbf{q}_{i}^{t}, \mathbf{q}_{j}^{t + \Delta t} \rangle / \tau} + e^{\langle \mathbf{q}_{i}^{t}, \mathbf{q}_{i}^{t + \Delta t} \rangle / \tau}}\right]. \tag{5}
$$

The same loss is also applied between $\mathbf{Q}^t$ and $\mathbf{Q}^{t - \Delta t}$. This approach enables point features at time $t$ to extract more context-aware information across scenes.
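Since the denominators in Eq. (4) and Eq. (5) sum over the positive pair plus all negatives, both losses reduce to a standard InfoNCE objective over a $V \times V$ similarity matrix. A minimal sketch follows (ours, not the reference implementation; the value of $\tau$ is a placeholder):

```python
import torch
import torch.nn.functional as F

def info_nce(anchor, positive, tau=0.07):
    """Shared form of Eq. (4) and Eq. (5). `anchor` and `positive` are (V, L)
    l2-normalized feature matrices whose i-th rows form a matched pair; every
    other row of `positive` acts as a negative for anchor i."""
    logits = anchor @ positive.T / tau                      # (V, V) similarities
    labels = torch.arange(anchor.size(0), device=anchor.device)
    return F.cross_entropy(logits, labels)                  # -log softmax diagonal

# Spatial term, Eq. (4): superpoints against superpixels of the same scene.
#   loss_sc = info_nce(Q_t, K_t)
# Temporal terms, Eq. (5): superpoints across scenes, applied in both directions.
#   loss_tc = info_nce(Q_t, Q_t_plus) + info_nce(Q_t, Q_t_minus)
```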
# 4 Experiments

# 4.1 Settings

Data. We follow the seminal works SLidR [82] and Seal [61] when preparing the datasets. A total of eleven datasets are used in our experiments, including nuScenes [26], SemanticKITTI [5], Waymo Open [89], ScribbleKITTI [94], RELLIS-3D [41], SemanticPOSS [73], SemanticSTF [99], SynLiDAR [97], DAPS-3D [43], Synth4D [80], and Robo3D [45]. Due to space limits, please refer to the Appendix and [61, 82] for additional details about these datasets.

Implementation Details. SuperFlow is implemented using the MMDetection3D [20] and OpenPCSeg [59] codebases. Consistent with prior works [61,82], we employ MinkUNet [19] as the 3D backbone and DINOv2 [72] (with ViT backbones [22]) as the 2D backbone, distilling from three variants: small (S), base (B), and large (L). Following Seal [61], OpenSeeD [109] is used to generate semantic superpixels. The framework is pretrained end-to-end on 600 scenes from nuScenes [26], then linearly probed and fine-tuned on nuScenes [26] according to the data splits in SLidR [82]. The domain generalization study adheres to the same configurations as Seal [61] for the other ten datasets. Both the baselines and SuperFlow are pretrained using eight GPUs for 50 epochs, while the linear probing and downstream fine-tuning experiments use four GPUs for 100 epochs, all utilizing the AdamW optimizer [65] and the OneCycle scheduler [87]. Due to space limits, please refer to the Appendix for additional implementation details.

Evaluation Protocols. Following conventions, we report the Intersection-over-Union (IoU) on each semantic class and the mean IoU (mIoU) over all classes for downstream tasks. For 3D robustness evaluations, we follow Robo3D [45] and report the mean Corruption Error (mCE) and mean Resilience Rate (mRR).

# 4.2 Comparative Study

Linear Probing. We start by investigating the pretraining quality via linear probing. For this setup, we initialize the 3D backbone $\mathcal{F}_{\theta_p}$ with the pretrained parameters and fine-tune only the newly added segmentation head; a minimal sketch of this setup is given after Tab. 1. The results are shown in Tab. 1.

Table 1: Comparisons of state-of-the-art pretraining methods pretrained on nuScenes [26] and fine-tuned on nuScenes [26], SemanticKITTI [5], and Waymo Open [89] with specified data portions, respectively. All methods use MinkUNet [19] as the 3D semantic segmentation backbone. LP denotes linear probing with a frozen backbone. All scores are given in percentage (%). Best scores in each configuration are shaded with colors.
| Method | Venue | Distill | nuScenes LP | nuScenes 1% | nuScenes 5% | nuScenes 10% | nuScenes 25% | nuScenes Full | KITTI 1% | Waymo 1% |
| :-- | :-- | :-- | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
| Random | - | - | 8.10 | 30.30 | 47.84 | 56.15 | 65.48 | 74.66 | 39.50 | 39.41 |
| PointContrast [101] | ECCV'20 | None | 21.90 | 32.50 | - | - | - | - | 41.10 | - |
| DepthContrast [113] | ICCV'21 | None | 22.10 | 31.70 | - | - | - | - | 41.50 | - |
| ALSO [7] | CVPR'23 | None | - | 37.70 | - | 59.40 | - | 72.00 | - | - |
| BEVContrast [81] | 3DV'24 | None | - | 38.30 | - | 59.60 | - | 72.30 | - | - |
| PPKT [63] | arXiv'21 | ResNet | 35.90 | 37.80 | 53.74 | 60.25 | 67.14 | 74.52 | 44.00 | 47.60 |
| SLidR [82] | CVPR'22 | ResNet | 38.80 | 38.30 | 52.49 | 59.84 | 66.91 | 74.79 | 44.60 | 47.12 |
| ST-SLidR [66] | CVPR'23 | ResNet | 40.48 | 40.75 | 54.69 | 60.75 | 67.70 | 75.14 | 44.72 | 44.93 |
| TriCC [74] | CVPR'23 | ResNet | 38.00 | 41.20 | 54.10 | 60.40 | 67.60 | 75.60 | 45.90 | - |
| Seal [61] | NeurIPS'23 | ResNet | 44.95 | 45.84 | 55.64 | 62.97 | 68.41 | 75.60 | 46.63 | 49.34 |
| HVDistill [110] | IJCV'24 | ResNet | 39.50 | 42.70 | 56.60 | 62.90 | 69.30 | 76.60 | 49.70 | - |
| PPKT [63] | arXiv'21 | ViT-S | 38.60 | 40.60 | 52.06 | 59.99 | 65.76 | 73.97 | 43.25 | 47.44 |
| SLidR [82] | CVPR'22 | ViT-S | 44.70 | 41.16 | 53.65 | 61.47 | 66.71 | 74.20 | 44.67 | 47.57 |
| Seal [61] | NeurIPS'23 | ViT-S | 45.16 | 44.27 | 55.13 | 62.46 | 67.64 | 75.58 | 46.51 | 48.67 |
| SuperFlow | Ours | ViT-S | 46.44 | 47.81 | 59.44 | 64.47 | 69.20 | 76.54 | 47.97 | 49.94 |
| PPKT [63] | arXiv'21 | ViT-B | 39.95 | 40.91 | 53.21 | 60.87 | 66.22 | 74.07 | 44.09 | 47.57 |
| SLidR [82] | CVPR'22 | ViT-B | 45.35 | 41.64 | 55.83 | 62.68 | 67.61 | 74.98 | 45.50 | 48.32 |
| Seal [61] | NeurIPS'23 | ViT-B | 46.59 | 45.98 | 57.15 | 62.79 | 68.18 | 75.41 | 47.24 | 48.91 |
| SuperFlow | Ours | ViT-B | 47.66 | 48.09 | 59.66 | 64.52 | 69.79 | 76.57 | 48.40 | 50.20 |
| PPKT [63] | arXiv'21 | ViT-L | 41.57 | 42.05 | 55.75 | 61.26 | 66.88 | 74.33 | 45.87 | 47.82 |
| SLidR [82] | CVPR'22 | ViT-L | 45.70 | 42.77 | 57.45 | 63.20 | 68.13 | 75.51 | 47.01 | 48.60 |
| Seal [61] | NeurIPS'23 | ViT-L | 46.81 | 46.27 | 58.14 | 63.27 | 68.67 | 75.66 | 47.55 | 50.02 |
| SuperFlow | Ours | ViT-L | 48.01 | 49.95 | 60.72 | 65.09 | 70.01 | 77.19 | 49.07 | 50.67 |
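To make the linear probing protocol concrete, here is a minimal sketch (our illustration, with hypothetical names): the pretrained 3D backbone is frozen and only a point-wise linear classification head is optimized.

```python
import torch

def build_linear_probe(backbone_3d, feat_dim, num_classes):
    """Freeze the pretrained 3D backbone and train only a point-wise linear
    classification head, following the LP protocol in Tab. 1."""
    backbone_3d.eval()                       # keep normalization statistics fixed
    for p in backbone_3d.parameters():
        p.requires_grad = False              # pretrained weights stay frozen
    head = torch.nn.Linear(feat_dim, num_classes)
    # The paper trains with AdamW [65] and a OneCycle schedule [87];
    # the learning rate below is a placeholder, not the paper's setting.
    optimizer = torch.optim.AdamW(head.parameters(), lr=1e-3)
    return head, optimizer
```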
SuperFlow consistently outperforms state-of-the-art methods under diverse configurations. We attribute this to the use of temporal consistency learning, which captures the structurally rich temporal cues across consecutive scenes and enhances the semantic representation learning of the 3D backbone. We also observe improved performance with larger 2D networks (i.e., from ViT-S to ViT-L), revealing a promising direction toward higher-quality 3D pretraining.

Downstream Fine-Tuning. It is known that data representation learning can mitigate the need for large-scale human annotations. Our study systematically compares SuperFlow with prior works on three popular datasets, nuScenes [26], SemanticKITTI [5], and Waymo Open [89], under limited annotations for few-shot fine-tuning. From Tab. 1, we observe that SuperFlow achieves promising performance gains on all three datasets across all fine-tuning tasks. We also use the pretrained 3D backbone as the initialization for the fully supervised learning study on nuScenes [26]. As can be seen from Tab. 1, models pretrained via representation learning consistently outperform their randomly initialized counterparts, highlighting the efficacy of data pretraining. We also find that distillation from larger 2D networks yields consistent improvements.

Cross-Domain Generalization. To verify the strong generalizability of SuperFlow, we conduct a comprehensive study using seven diverse LiDAR datasets and show the results in Tab. 2.

Table 2: Domain generalization study of different pretraining methods pretrained on the nuScenes [26] dataset and fine-tuned on seven other heterogeneous 3D semantic segmentation datasets with specified data portions, respectively. All scores are given in percentage (%). Best scores in each configuration are shaded with colors.
| Method | ScKITTI 1% | ScKITTI 10% | Rellis-3D 1% | Rellis-3D 10% | SemPOSS Half | SemPOSS Full | SemSTF Half | SemSTF Full | SynLiDAR 1% | SynLiDAR 10% | DAPS-3D Half | DAPS-3D Full | Synth4D 1% | Synth4D 10% |
| :-- | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
| Random | 23.81 | 47.60 | 38.46 | 53.60 | 46.26 | 54.12 | 48.03 | 48.15 | 19.89 | 44.74 | 74.32 | 79.38 | 20.22 | 66.87 |
| PPKT [63] | 36.50 | 51.67 | 49.71 | 54.33 | 50.18 | 56.00 | 50.92 | 54.69 | 37.57 | 46.48 | 78.90 | 84.00 | 61.10 | 62.41 |
| SLidR [82] | 39.60 | 50.45 | 49.75 | 54.57 | 51.56 | 55.36 | 52.01 | 54.35 | 42.05 | 47.84 | 81.00 | 85.40 | 63.10 | 62.67 |
| Seal [61] | 40.64 | 52.77 | 51.09 | 55.03 | 53.26 | 56.89 | 53.46 | 55.36 | 43.58 | 49.26 | 81.88 | 85.90 | 64.50 | 66.96 |
| SuperFlow | 42.70 | 54.00 | 52.83 | 55.71 | 54.41 | 57.33 | 54.72 | 56.57 | 44.85 | 51.38 | 82.43 | 86.21 | 65.31 | 69.43 |
Table 3: Out-of-distribution 3D robustness study of state-of-the-art pretraining methods under corruption and sensor failure scenarios in the nuScenes-$C$ dataset from the Robo3D benchmark [45]. Full denotes fine-tuning with full labels. LP denotes linear probing with a frozen backbone. All mCE ($\downarrow$), mRR ($\uparrow$), and mIoU ($\uparrow$) scores are given in percentage (%). Best scores in each configuration are shaded with colors.
| # | Initial | Backbone | mCE | mRR | Fog | Rain | Snow | Blur | Beam | Cross | Echo | Sensor | Avg |
| :-- | :-- | :-- | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
| Full | Random | MinkU-18 | 115.61 | 70.85 | 53.90 | 71.10 | 48.22 | 51.85 | 62.21 | 37.73 | 57.47 | 38.97 | 52.68 |
|  | SuperFlow | MinkU-18 | 109.00 | 75.66 | 54.95 | 72.79 | 49.56 | 57.68 | 62.82 | 42.45 | 59.61 | 41.77 | 55.21 |
|  | Random | MinkU-34 | 112.20 | 72.57 | 62.96 | 70.65 | 55.48 | 51.71 | 62.01 | 31.56 | 59.64 | 39.41 | 54.18 |
|  | PPKT [63] | MinkU-34 | 105.64 | 75.87 | 64.01 | 72.18 | 59.08 | 57.17 | 63.88 | 36.34 | 60.59 | 39.57 | 56.60 |
|  | SLidR [82] | MinkU-34 | 106.08 | 75.99 | 65.41 | 72.31 | 56.01 | 56.07 | 62.87 | 41.94 | 61.16 | 38.90 | 56.83 |
|  | Seal [61] | MinkU-34 | 92.63 | 83.08 | 72.66 | 74.31 | 66.22 | 66.14 | 65.96 | 57.44 | 59.87 | 39.85 | 62.81 |
|  | SuperFlow | MinkU-34 | 91.67 | 83.17 | 70.32 | 75.77 | 65.41 | 61.05 | 68.09 | 60.02 | 58.36 | 50.41 | 63.68 |
|  | Random | MinkU-50 | 113.76 | 72.81 | 49.95 | 71.16 | 45.36 | 55.55 | 62.84 | 36.94 | 59.12 | 43.15 | 53.01 |
|  | SuperFlow | MinkU-50 | 107.35 | 74.02 | 54.36 | 73.08 | 50.07 | 56.92 | 64.05 | 38.10 | 62.02 | 47.02 | 55.70 |
|  | Random | MinkU-101 | 109.10 | 74.07 | 50.45 | 73.02 | 48.85 | 58.48 | 64.18 | 43.86 | 59.82 | 41.47 | 55.02 |
|  | SuperFlow | MinkU-101 | 96.44 | 78.57 | 56.92 | 76.29 | 54.70 | 59.35 | 71.89 | 55.13 | 60.27 | 51.60 | 60.77 |
| LP | PPKT [63] | MinkU-34 | 183.44 | 78.15 | 30.65 | 35.42 | 28.12 | 29.21 | 32.82 | 19.52 | 28.01 | 20.71 | 28.06 |
|  | SLidR [82] | MinkU-34 | 179.38 | 77.18 | 34.88 | 38.09 | 32.64 | 26.44 | 33.73 | 20.81 | 31.54 | 21.44 | 29.95 |
|  | Seal [61] | MinkU-34 | 166.18 | 75.38 | 37.33 | 42.77 | 29.93 | 37.73 | 40.32 | 20.31 | 37.73 | 24.94 | 33.88 |
|  | SuperFlow | MinkU-34 | 161.78 | 75.52 | 37.59 | 43.42 | 37.60 | 39.57 | 41.40 | 23.64 | 38.03 | 26.69 | 35.99 |
It is worth noting that these datasets are collected under different acquisition and annotation conditions, including adverse weather, weak annotations, synthetic collection, and dynamic objects. For all fourteen domain generalization fine-tuning tasks, SuperFlow exhibits superior performance over the prior arts [61,63,82]. This study strongly verifies the effectiveness of the proposed flow-based contrastive learning for image-to-LiDAR data representation learning.

Out-of-Distribution Robustness. The robustness of 3D perception models against previously unseen conditions directly correlates with their applicability to real-world scenarios [29, 48, 54, 102]. We compare SuperFlow with prior models on the nuScenes-$C$ dataset from the Robo3D benchmark [45] and show the results in Tab. 3. We observe that models pretrained using SuperFlow exhibit improved robustness over their randomly initialized counterparts. Besides, we find that 3D networks with different capacities often exhibit different degrees of robustness.

Qualitative Assessments. We visualize the prediction results fine-tuned on nuScenes [26], SemanticKITTI [5], and Waymo Open [89], compared with random initialization, SLidR [82], and Seal [61] (see Fig. 5).

Table 4: Ablation study of SuperFlow using different # of sweeps. All methods use ViT-B [72] for distillation. All scores are given in percentage (%). Baseline results are shaded with colors.
| # Sweeps | nuScenes LP | nuScenes 1% | KITTI 1% | Waymo 1% |
| :-- | :-: | :-: | :-: | :-: |
| 1× Sweeps | 47.41 | 47.52 | 48.14 | 49.31 |
| 2× Sweeps | 47.66 | 48.09 | 48.40 | 50.20 |
| 5× Sweeps | 47.23 | 48.00 | 47.94 | 49.14 |
| 7× Sweeps | 46.03 | 47.98 | 46.83 | 47.97 |
Table 5: Ablation study of SuperFlow on the network capacity (# params) of 3D backbones. All methods use ViT-B [72] for distillation. All scores are given in percentage (%). Baseline results are shaded with colors.
| Backbone | Layers | nuScenes LP | nuScenes 1% | KITTI 1% | Waymo 1% |
| :-- | :-: | :-: | :-: | :-: | :-: |
| MinkUNet | 18 | 47.20 | 47.70 | 48.04 | 49.24 |
| MinkUNet | 34 | 47.66 | 48.09 | 48.40 | 50.20 |
| MinkUNet | 50 | 54.11 | 52.86 | 49.22 | 51.20 |
| MinkUNet | 101 | 52.56 | 51.19 | 48.51 | 50.01 |
![](images/f4be9d88cc20bc6d06a3a363feb9e31082ded5b164315a623c796c9ade2ef37a.jpg)
Fig. 5: Qualitative assessments of state-of-the-art pretraining methods pretrained on nuScenes [26] and fine-tuned on nuScenes [26], SemanticKITTI [5], and Waymo Open [89], with 1% annotations. The error maps show the correct and incorrect predictions in gray and red, respectively. Best viewed in colors and zoomed-in for details.

As shown in Fig. 5, SuperFlow performs well, especially on background classes such as "road" and "sidewalk", in complex scenarios.

# 4.3 Ablation Study

In this section, we aim to understand the efficacy of each design in our SuperFlow framework. Unless otherwise specified, we adopt MinkUNet-34 [19] and ViT-B [72] as the 3D and 2D backbones, respectively, throughout this study.

3D Network Capacity. Existing 3D backbones are relatively small in scale compared to their 2D counterparts. We study the scale of the 3D network and show the results in Tab. 5. We observe improved performance as the network capacity scales up, except for MinkUNet-101 [19]. We conjecture that models with limited parameters are less effective at capturing patterns during representation learning while, conversely, models with a large set of trainable parameters tend to be difficult to converge.

Table 6: Ablation study of each component in SuperFlow. All variants use MinkUNet-34 [19] as the 3D backbone and ViT-B [72] for distillation. VC: View consistency. D2S: Dense-to-sparse regularization. FCL: Flow-based contrastive learning. All scores are given in percentage (%).
| # | VC | D2S | FCL | nuScenes LP | nuScenes 1% | KITTI 1% | Waymo 1% |
| :-- | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
| - | Random |  |  | 8.10 | 30.30 | 39.50 | 39.41 |
| (a) | ✗ | ✗ | ✗ | 44.65 | 44.47 | 46.65 | 47.77 |
| (b) | ✓ | ✗ | ✗ | 45.57 | 45.21 | 46.87 | 48.01 |
| (c) | ✓ | ✓ | ✗ | 46.17 | 46.91 | 47.26 | 49.01 |
| (d) | ✓ | ✗ | ✓ | 47.24 | 47.67 | 48.21 | 49.80 |
| (e) | ✓ | ✓ | ✓ | 47.66 | 48.09 | 48.40 | 50.20 |
Table 7: Ablation study on spatiotemporal consistency. All variants use MinkUNet-34 [19] as the 3D backbone and ViT-B [72] for distillation. 0 denotes the current timestamp. 0.5s corresponds to a 20Hz timespan. All scores are given in percentage (%).
| Timespan | nuScenes LP | nuScenes 1% | KITTI 1% | Waymo 1% |
| :-- | :-: | :-: | :-: | :-: |
| Single-Frame | 46.17 | 46.91 | 47.26 | 49.01 |
| 0, -0.5s | 46.39 | 47.08 | 47.99 | 49.78 |
| -0.5s, 0, +0.5s | 47.66 | 48.09 | 48.40 | 50.20 |
| -1.0s, 0, +1.0s | 47.60 | 47.99 | 48.43 | 50.18 |
| -1.5s, 0, +1.5s | 46.43 | 48.27 | 48.34 | 49.93 |
| -2.0s, 0, +2.0s | 46.20 | 48.49 | 48.18 | 50.01 |
Representation Density. The consistency regularization between sparse and dense point clouds encourages useful representation learning. To analyze the degree of regularization, we investigate various point cloud densities and show the results in Tab. 4. We observe that a suitable point cloud density improves the model's feature representation ability. When too many sweeps are aggregated, however, the motion of objects in the scene becomes pronounced; since the superpoints of the dense points are generated from superpixels captured at the timestamp of the sparse points, the displacement of dynamic objects causes projection misalignment. A good trade-off is therefore two or three sweeps.

Temporal Consistency. The ability to capture semantically coherent temporal cues is crucial in our SuperFlow framework. In Eq. (5), we apply temporal contrastive learning to superpoint features across scenes. As shown in Tab. 7, temporal contrastive learning achieves better results than single-frame alternatives. We also compare the number of frames used to capture temporal cues: using three frames acquires more context-aware information than two and achieves better results. Finally, we study the impact of the timespan between frames. Performance drops with longer timespans; we conjecture that scenes with short timespans are more consistent, while long timespans introduce more uncertain factors.

Component Analysis. In Tab. 6, we analyze each component in the SuperFlow framework, including view consistency, dense-to-sparse regularization, and flow-based contrastive learning. The baseline is SLidR [82] with VFM-based superpixels. View consistency brings slight improvements across the popular datasets with few annotations. D2S distills dense features into sparse features, bringing about 1% mIoU gains. FCL extracts temporal cues via temporal contrastive learning, leading to about 2.0% mIoU gains.

Visual Inspections. The similarity maps presented in Fig. 6 illustrate the segmentation ability of our pretrained model. The query points include "car", "manmade", "sidewalk", "vegetation", "driveable surface", and "terrain". SuperFlow shows strong semantic discriminative ability without fine-tuning. We conjecture that this comes from three aspects: 1) view-consistent superpixels enable the network to learn semantic representations; 2) dense-to-sparse regularization enhances the network's ability to learn features across varying densities; and 3) temporal contrastive learning extracts semantic cues across scenes.

![](images/12bc1af94125515fe8051a70711c3ff20bdc4863433c7582ef4dccbea1b86ce2.jpg)
(a) "car" (3D)

![](images/fb0e28d4d6c13aaa80ccc3373c14b356ca27a471b46636f037b077d7ae892728.jpg)
(b) "manmade" (3D)

![](images/b6f43375c411a39ae209a34edb715b422dbf5ebe2cad8e5d5edf47e96649d7ef.jpg)

![](images/6f4855312f348390127186ae934af7372a4890336ae6ec836de8ddf26685b32e.jpg)
(d) "car" (2D)

![](images/3fdf1159b556ec7132d4c18c87d23b73b04cbab10c84428d9280dcf0a065ed4c.jpg)
(e) "manmade" (2D)

![](images/d78f6d33c4c9b14ebe6bb966101a2e602206ca40d62d1279e38e533a6ff4556c.jpg)
(c) "sidewalk" (3D)

![](images/06f1520cfe6c3acf8a78f2f5cfbf805329812e90e08b8d457ef2ed6e61b6fa2b.jpg)

![](images/a46f1f5da693229841ddb3ecf854bdafedbeb44368d7508aec878ebda3e629ed.jpg)

![](images/2f515a9dfd96c708f7d7559bf86b7a5e6f0fe21524266fe90d9d7e78185e290b.jpg)
(f) "sidewalk" (2D)
(i) "terrain" (3D)

![](images/a24a5d71cc50cab9fe2a8266fbc4ea2ca49f68c1fbc4d0faa5952b776f76e50c.jpg)
(g) "vegetation" (3D)
(j) "vegetation" (2D)
![](images/9d0bb47d746a504c47701d6a3948d9f767d00fce5330b9c770273c0e227a548a.jpg)
(h) "driveable surface" (3D)
(k) "driveable surface" (2D)

![](images/09c89e52a9980fd253975c6cc38438de5e90b29a9cd3e1aaef25e6cf8c64cf1b.jpg)
(l) "terrain" (2D)

Fig. 6: Cosine similarity between the features of a query point (red dot) and: 1) the features of other points projected onto the image (the 1st and 3rd rows); and 2) the features of an image of the same scene (the 2nd and 4th rows). The color goes from red to blue, denoting low and high similarity scores, respectively. Best viewed in color.

# 5 Conclusion

In this work, we presented SuperFlow to tackle the challenge of 3D data representation learning. Motivated by the sequential nature of LiDAR acquisition, we proposed three novel designs to better encourage spatiotemporal consistency, encompassing view consistency alignment, dense-to-sparse regularization, and flow-based contrastive learning. Extensive experiments across 11 diverse LiDAR datasets showed that SuperFlow consistently outperforms prior approaches in linear probing, downstream fine-tuning, and robustness probing. Our study on scaling up the 2D and 3D network capacities reveals insightful findings. We hope this work can shed light on future designs of powerful 3D foundation models.

Acknowledgements. This work was supported by the Scientific and Technological Innovation 2030 - "New Generation Artificial Intelligence" Major Project (No. 2021ZD0112200), the Joint Funds of the National Natural Science Foundation of China (No. U21B2044), the Key Research and Development Program of Jiangsu Province (No. BE2023016-3), and the Talent Research Start-up Foundation of Nanjing University of Posts and Telecommunications (No. NY223172). This work was also supported by the Ministry of Education, Singapore, under its MOE AcRF Tier 2 (MOET2EP20221-0012), NTU NAP, and under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding Initiative, as well as cash and in-kind contributions from the industry partner(s).

# References

1. Achanta, R., Shaji, A., Smith, K., Lucchi, A., Fua, P., Susstrunk, S.: Slic superpixels compared to state-of-the-art superpixel methods. IEEE Transactions on Pattern Analysis and Machine Intelligence 34(11), 2274-2282 (2012)
2. Aygun, M., Osep, A., Weber, M., Maximov, M., Stachniss, C., Behley, J., Leal-Taixe, L.: 4d panoptic lidar segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5527-5537 (2021)
3. Badue, C., Guidolini, R., Carneiro, R.V., Azevedo, P., Cardoso, V.B., Forechi, A., Jesus, L., Berriel, R., Paixão, T.M., Mutz, F., de Paula Veronese, L., Oliveira-Santos, T., Souza, A.F.D.: Self-driving cars: A survey. Expert Systems with Applications 165, 113816 (2021)
4. Behley, J., Garbade, M., Milioto, A., Quenzel, J., Behnke, S., Gall, J., Stachniss, C.: Towards 3d lidar-based semantic scene understanding of 3d point cloud sequences: The semantickitti dataset. International Journal of Robotics Research 40, 959-96 (2021)
5.
Behley, J., Garbade, M., Milioto, A., Quenzel, J., Behnke, S., Stachniss, C., Gall, J.: Semantickitti: A dataset for semantic scene understanding of lidar sequences. In: IEEE/CVF International Conference on Computer Vision. pp. 9297-9307 (2019) +6. Bengio, Y., Courville, A., Vincent, P.: Representation learning: A review and new perspectives. IEEE Transactions on Pattern Analysis and Machine Intelligence 35(8), 1798-1828 (2013) +7. Boulch, A., Sautier, C., Michele, B., Puy, G., Marlet, R.: Also: Automotive lidar self-supervision by occupancy estimation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13455-13465 (2023) +8. Caesar, H., Bankiti, V., Lang, A.H., Vora, S., Liong, V.E., Xu, Q., Krishnan, A., Pan, Y., Baldan, G., Beijbom, O.: nuscenes: A multimodal dataset for autonomous driving. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11621-11631 (2020) +9. Cao, A.Q., Dai, A., de Charette, R.: Pasco: Urban 3d panoptic scene completion with uncertainty awareness. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 14554-14564 (2024) +10. Chen, Q., Vora, S., Beijbom, O.: Polarstream: Streaming lidar object detection and segmentation with polar pillars. In: Advances in Neural Information Processing Systems. vol. 34 (2021) + +11. Chen, R., Liu, Y., Kong, L., Chen, N., Zhu, X., Ma, Y., Liu, T., Wang, W.: Towards label-free scene understanding by vision foundation models. In: Advances in Neural Information Processing Systems. vol. 36 (2023) +12. Chen, R., Liu, Y., Kong, L., Zhu, X., Ma, Y., Li, Y., Hou, Y., Qiao, Y., Wang, W.: Clip2scene: Towards label-efficient 3d scene understanding by clip. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7020-7030 (2023) +13. Chen, T., Kornblith, S., Norouzi, M., Hinton, G.: A simple framework for contrastive learning of visual representations. In: International Conference on Machine Learning. pp. 1597-1607 (2020) +14. Chen, X., Fan, H., Girshick, R., He, K.: Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297 (2020) +15. Chen, X., Xie, S., He, K.: An empirical study of training self-supervised vision transformers. In: IEEE/CVF International Conference on Computer Vision. pp. 9640-9649 (2021) +16. Chen, Y., Nießner, M., Dai, A.: 4dcontrast: Contrastive learning with dynamic correspondences for 3d scene understanding. In: European Conference on Computer Vision. pp. 543-560 (2022) +17. Cheng, H., Han, X., Xiao, G.: Cenet: Toward concise and efficient lidar semantic segmentation for autonomous driving. In: IEEE International Conference on Multimedia and Expo. pp. 1-6 (2022) +18. Cheng, R., Razani, R., Taghavi, E., Li, E., Liu, B.: Af2-s3net: Attentive feature fusion with adaptive feature selection for sparse semantic segmentation network. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 12547-12556 (2021) +19. Choy, C., Gwak, J., Savarese, S.: 4d spatio-temporal convnets: Minkowski convolutional neural networks. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3075-3084 (2019) +20. Contributors, M.: MMDetection3D: OpenMMLab next-generation platform for general 3D object detection. https://github.com/open-mmlab/mmdetection3d (2020) +21. Cortinhal, T., Tzelepis, G., Aksoy, E.E.: Salsanext: Fast, uncertainty-aware semantic segmentation of lidar point clouds. In: International Symposium on Visual Computing. pp. 207-222 (2020) +22. 
Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., Uszkoreit, J., Houlsby, N.: An image is worth 16x16 words: Transformers for image recognition at scale. In: International Conference on Learning Representations (2020)
23. Duerr, F., Pfaller, M., Weigel, H., Beyerer, J.: Lidar-based recurrent 3d semantic segmentation with temporal memory alignment. In: International Conference on 3D Vision. pp. 781-790 (2020)
24. Ester, M., Kriegel, H.P., Sander, J., Xu, X.: A density-based algorithm for discovering clusters in large spatial databases with noise. In: ACM SIGKDD Conference on Knowledge Discovery and Data Mining. pp. 226-231 (1996)
25. Fischler, M.A., Bolles, R.C.: Random sample consensus: A paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM 24(6), 381-395 (1981)
26. Fong, W.K., Mohan, R., Hurtado, J.V., Zhou, L., Caesar, H., Beijbom, O., Valada, A.: Panoptic nuscenes: A large-scale benchmark for lidar panoptic segmentation and tracking. IEEE Robotics and Automation Letters 7, 3795-3802 (2022)
27. Gao, B., Pan, Y., Li, C., Geng, S., Zhao, H.: Are we hungry for 3d lidar data for semantic segmentation? a survey of datasets and methods. IEEE Transactions on Intelligent Transportation Systems 23(7), 6063-6081 (2021)
28. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? the kitti vision benchmark suite. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3354-3361 (2012)
29. Hao, X., Wei, M., Yang, Y., Zhao, H., Zhang, H., Zhou, Y., Wang, Q., Li, W., Kong, L., Zhang, J.: Is your hd map constructor reliable under sensor corruptions? arXiv preprint arXiv:2406.12214 (2024)
30. He, K., Chen, X., Xie, S., Li, Y., Dollár, P., Girshick, R.: Masked autoencoders are scalable vision learners. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 16000-16009 (2022)
31. He, K., Fan, H., Wu, Y., Xie, S., Girshick, R.: Momentum contrast for unsupervised visual representation learning. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9729-9738 (2020)
32. Hess, G., Jaxing, J., Svensson, E., Hagerman, D., Petersson, C., Svensson, L.: Masked autoencoders for self-supervised learning on automotive point clouds. arXiv preprint arXiv:2207.00531 (2022)
33. Hong, F., Kong, L., Zhou, H., Zhu, X., Li, H., Liu, Z.: Unified 3d and 4d panoptic segmentation via dynamic shifting networks. IEEE Transactions on Pattern Analysis and Machine Intelligence 46(5), 3480-3495 (2024)
34. Hong, F., Zhou, H., Zhu, X., Li, H., Liu, Z.: Lidar-based panoptic segmentation via dynamic shifting network. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13090-13099 (2021)
35. Hou, J., Graham, B., Nießner, M., Xie, S.: Exploring data-efficient 3d scene understanding with contrastive scene contexts. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15587-15597 (2021)
36. Hu, Q., Yang, B., Fang, G., Guo, Y., Leonardis, A., Trigoni, N., Markham, A.: Sqn: Weakly-supervised semantic segmentation of large-scale 3d point clouds. In: European Conference on Computer Vision. pp. 600-619 (2022)
37. Hu, Q., Yang, B., Khalid, S., Xiao, W., Trigoni, N., Markham, A.: Towards semantic segmentation of urban-scale 3d point clouds: A dataset, benchmarks and challenges. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4977-4987 (2021)
38.
Hu, Z., Bai, X., Zhang, R., Wang, X., Sun, G., Fu, H., Tai, C.L.: Lidal: Interframe uncertainty based active learning for 3d lidar semantic segmentation. In: European Conference on Computer Vision. pp. 248-265 (2022)
39. Huang, S., Xie, Y., Zhu, S.C., Zhu, Y.: Spatio-temporal self-supervised representation learning for 3d point clouds. In: IEEE/CVF International Conference on Computer Vision. pp. 6535-6545 (2021)
40. Jaritz, M., Vu, T.H., de Charette, R., Wirbel, E., Pérez, P.: xmuda: Cross-modal unsupervised domain adaptation for 3d semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 12605-12614 (2020)
41. Jiang, P., Osteen, P., Wigness, M., Saripalli, S.: Rellis-3d dataset: Data, benchmarks and analysis. In: IEEE International Conference on Robotics and Automation. pp. 1110-1116 (2021)
42. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., Dollar, P., Girshick, R.: Segment anything. In: IEEE/CVF International Conference on Computer Vision. pp. 4015-4026 (2023)
43. Klokov, A., Pak, D.U., Khorin, A., Yudin, D., Kochiev, L., Luchinskiy, V., Bezuglyj, V.: Daps3d: Domain adaptive projective segmentation of 3d lidar point clouds. IEEE Access 11, 79341-79356 (2023)
44. Kong, L., Liu, Y., Chen, R., Ma, Y., Zhu, X., Li, Y., Hou, Y., Qiao, Y., Liu, Z.: Rethinking range view representation for lidar segmentation. In: IEEE/CVF International Conference on Computer Vision. pp. 228-240 (2023)
45. Kong, L., Liu, Y., Li, X., Chen, R., Zhang, W., Ren, J., Pan, L., Chen, K., Liu, Z.: Robo3d: Towards robust and reliable 3d perception against corruptions. In: IEEE/CVF International Conference on Computer Vision. pp. 19994-20006 (2023)
46. Kong, L., Quader, N., Liong, V.E.: Conda: Unsupervised domain adaptation for lidar segmentation via regularized domain concatenation. In: IEEE International Conference on Robotics and Automation. pp. 9338-9345 (2023)
47. Kong, L., Ren, J., Pan, L., Liu, Z.: Lasermix for semi-supervised lidar semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21705-21715 (2023)
48. Kong, L., Xie, S., Hu, H., Ng, L.X., Cottereau, B.R., Ooi, W.T.: Robodepth: Robust out-of-distribution depth estimation under corruptions. In: Advances in Neural Information Processing Systems. vol. 36 (2023)
49. Kong, L., Xu, X., Ren, J., Zhang, W., Pan, L., Chen, K., Ooi, W.T., Liu, Z.: Multi-modal data-efficient 3d scene understanding for autonomous driving. arXiv preprint arXiv:2405.05258 (2024)
50. Krispel, G., Schinagl, D., Fruhwirth-Reisinger, C., Possegger, H., Bischof, H.: Maeli: Masked autoencoder for large-scale lidar point clouds. In: IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 3383-3392 (2024)
51. Le-Khac, P.H., Healy, G., Smeaton, A.F.: Contrastive representation learning: A framework and review. IEEE Access 8, 193907-193934 (2020)
52. Li, L., Shum, H.P., Breckon, T.P.: Less is more: Reducing task and model complexity for 3d point cloud semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9361-9371 (2023)
53. Li, R., de Charette, R., Cao, A.Q.: Coarse3d: Class-prototypes for contrastive learning in weakly-supervised 3d point cloud segmentation. In: British Machine Vision Conference (2022)
54.
Li, Y., Kong, L., Hu, H., Xu, X., Huang, X.: Optimizing lidar placements for robust driving perception in adverse conditions. arXiv preprint arXiv:2403.17009 (2024)
55. Lim, H., Oh, M., Myung, H.: Patchwork: Concentric zone-based region-wise ground segmentation with ground likelihood estimation using a 3d lidar sensor. IEEE Robotics and Automation Letters 6(4), 6458-6465 (2021)
56. Liong, V.E., Nguyen, T.N.T., Widjaja, S., Sharma, D., Chong, Z.J.: Amvnet: Assertion-based multi-view fusion network for lidar semantic segmentation. arXiv preprint arXiv:2012.04934 (2020)
57. Liu, M., Zhou, Y., Qi, C.R., Gong, B., Su, H., Anguelov, D.: Less: Label-efficient semantic segmentation for lidar point clouds. In: European Conference on Computer Vision. pp. 70-89 (2022)
58. Liu, M., Yurtsever, E., Zhou, X., Fossaert, J., Cui, Y., Zagar, B.L., Knoll, A.C.: A survey on autonomous driving datasets: Data statistic, annotation, and outlook. arXiv preprint arXiv:2401.01454 (2024)
59. Liu, Y., Bai, Y., Kong, L., Chen, R., Hou, Y., Shi, B., Li, Y.: Pcseg: An open source point cloud segmentation codebase. https://github.com/PJLab-ADG/PCSeg (2023)
60. Liu, Y., Chen, R., Li, X., Kong, L., Yang, Y., Xia, Z., Bai, Y., Zhu, X., Ma, Y., Li, Y., Qiao, Y., Hou, Y.: Uniseg: A unified multi-modal lidar segmentation network and the openpcseg codebase. In: IEEE/CVF International Conference on Computer Vision. pp. 21662-21673 (2023)
61. Liu, Y., Kong, L., Cen, J., Chen, R., Zhang, W., Pan, L., Chen, K., Liu, Z.: Segment any point cloud sequences by distilling vision foundation models. In: Advances in Neural Information Processing Systems. vol. 36 (2023)
62. Liu, Y., Kong, L., Wu, X., Chen, R., Li, X., Pan, L., Liu, Z., Ma, Y.: Multi-space alignments towards universal lidar segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 14648-14661 (2024)
63. Liu, Y.C., Huang, Y.K., Chiang, H.Y., Su, H.T., Liu, Z.Y., Chen, C.T., Tseng, C.Y., Hsu, W.H.: Learning from 2d: Contrastive pixel-to-point knowledge transfer for 3d pretraining. arXiv preprint arXiv:2104.04687 (2021)
64. Liu, Y., Chen, J., Zhang, Z., Huang, J., Yi, L.: Leaf: Learning frames for 4d point cloud sequence understanding. In: IEEE/CVF International Conference on Computer Vision. pp. 604-613 (2023)
65. Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: International Conference on Learning Representations (2018)
66. Mahmoud, A., Hu, J.S., Kuai, T., Harakeh, A., Paull, L., Waslander, S.L.: Self-supervised image-to-point distillation via semantically tolerant contrastive loss. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7102-7110 (2023)
67. Michele, B., Boulch, A., Puy, G., Vu, T.H., Marlet, R., Courty, N.: Saluda: Surface-based automotive lidar unsupervised domain adaptation. arXiv preprint arXiv:2304.03251 (2023)
68. Milioto, A., Vizzo, I., Behley, J., Stachniss, C.: Rangenet++: Fast and accurate lidar semantic segmentation. In: IEEE/RSJ International Conference on Intelligent Robots and Systems. pp. 4213-4220 (2019)
69. Muhammad, K., Ullah, A., Lloret, J., Ser, J.D., de Albuquerque, V.H.C.: Deep learning for safe autonomous driving: Current challenges and future directions. IEEE Transactions on Intelligent Transportation Systems 22(7), 4316-4336 (2020)
70. Nunes, L., Marcuzzi, R., Chen, X., Behley, J., Stachniss, C.: Segcontrast: 3d point cloud feature representation learning through self-supervised segment discrimination.
IEEE Robotics and Automation Letters 7(2), 2116-2123 (2022)
71. Nunes, L., Wiesmann, L., Marcuzzi, R., Chen, X., Behley, J., Stachniss, C.: Temporal consistent 3d lidar representation learning for semantic perception in autonomous driving. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5217-5228 (2023)
72. Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., Assran, M., Ballas, N., Galuba, W., Howes, R., Huang, P.Y., Li, S.W., Misra, I., Rabbat, M., Sharma, V., Synnaeve, G., Xu, H., Jégou, H., Mairal, J., Labatut, P., Joulin, A., Bojanowski, P.: Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193 (2023)
73. Pan, Y., Gao, B., Mei, J., Geng, S., Li, C., Zhao, H.: Semanticposs: A point cloud dataset with large quantity of dynamic instances. In: IEEE Intelligent Vehicles Symposium. pp. 687-693 (2020)
74. Pang, B., Xia, H., Lu, C.: Unsupervised 3d point cloud representation learning by triangle constrained contrast for autonomous driving. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5229-5239 (2023)
75. Puy, G., Gidaris, S., Boulch, A., Siméoni, O., Sautier, C., Pérez, P., Bursuc, A., Marlet, R.: Revisiting the distillation of image representations into point clouds for autonomous driving. arXiv preprint arXiv:2310.17504 (2023)
76. Puy, G., Gidaris, S., Boulch, A., Siméoni, O., Sautier, C., Pérez, P., Bursuc, A., Marlet, R.: Three pillars improving vision foundation model distillation for lidar. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21519-21529 (2024)
77. Qiu, H., Yu, B., Tao, D.: Gfnet: Geometric flow network for 3d point cloud semantic segmentation. Transactions on Machine Learning Research (2022)
78. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning. pp. 8748-8763 (2021)
79. Rizzoli, G., Barbato, F., Zanuttigh, P.: Multimodal semantic segmentation in autonomous driving: A review of current approaches and future perspectives. Technologies 10(4) (2022)
80. Saltori, C., Krivosheev, E., Lathuilière, S., Sebe, N., Galasso, F., Fiameni, G., Ricci, E., Poiesi, F.: Gipso: Geometrically informed propagation for online adaptation in 3d lidar segmentation. In: European Conference on Computer Vision. pp. 567-585 (2022)
81. Sautier, C., Puy, G., Boulch, A., Marlet, R., Lepetit, V.: Bevcontrast: Self-supervision in bev space for automotive lidar point clouds. arXiv preprint arXiv:2310.17281 (2023)
82. Sautier, C., Puy, G., Gidaris, S., Boulch, A., Bursuc, A., Marlet, R.: Image-to-lidar self-supervised distillation for autonomous driving data. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9891-9901 (2022)
83. Shen, Z., Sheng, X., Fan, H., Wang, L., Guo, Y., Liu, Q., Wen, H., Zhou, X.: Masked spatio-temporal structure prediction for self-supervised learning on point cloud videos. In: IEEE/CVF International Conference on Computer Vision. pp. 16580-16589 (2023)
84. Sheng, X., Shen, Z., Xiao, G., Wang, L., Guo, Y., Fan, H.: Point contrastive prediction with semantic clustering for self-supervised learning on point cloud videos. In: IEEE/CVF International Conference on Computer Vision. pp. 16515-16524 (2023)
85.
Shi, H., Lin, G., Wang, H., Hung, T.Y., Wang, Z.: Spsequencenet: Semantic segmentation network on 4d point clouds. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4574-4583 (2020)
86. Shi, H., Wei, J., Li, R., Liu, F., Lin, G.: Weakly supervised segmentation on outdoor 4d point clouds with temporal matching and spatial graph propagation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11840-11849 (2022)
87. Smith, L.N., Topin, N.: Super-convergence: Very fast training of neural networks using large learning rates. arXiv preprint arXiv:1708.07120 (2017)
88. Sun, J., Xu, X., Kong, L., Liu, Y., Li, L., Zhu, C., Zhang, J., Xiao, Z., Chen, R., Wang, T., Zhang, W., Chen, K., Qing, C.: An empirical study of training state-of-the-art lidar segmentation models. arXiv preprint arXiv:2405.14870 (2024)
89. Sun, P., Kretzschmar, H., Dotiwalla, X., Chouard, A., Patnaik, V., Tsui, P., Guo, J., Zhou, Y., Chai, Y., Caine, B., Vasudevan, V., Han, W., Ngiam, J., Zhao, H., Timofeev, A., Ettinger, S., Krivokon, M., Gao, A., Joshi, A., Zhang, Y., Shlens, J., Chen, Z., Anguelov, D.: Scalability in perception for autonomous driving: Waymo open dataset. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2446-2454 (2020)
90. Tang, H., Liu, Z., Zhao, S., Lin, Y., Lin, J., Wang, H., Han, S.: Searching efficient 3d architectures with sparse point-voxel convolution. In: European Conference on Computer Vision. pp. 685-702 (2020)
91. Tarvainen, A., Valpola, H.: Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. In: Advances in Neural Information Processing Systems. vol. 30 (2017)
92. Triess, L.T., Dreissig, M., Rist, C.B., Zollner, J.M.: A survey on deep domain adaptation for lidar perception. In: IEEE Intelligent Vehicles Symposium Workshops. pp. 350-357 (2021)
93. Uecker, M., Fleck, T., Pflugfelder, M., Zöllner, J.M.: Analyzing deep learning representations of point clouds for real-time in-vehicle lidar perception. arXiv preprint arXiv:2210.14612 (2022)
94. Unal, O., Dai, D., Gool, L.V.: Scribble-supervised lidar semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2697-2707 (2022)
95. Wei, W., Nejadasl, F.K., Gevers, T., Oswald, M.R.: T-mae: Temporal masked autoencoders for point cloud representation learning. arXiv preprint arXiv:2312.10217 (2023)
96. Wu, Y., Zhang, T., Ke, W., Süsstrunk, S., Salzmann, M.: Spatiotemporal self-supervised learning for point clouds in the wild. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5251-5260 (2023)
97. Xiao, A., Huang, J., Guan, D., Zhan, F., Lu, S.: Transfer learning from synthetic to real lidar point cloud for semantic segmentation. In: AAAI Conference on Artificial Intelligence. pp. 2795-2803 (2022)
98. Xiao, A., Huang, J., Guan, D., Zhang, X., Lu, S., Shao, L.: Unsupervised point cloud representation learning with deep neural networks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence 45(9), 11321-11339 (2023)
99. Xiao, A., Huang, J., Xuan, W., Ren, R., Liu, K., Guan, D., Saddik, A.E., Lu, S., Xing, E.: 3d semantic segmentation in the wild: Learning generalized models for adverse-condition point clouds. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9382-9392 (2023)
100. Xie, B., Li, S., Guo, Q., Liu, C.H., Cheng, X.: Annotator: A generic active learning baseline for lidar semantic segmentation.
In: Advances in Neural Information Processing Systems. vol. 36 (2023) +101. Xie, S., Gu, J., Guo, D., Qi, C.R., Guibas, L., Litany, O.: Pointcontrast: Unsupervised pre-training for 3d point cloud understanding. In: European Conference on Computer Vision. pp. 574-591 (2020) +102. Xie, S., Kong, L., Zhang, W., Ren, J., Pan, L., Chen, K., Liu, Z.: Benchmarking and improving bird's eye view perception robustness in autonomous driving. arXiv preprint arXiv:2405.17426 (2024) +103. Xie, Z., Zhang, Z., Cao, Y., Lin, Y., Bao, J., Yao, Z., Dai, Q., Hu, H.: Simmim: A simple framework for masked image modeling. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9653-9663 (2022) + +104. Xu, C., Wu, B., Wang, Z., Zhan, W., Vajda, P., Keutzer, K., Tomizuka, M.: Squeezesegv3: Spatially-adaptive convolution for efficient point-cloud segmentation. In: European Conference on Computer Vision. pp. 1-19 (2020) +105. Xu, J., Zhang, R., Dou, J., Zhu, Y., Sun, J., Pu, S.: Rpvnet: A deep and efficient range-point-voxel fusion network for lidar point cloud segmentation. In: IEEE/CVF International Conference on Computer Vision. pp. 16024-16033 (2021) +106. Xu, W., Li, X., Ni, P., Guang, X., Luo, H., Zhao, X.: Multi-view fusion driven 3d point cloud semantic segmentation based on hierarchical transformer. IEEE Sensors Journal 23(24), 31461-31470 (2023) +107. Xu, X., Kong, L., Shuai, H., Liu, Q.: Frnet: Frustum-range networks for scalable lidar segmentation. arXiv preprint arXiv:2312.04484 (2023) +108. Yin, J., Zhou, D., Zhang, L., Fang, J., Xu, C.Z., Shen, J., Wang, W.: Proposal contrast: Unsupervised pre-training for lidar-based 3d object detection. In: European Conference on Computer Vision. pp. 17-33 (2022) +109. Zhang, H., Li, F., Zou, X., Liu, S., Li, C., Gao, J., Yang, J., Zhang, L.: A simple framework for open-vocabulary segmentation and detection. In: IEEE/CVF International Conference on Computer Vision. pp. 1020-1031 (2023) +110. Zhang, S., Deng, J., Bai, L., Li, H., Ouyang, W., Zhang, Y.: Hvdistill: Transferring knowledge from images to point clouds via unsupervised hybrid-view distillation. International Journal of Computer Vision pp. 1-15 (2024) +111. Zhang, Y., Zhou, Z., David, P., Yue, X., Xi, Z., Gong, B., Foroosh, H.: Polarnet: An improved grid representation for online lidar point clouds semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9601-9610 (2020) +112. Zhang, Y., Hou, J., Yuan, Y.: A comprehensive study of the robustness for lidar-based 3d object detectors against adversarial attacks. International Journal of Computer Vision pp. 1-33 (2023) +113. Zhang, Z., Girdhar, R., Joulin, A., Misra, I.: Self-supervised pretraining of 3d features on any point-cloud. In: IEEE/CVF International Conference on Computer Vision. pp. 10252-10263 (2021) +114. Zhang, Z., Dong, Y., Liu, Y., Yi, L.: Complete-to-partial 4d distillation for self-supervised point cloud sequence representation learning. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 17661-17670 (2023) +115. Zhang, Z., Yang, B., Wang, B., Li, B.: Growsp: Unsupervised semantic segmentation of 3d point clouds. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 17619-17629 (2023) +116. Zhao, Y., Bai, L., Huang, X.: Fidnet: Lidar point cloud semantic segmentation with fully interpolation decoding. In: IEEE/RSJ International Conference on Intelligent Robots and Systems. pp. 4453-4458 (2021) +117. 
Zhou, Z., Zhang, Y., Foroosh, H.: Panoptic-polarnet: Proposal-free lidar point cloud panoptic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13194-13203 (2021) +118. Zhu, X., Zhou, H., Wang, T., Hong, F., Ma, Y., Li, W., Li, H., Lin, D.: Cylindrical and asymmetrical 3d convolution networks for lidar segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9939-9948 (2021) +119. Zou, X., Dou, Z.Y., Yang, J., Gan, Z., Li, L., Li, C., Dai, X., Behl, H., Wang, J., Yuan, L., Peng, N., Wang, L., Lee, Y.J., Gao, J.: Generalized decoding for pixel, image, and language. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15116-15127 (2023) + +120. Zou, X., Yang, J., Zhang, H., Li, F., Li, L., Gao, J., Lee, Y.J.: Segment everything everywhere all at once. In: Advances in Neural Information Processing Systems. vol. 36 (2023) \ No newline at end of file diff --git a/2024/4D Contrastive Superflows are Dense 3D Representation Learners/images.zip b/2024/4D Contrastive Superflows are Dense 3D Representation Learners/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..ecd6c86c2f0fe2a52635fb4d39768d668ae72237 --- /dev/null +++ b/2024/4D Contrastive Superflows are Dense 3D Representation Learners/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:165f95a739eaca64d26fe70745707a769c8f7cc77a15a839bba22c1206a0f8f0 +size 821054 diff --git a/2024/4D Contrastive Superflows are Dense 3D Representation Learners/layout.json b/2024/4D Contrastive Superflows are Dense 3D Representation Learners/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..3d2581025cf337d6f7c7801b3914b5d7581e70b8 --- /dev/null +++ b/2024/4D Contrastive Superflows are Dense 3D Representation Learners/layout.json @@ -0,0 +1,13422 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 165, + 111, + 449, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 111, + 449, + 148 + ], + "spans": [ + { + "bbox": [ + 165, + 111, + 449, + 148 + ], + "type": "text", + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 168, + 167, + 445, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 167, + 445, + 194 + ], + "spans": [ + { + "bbox": [ + 168, + 167, + 445, + 194 + ], + "type": "text", + "content": "Xiang Xu" + }, + { + "bbox": [ + 168, + 167, + 445, + 194 + ], + "type": "inline_equation", + "content": "^{1,\\star}" + }, + { + "bbox": [ + 168, + 167, + 445, + 194 + ], + "type": "text", + "content": ", Lingdong Kong" + }, + { + "bbox": [ + 168, + 167, + 445, + 194 + ], + "type": "inline_equation", + "content": "^{2,3,*}" + }, + { + "bbox": [ + 168, + 167, + 445, + 194 + ], + "type": "text", + "content": ", Hui Shuai" + }, + { + "bbox": [ + 168, + 167, + 445, + 194 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 168, + 167, + 445, + 194 + ], + "type": "text", + "content": ", Wenwei Zhang" + }, + { + "bbox": [ + 168, + 167, + 445, + 194 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 168, + 167, + 445, + 194 + ], + "type": "text", + "content": ", Liang Pan" + }, + { + "bbox": [ + 168, + 167, + 445, + 194 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 168, + 167, + 445, + 194 + ], + "type": "text", + "content": ", Kai Chen" + }, + { + "bbox": [ + 168, + 167, + 
445, + 194 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 168, + 167, + 445, + 194 + ], + "type": "text", + "content": ", Ziwei Liu" + }, + { + "bbox": [ + 168, + 167, + 445, + 194 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 168, + 167, + 445, + 194 + ], + "type": "text", + "content": ", and Qingshan Liu" + }, + { + "bbox": [ + 168, + 167, + 445, + 194 + ], + "type": "inline_equation", + "content": "^{4,\\text{图}}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 195, + 201, + 417, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 201, + 417, + 213 + ], + "spans": [ + { + "bbox": [ + 195, + 201, + 417, + 213 + ], + "type": "text", + "content": "1 Nanjing University of Aeronautics and Astronautics" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 252, + 213, + 361, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 213, + 361, + 224 + ], + "spans": [ + { + "bbox": [ + 252, + 213, + 361, + 224 + ], + "type": "text", + "content": "2 Shanghai AI Laboratory" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 235, + 224, + 378, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 224, + 378, + 235 + ], + "spans": [ + { + "bbox": [ + 235, + 224, + 378, + 235 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 235, + 224, + 378, + 235 + ], + "type": "text", + "content": " National University of Singapore" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 195, + 235, + 418, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 235, + 418, + 246 + ], + "spans": [ + { + "bbox": [ + 195, + 235, + 418, + 246 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 195, + 235, + 418, + 246 + ], + "type": "text", + "content": " Nanjing University of Posts and Telecommunications" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 218, + 246, + 395, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 218, + 246, + 395, + 256 + ], + "spans": [ + { + "bbox": [ + 218, + 246, + 395, + 256 + ], + "type": "text", + "content": "S-Lab, Nanyang Technological University" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 159, + 281, + 455, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 281, + 455, + 491 + ], + "spans": [ + { + "bbox": [ + 159, + 281, + 455, + 491 + ], + "type": "text", + "content": "Abstract. In the realm of autonomous driving, accurate 3D perception is the foundation. However, developing such models relies on extensive human annotations – a process that is both costly and labor-intensive. To address this challenge from a data representation learning perspective, we introduce SuperFlow, a novel framework designed to harness consecutive LiDAR-camera pairs for establishing spatiotemporal pretraining objectives. SuperFlow stands out by integrating two key designs: 1) a dense-to-sparse consistency regularization, which promotes insensitivity to point cloud density variations during feature learning, and 2) a flow-based contrastive learning module, carefully crafted to extract meaningful temporal cues from readily available sensor calibrations. To further boost learning efficiency, we incorporate a plug-and-play view consistency module that enhances the alignment of the knowledge distilled from camera views. 
Extensive comparative and ablation studies across 11 heterogeneous LiDAR datasets validate our effectiveness and superiority. Additionally, we observe several interesting emerging properties by scaling up the 2D and 3D backbones during pretraining, shedding light on the future research of 3D foundation models for LiDAR-based perception. Code is publicly available at https://github.com/Xiangxu-0103/SuperFlow." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 159, + 500, + 455, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 500, + 455, + 523 + ], + "spans": [ + { + "bbox": [ + 159, + 500, + 455, + 523 + ], + "type": "text", + "content": "Keywords: LiDAR Segmentation " + }, + { + "bbox": [ + 159, + 500, + 455, + 523 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 159, + 500, + 455, + 523 + ], + "type": "text", + "content": " 3D Data Pretraining " + }, + { + "bbox": [ + 159, + 500, + 455, + 523 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 159, + 500, + 455, + 523 + ], + "type": "text", + "content": " Autonomous Driving " + }, + { + "bbox": [ + 159, + 500, + 455, + 523 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 159, + 500, + 455, + 523 + ], + "type": "text", + "content": " Image-to-LiDAR Contrastive Learning " + }, + { + "bbox": [ + 159, + 500, + 455, + 523 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 159, + 500, + 455, + 523 + ], + "type": "text", + "content": " Semantic Superpixels" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 540, + 230, + 553 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 540, + 230, + 553 + ], + "spans": [ + { + "bbox": [ + 132, + 540, + 230, + 553 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 564, + 481, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 564, + 481, + 612 + ], + "spans": [ + { + "bbox": [ + 130, + 564, + 481, + 612 + ], + "type": "text", + "content": "Driving perception is one of the most crucial components of an autonomous vehicle system. Recent advancements in sensing technologies, such as light detection and ranging (LiDAR) sensors and surrounding-view cameras, open up new possibilities for a holistic, accurate, and 3D-aware scene perception [3,9,79]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 612, + 481, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 612, + 481, + 648 + ], + "spans": [ + { + "bbox": [ + 130, + 612, + 481, + 648 + ], + "type": "text", + "content": "Training a 3D perception model that can perform well in real-world scenarios often requires large-scale datasets and sufficient computing power [27,58]. Different from 2D, annotating 3D data is notably more expensive and labor-intensive," + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 653, + 463, + 666 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 653, + 463, + 666 + ], + "spans": [ + { + "bbox": [ + 133, + 653, + 463, + 666 + ], + "type": "text", + "content": "* X. Xu and L. Kong contributed equally to this work. ⌒ Corresponding author." 
+ } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "text", + "content": "which hinders the scalability of existing 3D perception models [28,69,98,112]. Data representation learning serves as a potential solution to mitigate such a problem [6,76]. By designing suitable pretraining objectives, the models are anticipated to extract useful concepts from raw data, where such concepts can help improve models' performance on downstream tasks with fewer annotations [51]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 181, + 279, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 181, + 279, + 445 + ], + "spans": [ + { + "bbox": [ + 130, + 181, + 279, + 445 + ], + "type": "text", + "content": "Recently, Sautier et al. [82] proposed SLidR to distill knowledge from surrounding camera views - using a pretrained 2D backbone such as MoCo [14] and DINO [72] - to LiDAR point clouds, exhibiting promising 3D representation learning properties. The key to its success is the superpixel-driven contrastive objectives between cameras and LiDAR sensors. Subsequent works further extended this framework from various aspects, such as class balancing [66], hybrid-view distillation [110], semantic superpixels [11, 12, 61], and so on. While these methods showed improved performance over their baselines, there exist several issues that could undermine the data representation learning." + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 285, + 187, + 481, + 355 + ], + "blocks": [ + { + "bbox": [ + 285, + 187, + 481, + 355 + ], + "lines": [ + { + "bbox": [ + 285, + 187, + 481, + 355 + ], + "spans": [ + { + "bbox": [ + 285, + 187, + 481, + 355 + ], + "type": "image", + "image_path": "1198c7f3899ffac5b4f62f30fa3305ba0aaee00132c41cf2e3ab3b3bec78a1c5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 283, + 360, + 482, + 437 + ], + "lines": [ + { + "bbox": [ + 283, + 360, + 482, + 437 + ], + "spans": [ + { + "bbox": [ + 283, + 360, + 482, + 437 + ], + "type": "text", + "content": "Fig.1: Performance overview of SuperFlow compared to state-of-the-art image-to-LiDAR pretraining methods, i.e., Seal [61], SLidR [82], and PPKT [63], on eleven LiDAR datasets. The scores of prior methods are normalized based on SuperFlow's scores. The larger the area coverage, the better the overall segmentation performance." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 450, + 482, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 450, + 482, + 547 + ], + "spans": [ + { + "bbox": [ + 130, + 450, + 482, + 547 + ], + "type": "text", + "content": "The first concern revolves around the inherent temporal dynamics of LiDAR data [4,8]. LiDAR point clouds are acquired sequentially, capturing the essence of motion within the scene. Traditional approaches [61,63,66,82,110] often overlook this temporal aspect, treating each snapshot as an isolated scan. However, this sequential nature holds a wealth of information that can significantly enrich the model's understanding of the 3D environment [71,96]. 
The first concern revolves around the inherent temporal dynamics of LiDAR data [4,8]. LiDAR point clouds are acquired sequentially, capturing the essence of motion within the scene. Traditional approaches [61,63,66,82,110] often overlook this temporal aspect, treating each snapshot as an isolated scan. However, this sequential nature holds a wealth of information that can significantly enrich the model's understanding of the 3D environment [71,96]. Utilizing these temporal cues can lead to more robust and context-aware 3D perception models, which is crucial for the dynamic environments encountered in autonomous driving.

Moreover, the varying density of LiDAR point clouds presents a unique challenge [45,47,94]. Due to the nature of LiDAR scanning and data acquisition, different areas within the same scene can have significantly different point densities, which can in turn affect the consistency of feature representation across the scene [2,47,108,111]. Therefore, a model that can learn invariant features regardless of point cloud density tends to be effective for recognizing the structural and semantic information in the 3D space.

In light of these challenges, we propose a novel spatiotemporal contrastive learning framework, dubbed SuperFlow, to encourage effective cross-sensor knowledge distillation. Our approach features three key components, all centered around the use of the off-the-shelf temporal cues inherent in the LiDAR acquisition process:

- We first introduce a straightforward yet effective view consistency alignment that seamlessly generates semantic superpixels with language guidance, alleviating the "self-conflict" issues in existing works [61,66,82]. As opposed to the previous pipeline, our method also aligns the semantics across camera views in consecutive scenes, paving the way for more sophisticated designs.
- To address the varying density of LiDAR point clouds, we present a dense-to-sparse regularization module that encourages consistency between features of dense and sparse point clouds. Dense points are obtained by concatenating multi-sweep LiDAR scans within a suitable time window and propagating the semantic superpixels from sparse to dense points. By leveraging dense point features to regularize sparse point features, the model is encouraged to remain insensitive to point cloud density variations.

- To capture useful temporal cues from consecutive scans across different timestamps, we design a flow-based contrastive learning module. This module takes multiple LiDAR-camera pairs as input and enforces strong consistency between temporally shifted representations. Analogous to existing image-to-LiDAR representation learning methods [61,66,82], we also incorporate useful spatial contrastive objectives into our framework, setting up a unified pipeline that emphasizes holistic representation learning from both the structural 3D layouts and the temporal 4D information.

The strong spatiotemporal consistency regularization in SuperFlow effectively forms a semantically rich landscape that enhances data representations. As illustrated in Fig. 1, our approach achieves appealing performance gains over state-of-the-art 3D pretraining methods across a diverse spectrum of downstream tasks. Meanwhile, we also aim at scaling the capacity of both 2D and 3D backbones during pretraining, shedding light on the future development of more robust, unified, and ubiquitous 3D perception models.

To summarize, this work makes the following key contributions:

- We present SuperFlow, a novel framework aimed at harnessing consecutive LiDAR-camera pairs for establishing spatiotemporal pretraining objectives.
- Our framework incorporates novel designs, including view consistency alignment, dense-to-sparse regularization, and flow-based contrastive learning, which better encourage representation learning between camera and LiDAR sensors across consecutive scans.

- Our approach sets new state-of-the-art performance across 11 LiDAR datasets, exhibiting strong robustness and generalizability. We also reveal intriguing emergent properties as we scale up the 2D and 3D backbones, which could lay the foundation for scalable 3D perception.

2 Related Work

LiDAR-based 3D Perception. The LiDAR sensor has been widely used in today's 3D perception systems, credited to its robust and structural sensing abilities [4,88,92]. Due to the sparse and unordered nature of LiDAR point clouds, suitable rasterization strategies are needed to convert them into structured inputs [37,93]. Popular choices include sparse voxels [18,19,33,34,90,118], bird's eye view maps [10,56,111,117], range view images [17,21,44,68,104,107,116], and multi-view fusion [18,40,60,62,77,105,106]. While witnessing record-breaking performances on standard benchmarks, existing approaches rely heavily on human annotations, which hinders scalability [27]. In response to this challenge, we resort to the newly emerged 3D representation learning paradigm, hoping to leverage the rich collections of unlabeled LiDAR point clouds for more effective learning from LiDAR data. This could further enrich the efficacy of LiDAR-based perception.
Data-Efficient 3D Perception. To better save annotation budgets, previous efforts pursue 3D perception in a data-efficient manner [11,12,27,40,46,49]. One line of research resorts to weak supervision, e.g., seeding points [36,53,86,115], active prompts [38,57,100], and scribbles [94], for weakly-supervised LiDAR semantic segmentation. Another line of research seeks semi-supervised learning approaches [47,52,91] to better tackle efficient 3D scene perception and achieves promising results. In this work, different from the prior pursuits, we tackle efficient 3D perception from the data representation learning perspective. We establish several LiDAR-based data representation learning settings that seamlessly combine pretraining with weakly- and semi-supervised learning, further enhancing the scalability of 3D perception systems.

3D Representation Learning. Analogous to 2D representation learning strategies [13,15,30,31,103], prior works designed contrastive [35,70,81,101,108,113], masked modeling [32,50,95], and reconstruction [7,67] objectives for 3D pretraining. Most early 3D representation learning approaches use a single modality for pretraining, leaving room for further development. The off-the-shelf calibrations among different types of sensors provide a promising solution for building pretraining objectives [63]. Recently, SLidR [82] made the first contribution toward multi-modal 3D representation learning between camera and LiDAR sensors. Subsequent works [66,74,110] extended this framework with more advanced designs. Seal [61] leverages powerful vision foundation models [42,109,119,120] to better assist the contrastive learning across sensors. Puy et al. [75,76] conducted a comprehensive study on the distillation recipe for better pretraining effects. While these approaches have exhibited better performance than their baselines, they overlooked the rich temporal cues across consecutive scans, which might lead to sub-optimal pretraining performance. In this work, we construct dense 3D representation learning objectives using calibrated LiDAR sequences. Our approach encourages the consistency between features from sparse to dense inputs and features across timestamps, yielding superiority over existing endeavors.
4D Representation Learning. Leveraging consecutive scans is promising for extracting temporal relations [2,23,33,85]. For point cloud data pretraining, prior works [16,64,83,84,114] mainly focused on applying 4D cues to object- and human-centric point clouds, which are often small in scale. For large-scale automotive point clouds, STRL [39] learns spatiotemporal data invariance with different spatial augmentations in the point cloud sequence. TARL [71] and STSSL [96] encourage similarities of point clusters in two consecutive frames, where such clusters are obtained by ground removal and clustering algorithms, i.e., RANSAC [25], Patchwork [55], and HDBSCAN [24]. BEVContrast [81] shares a similar motivation but utilizes BEV maps for contrastive learning, which yields a more effective implementation. The "one-fits-all" clustering parameters, however, are often difficult to obtain, hindering existing works. Different from existing methods that use a single modality for 4D representation learning, we propose to leverage LiDAR-camera correspondences and semantic-rich superpixels to establish meaningful multi-modality 4D pretraining objectives.

3 SuperFlow

In this section, we first revisit the common setups of the camera-to-LiDAR distillation baseline (cf. Sec. 3.1). We then elaborate on the technical details of SuperFlow, encompassing a straightforward yet effective view consistency alignment (cf. Sec. 3.2), a dense-to-sparse consistency regularization (cf. Sec. 3.3), and a flow-based spatiotemporal contrastive learning (cf. Sec. 3.4). The overall pipeline of the proposed SuperFlow framework is depicted in Fig. 4.

3.1 Preliminaries
Problem Definition. Given a point cloud $\mathcal{P}^t = \{\mathbf{p}_i^t, \mathbf{f}_i^t \,|\, i = 1, \dots, N\}$ with $N$ points captured by a LiDAR sensor at time $t$, where $\mathbf{p}_i \in \mathbb{R}^3$ denotes the coordinate of the point and $\mathbf{f}_i \in \mathbb{R}^C$ is the corresponding feature, we aim to transfer knowledge from $M$ surrounding camera images $\mathcal{I}^t = \{\mathbf{I}_i^t \,|\, i = 1, \dots, M\}$ into the point cloud. Here, $\mathbf{I}_i \in \mathbb{R}^{H \times W \times 3}$ represents an image with height $H$ and width $W$. Prior works [61,82] generate a set of class-agnostic superpixels $\mathcal{X}_i = \{\mathbf{X}_i^j \,|\, j = 1, \dots, V\}$ for each image via the unsupervised SLIC algorithm [1] or the more recent vision foundation models (VFMs) [42,119,120], where $V$ denotes the total number of superpixels. Assuming that the point cloud $\mathcal{P}^t$ and images $\mathcal{I}^t$ are calibrated, each point $\mathbf{p}_i = (x_i, y_i, z_i)$ can then be projected onto the image plane at $(u_i, v_i)$ using the sensor calibration parameters:

$$[u_i, v_i, 1]^{\mathrm{T}} = \frac{1}{z_i} \times \Gamma_K \times \Gamma_{c \leftarrow l} \times [x_i, y_i, z_i]^{\mathrm{T}}, \tag{1}$$

where $\Gamma_K$ denotes the camera intrinsic matrix and $\Gamma_{c \leftarrow l}$ is the transformation matrix from the LiDAR sensor to the surrounding-view cameras. We also obtain a set of superpoints $\mathcal{Y} = \{\mathbf{Y}^j \,|\, j = 1, \dots, V\}$ through this projection.
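To make the projection in Eq. (1) concrete, here is a minimal NumPy sketch, assuming a 3x3 intrinsic matrix and a 4x4 LiDAR-to-camera transform applied to homogeneous coordinates; the function and argument names are illustrative, not taken from the released code, and the division uses the camera-frame depth.

```python
import numpy as np

def project_points(points, gamma_K, gamma_cl, H, W):
    """Project LiDAR points (N, 3) onto the image plane following Eq. (1).

    gamma_K:  (3, 3) camera intrinsic matrix (Gamma_K).
    gamma_cl: (4, 4) LiDAR-to-camera transformation (Gamma_{c<-l}).
    Returns pixel coordinates (u, v) and a validity mask for points that
    lie in front of the camera and inside the H x W image.
    """
    N = points.shape[0]
    homo = np.concatenate([points, np.ones((N, 1))], axis=1)  # (N, 4)
    cam = (gamma_cl @ homo.T)[:3]                             # (3, N) camera frame
    z = cam[2]                                                # depth z_i
    uv = (gamma_K @ cam) / np.clip(z, 1e-6, None)             # divide by depth
    u, v = uv[0], uv[1]
    mask = (z > 0) & (u >= 0) & (u < W) & (v >= 0) & (v < H)
    return u, v, mask
```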
Let " + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta_p}:\\mathbb{R}^{N\\times (3 + C)}\\to \\mathbb{R}^{N\\times D}" + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "text", + "content": " be a 3D backbone with trainable parameters " + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "inline_equation", + "content": "\\theta_{p}" + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "text", + "content": ", which takes LiDAR points as input and outputs " + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "text", + "content": "-dimensional point features. Let " + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\theta_i}:\\mathbb{R}^{H\\times W\\times 3}\\to \\mathbb{R}^{\\frac{H}{S}\\times \\frac{W}{S}\\times E}" + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "text", + "content": " be an image backbone with pretrained parameters " + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "inline_equation", + "content": "\\theta_{i}" + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "text", + "content": " that takes images as input and outputs " + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "text", + "content": "-dimensional image features with stride " + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_{\\omega_p}:\\mathbb{R}^{N\\times D}\\to \\mathbb{R}^{N\\times L}" + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_{\\omega_i}:\\mathbb{R}^{\\frac{H}{S}\\times \\frac{W}{S}\\times E}\\to \\mathbb{R}^{H\\times W\\times L}" + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "text", + "content": " be linear heads with trainable parameters " + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "inline_equation", + "content": "\\omega_{p}" + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "inline_equation", + "content": "\\omega_{i}" + }, + { + "bbox": [ + 130, + 590, + 480, + 666 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 179, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 179, + 91, + 448, + 102 + ], + "type": "text", + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 135, + 114, + 242, + 215 + ], + "blocks": [ + { + "bbox": [ + 135, + 114, + 242, + 215 + ], + "lines": [ + { + "bbox": [ + 135, + 114, + 242, + 215 + ], + "spans": [ + { + "bbox": [ + 135, + 114, + 242, + 215 + ], + "type": "image", + "image_path": "d6f78ecf544015a58501fc084c9b78db5f5dbc080295b048e04ffc6452db34c1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 165, + 217, + 212, + 226 + ], + "lines": [ + { + "bbox": [ + 165, + 217, + 212, + 226 + ], + "spans": [ + { + "bbox": [ + 165, + 217, + 212, + 226 + ], + "type": "text", + "content": "(a) Heuristic" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 253, + 114, + 361, + 215 + ], + "blocks": [ + { + "bbox": [ + 253, + 114, + 361, + 215 + ], + "lines": [ + { + "bbox": [ + 253, + 114, + 361, + 215 + ], + "spans": [ + { + "bbox": [ + 253, + 114, + 361, + 215 + ], + "type": "image", + "image_path": "a0575d5b2a37ee97764d9318e67aea6b2e6ebae92c7921259b2f9e2bb23d7ba0.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 272, + 217, + 341, + 226 + ], + "lines": [ + { + "bbox": [ + 272, + 217, + 341, + 226 + ], + "spans": [ + { + "bbox": [ + 272, + 217, + 341, + 226 + ], + "type": "text", + "content": "(b) Class Agnostic" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 371, + 114, + 475, + 215 + ], + "blocks": [ + { + "bbox": [ + 371, + 114, + 475, + 215 + ], + "lines": [ + { + "bbox": [ + 371, + 114, + 475, + 215 + ], + "spans": [ + { + "bbox": [ + 371, + 114, + 475, + 215 + ], + "type": "image", + "image_path": 
"50c03ec25032693f317237d850c56bf109c008111e4ce182b074ba20ba1cd76b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 389, + 217, + 461, + 226 + ], + "lines": [ + { + "bbox": [ + 389, + 217, + 461, + 226 + ], + "spans": [ + { + "bbox": [ + 389, + 217, + 461, + 226 + ], + "type": "text", + "content": "(c) View Consistent" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 131, + 236, + 481, + 280 + ], + "lines": [ + { + "bbox": [ + 131, + 236, + 481, + 280 + ], + "spans": [ + { + "bbox": [ + 131, + 236, + 481, + 280 + ], + "type": "text", + "content": "Fig. 2: Comparisons of different superpixels. (a) Class-agnostic superpixels generated by the unsupervised SLIC [1] algorithm. (b) Class-agnostic semantic superpixels generated by vision foundation models (VFMs) [109, 119, 120]. (c) View-consistent semantic superpixels generated by our view consistency alignment module." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 131, + 299, + 480, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 299, + 480, + 323 + ], + "spans": [ + { + "bbox": [ + 131, + 299, + 480, + 323 + ], + "type": "text", + "content": "which project backbone features to " + }, + { + "bbox": [ + 131, + 299, + 480, + 323 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 131, + 299, + 480, + 323 + ], + "type": "text", + "content": "-dimensional features with " + }, + { + "bbox": [ + 131, + 299, + 480, + 323 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 131, + 299, + 480, + 323 + ], + "type": "text", + "content": "-normalization and upsample image features to " + }, + { + "bbox": [ + 131, + 299, + 480, + 323 + ], + "type": "inline_equation", + "content": "H\\times W" + }, + { + "bbox": [ + 131, + 299, + 480, + 323 + ], + "type": "text", + "content": " with bilinear interpolation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 323, + 480, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 323, + 480, + 371 + ], + "spans": [ + { + "bbox": [ + 131, + 323, + 480, + 371 + ], + "type": "text", + "content": "Pretraining Objective. The overall objective of image-to-LiDAR representation learning [82] is to transfer knowledge from the trained image backbone " + }, + { + "bbox": [ + 131, + 323, + 480, + 371 + ], + "type": "inline_equation", + "content": "\\mathcal{G}_{\\theta_i}" + }, + { + "bbox": [ + 131, + 323, + 480, + 371 + ], + "type": "text", + "content": " to the 3D backbone " + }, + { + "bbox": [ + 131, + 323, + 480, + 371 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta_p}" + }, + { + "bbox": [ + 131, + 323, + 480, + 371 + ], + "type": "text", + "content": ". The superpixels " + }, + { + "bbox": [ + 131, + 323, + 480, + 371 + ], + "type": "inline_equation", + "content": "\\mathcal{X}_i" + }, + { + "bbox": [ + 131, + 323, + 480, + 371 + ], + "type": "text", + "content": " generated offline, serve as an intermediate to effectively guide the knowledge transfer process." 
3.2 View Consistency Alignment

Motivation. The class-agnostic superpixels $\mathcal{X}_i$ used in prior works [61,66,82] are typically instance-level and do not consider the actual categories of the segmented regions. As discussed in [66], instance-level superpixels can lead to "self-conflict" problems, which undermine the effectiveness of pretraining.

Superpixel Comparisons. Fig. 2 compares superpixels generated via the unsupervised SLIC algorithm [1] and via VFMs. SLIC [1] tends to over-segment objects, causing semantic conflicts. VFMs generate superpixels through a panoptic segmentation head, which can still lead to "self-conflict" in three conditions (see Fig. 2b): ① when the same object appears in different camera views, leading to different parts of the same object being treated as negative samples; ② when objects of the same category within the same camera view are treated as negative samples; and ③ when objects across different camera views are treated as negative samples even if they share the same label.

Semantic-Related Superpixel Generation. To address these issues, we propose generating semantic-related superpixels to ensure consistency across camera views. Contrastive Language-Image Pre-training (CLIP) [78] has shown great generalization in few-shot learning. Building on existing VFMs [42,119,120], we employ CLIP's text encoder and fine-tune the last layer of the segmentation head from VFMs with predefined text prompts. This allows the segmentation head to generate language-guided semantic categories for each pixel, which we leverage as superpixels. As shown in Fig. 2c, we unify superpixels across camera views based on semantic category, alleviating the "self-conflict" problem in prior image-to-LiDAR contrastive learning pipelines.
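Conceptually, the alignment step can be viewed as relabeling superpixels by their language-guided semantic category, so that the same class shares one superpixel ID in every camera view; the helper below is a hypothetical sketch of that relabeling, not the fine-tuned CLIP/VFM pipeline itself.

```python
import numpy as np

def unify_superpixels(semantic_maps):
    """Merge per-view semantic predictions into view-consistent superpixels.

    semantic_maps: list of M arrays (H, W) holding per-pixel class IDs
    predicted by the language-guided segmentation head. Returns M maps in
    which the same class receives the same superpixel ID in all views, so
    same-class regions are no longer treated as negatives of each other.
    """
    classes = np.unique(np.concatenate([m.ravel() for m in semantic_maps]))
    remap = {c: i for i, c in enumerate(classes)}  # class ID -> shared superpixel ID
    return [np.vectorize(remap.get)(m) for m in semantic_maps]
```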
3.3 D2S: Dense-to-Sparse Consistency Regularization

Motivation. LiDAR points are sparse and often incomplete, significantly restricting the efficacy of the cross-sensor feature representation learning process. In this work, we propose to tackle this challenge by combining multiple LiDAR scans within a suitable time window to create a dense point cloud, which is then used to encourage consistency with the sparse point cloud.

Point Cloud Concatenation. Specifically, given a keyframe point cloud $\mathcal{P}^t$ captured at time $t$ and a set of sweep point clouds $\{\mathcal{P}^s \,|\, s = 1, \dots, T\}$ captured at previous times $s$, we first transform the coordinates $(x^s, y^s, z^s)$ of the sweep point cloud $\mathcal{P}^s$ into the coordinate system of $\mathcal{P}^t$, since the two scans are expressed in different coordinate systems due to the vehicle's movement:
"lines": [ + { + "bbox": [ + 132, + 392, + 280, + 418 + ], + "spans": [ + { + "bbox": [ + 132, + 392, + 280, + 418 + ], + "type": "interline_equation", + "content": "\\left[ \\tilde {x} ^ {s}, \\tilde {y} ^ {s}, \\tilde {z} ^ {s} \\right] ^ {\\mathrm {T}} = \\Gamma_ {t \\leftarrow s} \\times \\left[ x ^ {s}, y ^ {s}, z ^ {s} \\right] ^ {\\mathrm {T}}, \\tag {2}", + "image_path": "1b4fb96c43014b2e333392dfcba6d2ff580c1191be1e928aa0b149ee0768d8db.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 131, + 419, + 278, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 419, + 278, + 491 + ], + "spans": [ + { + "bbox": [ + 131, + 419, + 278, + 491 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 419, + 278, + 491 + ], + "type": "inline_equation", + "content": "\\Gamma_{t\\leftarrow s}" + }, + { + "bbox": [ + 131, + 419, + 278, + 491 + ], + "type": "text", + "content": " denotes the transformation matrix from the sweep point cloud at time " + }, + { + "bbox": [ + 131, + 419, + 278, + 491 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 131, + 419, + 278, + 491 + ], + "type": "text", + "content": " to the keyframe point cloud at time " + }, + { + "bbox": [ + 131, + 419, + 278, + 491 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 131, + 419, + 278, + 491 + ], + "type": "text", + "content": ". We then concatenate the transformed sweep points " + }, + { + "bbox": [ + 131, + 419, + 278, + 491 + ], + "type": "inline_equation", + "content": "\\{\\tilde{\\mathcal{P}}^s |s =" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 286, + 257, + 481, + 407 + ], + "blocks": [ + { + "bbox": [ + 286, + 257, + 481, + 407 + ], + "lines": [ + { + "bbox": [ + 286, + 257, + 481, + 407 + ], + "spans": [ + { + "bbox": [ + 286, + 257, + 481, + 407 + ], + "type": "image", + "image_path": "b52e547912a4ed8aa7f1a04c2e0540c4daaa084ad61bafd7bf4f8239a0d546ef.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 283, + 412, + 482, + 479 + ], + "lines": [ + { + "bbox": [ + 283, + 412, + 482, + 479 + ], + "spans": [ + { + "bbox": [ + 283, + 412, + 482, + 479 + ], + "type": "text", + "content": "Fig.3: Dense-to-sparse (D2S) consistency regularization module. Dense point clouds are obtained by combining multiple point clouds captured at different times. A D2S regularization is formulated by encouraging the consistency between dense features and sparse features." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 131, + 490, + 479, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 490, + 479, + 525 + ], + "spans": [ + { + "bbox": [ + 131, + 490, + 479, + 525 + ], + "type": "inline_equation", + "content": "1, \\ldots, T\\}" + }, + { + "bbox": [ + 131, + 490, + 479, + 525 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 131, + 490, + 479, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{P}^t" + }, + { + "bbox": [ + 131, + 490, + 479, + 525 + ], + "type": "text", + "content": " to obtain a dense point cloud " + }, + { + "bbox": [ + 131, + 490, + 479, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{P}^d" + }, + { + "bbox": [ + 131, + 490, + 479, + 525 + ], + "type": "text", + "content": ". As shown in Fig. 
3, " + }, + { + "bbox": [ + 131, + 490, + 479, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{P}^d" + }, + { + "bbox": [ + 131, + 490, + 479, + 525 + ], + "type": "text", + "content": " fuses temporal information from consecutive point clouds, resulting in a dense and semantically rich representation for feature learning." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "spans": [ + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "text", + "content": "Dense Superpoints. Meanwhile, we generate sets of superpoints " + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "inline_equation", + "content": "\\mathcal{Y}^d" + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "inline_equation", + "content": "\\mathcal{Y}^t" + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "inline_equation", + "content": "\\mathcal{P}^d" + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "inline_equation", + "content": "\\mathcal{P}^t" + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "text", + "content": ", respectively, using superpixels " + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "inline_equation", + "content": "\\mathcal{X}^t" + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "text", + "content": ". Both " + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "inline_equation", + "content": "\\mathcal{P}^t" + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "inline_equation", + "content": "\\mathcal{P}^d" + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "text", + "content": " are fed into the weight-shared 3D network " + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta_p}" + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_{\\omega_p}" + }, + { + "bbox": [ + 131, + 526, + 480, + 599 + ], + "type": "text", + "content": " for feature extraction. 
The output features are grouped via average pooling based on the superpoint indices to obtain superpoint features $\mathbf{Q}^d \in \mathbb{R}^{V \times L}$ and $\mathbf{Q}^t \in \mathbb{R}^{V \times L}$. We expect $\mathbf{Q}^d$ and $\mathbf{Q}^t$ to share similar features, leading to the following D2S loss:

$$\mathcal{L}_{\mathrm{d2s}} = \frac{1}{V} \sum_{i=1}^{V} \left( 1 - \langle \mathbf{q}_i^t, \mathbf{q}_i^d \rangle \right), \tag{3}$$

where $\langle \cdot, \cdot \rangle$ denotes the scalar product that measures the similarity of features.
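Because the heads $\ell_2$-normalize their outputs, the scalar product in Eq. (3) acts as a cosine similarity, and the D2S loss reduces to a few lines of PyTorch; the pooling helper and variable names below are an illustrative sketch, not the released code.

```python
import torch

def pool_superpoints(feats, sp_idx, V):
    """Average-pool per-point features (N, L) into superpoint features (V, L)."""
    L = feats.shape[1]
    out = torch.zeros(V, L, device=feats.device).index_add_(0, sp_idx, feats)
    cnt = torch.zeros(V, device=feats.device).index_add_(
        0, sp_idx, torch.ones_like(sp_idx, dtype=feats.dtype))
    return out / cnt.clamp(min=1).unsqueeze(1)

def d2s_loss(q_t, q_d):
    """Eq. (3): mean of (1 - <q_i^t, q_i^d>) over V paired superpoints.

    q_t, q_d: (V, L) l2-normalized superpoint features of the sparse and
    dense point clouds, paired row-by-row via the shared superpixels.
    """
    return (1.0 - (q_t * q_d).sum(dim=-1)).mean()
```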
Fig. 4: Flow-based contrastive learning (FCL) pipeline. FCL takes multiple LiDAR-camera pairs from consecutive scans as input. Based on temporally aligned semantic superpixels and superpoints, two contrastive learning objectives are formulated: 1) spatial contrastive learning between each LiDAR-camera pair ($\mathcal{L}_{\mathrm{sc}}$), and 2) temporal contrastive learning among consecutive LiDAR point clouds across scenes ($\mathcal{L}_{\mathrm{tc}}$).

3.4 FCL: Flow-Based Contrastive Learning

Motivation. LiDAR point clouds are acquired sequentially, embedding rich dynamic scene information across consecutive timestamps. Prior works [61,66,82] primarily focused on single LiDAR scans, overlooking the consistency of moving objects across scenes. To address these limitations, we propose flow-based contrastive learning (FCL) across sequential LiDAR scenes to encourage spatiotemporal consistency.
Spatial Contrastive Learning. Our framework, depicted in Fig. 4, takes three LiDAR-camera pairs from different timestamps within a suitable time window as input, i.e., $\{(\mathcal{P}^t, \mathcal{I}^t), (\mathcal{P}^{t+\Delta t}, \mathcal{I}^{t+\Delta t}), (\mathcal{P}^{t-\Delta t}, \mathcal{I}^{t-\Delta t})\}$, where timestamp $t$ denotes the current scene and $\Delta t$ is the timespan. Following previous works [61,82], we first distill knowledge from the 2D network into the 3D network for each scene separately. Taking $(\mathcal{P}^t, \mathcal{I}^t)$ as an example, $\mathcal{P}^t$ and $\mathcal{I}^t$ are fed into the 3D and 2D networks to extract per-point and image features. The output features are then grouped via average pooling based on the superpoints $\mathcal{Y}^t$ and superpixels $\mathcal{X}^t$ to obtain superpoint features $\mathbf{Q}^t$ and superpixel features $\mathbf{K}^t$. A spatial contrastive loss is formulated to constrain the 3D representations with pretrained 2D prior knowledge.
This process is formulated as follows:

$$\mathcal{L}_{\mathrm{sc}} = -\frac{1}{V} \sum_{i=1}^{V} \log \left[ \frac{e^{(\langle \mathbf{q}_i, \mathbf{k}_i \rangle / \tau)}}{\sum_{j \neq i} e^{(\langle \mathbf{q}_i, \mathbf{k}_j \rangle / \tau)} + e^{(\langle \mathbf{q}_i, \mathbf{k}_i \rangle / \tau)}} \right], \tag{4}$$

where $\tau > 0$ is a temperature that controls the smoothness of the distillation.

Flow-Based Contrastive Learning. The spatial contrastive learning objective between images and point clouds, as depicted in Eq. (4), fails to ensure that moving objects share similar attributes across different scenes. To maintain consistency across scenes, a temporal consistency loss is introduced among superpoint features across different scenes. For the point clouds $\mathcal{P}^t$ and $\mathcal{P}^{t+\Delta t}$, the corresponding superpoint features $\mathbf{Q}^t$ and $\mathbf{Q}^{t+\Delta t}$ are obtained via their superpoints.
The temporal contrastive loss operates on $\mathbf{Q}^t$ and $\mathbf{Q}^{t+\Delta t}$:

$$\mathcal{L}_{\mathrm{tc}}^{t \leftarrow t+\Delta t} = -\frac{1}{V} \sum_{i=1}^{V} \log \left[ \frac{e^{(\langle \mathbf{q}_i^t, \mathbf{q}_i^{t+\Delta t} \rangle / \tau)}}{\sum_{j \neq i} e^{(\langle \mathbf{q}_i^t, \mathbf{q}_j^{t+\Delta t} \rangle / \tau)} + e^{(\langle \mathbf{q}_i^t, \mathbf{q}_i^{t+\Delta t} \rangle / \tau)}} \right]. \tag{5}$$

The same function is also applied between $\mathbf{Q}^t$ and $\mathbf{Q}^{t-\Delta t}$. This approach enables point features at time $t$ to extract more context-aware information across scenes.
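Eqs. (4) and (5) share the same InfoNCE form and differ only in which feature pairs are contrasted, so a single hedged sketch covers both: calling it with $(\mathbf{Q}^t, \mathbf{K}^t)$ yields $\mathcal{L}_{\mathrm{sc}}$, and with $(\mathbf{Q}^t, \mathbf{Q}^{t+\Delta t})$ yields $\mathcal{L}_{\mathrm{tc}}$. The temperature value shown is a placeholder, not the paper's setting.

```python
import torch
import torch.nn.functional as F

def info_nce(q, k, tau=0.07):
    """Shared form of Eqs. (4)-(5) over V paired, l2-normalized features.

    q, k: (V, L) feature matrices whose i-th rows form positive pairs;
    all other rows of k serve as negatives. tau > 0 is the temperature
    (0.07 is an illustrative default).
    """
    logits = q @ k.t() / tau                   # (V, V) scaled scalar products
    targets = torch.arange(q.size(0), device=q.device)
    return F.cross_entropy(logits, targets)    # equals the -log softmax form above
```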
A total of eleven datasets are used in our experiments, including " + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "inline_equation", + "content": "^1 nuScenes" + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "text", + "content": " [26], " + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "inline_equation", + "content": "^2 SemanticKITTI" + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "text", + "content": " [5], " + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "inline_equation", + "content": "^3 Waymo" + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "text", + "content": " Open [89], " + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "inline_equation", + "content": "^4 ScribbleKITTI" + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "text", + "content": " [94], " + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "inline_equation", + "content": "^5 RELLIS-3D" + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "text", + "content": " [41], " + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "inline_equation", + "content": "^6 SemanticPOSS" + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "text", + "content": " [73], " + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "inline_equation", + "content": "^7 SemanticSTF" + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "text", + "content": " [99], " + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "inline_equation", + "content": "^8 SynLiDAR" + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "text", + "content": " [97], " + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "inline_equation", + "content": "^9 DAPS-3D" + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "text", + "content": " [43], " + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "inline_equation", + "content": "^{10}" + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "text", + "content": " Synth4D [80], and " + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "inline_equation", + "content": "^{11}" + }, + { + "bbox": [ + 130, + 323, + 480, + 395 + ], + "type": "text", + "content": " Robo3D [45]. Due to space limits, kindly refer to the Appendix and [61, 82] for additional details about these datasets." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 396, + 479, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 396, + 479, + 550 + ], + "spans": [ + { + "bbox": [ + 130, + 396, + 479, + 550 + ], + "type": "text", + "content": "Implementation Details. SuperFlow is implemented using the MMDetection3D [20] and OpenPCSeg [59] codebases. Consistent with prior works [61,82], we employ MinkUNet [19] as the 3D backbone and DINOv2 [72] (with ViT backbones [22]) as the 2D backbone, distilling from three variants: small (S), base (B), and large (L). Following Seal [61], OpenSeeD [109] is used to generate semantic superpixels. The framework is pretrained end-to-end on 600 scenes from nuScenes [26], then linear probed and fine-tuned on nuScenes [26] according to the data splits in SLidR [82]. The domain generalization study adheres to the same configurations as Seal [61] for the other ten datasets. 
Both the baselines and SuperFlow are pretrained using eight GPUs for 50 epochs, while linear probing and downstream fine-tuning experiments use four GPUs for 100 epochs, all utilizing the AdamW optimizer [65] and OneCycle scheduler [87]. Due to space limits, kindly refer to the Appendix for additional implementation details." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 552, + 479, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 552, + 479, + 599 + ], + "spans": [ + { + "bbox": [ + 130, + 552, + 479, + 599 + ], + "type": "text", + "content": "Evaluation Protocols. Following conventions, we report the Intersection-over-Union (IoU) on each semantic class and mean IoU (mIoU) over all classes for downstream tasks. For 3D robustness evaluations, we follow Robo3D [45] and report the mean Corruption Error (mCE) and mean Resilience Rate (mRR)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 613, + 259, + 625 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 613, + 259, + 625 + ], + "spans": [ + { + "bbox": [ + 131, + 613, + 259, + 625 + ], + "type": "text", + "content": "4.2 Comparative Study" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 629, + 479, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 479, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 479, + 665 + ], + "type": "text", + "content": "Linear Probing. We start by investigating the pretraining quality via linear probing. For this setup, we initialize the 3D backbone " + }, + { + "bbox": [ + 130, + 629, + 479, + 665 + ], + "type": "inline_equation", + "content": "\\mathcal{F}_{\\theta_p}" + }, + { + "bbox": [ + 130, + 629, + 479, + 665 + ], + "type": "text", + "content": " with pretrained parameters and fine-tune only the added-on segmentation head. As shown in Tab. 1," + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 179, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 179, + 91, + 448, + 102 + ], + "type": "text", + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 174, + 485, + 414 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 482, + 170 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 482, + 170 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 482, + 170 + ], + "type": "text", + "content": "Table 1: Comparisons of state-of-the-art pretraining methods pretrained on nuScenes [26] and fine-tuned on SemanticKITTI [5] and Waymo Open [89] with specified data portions, respectively. All methods use MinkUNet [19] as the 3D semantic segmentation backbone. LP denotes linear probing with a frozen backbone. All scores are given in percentage (\\%). Best scores in each configuration are shaded with colors." 
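The evaluation protocol above reports per-class IoU, mIoU, and (for robustness) mCE/mRR. The sketch below computes mIoU from a confusion matrix; the mCE/mRR formulas in the comment reflect our reading of the Robo3D benchmark [45] and should be treated as an assumption to verify against that reference.

```python
import numpy as np

def mean_iou(confusion):
    """mIoU from a (K, K) confusion matrix (rows: ground truth, cols: prediction)."""
    tp = np.diag(confusion).astype(float)
    fp = confusion.sum(axis=0) - tp
    fn = confusion.sum(axis=1) - tp
    iou = tp / np.maximum(tp + fp + fn, 1.0)
    return iou.mean()

# Robustness scores, under our reading of Robo3D [45] (assumption):
#   RR_c = mIoU_corrupted_c / mIoU_clean                  -> mRR = mean over corruptions c
#   CE_c = (1 - mIoU_corrupted_c) / (1 - mIoU_baseline_c) -> mCE = mean over c
```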
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 174, + 485, + 414 + ], + "lines": [ + { + "bbox": [ + 135, + 174, + 485, + 414 + ], + "spans": [ + { + "bbox": [ + 135, + 174, + 485, + 414 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan=2>Method</td><td rowspan=2>Venue</td><td rowspan=2>Distill</td><td colspan=6>nuScenes</td><td rowspan=2>KITTI 1%</td><td rowspan=2>Waymo 1%</td></tr>
<tr><td>LP</td><td>1%</td><td>5%</td><td>10%</td><td>25%</td><td>Full</td></tr>
<tr><td>Random</td><td>-</td><td>-</td><td>8.10</td><td>30.30</td><td>47.84</td><td>56.15</td><td>65.48</td><td>74.66</td><td>39.50</td><td>39.41</td></tr>
<tr><td>PointContrast [101]</td><td>ECCV'20</td><td>None</td><td>21.90</td><td>32.50</td><td>-</td><td>-</td><td>-</td><td>-</td><td>41.10</td><td>-</td></tr>
<tr><td>DepthContrast [113]</td><td>ICCV'21</td><td>None</td><td>22.10</td><td>31.70</td><td>-</td><td>-</td><td>-</td><td>-</td><td>41.50</td><td>-</td></tr>
<tr><td>ALSO [7]</td><td>CVPR'23</td><td>None</td><td>-</td><td>37.70</td><td>-</td><td>59.40</td><td>-</td><td>72.00</td><td>-</td><td>-</td></tr>
<tr><td>BEVContrast [81]</td><td>3DV'24</td><td>None</td><td>-</td><td>38.30</td><td>-</td><td>59.60</td><td>-</td><td>72.30</td><td>-</td><td>-</td></tr>
<tr><td>PPKT [63]</td><td>arXiv'21</td><td>ResNet</td><td>35.90</td><td>37.80</td><td>53.74</td><td>60.25</td><td>67.14</td><td>74.52</td><td>44.00</td><td>47.60</td></tr>
<tr><td>SLidR [82]</td><td>CVPR'22</td><td>ResNet</td><td>38.80</td><td>38.30</td><td>52.49</td><td>59.84</td><td>66.91</td><td>74.79</td><td>44.60</td><td>47.12</td></tr>
<tr><td>ST-SLidR [66]</td><td>CVPR'23</td><td>ResNet</td><td>40.48</td><td>40.75</td><td>54.69</td><td>60.75</td><td>67.70</td><td>75.14</td><td>44.72</td><td>44.93</td></tr>
<tr><td>TriCC [74]</td><td>CVPR'23</td><td>ResNet</td><td>38.00</td><td>41.20</td><td>54.10</td><td>60.40</td><td>67.60</td><td>75.60</td><td>45.90</td><td>-</td></tr>
<tr><td>Seal [61]</td><td>NeurIPS'23</td><td>ResNet</td><td>44.95</td><td>45.84</td><td>55.64</td><td>62.97</td><td>68.41</td><td>75.60</td><td>46.63</td><td>49.34</td></tr>
<tr><td>HVDistill [110]</td><td>IJCV'24</td><td>ResNet</td><td>39.50</td><td>42.70</td><td>56.60</td><td>62.90</td><td>69.30</td><td>76.60</td><td>49.70</td><td>-</td></tr>
<tr><td>PPKT [63]</td><td>arXiv'21</td><td>ViT-S</td><td>38.60</td><td>40.60</td><td>52.06</td><td>59.99</td><td>65.76</td><td>73.97</td><td>43.25</td><td>47.44</td></tr>
<tr><td>SLidR [82]</td><td>CVPR'22</td><td>ViT-S</td><td>44.70</td><td>41.16</td><td>53.65</td><td>61.47</td><td>66.71</td><td>74.20</td><td>44.67</td><td>47.57</td></tr>
<tr><td>Seal [61]</td><td>NeurIPS'23</td><td>ViT-S</td><td>45.16</td><td>44.27</td><td>55.13</td><td>62.46</td><td>67.64</td><td>75.58</td><td>46.51</td><td>48.67</td></tr>
<tr><td>SuperFlow</td><td>Ours</td><td>ViT-S</td><td>46.44</td><td>47.81</td><td>59.44</td><td>64.47</td><td>69.20</td><td>76.54</td><td>47.97</td><td>49.94</td></tr>
<tr><td>PPKT [63]</td><td>arXiv'21</td><td>ViT-B</td><td>39.95</td><td>40.91</td><td>53.21</td><td>60.87</td><td>66.22</td><td>74.07</td><td>44.09</td><td>47.57</td></tr>
<tr><td>SLidR [82]</td><td>CVPR'22</td><td>ViT-B</td><td>45.35</td><td>41.64</td><td>55.83</td><td>62.68</td><td>67.61</td><td>74.98</td><td>45.50</td><td>48.32</td></tr>
<tr><td>Seal [61]</td><td>NeurIPS'23</td><td>ViT-B</td><td>46.59</td><td>45.98</td><td>57.15</td><td>62.79</td><td>68.18</td><td>75.41</td><td>47.24</td><td>48.91</td></tr>
<tr><td>SuperFlow</td><td>Ours</td><td>ViT-B</td><td>47.66</td><td>48.09</td><td>59.66</td><td>64.52</td><td>69.79</td><td>76.57</td><td>48.40</td><td>50.20</td></tr>
<tr><td>PPKT [63]</td><td>arXiv'21</td><td>ViT-L</td><td>41.57</td><td>42.05</td><td>55.75</td><td>61.26</td><td>66.88</td><td>74.33</td><td>45.87</td><td>47.82</td></tr>
<tr><td>SLidR [82]</td><td>CVPR'22</td><td>ViT-L</td><td>45.70</td><td>42.77</td><td>57.45</td><td>63.20</td><td>68.13</td><td>75.51</td><td>47.01</td><td>48.60</td></tr>
<tr><td>Seal [61]</td><td>NeurIPS'23</td><td>ViT-L</td><td>46.81</td><td>46.27</td><td>58.14</td><td>63.27</td><td>68.67</td><td>75.66</td><td>47.55</td><td>50.02</td></tr>
<tr><td>SuperFlow</td><td>Ours</td><td>ViT-L</td><td>48.01</td><td>49.95</td><td>60.72</td><td>65.09</td><td>70.01</td><td>77.19</td><td>49.07</td><td>50.67</td></tr>
</table>
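Linear probing, as described in Sec. 4.2, freezes the pretrained 3D backbone and trains only the added-on segmentation head with AdamW. The sketch below illustrates that setup; the function name, the plain linear head, and the learning rate are illustrative assumptions rather than the released configuration.

```python
import torch.nn as nn
from torch.optim import AdamW

def build_linear_probe(backbone: nn.Module, feat_dim: int, num_classes: int):
    """Freeze the pretrained 3D backbone; only the added-on head is optimized."""
    for p in backbone.parameters():
        p.requires_grad = False   # pretrained weights stay fixed during probing
    backbone.eval()
    head = nn.Linear(feat_dim, num_classes)   # point-wise segmentation head
    optimizer = AdamW(head.parameters(), lr=1e-3)  # AdamW, per the paper's setup
    return head, optimizer
```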
", + "image_path": "c75525b860ff1e4e730f59e5ef2ab19f40a15480d7638b3b25517c3e56a791e1.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 435, + 481, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 435, + 481, + 507 + ], + "spans": [ + { + "bbox": [ + 132, + 435, + 481, + 507 + ], + "type": "text", + "content": "SuperFlow consistently outperforms state-of-the-art methods under diverse configurations. We attribute this to the use of temporal consistency learning, which captures the structurally rich temporal cues across consecutive scenes and enhances the semantic representation learning of the 3D backbone. We also observe improved performance with larger 2D networks (i.e., from ViT-S to ViT-L), revealing a promising direction of achieving higher quality 3D pretraining." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 509, + 482, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 509, + 482, + 640 + ], + "spans": [ + { + "bbox": [ + 132, + 509, + 482, + 640 + ], + "type": "text", + "content": "Downstream Fine-Tuning. It is known that data representation learning can mitigate the need for large-scale human annotations. Our study systematically compares SuperFlow with prior works on three popular datasets, including nuScenes [26], SemanticKITTI [5], and Waymo Open [89], under limited annotations for few-shot fine-tuning. From Tab. 1, we observe that SuperFlow achieves promising performance gains among three datasets across all fine-tuning tasks. We also use the pretrained 3D backbone as initialization for the fully-supervised learning study on nuScenes [26]. As can be seen from Tab. 1, models pretrained via representation learning consistently outperform the random initialization counterparts, highlighting the efficacy of conducting data pretraining. We also find that distillations from larger 2D networks show consistent improvements." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 641, + 481, + 665 + ], + "type": "text", + "content": "Cross-Domain Generalization. To verify the strong generalizability of SuperFlow, we conduct a comprehensive study using seven diverse LiDAR datasets and" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 216, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 216, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 216, + 101 + ], + "type": "text", + "content": "X. Xu et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 163, + 481, + 237 + ], + "blocks": [ + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "lines": [ + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "spans": [ + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "type": "text", + "content": "Table 2: Domain generalization study of different pretraining methods pretrained on the nuScenes [26] dataset and fine-tuned on other seven heterogeneous 3D semantic segmentation datasets with specified data portions, respectively. All scores are given in percentage (\\%). Best scores in each configuration are shaded with colors." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 163, + 481, + 237 + ], + "lines": [ + { + "bbox": [ + 135, + 163, + 481, + 237 + ], + "spans": [ + { + "bbox": [ + 135, + 163, + 481, + 237 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan=2>Method</td><td colspan=2>SciKITTI</td><td colspan=2>Rellis-3D</td><td colspan=2>SemPOSS</td><td colspan=2>SemSTF</td><td colspan=2>SynLiDAR</td><td colspan=2>DAPS-3D</td><td colspan=2>Synth4D</td></tr>
<tr><td>1%</td><td>10%</td><td>1%</td><td>10%</td><td>Half</td><td>Full</td><td>Half</td><td>Full</td><td>1%</td><td>10%</td><td>Half</td><td>Full</td><td>1%</td><td>10%</td></tr>
<tr><td>Random</td><td>23.81</td><td>47.60</td><td>38.46</td><td>53.60</td><td>46.26</td><td>54.12</td><td>48.03</td><td>48.15</td><td>19.89</td><td>44.74</td><td>74.32</td><td>79.38</td><td>20.22</td><td>66.87</td></tr>
<tr><td>PPKT [63]</td><td>36.50</td><td>51.67</td><td>49.71</td><td>54.33</td><td>50.18</td><td>56.00</td><td>50.92</td><td>54.69</td><td>37.57</td><td>46.48</td><td>78.90</td><td>84.00</td><td>61.10</td><td>62.41</td></tr>
<tr><td>SLidR [82]</td><td>39.60</td><td>50.45</td><td>49.75</td><td>54.57</td><td>51.56</td><td>55.36</td><td>52.01</td><td>54.35</td><td>42.05</td><td>47.84</td><td>81.00</td><td>85.40</td><td>63.10</td><td>62.67</td></tr>
<tr><td>Seal [61]</td><td>40.64</td><td>52.77</td><td>51.09</td><td>55.03</td><td>53.26</td><td>56.89</td><td>53.46</td><td>55.36</td><td>43.58</td><td>49.26</td><td>81.88</td><td>85.90</td><td>64.50</td><td>66.96</td></tr>
<tr><td>SuperFlow</td><td>42.70</td><td>54.00</td><td>52.83</td><td>55.71</td><td>54.41</td><td>57.33</td><td>54.72</td><td>56.57</td><td>44.85</td><td>51.38</td><td>82.43</td><td>86.21</td><td>65.31</td><td>69.43</td></tr>
</table>
", + "image_path": "99212ac06ff53c7622e60563a0cf6f05dabbeb103da805ef130aac6e3606a40d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 135, + 304, + 482, + 464 + ], + "blocks": [ + { + "bbox": [ + 130, + 244, + 482, + 301 + ], + "lines": [ + { + "bbox": [ + 130, + 244, + 482, + 301 + ], + "spans": [ + { + "bbox": [ + 130, + 244, + 482, + 301 + ], + "type": "text", + "content": "Table 3: Out-of-distribution 3D robustness study of state-of-the-art pretraining methods under corruption and sensor failure scenarios in the nuScenes- " + }, + { + "bbox": [ + 130, + 244, + 482, + 301 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 130, + 244, + 482, + 301 + ], + "type": "text", + "content": " dataset from the Robo3D benchmark [45]. Full denotes fine-tuning with full labels. LP denotes linear probing with a frozen backbone. All mCE " + }, + { + "bbox": [ + 130, + 244, + 482, + 301 + ], + "type": "inline_equation", + "content": "(\\downarrow)" + }, + { + "bbox": [ + 130, + 244, + 482, + 301 + ], + "type": "text", + "content": ", mRR " + }, + { + "bbox": [ + 130, + 244, + 482, + 301 + ], + "type": "inline_equation", + "content": "(\\uparrow)" + }, + { + "bbox": [ + 130, + 244, + 482, + 301 + ], + "type": "text", + "content": ", and mIoU " + }, + { + "bbox": [ + 130, + 244, + 482, + 301 + ], + "type": "inline_equation", + "content": "(\\uparrow)" + }, + { + "bbox": [ + 130, + 244, + 482, + 301 + ], + "type": "text", + "content": " scores are given in percentage " + }, + { + "bbox": [ + 130, + 244, + 482, + 301 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 130, + 244, + 482, + 301 + ], + "type": "text", + "content": ". Best scores in each configuration are shaded with colors." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 304, + 482, + 464 + ], + "lines": [ + { + "bbox": [ + 135, + 304, + 482, + 464 + ], + "spans": [ + { + "bbox": [ + 135, + 304, + 482, + 464 + ], + "type": "table", + "html": "
<table>
<tr><td>#</td><td>Initial</td><td>Backbone</td><td>mCE</td><td>mRR</td><td>Fog</td><td>Rain</td><td>Snow</td><td>Blur</td><td>Beam</td><td>Cross</td><td>Echo</td><td>Sensor</td><td>Avg</td></tr>
<tr><td>Full</td><td>Random</td><td>MinkU-18 o</td><td>115.61</td><td>70.85</td><td>53.90</td><td>71.10</td><td>48.22</td><td>51.85</td><td>62.21</td><td>37.73</td><td>57.47</td><td>38.97</td><td>52.68</td></tr>
<tr><td></td><td>SuperFlow</td><td>MinkU-18 o</td><td>109.00</td><td>75.66</td><td>54.95</td><td>72.79</td><td>49.56</td><td>57.68</td><td>62.82</td><td>42.45</td><td>59.61</td><td>41.77</td><td>55.21</td></tr>
<tr><td></td><td>Random</td><td>MinkU-34 o</td><td>112.20</td><td>72.57</td><td>62.96</td><td>70.65</td><td>55.48</td><td>51.71</td><td>62.01</td><td>31.56</td><td>59.64</td><td>39.41</td><td>54.18</td></tr>
<tr><td></td><td>PPKT [63]</td><td>MinkU-34 o</td><td>105.64</td><td>75.87</td><td>64.01</td><td>72.18</td><td>59.08</td><td>57.17</td><td>63.88</td><td>36.34</td><td>60.59</td><td>39.57</td><td>56.60</td></tr>
<tr><td></td><td>SLidR [82]</td><td>MinkU-34 o</td><td>106.08</td><td>75.99</td><td>65.41</td><td>72.31</td><td>56.01</td><td>56.07</td><td>62.87</td><td>41.94</td><td>61.16</td><td>38.90</td><td>56.83</td></tr>
<tr><td></td><td>Seal [61]</td><td>MinkU-34 o</td><td>92.63</td><td>83.08</td><td>72.66</td><td>74.31</td><td>66.22</td><td>66.14</td><td>65.96</td><td>57.44</td><td>59.87</td><td>39.85</td><td>62.81</td></tr>
<tr><td></td><td>SuperFlow</td><td>MinkU-34 o</td><td>91.67</td><td>83.17</td><td>70.32</td><td>75.77</td><td>65.41</td><td>61.05</td><td>68.09</td><td>60.02</td><td>58.36</td><td>50.41</td><td>63.68</td></tr>
<tr><td></td><td>Random</td><td>MinkU-50 o</td><td>113.76</td><td>72.81</td><td>49.95</td><td>71.16</td><td>45.36</td><td>55.55</td><td>62.84</td><td>36.94</td><td>59.12</td><td>43.15</td><td>53.01</td></tr>
<tr><td></td><td>SuperFlow</td><td>MinkU-50 o</td><td>107.35</td><td>74.02</td><td>54.36</td><td>73.08</td><td>50.07</td><td>56.92</td><td>64.05</td><td>38.10</td><td>62.02</td><td>47.02</td><td>55.70</td></tr>
<tr><td></td><td>Random</td><td>MinkU-101 o</td><td>109.10</td><td>74.07</td><td>50.45</td><td>73.02</td><td>48.85</td><td>58.48</td><td>64.18</td><td>43.86</td><td>59.82</td><td>41.47</td><td>55.02</td></tr>
<tr><td></td><td>SuperFlow</td><td>MinkU-101 o</td><td>96.44</td><td>78.57</td><td>56.92</td><td>76.29</td><td>54.70</td><td>59.35</td><td>71.89</td><td>55.13</td><td>60.27</td><td>51.60</td><td>60.77</td></tr>
<tr><td>LP</td><td>PPKT [63]</td><td>MinkU-34 o</td><td>183.44</td><td>78.15</td><td>30.65</td><td>35.42</td><td>28.12</td><td>29.21</td><td>32.82</td><td>19.52</td><td>28.01</td><td>20.71</td><td>28.06</td></tr>
<tr><td></td><td>SLidR [82]</td><td>MinkU-34 o</td><td>179.38</td><td>77.18</td><td>34.88</td><td>38.09</td><td>32.64</td><td>26.44</td><td>33.73</td><td>20.81</td><td>31.54</td><td>21.44</td><td>29.95</td></tr>
<tr><td></td><td>Seal [61]</td><td>MinkU-34 o</td><td>166.18</td><td>75.38</td><td>37.33</td><td>42.77</td><td>29.93</td><td>37.73</td><td>40.32</td><td>20.31</td><td>37.73</td><td>24.94</td><td>33.88</td></tr>
<tr><td></td><td>SuperFlow</td><td>MinkU-34 o</td><td>161.78</td><td>75.52</td><td>37.59</td><td>43.42</td><td>37.60</td><td>39.57</td><td>41.40</td><td>23.64</td><td>38.03</td><td>26.69</td><td>35.99</td></tr>
</table>
", + "image_path": "433d3674cae395d6a29464e75ddd8e068e1d2d9bb9cb9febc736b81435671f39.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 484, + 482, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 484, + 482, + 555 + ], + "spans": [ + { + "bbox": [ + 130, + 484, + 482, + 555 + ], + "type": "text", + "content": "show results in Tab. 2. It is worth noting that these datasets are collected under different acquisition and annotation conditions, including adverse weather, weak annotations, synthetic collection, and dynamic objects. For all fourteen domain generalization fine-tuning tasks, SuperFlow exhibits superior performance over the prior arts [61,63,82]. This study strongly verifies the effectiveness of the proposed flow-based contrastive learning for image-to-LiDAR data representation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 557, + 482, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 557, + 482, + 641 + ], + "spans": [ + { + "bbox": [ + 130, + 557, + 482, + 641 + ], + "type": "text", + "content": "Out-of-Distribution Robustness. The robustness of 3D perception models against unprecedented conditions directly correlates with the model's applicability to real-world applications [29, 48, 54, 102]. We compare our SuperFlow with prior models in the nuScenes- " + }, + { + "bbox": [ + 130, + 557, + 482, + 641 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 130, + 557, + 482, + 641 + ], + "type": "text", + "content": " dataset from the Robo3D benchmark [45] and show results in Tab. 3. We observe that models pretrained using SuperFlow exhibit improved robustness over the random initialization counterparts. Besides, we find that 3D networks with different capacities often pose diverse robustness." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 641, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 482, + 666 + ], + "type": "text", + "content": "Quantitative Assessments. We visualize the prediction results fine-tuned on nuScenes [26], SemanticKITTI [5] and Waymo Open [89], compared with random" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 180, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 180, + 91, + 448, + 102 + ], + "type": "text", + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 134, + 175, + 290, + 236 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 291, + 169 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 291, + 169 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 291, + 169 + ], + "type": "text", + "content": "Table 4: Ablation study of SuperFlow using different # of sweeps. All methods use ViT-B [72] for distillation. All scores are given in percentage (%). 
Baseline results are shaded with colors." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 134, + 175, + 290, + 236 + ], + "lines": [ + { + "bbox": [ + 134, + 175, + 290, + 236 + ], + "spans": [ + { + "bbox": [ + 134, + 175, + 290, + 236 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan=2>Backbone</td><td colspan=2>nuScenes</td><td>KITTI</td><td>Waymo</td></tr>
<tr><td>LP</td><td>1%</td><td>1%</td><td>1%</td></tr>
<tr><td>1× Sweeps ○</td><td>47.41</td><td>47.52</td><td>48.14</td><td>49.31</td></tr>
<tr><td>2× Sweeps •</td><td>47.66</td><td>48.09</td><td>48.40</td><td>50.20</td></tr>
<tr><td>5× Sweeps ○</td><td>47.23</td><td>48.00</td><td>47.94</td><td>49.14</td></tr>
<tr><td>7× Sweeps •</td><td>46.03</td><td>47.98</td><td>46.83</td><td>47.97</td></tr>
</table>
", + "image_path": "80c858bd9940bcdce9361f73b8d3a1dce32044c247d62574983c2a163418f2c0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 301, + 175, + 479, + 236 + ], + "blocks": [ + { + "bbox": [ + 299, + 114, + 479, + 170 + ], + "lines": [ + { + "bbox": [ + 299, + 114, + 479, + 170 + ], + "spans": [ + { + "bbox": [ + 299, + 114, + 479, + 170 + ], + "type": "text", + "content": "Table 5: Ablation study of SuperFlow on network capacity (# params) of 3D backbones. All methods use ViT-B [72] for distillation. All scores are given in percentage " + }, + { + "bbox": [ + 299, + 114, + 479, + 170 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 299, + 114, + 479, + 170 + ], + "type": "text", + "content": ". Baseline results are shaded with colors." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 301, + 175, + 479, + 236 + ], + "lines": [ + { + "bbox": [ + 301, + 175, + 479, + 236 + ], + "spans": [ + { + "bbox": [ + 301, + 175, + 479, + 236 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan=2>Backbone</td><td rowspan=2>Layer</td><td colspan=2>nuScenes</td><td>KITTI</td><td>Waymo</td></tr>
<tr><td>LP</td><td>1%</td><td>1%</td><td>1%</td></tr>
<tr><td>MinkUNet o</td><td>18</td><td>47.20</td><td>47.70</td><td>48.04</td><td>49.24</td></tr>
<tr><td>MinkUNet •</td><td>34</td><td>47.66</td><td>48.09</td><td>48.40</td><td>50.20</td></tr>
<tr><td>MinkUNet o</td><td>50</td><td>54.11</td><td>52.86</td><td>49.22</td><td>51.20</td></tr>
<tr><td>MinkUNet o</td><td>101</td><td>52.56</td><td>51.19</td><td>48.51</td><td>50.01</td></tr>
</table>
", + "image_path": "21db9c2125c93b3217ec0829a9990d5e1da45242cf0abf42ca3ba2322fc09b22.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 135, + 247, + 479, + 449 + ], + "blocks": [ + { + "bbox": [ + 135, + 247, + 479, + 449 + ], + "lines": [ + { + "bbox": [ + 135, + 247, + 479, + 449 + ], + "spans": [ + { + "bbox": [ + 135, + 247, + 479, + 449 + ], + "type": "image", + "image_path": "f4be9d88cc20bc6d06a3a363feb9e31082ded5b164315a623c796c9ade2ef37a.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 454, + 482, + 498 + ], + "lines": [ + { + "bbox": [ + 130, + 454, + 482, + 498 + ], + "spans": [ + { + "bbox": [ + 130, + 454, + 482, + 498 + ], + "type": "text", + "content": "Fig. 5: Qualitative assessments of state-of-the-art pretraining methods pretrained on nuScenes [26] and fine-tuned on nuScenes [26], SemanticKITTI [5], and Waymo Open [89], with " + }, + { + "bbox": [ + 130, + 454, + 482, + 498 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 130, + 454, + 482, + 498 + ], + "type": "text", + "content": " annotations. The error maps show the correct and incorrect predictions in gray and red, respectively. Best viewed in colors and zoomed-in for details." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 517, + 482, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 517, + 482, + 541 + ], + "spans": [ + { + "bbox": [ + 130, + 517, + 482, + 541 + ], + "type": "text", + "content": "initialization, SLiDR [82], and Seal [61]. As shown in Fig. 5, Superflow performs well, especially on backgrounds, i.e., \"road\" and \"sidewalk\" in complex scenarios." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 560, + 238, + 572 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 560, + 238, + 572 + ], + "spans": [ + { + "bbox": [ + 132, + 560, + 238, + 572 + ], + "type": "text", + "content": "4.3 Ablation Study" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 581, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 581, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 581, + 482, + 666 + ], + "type": "text", + "content": "In this section, we are tailored to understand the efficacy of each design in our SuperFlow framework. Unless otherwise specified, we adopt MinkUNet-34 [19] and ViT-B [72] as the 3D and 2D backbones, respectively, throughout this study. 3D Network Capacity. Existing 3D backbones are relatively small in scale compared to their 2D counterparts. We study the scale of the 3D network and the results are shown in Tab. 5. We observe improved performance as the network capacity scales up, except for MinkUNet-101 [19]. We conjecture that this is due" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 216, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 216, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 216, + 101 + ], + "type": "text", + "content": "X. Xu et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 134, + 196, + 312, + 280 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 310, + 192 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 310, + 192 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 310, + 192 + ], + "type": "text", + "content": "Table 6: Ablation study of each component in SuperFlow. All variants use a MinkUNet-34 [19] as the 3D backbone and ViT-B [72] for distillation. VC: View consistency. D2S: Dense-to-sparse regularization. FCL: Flow-based contrastive learning. All scores are given in percentage " + }, + { + "bbox": [ + 132, + 114, + 310, + 192 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 132, + 114, + 310, + 192 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 134, + 196, + 312, + 280 + ], + "lines": [ + { + "bbox": [ + 134, + 196, + 312, + 280 + ], + "spans": [ + { + "bbox": [ + 134, + 196, + 312, + 280 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan=2>#</td><td rowspan=2>VC</td><td rowspan=2>D2S</td><td rowspan=2>FCL</td><td colspan=2>nuScenes</td><td>KITTI</td><td>Waymo</td></tr>
<tr><td>LP</td><td>1%</td><td>1%</td><td>1%</td></tr>
<tr><td>-</td><td colspan=3>Random</td><td>8.10</td><td>30.30</td><td>39.50</td><td>39.41</td></tr>
<tr><td>(a)</td><td colspan=3>XXX</td><td>44.65</td><td>44.47</td><td>46.65</td><td>47.77</td></tr>
<tr><td>(b)</td><td colspan=3>XX</td><td>45.57</td><td>45.21</td><td>46.87</td><td>48.01</td></tr>
<tr><td>(c)</td><td colspan=3>X</td><td>46.17</td><td>46.91</td><td>47.26</td><td>49.01</td></tr>
<tr><td>(d)</td><td colspan=3>X</td><td>47.24</td><td>47.67</td><td>48.21</td><td>49.80</td></tr>
<tr><td>(e)</td><td colspan=3></td><td>47.66</td><td>48.09</td><td>48.40</td><td>50.20</td></tr>
</table>
", + "image_path": "9947106944d7c65ab4ef03b16593ed7bd79bae48d2b6b2af6e7d37116af6bdc5.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 322, + 196, + 480, + 279 + ], + "blocks": [ + { + "bbox": [ + 320, + 114, + 482, + 192 + ], + "lines": [ + { + "bbox": [ + 320, + 114, + 482, + 192 + ], + "spans": [ + { + "bbox": [ + 320, + 114, + 482, + 192 + ], + "type": "text", + "content": "Table 7: Ablation study on spatiotemporal consistency. All variants use a MinkUNet-34 [19] as the 3D backbone and ViT-B [72] for distillation. 0 denotes current timestamp. 0.5s corresponds to a " + }, + { + "bbox": [ + 320, + 114, + 482, + 192 + ], + "type": "inline_equation", + "content": "20\\mathrm{Hz}" + }, + { + "bbox": [ + 320, + 114, + 482, + 192 + ], + "type": "text", + "content": " timespan. All scores are given in percentage " + }, + { + "bbox": [ + 320, + 114, + 482, + 192 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 320, + 114, + 482, + 192 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 322, + 196, + 480, + 279 + ], + "lines": [ + { + "bbox": [ + 322, + 196, + 480, + 279 + ], + "spans": [ + { + "bbox": [ + 322, + 196, + 480, + 279 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan=2>Timespan</td><td colspan=2>nuScenes</td><td>KITTI</td><td>Waymo</td></tr>
<tr><td>LP</td><td>1%</td><td>1%</td><td>1%</td></tr>
<tr><td>Single-Frame</td><td>46.17</td><td>46.91</td><td>47.26</td><td>49.01</td></tr>
<tr><td>0, -0.5s</td><td>46.39</td><td>47.08</td><td>47.99</td><td>49.78</td></tr>
<tr><td>-0.5s, 0, +0.5s</td><td>47.66</td><td>48.09</td><td>48.40</td><td>50.20</td></tr>
<tr><td>-1.0s, 0, +1.0s</td><td>47.60</td><td>47.99</td><td>48.43</td><td>50.18</td></tr>
<tr><td>-1.5s, 0, +1.5s</td><td>46.43</td><td>48.27</td><td>48.34</td><td>49.93</td></tr>
<tr><td>-2.0s, 0, +2.0s</td><td>46.20</td><td>48.49</td><td>48.18</td><td>50.01</td></tr>
</table>
", + "image_path": "1580a61da61b3b5b4e4ceb003be4eabb9a3e84525f5348187f647808093c2701.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 294, + 480, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 294, + 480, + 330 + ], + "spans": [ + { + "bbox": [ + 130, + 294, + 480, + 330 + ], + "type": "text", + "content": "to the fact that models with limited parameters are less effective in capturing patterns during representation learning, and, conversely, models with a large set of trainable parameters tend to be difficult to converge." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 331, + 481, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 331, + 481, + 437 + ], + "spans": [ + { + "bbox": [ + 130, + 331, + 481, + 437 + ], + "type": "text", + "content": "Representation Density. The consistency regularization between sparse and dense point clouds encourages useful representation learning. To analyze the degree of regularization, we investigate various point cloud densities and show the results in Tab. 4. We observe that a suitable point cloud density can improve the model's ability to feature representation. When the density of point clouds is too dense, the motion of objects is obvious in the scene. However, we generate superpoints of the dense points based on superpixels captured at the time of sparse points. The displacement difference of dynamic objects makes the projection misalignment. A trade-off selection would be two or three sweeps." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 437, + 482, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 437, + 482, + 557 + ], + "spans": [ + { + "bbox": [ + 130, + 437, + 482, + 557 + ], + "type": "text", + "content": "Temporal Consistency. The ability to capture semantically coherent temporal cues is crucial in our SuperFlow framework. In Eq. (5), we operate temporal contrastive learning on superpoints features across scenes. As shown in Tab. 7, we observe that temporal contrastive learning achieves better results compared to single-frame methods. We also compare the impact of frames used to capture temporal cues. When we use 3 frames, it acquires more context-aware information than 2 frames and achieves better results. Finally, we study the impact of the timespan between frames. The performance will drop with a longer timespan. We conjecture that scenes with short timespans have more consistency, while long timespans tend to have more uncertain factors." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 558, + 482, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 558, + 482, + 641 + ], + "spans": [ + { + "bbox": [ + 130, + 558, + 482, + 641 + ], + "type": "text", + "content": "Component Analysis. In Tab. 6, we analyze each component in the SuperFlow framework, including view consistency, dense-to-sparse regularization, and flow-based contrastive learning. The baseline is SLiDR [82] with VFMs-based superpixels. View consistency brings slight improvements among the popular datasets with a few annotations. D2S distills dense features into sparse features and it brings about " + }, + { + "bbox": [ + 130, + 558, + 482, + 641 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 130, + 558, + 482, + 641 + ], + "type": "text", + "content": " mIoU gains. 
FCL extracts temporal cues via temporal contrastive learning and it significantly leads to about " + }, + { + "bbox": [ + 130, + 558, + 482, + 641 + ], + "type": "inline_equation", + "content": "2.0\\%" + }, + { + "bbox": [ + 130, + 558, + 482, + 641 + ], + "type": "text", + "content": " mIoU gains." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 481, + 665 + ], + "type": "text", + "content": "Visual Inspections. Similarity maps presented in Fig. 6 denote the segmentation ability of our pretrained model. The query points include \"car\", \"man-" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 179, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 179, + 91, + 448, + 102 + ], + "type": "text", + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 134, + 114, + 242, + 168 + ], + "blocks": [ + { + "bbox": [ + 134, + 114, + 242, + 168 + ], + "lines": [ + { + "bbox": [ + 134, + 114, + 242, + 168 + ], + "spans": [ + { + "bbox": [ + 134, + 114, + 242, + 168 + ], + "type": "image", + "image_path": "12bc1af94125515fe8051a70711c3ff20bdc4863433c7582ef4dccbea1b86ce2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 164, + 170, + 214, + 178 + ], + "lines": [ + { + "bbox": [ + 164, + 170, + 214, + 178 + ], + "spans": [ + { + "bbox": [ + 164, + 170, + 214, + 178 + ], + "type": "text", + "content": "(a) \"car\" (3D)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 252, + 114, + 361, + 168 + ], + "blocks": [ + { + "bbox": [ + 252, + 114, + 361, + 168 + ], + "lines": [ + { + "bbox": [ + 252, + 114, + 361, + 168 + ], + "spans": [ + { + "bbox": [ + 252, + 114, + 361, + 168 + ], + "type": "image", + "image_path": "fb0e28d4d6c13aaa80ccc3373c14b356ca27a471b46636f037b077d7ae892728.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 269, + 170, + 343, + 178 + ], + "lines": [ + { + "bbox": [ + 269, + 170, + 343, + 178 + ], + "spans": [ + { + "bbox": [ + 269, + 170, + 343, + 178 + ], + "type": "text", + "content": "(b) \"manmade\" (3D)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 371, + 114, + 479, + 168 + ], + "blocks": [ + { + "bbox": [ + 371, + 114, + 479, + 168 + ], + "lines": [ + { + "bbox": [ + 371, + 114, + 479, + 168 + ], + "spans": [ + { + "bbox": [ + 371, + 114, + 479, + 168 + ], + "type": "image", + "image_path": "b6f43375c411a39ae209a34edb715b422dbf5ebe2cad8e5d5edf47e96649d7ef.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 135, + 179, + 242, + 233 + ], + "blocks": [ + { + "bbox": [ + 135, + 179, + 242, + 
233 + ], + "lines": [ + { + "bbox": [ + 135, + 179, + 242, + 233 + ], + "spans": [ + { + "bbox": [ + 135, + 179, + 242, + 233 + ], + "type": "image", + "image_path": "6f4855312f348390127186ae934af7372a4890336ae6ec836de8ddf26685b32e.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 163, + 235, + 214, + 243 + ], + "lines": [ + { + "bbox": [ + 163, + 235, + 214, + 243 + ], + "spans": [ + { + "bbox": [ + 163, + 235, + 214, + 243 + ], + "type": "text", + "content": "(d) \"car\" (2D)" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 253, + 179, + 361, + 233 + ], + "blocks": [ + { + "bbox": [ + 253, + 179, + 361, + 233 + ], + "lines": [ + { + "bbox": [ + 253, + 179, + 361, + 233 + ], + "spans": [ + { + "bbox": [ + 253, + 179, + 361, + 233 + ], + "type": "image", + "image_path": "3fdf1159b556ec7132d4c18c87d23b73b04cbab10c84428d9280dcf0a065ed4c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 270, + 235, + 343, + 243 + ], + "lines": [ + { + "bbox": [ + 270, + 235, + 343, + 243 + ], + "spans": [ + { + "bbox": [ + 270, + 235, + 343, + 243 + ], + "type": "text", + "content": "(e) \"manmade\" (2D)" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 371, + 179, + 479, + 233 + ], + "blocks": [ + { + "bbox": [ + 391, + 170, + 459, + 178 + ], + "lines": [ + { + "bbox": [ + 391, + 170, + 459, + 178 + ], + "spans": [ + { + "bbox": [ + 391, + 170, + 459, + 178 + ], + "type": "text", + "content": "(c) \"sidewalk\" (3D)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 371, + 179, + 479, + 233 + ], + "lines": [ + { + "bbox": [ + 371, + 179, + 479, + 233 + ], + "spans": [ + { + "bbox": [ + 371, + 179, + 479, + 233 + ], + "type": "image", + "image_path": "d78f6d33c4c9b14ebe6bb966101a2e602206ca40d62d1279e38e533a6ff4556c.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 135, + 244, + 242, + 299 + ], + "blocks": [ + { + "bbox": [ + 135, + 244, + 242, + 299 + ], + "lines": [ + { + "bbox": [ + 135, + 244, + 242, + 299 + ], + "spans": [ + { + "bbox": [ + 135, + 244, + 242, + 299 + ], + "type": "image", + "image_path": "06f1520cfe6c3acf8a78f2f5cfbf805329812e90e08b8d457ef2ed6e61b6fa2b.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 253, + 244, + 361, + 299 + ], + "blocks": [ + { + "bbox": [ + 253, + 244, + 361, + 299 + ], + "lines": [ + { + "bbox": [ + 253, + 244, + 361, + 299 + ], + "spans": [ + { + "bbox": [ + 253, + 244, + 361, + 299 + ], + "type": "image", + "image_path": "a46f1f5da693229841ddb3ecf854bdafedbeb44368d7508aec878ebda3e629ed.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 371, + 244, + 479, + 299 + ], + "blocks": [ + { + "bbox": [ + 391, + 235, + 459, + 243 + ], + "lines": [ + { + "bbox": [ + 391, + 235, + 459, + 243 + ], + "spans": [ + { + "bbox": [ + 391, + 235, + 459, + 243 + ], + "type": "text", + "content": "(f) \"sidewalk\" (2D)" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 371, + 244, + 479, + 299 + ], + "lines": [ + { + "bbox": [ + 371, + 244, + 479, + 299 + ], + "spans": [ + { 
+ "bbox": [ + 371, + 244, + 479, + 299 + ], + "type": "image", + "image_path": "2f515a9dfd96c708f7d7559bf86b7a5e6f0fe21524266fe90d9d7e78185e290b.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 394, + 300, + 456, + 308 + ], + "lines": [ + { + "bbox": [ + 394, + 300, + 456, + 308 + ], + "spans": [ + { + "bbox": [ + 394, + 300, + 456, + 308 + ], + "type": "text", + "content": "(i) \"terrain\" (3D)" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 135, + 309, + 242, + 363 + ], + "blocks": [ + { + "bbox": [ + 151, + 300, + 227, + 309 + ], + "lines": [ + { + "bbox": [ + 151, + 300, + 227, + 309 + ], + "spans": [ + { + "bbox": [ + 151, + 300, + 227, + 309 + ], + "type": "text", + "content": "(g) \"vegetation\" (3D)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 135, + 309, + 242, + 363 + ], + "lines": [ + { + "bbox": [ + 135, + 309, + 242, + 363 + ], + "spans": [ + { + "bbox": [ + 135, + 309, + 242, + 363 + ], + "type": "image", + "image_path": "a24a5d71cc50cab9fe2a8266fbc4ea2ca49f68c1fbc4d0faa5952b776f76e50c.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 151, + 365, + 226, + 374 + ], + "lines": [ + { + "bbox": [ + 151, + 365, + 226, + 374 + ], + "spans": [ + { + "bbox": [ + 151, + 365, + 226, + 374 + ], + "type": "text", + "content": "(j) \"vegetation\" (2D)" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 130, + 383, + 482, + 428 + ], + "lines": [ + { + "bbox": [ + 130, + 383, + 482, + 428 + ], + "spans": [ + { + "bbox": [ + 130, + 383, + 482, + 428 + ], + "type": "text", + "content": "Fig. 6: Cosine similarity between features of a query point (red dot) and: 1) features of other points projected in the image (the 1st and 3rd rows); and 2) features of an image with the same scene (the 2nd and 4th rows). The color goes from red to blue denoting low and high similarity scores, respectively. Best viewed in color." 
+ } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 253, + 309, + 361, + 363 + ], + "blocks": [ + { + "bbox": [ + 257, + 300, + 356, + 309 + ], + "lines": [ + { + "bbox": [ + 257, + 300, + 356, + 309 + ], + "spans": [ + { + "bbox": [ + 257, + 300, + 356, + 309 + ], + "type": "text", + "content": "(h) \"driveable surface\" (3D)" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 253, + 309, + 361, + 363 + ], + "lines": [ + { + "bbox": [ + 253, + 309, + 361, + 363 + ], + "spans": [ + { + "bbox": [ + 253, + 309, + 361, + 363 + ], + "type": "image", + "image_path": "9d0bb47d746a504c47701d6a3948d9f767d00fce5330b9c770273c0e227a548a.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 257, + 365, + 356, + 374 + ], + "lines": [ + { + "bbox": [ + 257, + 365, + 356, + 374 + ], + "spans": [ + { + "bbox": [ + 257, + 365, + 356, + 374 + ], + "type": "text", + "content": "(k) \"driveable surface\" (2D)" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 371, + 309, + 479, + 363 + ], + "blocks": [ + { + "bbox": [ + 371, + 309, + 479, + 363 + ], + "lines": [ + { + "bbox": [ + 371, + 309, + 479, + 363 + ], + "spans": [ + { + "bbox": [ + 371, + 309, + 479, + 363 + ], + "type": "image", + "image_path": "09c89e52a9980fd253975c6cc38438de5e90b29a9cd3e1aaef25e6cf8c64cf1b.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 394, + 365, + 456, + 374 + ], + "lines": [ + { + "bbox": [ + 394, + 365, + 456, + 374 + ], + "spans": [ + { + "bbox": [ + 394, + 365, + 456, + 374 + ], + "type": "text", + "content": "(1) \"terrain\" (2D)" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "bbox": [ + 130, + 444, + 482, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 444, + 482, + 516 + ], + "spans": [ + { + "bbox": [ + 130, + 444, + 482, + 516 + ], + "type": "text", + "content": "made\", \"sidewalk\", \"vegetation\", \"driveable surface\", and \"terrain\". SuperFlows shows strong semantic discriminative ability without fine-tuning. We conjecture that it comes from three aspects: 1) View consistent superpixels enable the network to learn semantic representation; 2) Dense-to-sparse regularization enhances the network to learn varying density features; 3) Temporal contrastive learning extracts semantic cues across scenes." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 132, + 533, + 220, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 533, + 220, + 547 + ], + "spans": [ + { + "bbox": [ + 132, + 533, + 220, + 547 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 130, + 557, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 557, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 557, + 482, + 666 + ], + "type": "text", + "content": "In this work, we presented SuperFlow to tackle the challenging 3D data representation learning. Motivated by the sequential nature of LiDAR acquisitions, we proposed three novel designs to better encourage spatiotemporal consistency, encompassing view consistency alignment, dense-to-sparse regularization, and flow-based contrastive learning. 
Extensive experiments across 11 diverse LiDAR datasets showed that SuperFlow consistently outperforms prior approaches in linear probing, downstream fine-tuning, and robustness probing. Our study on scaling up 2D and 3D network capacities reveals insightful findings. We hope this work could shed light on future designs of powerful 3D foundation models." + } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 216, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 216, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 216, + 100 + ], + "type": "text", + "content": "X. Xu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 236 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 236 + ], + "type": "text", + "content": "Acknowledgements. This work was supported by the Scientific and Technological Innovation 2030 - \"New Generation Artificial Intelligence\" Major Project (No. 2021ZD0112200), the Joint Funds of the National Natural Science Foundation of China (No. U21B2044), the Key Research and Development Program of Jiangsu Province (No. BE2023016-3), and the Talent Research Start-up Foundation of Nanjing University of Posts and Telecommunications (No. NY223172). This work was also supported by the Ministry of Education, Singapore, under its MOE AcRF Tier 2 (MOET2EP20221-0012), NTU NAP, and under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding Initiative, as well as cash and in-kind contribution from the industry partner(s)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 257, + 197, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 257, + 197, + 270 + ], + "spans": [ + { + "bbox": [ + 133, + 257, + 197, + 270 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 285, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 141, + 285, + 481, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 285, + 481, + 319 + ], + "spans": [ + { + "bbox": [ + 141, + 285, + 481, + 319 + ], + "type": "text", + "content": "1. Achanta, R., Shaji, A., Smith, K., Lucchi, A., Fua, P., Susstrunk, S.: Slic superpixels compared to state-of-the-art superpixel methods. IEEE Transactions on Pattern Analysis and Machine Intelligence 34(11), 2274-2282 (2012)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 141, + 320, + 481, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 320, + 481, + 352 + ], + "spans": [ + { + "bbox": [ + 141, + 320, + 481, + 352 + ], + "type": "text", + "content": "2. Aygun, M., Osep, A., Weber, M., Maximov, M., Stachniss, C., Behley, J., Leal-Taixe, L.: 4d panoptic lidar segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
5527-5537 (2021)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 141, + 354, + 481, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 354, + 481, + 397 + ], + "spans": [ + { + "bbox": [ + 141, + 354, + 481, + 397 + ], + "type": "text", + "content": "3. Badue, C., Guidolini, R., Carneiro, R.V., Azevedo, P., Cardoso, V.B., Forechi, A., Jesus, L., Berriel, R., Paixão, T.M., Mutz, F., de Paula Veronese, L., Oliveira-Santos, T., Souza, A.F.D.: Self-driving cars: A survey. Expert Systems with Applications 165, 113816 (2021)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 141, + 399, + 481, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 399, + 481, + 441 + ], + "spans": [ + { + "bbox": [ + 141, + 399, + 481, + 441 + ], + "type": "text", + "content": "4. Behley, J., Garbade, M., Milioto, A., Quenzel, J., Behnke, S., Gall, J., Stachniss, C.: Towards 3d lidar-based semantic scene understanding of 3d point cloud sequences: The semanticicketti dataset. International Journal of Robotics Research 40, 959-96 (2021)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 141, + 442, + 481, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 442, + 481, + 485 + ], + "spans": [ + { + "bbox": [ + 141, + 442, + 481, + 485 + ], + "type": "text", + "content": "5. Behley, J., Garbade, M., Milioto, A., Quenzel, J., Behnke, S., Stachniss, C., Gall, J.: Semantickitti: A dataset for semantic scene understanding of lidar sequences. In: IEEE/CVF International Conference on Computer Vision. pp. 9297-9307 (2019)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 141, + 487, + 481, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 487, + 481, + 520 + ], + "spans": [ + { + "bbox": [ + 141, + 487, + 481, + 520 + ], + "type": "text", + "content": "6. Bengio, Y., Courville, A., Vincent, P.: Representation learning: A review and new perspectives. IEEE Transactions on Pattern Analysis and Machine Intelligence 35(8), 1798-1828 (2013)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 141, + 521, + 481, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 521, + 481, + 553 + ], + "spans": [ + { + "bbox": [ + 141, + 521, + 481, + 553 + ], + "type": "text", + "content": "7. Boulch, A., Sautier, C., Michele, B., Puy, G., Marlet, R.: Also: Automotive lidar self-supervision by occupancy estimation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13455-13465 (2023)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 141, + 554, + 481, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 554, + 481, + 597 + ], + "spans": [ + { + "bbox": [ + 141, + 554, + 481, + 597 + ], + "type": "text", + "content": "8. Caesar, H., Bankiti, V., Lang, A.H., Vora, S., Liong, V.E., Xu, Q., Krishnan, A., Pan, Y., Baldan, G., Beijbom, O.: nuscenes: A multimodal dataset for autonomous driving. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11621-11631 (2020)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 141, + 599, + 481, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 599, + 481, + 632 + ], + "spans": [ + { + "bbox": [ + 141, + 599, + 481, + 632 + ], + "type": "text", + "content": "9. Cao, A.Q., Dai, A., de Charette, R.: Pasco: Urban 3d panoptic scene completion with uncertainty awareness. 
In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 14554-14564 (2024)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 632, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 632, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 632, + 481, + 665 + ], + "type": "text", + "content": "10. Chen, Q., Vora, S., Beijbom, O.: Polarstream: Streaming lidar object detection and segmentation with polar pillars. In: Advances in Neural Information Processing Systems. vol. 34 (2021)" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 179, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 179, + 91, + 448, + 102 + ], + "type": "text", + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 138, + 116, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 138, + 116, + 480, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 116, + 480, + 149 + ], + "spans": [ + { + "bbox": [ + 138, + 116, + 480, + 149 + ], + "type": "text", + "content": "11. Chen, R., Liu, Y., Kong, L., Chen, N., Zhu, X., Ma, Y., Liu, T., Wang, W.: Towards label-free scene understanding by vision foundation models. In: Advances in Neural Information Processing Systems. vol. 36 (2023)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 150, + 480, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 150, + 480, + 184 + ], + "spans": [ + { + "bbox": [ + 138, + 150, + 480, + 184 + ], + "type": "text", + "content": "12. Chen, R., Liu, Y., Kong, L., Zhu, X., Ma, Y., Li, Y., Hou, Y., Qiao, Y., Wang, W.: Clip2scene: Towards label-efficient 3d scene understanding by clip. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7020-7030 (2023)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 185, + 480, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 185, + 480, + 217 + ], + "spans": [ + { + "bbox": [ + 138, + 185, + 480, + 217 + ], + "type": "text", + "content": "13. Chen, T., Kornblith, S., Norouzi, M., Hinton, G.: A simple framework for contrastive learning of visual representations. In: International Conference on Machine Learning. pp. 1597-1607 (2020)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 218, + 480, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 218, + 480, + 239 + ], + "spans": [ + { + "bbox": [ + 138, + 218, + 480, + 239 + ], + "type": "text", + "content": "14. Chen, X., Fan, H., Girshick, R., He, K.: Improved baselines with momentum contrastive learning. 
arXiv preprint arXiv:2003.04297 (2020)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 240, + 480, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 240, + 480, + 274 + ], + "spans": [ + { + "bbox": [ + 138, + 240, + 480, + 274 + ], + "type": "text", + "content": "15. Chen, X., Xie, S., He, K.: An empirical study of training self-supervised vision transformers. In: IEEE/CVF International Conference on Computer Vision. pp. 9640-9649 (2021)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 274, + 480, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 274, + 480, + 307 + ], + "spans": [ + { + "bbox": [ + 138, + 274, + 480, + 307 + ], + "type": "text", + "content": "16. Chen, Y., Nießner, M., Dai, A.: 4dcontrast: Contrastive learning with dynamic correspondences for 3d scene understanding. In: European Conference on Computer Vision. pp. 543-560 (2022)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 308, + 480, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 308, + 480, + 341 + ], + "spans": [ + { + "bbox": [ + 138, + 308, + 480, + 341 + ], + "type": "text", + "content": "17. Cheng, H., Han, X., Xiao, G.: Cenet: Toward concise and efficient lidar semantic segmentation for autonomous driving. In: IEEE International Conference on Multimedia and Expo. pp. 1-6 (2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 342, + 480, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 342, + 480, + 385 + ], + "spans": [ + { + "bbox": [ + 138, + 342, + 480, + 385 + ], + "type": "text", + "content": "18. Cheng, R., Razani, R., Taghavi, E., Li, E., Liu, B.: Af2-s3net: Attentive feature fusion with adaptive feature selection for sparse semantic segmentation network. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 12547-12556 (2021)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 386, + 480, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 386, + 480, + 418 + ], + "spans": [ + { + "bbox": [ + 138, + 386, + 480, + 418 + ], + "type": "text", + "content": "19. Choy, C., Gwak, J., Savarese, S.: 4d spatio-temporal convnets: Minkowski convolutional neural networks. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3075-3084 (2019)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 419, + 480, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 419, + 480, + 453 + ], + "spans": [ + { + "bbox": [ + 138, + 419, + 480, + 453 + ], + "type": "text", + "content": "20. Contributors, M.: MMDetection3D: OpenMMLab next-generation platform for general 3D object detection. https://github.com/open-mmlab/mmdetection3d (2020)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 453, + 480, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 453, + 480, + 486 + ], + "spans": [ + { + "bbox": [ + 138, + 453, + 480, + 486 + ], + "type": "text", + "content": "21. Cortinhal, T., Tzelepis, G., Aksoy, E.E.: Salsanext: Fast, uncertainty-aware semantic segmentation of lidar point clouds. In: International Symposium on Visual Computing. pp. 
207-222 (2020)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 487, + 480, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 487, + 480, + 531 + ], + "spans": [ + { + "bbox": [ + 138, + 487, + 480, + 531 + ], + "type": "text", + "content": "22. Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., Uszkoreit, J., Houlsby, N.: An image is worth 16x16 words: Transformers for image recognition at scale. In: International Conference on Learning Representations (2020)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 531, + 480, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 531, + 480, + 564 + ], + "spans": [ + { + "bbox": [ + 138, + 531, + 480, + 564 + ], + "type": "text", + "content": "23. Duerr, F., Pfaller, M., Weigel, H., Beyerer, J.: Lidar-based recurrent 3d semantic segmentation with temporal memory alignment. In: International Conference on 3D Vision. pp. 781-790 (2020)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 565, + 480, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 565, + 480, + 597 + ], + "spans": [ + { + "bbox": [ + 138, + 565, + 480, + 597 + ], + "type": "text", + "content": "24. Ester, M., Kriegel, H.P., Sander, J., Xu, X.: A density-based algorithm for discovering clusters in large spatial databases with noise. In: ACM SIGKDD Conference on Knowledge Discovery and Data Mining. pp. 226-231 (1996)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 599, + 480, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 599, + 480, + 632 + ], + "spans": [ + { + "bbox": [ + 138, + 599, + 480, + 632 + ], + "type": "text", + "content": "25. Fischler, M.A., Bolles, R.C.: Random sample consensus: A paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM 24(6), 381-395 (1981)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 632, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 632, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 632, + 480, + 665 + ], + "type": "text", + "content": "26. Fong, W.K., Mohan, R., Hurtado, J.V., Zhou, L., Caesar, H., Beijbom, O., Valada, A.: Panoptic nuscenes: A large-scale benchmark for lidar panoptic segmentation and tracking. IEEE Robotics and Automation Letters 7, 3795-3802 (2022)" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 215, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 215, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 215, + 100 + ], + "type": "text", + "content": "X. Xu et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 138, + 116, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 138, + 116, + 480, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 116, + 480, + 149 + ], + "spans": [ + { + "bbox": [ + 138, + 116, + 480, + 149 + ], + "type": "text", + "content": "27. Gao, B., Pan, Y., Li, C., Geng, S., Zhao, H.: Are we hungry for 3d lidar data for semantic segmentation? a survey of datasets and methods. IEEE Transactions on Intelligent Transportation Systems 23(7), 6063-6081 (2021)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 150, + 480, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 150, + 480, + 182 + ], + "spans": [ + { + "bbox": [ + 138, + 150, + 480, + 182 + ], + "type": "text", + "content": "28. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? the kitti vision benchmark suite. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3354-3361 (2012)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 182, + 480, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 182, + 480, + 215 + ], + "spans": [ + { + "bbox": [ + 138, + 182, + 480, + 215 + ], + "type": "text", + "content": "29. Hao, X., Wei, M., Yang, Y., Zhao, H., Zhang, H., Zhou, Y., Wang, Q., Li, W., Kong, L., Zhang, J.: Is your hd map constructor reliable under sensor corruptions? arXiv preprint arXiv:2406.12214 (2024)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 216, + 480, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 216, + 480, + 248 + ], + "spans": [ + { + "bbox": [ + 138, + 216, + 480, + 248 + ], + "type": "text", + "content": "30. He, K., Chen, X., Xie, S., Li, Y., Dólár, P., Girshick, R.: Masked autoencoders are scalable vision learners. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 16000-16009 (2022)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 249, + 480, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 249, + 480, + 281 + ], + "spans": [ + { + "bbox": [ + 138, + 249, + 480, + 281 + ], + "type": "text", + "content": "31. He, K., Fan, H., Wu, Y., Xie, S., Girshick, R.: Momentum contrast for unsupervised visual representation learning. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9729-9738 (2020)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 281, + 480, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 281, + 480, + 314 + ], + "spans": [ + { + "bbox": [ + 138, + 281, + 480, + 314 + ], + "type": "text", + "content": "32. Hess, G., Jaxing, J., Svensson, E., Hagerman, D., Petersson, C., Svensson, L.: Masked autoencoders for self-supervised learning on automotive point clouds. arXiv preprint arXiv:2207.00531 (2022)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 315, + 480, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 315, + 480, + 346 + ], + "spans": [ + { + "bbox": [ + 138, + 315, + 480, + 346 + ], + "type": "text", + "content": "33. Hong, F., Kong, L., Zhou, H., Zhu, X., Li, H., Liu, Z.: Unified 3d and 4d panoptic segmentation via dynamic shifting networks. 
IEEE Transactions on Pattern Analysis and Machine Intelligence 46(5), 3480-3495 (2024)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 347, + 480, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 347, + 480, + 380 + ], + "spans": [ + { + "bbox": [ + 138, + 347, + 480, + 380 + ], + "type": "text", + "content": "34. Hong, F., Zhou, H., Zhu, X., Li, H., Liu, Z.: Lidar-based panoptic segmentation via dynamic shifting network. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13090-13099 (2021)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 380, + 480, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 380, + 480, + 413 + ], + "spans": [ + { + "bbox": [ + 138, + 380, + 480, + 413 + ], + "type": "text", + "content": "35. Hou, J., Graham, B., Nießner, M., Xie, S.: Exploring data-efficient 3d scene understanding with contrastive scene contexts. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15587-15597 (2021)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 413, + 480, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 413, + 480, + 445 + ], + "spans": [ + { + "bbox": [ + 138, + 413, + 480, + 445 + ], + "type": "text", + "content": "36. Hu, Q., Yang, B., Fang, G., Guo, Y., Leonardis, A., Trigoni, N., Markham, A.: Sqn: Weakly-supervised semantic segmentation of large-scale 3d point clouds. In: European Conference on Computer Vision. pp. 600-619 (2022)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 446, + 480, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 446, + 480, + 489 + ], + "spans": [ + { + "bbox": [ + 138, + 446, + 480, + 489 + ], + "type": "text", + "content": "37. Hu, Q., Yang, B., Khalid, S., Xiao, W., Trigoni, N., Markham, A.: Towards semantic segmentation of urban-scale 3d point clouds: A dataset, benchmarks and challenges. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4977-4987 (2021)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 490, + 480, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 490, + 480, + 522 + ], + "spans": [ + { + "bbox": [ + 138, + 490, + 480, + 522 + ], + "type": "text", + "content": "38. Hu, Z., Bai, X., Zhang, R., Wang, X., Sun, G., Fu, H., Tai, C.L.: Lidal: Interframe uncertainty based active learning for 3d lidar semantic segmentation. In: European Conference on Computer Vision. pp. 248-265 (2022)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 522, + 480, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 522, + 480, + 555 + ], + "spans": [ + { + "bbox": [ + 138, + 522, + 480, + 555 + ], + "type": "text", + "content": "39. Huang, S., Xie, Y., Zhu, S.C., Zhu, Y.: Spatio-temporal self-supervised representation learning for 3d point clouds. In: IEEE/CVF International Conference on Computer Vision. pp. 6535-6545 (2021)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 555, + 480, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 555, + 480, + 588 + ], + "spans": [ + { + "bbox": [ + 138, + 555, + 480, + 588 + ], + "type": "text", + "content": "40. Jaritz, M., Vu, T.H., de Charette, R., Wirbel, E., Pérez, P.: xmuda: Cross-modal unsupervised domain adaptation for 3d semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
12605-12614 (2020)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 589, + 480, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 589, + 480, + 621 + ], + "spans": [ + { + "bbox": [ + 138, + 589, + 480, + 621 + ], + "type": "text", + "content": "41. Jiang, P., Osteen, P., Wigness, M., Saripallig, S.: Rellis-3d dataset: Data, benchmarks and analysis. In: IEEE International Conference on Robotics and Automation. pp. 1110–1116 (2021)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 621, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 621, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 621, + 480, + 665 + ], + "type": "text", + "content": "42. Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., Dollar, P., Girshick, R.: Segment anything. In: IEEE/CVF International Conference on Computer Vision. pp. 4015-4026 (2023)" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 179, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 179, + 91, + 448, + 102 + ], + "type": "text", + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 138, + 116, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 138, + 116, + 480, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 116, + 480, + 149 + ], + "spans": [ + { + "bbox": [ + 138, + 116, + 480, + 149 + ], + "type": "text", + "content": "43. Klokov, A., Pak, D.U., Khorin, A., Yudin, D., Kochiev, L., Luchinskiy, V., Bezuglyj, V.: Daps3d: Domain adaptive projective segmentation of 3d lidar point clouds. IEEE Access 11, 79341-79356 (2023)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 150, + 480, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 150, + 480, + 184 + ], + "spans": [ + { + "bbox": [ + 138, + 150, + 480, + 184 + ], + "type": "text", + "content": "44. Kong, L., Liu, Y., Chen, R., Ma, Y., Zhu, X., Li, Y., Hou, Y., Qiao, Y., Liu, Z.: Rethinking range view representation for lidar segmentation. In: IEEE/CVF International Conference on Computer Vision. pp. 228-240 (2023)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 185, + 480, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 185, + 480, + 228 + ], + "spans": [ + { + "bbox": [ + 138, + 185, + 480, + 228 + ], + "type": "text", + "content": "45. Kong, L., Liu, Y., Li, X., Chen, R., Zhang, W., Ren, J., Pan, L., Chen, K., Liu, Z.: Robo3d: Towards robust and reliable 3d perception against corruptions. In: IEEE/CVF International Conference on Computer Vision. pp. 
19994-20006 (2023)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 228, + 480, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 228, + 480, + 262 + ], + "spans": [ + { + "bbox": [ + 138, + 228, + 480, + 262 + ], + "type": "text", + "content": "46. Kong, L., Quader, N., Liong, V.E.: Conda: Unsupervised domain adaptation for lidar segmentation via regularized domain concatenation. In: IEEE International Conference on Robotics and Automation. pp. 9338-9345 (2023)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 262, + 480, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 262, + 480, + 296 + ], + "spans": [ + { + "bbox": [ + 138, + 262, + 480, + 296 + ], + "type": "text", + "content": "47. Kong, L., Ren, J., Pan, L., Liu, Z.: Lasermix for semi-supervised lidar semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21705-21715 (2023)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 297, + 480, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 297, + 480, + 329 + ], + "spans": [ + { + "bbox": [ + 138, + 297, + 480, + 329 + ], + "type": "text", + "content": "48. Kong, L., Xie, S., Hu, H., Ng, L.X., Cottereau, B.R., Ooi, W.T.: Robodepth: Robust out-of-distribution depth estimation under corruptions. In: Advances in Neural Information Processing Systems. vol. 36 (2023)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 330, + 480, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 330, + 480, + 363 + ], + "spans": [ + { + "bbox": [ + 138, + 330, + 480, + 363 + ], + "type": "text", + "content": "49. Kong, L., Xu, X., Ren, J., Zhang, W., Pan, L., Chen, K., Ooi, W.T., Liu, Z.: Multi-modal data-efficient 3d scene understanding for autonomous driving. arXiv preprint arXiv:2405.05258 (2024)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 363, + 480, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 363, + 480, + 396 + ], + "spans": [ + { + "bbox": [ + 138, + 363, + 480, + 396 + ], + "type": "text", + "content": "50. Krispel, G., Schinagl, D., Fruhwirth-Reisinger, C., Possegger, H., Bischof, H.: Maeli: Masked autoencoder for large-scale lidar point clouds. In: IEEE/CVF Winter Conference on Applications of Computer Vision. pp. 3383-3392 (2024)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 397, + 480, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 397, + 480, + 430 + ], + "spans": [ + { + "bbox": [ + 138, + 397, + 480, + 430 + ], + "type": "text", + "content": "51. Le-Khac, P.H., Healy, G., Smeaton, A.F.: Contrastive representation learning: A framework and review. IEEE Transactions on Pattern Analysis and Machine Intelligence 8, 193907-193934 (2020)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 430, + 480, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 430, + 480, + 464 + ], + "spans": [ + { + "bbox": [ + 138, + 430, + 480, + 464 + ], + "type": "text", + "content": "52. Li, L., Shum, H.P., Breckon, T.P.: Less is more: Reducing task and model complexity for 3d point cloud semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
9361-9371 (2023)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 464, + 480, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 464, + 480, + 497 + ], + "spans": [ + { + "bbox": [ + 138, + 464, + 480, + 497 + ], + "type": "text", + "content": "53. Li, R., de Charette, R., Cao, A.Q.: Coarse3d: Class-prototypes for contrastive learning in weakly-supervised 3d point cloud segmentation. In: British Machine Vision Conference (2022)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 498, + 480, + 530 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 498, + 480, + 530 + ], + "spans": [ + { + "bbox": [ + 138, + 498, + 480, + 530 + ], + "type": "text", + "content": "54. Li, Y., Kong, L., Hu, H., Xu, X., Huang, X.: Optimizing lidar placements for robust driving perception in adverse conditions. arXiv preprint arXiv:2403.17009 (2024)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 531, + 480, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 531, + 480, + 564 + ], + "spans": [ + { + "bbox": [ + 138, + 531, + 480, + 564 + ], + "type": "text", + "content": "55. Lim, H., Oh, M., Myung, H.: Patchwork: Concentric zone-based region-wise ground segmentation with ground likelihood estimation using a 3d lidar sensor. IEEE Robotics and Automation Letters 6(4), 6458-6465 (2021)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 565, + 480, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 565, + 480, + 597 + ], + "spans": [ + { + "bbox": [ + 138, + 565, + 480, + 597 + ], + "type": "text", + "content": "56. Liong, V.E., Nguyen, T.N.T., Widjaja, S., Sharma, D., Chong, Z.J.: Amvnet: Assertion-based multi-view fusion network for lidar semantic segmentation. arXiv preprint arXiv:2012.04934 (2020)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 599, + 480, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 599, + 480, + 632 + ], + "spans": [ + { + "bbox": [ + 138, + 599, + 480, + 632 + ], + "type": "text", + "content": "57. Liu, M., Zhou, Y., Qi, C.R., Gong, B., Su, H., Anguelov, D.: Less: Label-efficient semantic segmentation for lidar point clouds. In: European Conference on Computer Vision. pp. 70-89 (2022)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 632, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 632, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 632, + 480, + 665 + ], + "type": "text", + "content": "58. Liu, M., Yurtsever, E., Zhou, X., Fossaert, J., Cui, Y., Zagar, B.L., Knoll., A.C.: A survey on autonomous driving datasets: Data statistic, annotation, and outlook. arXiv preprint arXiv:2401.01454 (2024)" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 216, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 216, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 216, + 100 + ], + "type": "text", + "content": "X. Xu et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 138, + 116, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 138, + 116, + 480, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 116, + 480, + 149 + ], + "spans": [ + { + "bbox": [ + 138, + 116, + 480, + 149 + ], + "type": "text", + "content": "59. Liu, Y., Bai, Y., Kong, L., Chen, R., Hou, Y., Shi, B., Li, Y.: Pcseg: An open source point cloud segmentation codebase. https://github.com/PJLab-ADG/PCSeg (2023)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 149, + 480, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 149, + 480, + 192 + ], + "spans": [ + { + "bbox": [ + 138, + 149, + 480, + 192 + ], + "type": "text", + "content": "60. Liu, Y., Chen, R., Li, X., Kong, L., Yang, Y., Xia, Z., Bai, Y., Zhu, X., Ma, Y., Li, Y., Qiao, Y., Hou, Y.: Uniseg: A unified multi-modal lidar segmentation network and the openpcseg codebase. In: IEEE/CVF International Conference on Computer Vision. pp. 21662-21673 (2023)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 192, + 480, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 192, + 480, + 224 + ], + "spans": [ + { + "bbox": [ + 138, + 192, + 480, + 224 + ], + "type": "text", + "content": "61. Liu, Y., Kong, L., Cen, J., Chen, R., Zhang, W., Pan, L., Chen, K., Liu, Z.: Segment any point cloud sequences by distilling vision foundation models. In: Advances in Neural Information Processing Systems. vol. 36 (2023)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 224, + 480, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 224, + 480, + 257 + ], + "spans": [ + { + "bbox": [ + 138, + 224, + 480, + 257 + ], + "type": "text", + "content": "62. Liu, Y., Kong, L., Wu, X., Chen, R., Li, X., Pan, L., Liu, Z., Ma, Y.: Multi-space alignments towards universal lidar segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 14648-14661 (2024)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 257, + 480, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 257, + 480, + 289 + ], + "spans": [ + { + "bbox": [ + 138, + 257, + 480, + 289 + ], + "type": "text", + "content": "63. Liu, Y.C., Huang, Y.K., Chiang, H.Y., Su, H.T., Liu, Z.Y., Chen, C.T., Tseng, C.Y., Hsu, W.H.: Learning from 2d: Contrastive pixel-to-point knowledge transfer for 3d pretraining. arXiv preprint arXiv:2104.04687 (2021)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 289, + 480, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 289, + 480, + 320 + ], + "spans": [ + { + "bbox": [ + 138, + 289, + 480, + 320 + ], + "type": "text", + "content": "64. Liu, Y., Chen, J., Zhang, Z., Huang, J., Yi, L.: Leaf: Learning frames for 4d point cloud sequence understanding. In: IEEE/CVF International Conference on Computer Vision. pp. 604-613 (2023)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 320, + 480, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 320, + 480, + 342 + ], + "spans": [ + { + "bbox": [ + 138, + 320, + 480, + 342 + ], + "type": "text", + "content": "65. Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. 
In: International Conference on Learning Representations (2018)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 342, + 480, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 342, + 480, + 385 + ], + "spans": [ + { + "bbox": [ + 138, + 342, + 480, + 385 + ], + "type": "text", + "content": "66. Mahmoud, A., Hu, J.S., Kuai, T., Harakeh, A., Paull, L., Waslander, S.L.: Self-supervised image-to-point distillation via semantically tolerant contrastive loss. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7102-7110 (2023)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 385, + 480, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 385, + 480, + 417 + ], + "spans": [ + { + "bbox": [ + 138, + 385, + 480, + 417 + ], + "type": "text", + "content": "67. Michele, B., Boulch, A., Puy, G., Vu, T.H., Marlet, R., Courty, N.: Saluda: Surface-based automotive lidar unsupervised domain adaptation. arXiv preprint arXiv:2304.03251 (2023)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 417, + 480, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 417, + 480, + 449 + ], + "spans": [ + { + "bbox": [ + 138, + 417, + 480, + 449 + ], + "type": "text", + "content": "68. Milioto, A., Vizzo, I., Behley, J., Stachniss, C.: Rangenet++: Fast and accurate lidar semantic segmentation. In: IEEE/RSJ International Conference on Intelligent Robots and Systems. pp. 4213-4220 (2019)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 449, + 480, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 449, + 480, + 491 + ], + "spans": [ + { + "bbox": [ + 138, + 449, + 480, + 491 + ], + "type": "text", + "content": "69. Muhammad, K., Ullah, A., Lloret, J., Ser, J.D., de Albuquerque, V.H.C.: Deep learning for safe autonomous driving: Current challenges and future directions. IEEE Transactions on Intelligent Transportation Systems 22(7), 4316-4336 (2020)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 492, + 480, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 492, + 480, + 524 + ], + "spans": [ + { + "bbox": [ + 138, + 492, + 480, + 524 + ], + "type": "text", + "content": "70. Nunes, L., Marcuzzi, R., Chen, X., Behley, J., Stachniss, C.: Segcontrast: 3d point cloud feature representation learning through self-supervised segment discrimination. IEEE Robotics and Automation Letters 7(2), 2116-2123 (2022)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 524, + 480, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 524, + 480, + 567 + ], + "spans": [ + { + "bbox": [ + 138, + 524, + 480, + 567 + ], + "type": "text", + "content": "71. Nunes, L., Wiesmann, L., Marcuzzi, R., Chen, X., Behley, J., Stachniss, C.: Temporal consistent 3d lidar representation learning for semantic perception in autonomous driving. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5217-5228 (2023)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 567, + 480, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 567, + 480, + 632 + ], + "spans": [ + { + "bbox": [ + 138, + 567, + 480, + 632 + ], + "type": "text", + "content": "72. 
Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., Assran, M., Ballas, N., Galuba, W., Howes, R., Huang, P.Y., Li, S.W., Misra, I., Rabbat, M., Sharma, V., Synnaeve, G., Xu, H., Jegou, H., Mairal, J., Labatut, P., Joulin, A., Bojanowski, P.: Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193 (2023)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 632, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 632, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 632, + 480, + 665 + ], + "type": "text", + "content": "73. Pan, Y., Gao, B., Mei, J., Geng, S., Li, C., Zhao, H.: Semanticposs: A point cloud dataset with large quantity of dynamic instances. In: IEEE Intelligent Vehicles Symposium. pp. 687-693 (2020)" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 180, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 180, + 91, + 448, + 102 + ], + "type": "text", + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 137, + 117, + 481, + 666 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 137, + 117, + 481, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 117, + 481, + 149 + ], + "spans": [ + { + "bbox": [ + 137, + 117, + 481, + 149 + ], + "type": "text", + "content": "74. Pang, B., Xia, H., Lu, C.: Unsupervised 3d point cloud representation learning by triangle constrained contrast for autonomous driving. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5229-5239 (2023)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 151, + 481, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 151, + 481, + 184 + ], + "spans": [ + { + "bbox": [ + 138, + 151, + 481, + 184 + ], + "type": "text", + "content": "75. Puy, G., Gidaris, S., Boulch, A., Simeoni, O., Sautier, C., Pérez, P., Bursuc, A., Marlet, R.: Revisiting the distillation of image representations into point clouds for autonomous driving. arXiv preprint arXiv:2310.17504 (2023)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 185, + 481, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 185, + 481, + 228 + ], + "spans": [ + { + "bbox": [ + 138, + 185, + 481, + 228 + ], + "type": "text", + "content": "76. Puy, G., Gidaris, S., Boulch, A., Simeoni, O., Sautier, C., Pérez, P., Bursuc, A., Marlet, R.: Three pillars improving vision foundation model distillation for lidar. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
21519-21529 (2024)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 229, + 481, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 229, + 481, + 251 + ], + "spans": [ + { + "bbox": [ + 138, + 229, + 481, + 251 + ], + "type": "text", + "content": "77. Qiu, H., Yu, B., Tao, D.: Gfnet: Geometric flow network for 3d point cloud semantic segmentation. Transactions on Machine Learning Research (2022)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 253, + 481, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 253, + 481, + 296 + ], + "spans": [ + { + "bbox": [ + 138, + 253, + 481, + 296 + ], + "type": "text", + "content": "78. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International conference on machine learning. pp. 8748-8763. PMLR (2021)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 297, + 481, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 297, + 481, + 329 + ], + "spans": [ + { + "bbox": [ + 138, + 297, + 481, + 329 + ], + "type": "text", + "content": "79. Rizzoli, G., Barbato, F., Zanuttigh, P.: Multimodal semantic segmentation in autonomous driving: A review of current approaches and future perspectives. Technologies 10(4) (2022)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 331, + 481, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 331, + 481, + 373 + ], + "spans": [ + { + "bbox": [ + 138, + 331, + 481, + 373 + ], + "type": "text", + "content": "80. Saltori, C., Krivosheev, E., Lathuilière, S., Sebe, N., Galasso, F., Fiameni, G., Ricci, E., Poiesi, F.: Gipso: Geometrically informed propagation for online adaptation in 3d lidar segmentation. In: European Conference on Computer Vision. pp. 567-585 (2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 375, + 481, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 375, + 481, + 407 + ], + "spans": [ + { + "bbox": [ + 138, + 375, + 481, + 407 + ], + "type": "text", + "content": "81. Sautier, C., Puy, G., Boulch, A., Marlet, R., Lepetit, V.: Bevcontrast: Self-supervision in bev space for automotive lidar point clouds. arXiv preprint arXiv:2310.17281 (2023)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 409, + 481, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 409, + 481, + 441 + ], + "spans": [ + { + "bbox": [ + 138, + 409, + 481, + 441 + ], + "type": "text", + "content": "82. Sautier, C., Puy, G., Gidaris, S., Boulch, A., Bursuc, A., Marlet, R.: Image-to-lidar self-supervised distillation for autonomous driving data. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9891-9901 (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 442, + 481, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 442, + 481, + 485 + ], + "spans": [ + { + "bbox": [ + 138, + 442, + 481, + 485 + ], + "type": "text", + "content": "83. Shen, Z., Sheng, X., Fan, H., Wang, L., Guo, Y., Liu, Q., Wen, H., Zhou, X.: Masked spatio-temporal structure prediction for self-supervised learning on point cloud videos. In: IEEE/CVF International Conference on Computer Vision. pp. 
16580-16589 (2023)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 487, + 481, + 530 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 487, + 481, + 530 + ], + "spans": [ + { + "bbox": [ + 138, + 487, + 481, + 530 + ], + "type": "text", + "content": "84. Sheng, X., Shen, Z., Xiao, G., Wang, L., Guo, Y., Fan, H.: Point contrastive prediction with semantic clustering for self-supervised learning on point cloud videos. In: IEEE/CVF International Conference on Computer Vision. pp. 16515-16524 (2023)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 532, + 481, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 532, + 481, + 564 + ], + "spans": [ + { + "bbox": [ + 138, + 532, + 481, + 564 + ], + "type": "text", + "content": "85. Shi, H., Lin, G., Wang, H., Hung, T.Y., Wang, Z.: Spsequencenet: Semantic segmentation network on 4d point clouds. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4574-4583 (2020)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 566, + 481, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 566, + 481, + 609 + ], + "spans": [ + { + "bbox": [ + 138, + 566, + 481, + 609 + ], + "type": "text", + "content": "86. Shi, H., Wei, J., Li, R., Liu, F., Lin, G.: Weakly supervised segmentation on outdoor 4d point clouds with temporal matching and spatial graph propagation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 11840-11849 (2022)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 609, + 481, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 609, + 481, + 632 + ], + "spans": [ + { + "bbox": [ + 138, + 609, + 481, + 632 + ], + "type": "text", + "content": "87. Smith, L.N., Topin, N.: Super-convergence: Very fast training of neural networks using large learning rates. arXiv preprint arXiv:1708.07120 (2017)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 632, + 481, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 632, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 138, + 632, + 481, + 666 + ], + "type": "text", + "content": "88. Sun, J., Xu, X., Kong, L., Liu, Y., Li, L., Zhu, C., Zhang, J., Xiao, Z., Chen, R., Wang, T., Zhang, W., Chen, K., Qing, C.: An empirical study of training state-of-the-art lidar segmentation models. arXiv preprint arXiv:2405.14870 (2024)" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 215, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 215, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 215, + 100 + ], + "type": "text", + "content": "X. Xu et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 134, + 116, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 137, + 116, + 481, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 116, + 481, + 183 + ], + "spans": [ + { + "bbox": [ + 137, + 116, + 481, + 183 + ], + "type": "text", + "content": "89. Sun, P., Kretzschmar, H., Dotiwalla, X., Chouard, A., Patnaik, V., Tsui, P., Guo, J., Zhou, Y., Chai, Y., Caine, B., Vasudevan, V., Han, W., Ngiam, J., Zhao, H., Timofeev, A., Ettinger, S., Krivokon, M., Gao, A., Joshi, A., Zhang, Y., Shlens, J., Chen, Z., Anguelov, D.: Scalability in perception for autonomous driving: Waymo open dataset. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2446-2454 (2020)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 137, + 184, + 481, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 184, + 481, + 217 + ], + "spans": [ + { + "bbox": [ + 137, + 184, + 481, + 217 + ], + "type": "text", + "content": "90. Tang, H., Liu, Z., Zhao, S., Lin, Y., Lin, J., Wang, H., Han, S.: Searching efficient 3d architectures with sparse point-voxel convolution. In: European Conference on Computer Vision. pp. 685-702 (2020)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 137, + 217, + 481, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 217, + 481, + 251 + ], + "spans": [ + { + "bbox": [ + 137, + 217, + 481, + 251 + ], + "type": "text", + "content": "91. Tarvainen, A., Valpola, H.: Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. In: Advances in Neural Information Processing Systems. vol. 30 (2017)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 251, + 481, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 251, + 481, + 283 + ], + "spans": [ + { + "bbox": [ + 138, + 251, + 481, + 283 + ], + "type": "text", + "content": "92. Triess, L.T., Dreissig, M., Rist, C.B., Zollner, J.M.: A survey on deep domain adaptation for lidar perception. In: IEEE Intelligent Vehicles Symposium Workshops. pp. 350-357 (2021)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 285, + 481, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 285, + 481, + 318 + ], + "spans": [ + { + "bbox": [ + 138, + 285, + 481, + 318 + ], + "type": "text", + "content": "93. Uecker, M., Fleck, T., Pflugfelder, M., Zöllner, J.M.: Analyzing deep learning representations of point clouds for real-time in-vehicle lidar perception. arXiv preprint arXiv:2210.14612 (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 319, + 481, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 319, + 481, + 351 + ], + "spans": [ + { + "bbox": [ + 138, + 319, + 481, + 351 + ], + "type": "text", + "content": "94. Unal, O., Dai, D., Gool, L.V.: Scribble-supervised lidar semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2697-2707 (2022)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 352, + 481, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 352, + 481, + 385 + ], + "spans": [ + { + "bbox": [ + 138, + 352, + 481, + 385 + ], + "type": "text", + "content": "95. 
Wei, W., Nejadasl, F.K., Gevers, T., Oswald, M.R.: T-mae: Temporal masked autoencoders for point cloud representation learning. arXiv preprint arXiv:2312.10217 (2023)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 386, + 481, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 386, + 481, + 418 + ], + "spans": [ + { + "bbox": [ + 138, + 386, + 481, + 418 + ], + "type": "text", + "content": "96. Wu, Y., Zhang, T., Ke, W., Süssstrunk, S., Salzmann, M.: Spatiotemporal self-supervised learning for point clouds in the wild. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5251-5260 (2023)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 420, + 481, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 420, + 481, + 453 + ], + "spans": [ + { + "bbox": [ + 138, + 420, + 481, + 453 + ], + "type": "text", + "content": "97. Xiao, A., Huang, J., Guan, D., Zhan, F., Lu, S.: Transfer learning from synthetic to real lidar point cloud for semantic segmentation. In: AAAI Conference on Artificial Intelligence. pp. 2795-2803 (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 453, + 481, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 453, + 481, + 486 + ], + "spans": [ + { + "bbox": [ + 138, + 453, + 481, + 486 + ], + "type": "text", + "content": "98. Xiao, A., Huang, J., Guan, D., Zhang, X., Lu, S., Shao, L.: Unsupervised point cloud representation learning with deep neural networks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence 45(9), 11321-11339 (2023)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 487, + 481, + 530 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 487, + 481, + 530 + ], + "spans": [ + { + "bbox": [ + 138, + 487, + 481, + 530 + ], + "type": "text", + "content": "99. Xiao, A., Huang, J., Xuan, W., Ren, R., Liu, K., Guan, D., Saddik, A.E., Lu, S., Xing, E.: 3d semantic segmentation in the wild: Learning generalized models for adverse-condition point clouds. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9382-9392 (2023)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 134, + 531, + 481, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 531, + 481, + 563 + ], + "spans": [ + { + "bbox": [ + 134, + 531, + 481, + 563 + ], + "type": "text", + "content": "100. Xie, B., Li, S., Guo, Q., Liu, C.H., Cheng, X.: Annotator: A generic active learning baseline for lidar semantic segmentation. In: Advances in Neural Information Processing Systems. vol. 36 (2023)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 134, + 565, + 481, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 565, + 481, + 597 + ], + "spans": [ + { + "bbox": [ + 134, + 565, + 481, + 597 + ], + "type": "text", + "content": "101. Xie, S., Gu, J., Guo, D., Qi, C.R., Guibas, L., Litany, O.: Pointcontrast: Unsupervised pre-training for 3d point cloud understanding. In: European Conference on Computer Vision. pp. 574-591 (2020)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 134, + 599, + 481, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 599, + 481, + 632 + ], + "spans": [ + { + "bbox": [ + 134, + 599, + 481, + 632 + ], + "type": "text", + "content": "102. 
Xie, S., Kong, L., Zhang, W., Ren, J., Pan, L., Chen, K., Liu, Z.: Benchmarking and improving bird's eye view perception robustness in autonomous driving. arXiv preprint arXiv:2405.17426 (2024)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 134, + 632, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 632, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 134, + 632, + 481, + 665 + ], + "type": "text", + "content": "103. Xie, Z., Zhang, Z., Cao, Y., Lin, Y., Bao, J., Yao, Z., Dai, Q., Hu, H.: Simmim: A simple framework for masked image modeling. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9653-9663 (2022)" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 180, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 180, + 91, + 448, + 102 + ], + "type": "text", + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 133, + 116, + 480, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 116, + 480, + 149 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 480, + 149 + ], + "type": "text", + "content": "104. Xu, C., Wu, B., Wang, Z., Zhan, W., Vajda, P., Keutzer, K., Tomizuka, M.: Squeezesegv3: Spatially-adaptive convolution for efficient point-cloud segmentation. In: European Conference on Computer Vision. pp. 1-19 (2020)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 150, + 481, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 150, + 481, + 193 + ], + "spans": [ + { + "bbox": [ + 133, + 150, + 481, + 193 + ], + "type": "text", + "content": "105. Xu, J., Zhang, R., Dou, J., Zhu, Y., Sun, J., Pu, S.: Rpvnet: A deep and efficient range-point-voxel fusion network for lidar point cloud segmentation. In: IEEE/CVF International Conference on Computer Vision. pp. 16024-16033 (2021)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 194, + 481, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 194, + 481, + 226 + ], + "spans": [ + { + "bbox": [ + 132, + 194, + 481, + 226 + ], + "type": "text", + "content": "106. Xu, W., Li, X., Ni, P., Guang, X., Luo, H., Zhao, X.: Multi-view fusion driven 3d point cloud semantic segmentation based on hierarchical transformer. IEEE Sensors Journal 23(24), 31461-31470 (2023)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 227, + 481, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 227, + 481, + 248 + ], + "spans": [ + { + "bbox": [ + 132, + 227, + 481, + 248 + ], + "type": "text", + "content": "107. Xu, X., Kong, L., Shuai, H., Liu, Q.: Frnet: Frustum-range networks for scalable lidar segmentation. 
arXiv preprint arXiv:2312.04484 (2023)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 249, + 481, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 249, + 481, + 282 + ], + "spans": [ + { + "bbox": [ + 132, + 249, + 481, + 282 + ], + "type": "text", + "content": "108. Yin, J., Zhou, D., Zhang, L., Fang, J., Xu, C.Z., Shen, J., Wang, W.: Proposal contrast: Unsupervised pre-training for lidar-based 3d object detection. In: European Conference on Computer Vision. pp. 17-33 (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 282, + 481, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 282, + 481, + 315 + ], + "spans": [ + { + "bbox": [ + 132, + 282, + 481, + 315 + ], + "type": "text", + "content": "109. Zhang, H., Li, F., Zou, X., Liu, S., Li, C., Gao, J., Yang, J., Zhang, L.: A simple framework for open-vocabulary segmentation and detection. In: IEEE/CVF International Conference on Computer Vision. pp. 1020-1031 (2023)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 316, + 481, + 347 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 316, + 481, + 347 + ], + "spans": [ + { + "bbox": [ + 132, + 316, + 481, + 347 + ], + "type": "text", + "content": "110. Zhang, S., Deng, J., Bai, L., Li, H., Ouyang, W., Zhang, Y.: Hvdistill: Transferring knowledge from images to point clouds via unsupervised hybrid-view distillation. International Journal of Computer Vision pp. 1-15 (2024)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 347, + 481, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 347, + 481, + 392 + ], + "spans": [ + { + "bbox": [ + 132, + 347, + 481, + 392 + ], + "type": "text", + "content": "111. Zhang, Y., Zhou, Z., David, P., Yue, X., Xi, Z., Gong, B., Foroosh, H.: Polarnet: An improved grid representation for online lidar point clouds semantic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9601-9610 (2020)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 392, + 481, + 424 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 392, + 481, + 424 + ], + "spans": [ + { + "bbox": [ + 132, + 392, + 481, + 424 + ], + "type": "text", + "content": "112. Zhang, Y., Hou, J., Yuan, Y.: A comprehensive study of the robustness for lidar-based 3d object detectors against adversarial attacks. International Journal of Computer Vision pp. 1-33 (2023)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 424, + 481, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 424, + 481, + 456 + ], + "spans": [ + { + "bbox": [ + 132, + 424, + 481, + 456 + ], + "type": "text", + "content": "113. Zhang, Z., Girdhar, R., Joulin, A., Misra, I.: Self-supervised pretraining of 3d features on any point-cloud. In: IEEE/CVF International Conference on Computer Vision. pp. 10252-10263 (2021)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 456, + 481, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 456, + 481, + 490 + ], + "spans": [ + { + "bbox": [ + 132, + 456, + 481, + 490 + ], + "type": "text", + "content": "114. Zhang, Z., Dong, Y., Liu, Y., Yi, L.: Complete-to-partial 4d distillation for self-supervised point cloud sequence representation learning. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
17661-17670 (2023)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 491, + 481, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 491, + 481, + 522 + ], + "spans": [ + { + "bbox": [ + 132, + 491, + 481, + 522 + ], + "type": "text", + "content": "115. Zhang, Z., Yang, B., Wang, B., Li, B.: Growsp: Unsupervised semantic segmentation of 3d point clouds. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 17619-17629 (2023)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 522, + 481, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 522, + 481, + 555 + ], + "spans": [ + { + "bbox": [ + 132, + 522, + 481, + 555 + ], + "type": "text", + "content": "116. Zhao, Y., Bai, L., Huang, X.: Fidnet: Lidar point cloud semantic segmentation with fully interpolation decoding. In: IEEE/RSJ International Conference on Intelligent Robots and Systems. pp. 4453-4458 (2021)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 555, + 481, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 555, + 481, + 588 + ], + "spans": [ + { + "bbox": [ + 132, + 555, + 481, + 588 + ], + "type": "text", + "content": "117. Zhou, Z., Zhang, Y., Foroosh, H.: Panoptic-polarnet: Proposal-free lidar point cloud panoptic segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 13194-13203 (2021)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 589, + 481, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 589, + 481, + 621 + ], + "spans": [ + { + "bbox": [ + 132, + 589, + 481, + 621 + ], + "type": "text", + "content": "118. Zhu, X., Zhou, H., Wang, T., Hong, F., Ma, Y., Li, W., Li, H., Lin, D.: Cylindrical and asymmetrical 3d convolution networks for lidar segmentation. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9939-9948 (2021)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 621, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 621, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 621, + 481, + 665 + ], + "type": "text", + "content": "119. Zou, X., Dou, Z.Y., Yang, J., Gan, Z., Li, L., Li, C., Dai, X., Behl, H., Wang, J., Yuan, L., Peng, N., Wang, L., Lee, Y.J., Gao, J.: Generalized decoding for pixel, image, and language. In: IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15116-15127 (2023)" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 215, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 215, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 215, + 100 + ], + "type": "text", + "content": "X. Xu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 116, + 482, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 116, + 482, + 150 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 482, + 150 + ], + "type": "text", + "content": "120. 
Zou, X., Yang, J., Zhang, H., Li, F., Li, L., Gao, J., Lee, Y.J.: Segment everything everywhere all at once. In: Advances in Neural Information Processing Systems. vol. 36 (2023)" + } + ] + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 179, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 179, + 91, + 447, + 102 + ], + "type": "text", + "content": "4D Contrastive Superflows are Dense 3D Representation Learners" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/26bdb530-b8d4-43d7-9337-55a1116b4a83_content_list.json b/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/26bdb530-b8d4-43d7-9337-55a1116b4a83_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..643a75c3d710d6a53a91b300878f010e2d9c045d --- /dev/null +++ b/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/26bdb530-b8d4-43d7-9337-55a1116b4a83_content_list.json @@ -0,0 +1,1685 @@ +[ + { + "type": "text", + "text": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation", + "text_level": 1, + "bbox": [ + 287, + 140, + 715, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Feng Cheng $^{1,3*}$ , Mi Luo $^{2*}$ , Huiyu Wang $^{1}$ , Alex Dimakis $^{2}$ , Lorenzo Torresani $^{1}$ , Gedas Bertasius $^{3\\dagger}$ , and Kristen Grauman $^{1,2\\dagger}$", + "bbox": [ + 220, + 210, + 781, + 243 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ FAIR, Meta AI \n $^{2}$ The University of Texas at Austin \n $^{3}$ University of North Carolina at Chapel Hill \n* Equal contribution, † Co-lead the project", + "bbox": [ + 344, + 253, + 653, + 313 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. We present 4DIFF, a 3D-aware diffusion model addressing the exo-to-ego viewpoint translation task — generating first-person (ego-centric) view images from the corresponding third-person (exocentric) images. Building on the diffusion model's ability to generate photorealistic images, we propose a transformer-based diffusion model that incorporates geometry priors through two mechanisms: (i) egocentric point cloud rasterization and (ii) 3D-aware rotary cross-attention. Egocentric point cloud rasterization converts the input exocentric image into an egocentric layout, which is subsequently used by a diffusion image transformer. As a component of the diffusion transformer's denoiser block, the 3D-aware rotary cross-attention further incorporates 3D information and semantic features from the source exocentric view. Our 4DIFF achieves state-of-the-art results on the challenging and diverse Ego-Exo4D multiview dataset and exhibits robust generalization to novel environments not encountered during training. 
Our code, processed data, and pretrained models are publicly available at https://klauscc.github.io/4diff.", + "bbox": [ + 261, + 349, + 743, + 571 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: Egocentric Vision $\\cdot$ View Synthesis", + "bbox": [ + 261, + 584, + 586, + 599 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 626, + 375, + 642 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "From early developmental stages, humans adeptly observe external actions (exo) and seamlessly integrate them into their own repertoire (ego), forming the cornerstone of visual learning. This actor-observer translation mechanism not only shapes individual development but also holds profound implications for technological advancements. Imagine the ability to immerse yourself in the first-person perspective of renowned athletes like Messi or glean intricate piano techniques from online tutorials converted to a first-person viewpoint. Such experiences hinge on seamless translation from third-person to first-person perspectives, highlighting the pivotal role of cross-view translation in facilitating immersive and enriching experiences across diverse domains.", + "bbox": [ + 212, + 657, + 787, + 808 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We leverage the recently released Ego-Exo4D dataset [18] to explore the third-person (exocentric) to first-person (egocentric) viewpoint translation task.", + "bbox": [ + 215, + 809, + 785, + 839 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a38f371d4df030816d70b53bd9ff161ee79bd243c814920455fc7f8bcd1ed286.jpg", + "image_caption": [ + "Fig. 1: Given exocentric images of an egocentric camera wearer engaged in daily activities and the corresponding camera trajectories, we aim to synthesize the corresponding egocentric view that captures the scene from the wearer's first-person perspective." + ], + "image_footnote": [], + "bbox": [ + 228, + 147, + 781, + 248 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As illustrated in Figure 1, our focus is on transforming the exocentrically observed images containing a designated individual into images depicting the same scene from the individual's first-person perspective. Our task is a specific instance of the Novel View Synthesis (NVS) task, which aims to generate new views conditioned on a few given views of a scene. However, the Ego-Exo4D dataset presents a formidable challenge compared to traditional novel view synthesis datasets [8, 9, 17, 22, 57, 72] and multiview datasets [1, 26, 55, 70]. As illustrated in Figure 2, the scenes in the Ego-Exo4D dataset are characterized by numerous objects and dynamic actions performed by the participants. The dataset encompasses diverse scenes, ranging from indoor to outdoor activities such as cooking and basketball. Furthermore, the visual differences between exocentric and egocentric images are pronounced due to sharp viewpoint changes. 
Besides, unlike numerous NVS datasets that use 3D data for arbitrary viewpoint sampling during training, the Ego-Exo4D dataset only provides several views (e.g., four exo and one ego view) for each dynamic scene, which presents a challenge for convergence of prior geometry-based methods that regress the entire scene.", + "bbox": [ + 212, + 319, + 787, + 561 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Due to the challenges mentioned above, existing methods exhibit unsatisfactory performance in the exo-to-ego view translation task. Geometry-free generative models, including GAN-based [6,21] and diffusion-based [30,38,66] methods, face challenges in generating geometrically-correct images due to the high complexity of the scenes. In contrast, geometry-based approaches, exemplified by NeRF-based methods [2,3,34,37,41,69], encounter limitations in achieving photorealistic images. Recent attempts [7, 10] aim to reconcile this dilemma by integrating a strong geometry-based method (e.g., NeRF-based) into diffusion models. However, these models are typically difficult to optimize on the extremely diverse scenes in the Ego-Exo4D benchmark, as we show in Sec. 4.2. Thus, they often fail to provide constructive geometry priors to the subsequent diffusion model.", + "bbox": [ + 212, + 564, + 787, + 731 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Motivated by these observations, we propose 4DIFF, a 3D-Aware Diffusion model for exocentric to egocentric viewpoint translation. We propose two mechanisms to incorporate 3D geometry into the diffusion model: (i) egocentric point cloud rasterization, and (ii) 3D-aware rotary cross-attention layers. Rather than relying on a complex geometry model like NeRF, we render an egocentric prior image using a lightweight rasterization technique [5, 67]. As a result, our approach is both easy to train and adaptable, allowing it to incorporate existing", + "bbox": [ + 212, + 734, + 787, + 840 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "F. Cheng and M. Luo et al.", + "bbox": [ + 271, + 114, + 455, + 128 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/9d391c59f0a1e04d3b2bf0aab09cffa0258fa9eeaed4a8728f942276bcac850a.jpg", + "image_caption": [ + "Fig. 2: Comparison of the Ego-Exo4D viewpoint translation (Ego-Exo4D-VT) benchmark, which we build on the Ego-Exo4D dataset [18], with existing novel view synthesis and cross-view translation benchmarks. Ego-Exo4D-VT presents numerous challenges that require fundamental advances in generative modeling to address." + ], + "image_footnote": [], + "bbox": [ + 217, + 143, + 782, + 407 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "open-source pretrained depth estimators. These estimators have demonstrated effectiveness in processing images from previously unseen environments [4, 68]. Solely rendering the egocentric prior feature map through point cloud rasterization can be problematic, as the source exo view often contains occluded and unobserved regions. To address this, we seamlessly integrate rasterization into the diffusion model, leveraging its substantial capacity for extrapolation and generating high-quality images. We further enhance the expressivity of our diffusion model by introducing 3D-aware rotary cross-attention, which is integrated into each denoising block of the model.
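As a rough sketch of how these two mechanisms compose at inference time: the exocentric image is first rasterized into an egocentric prior, and a DDIM-style loop then denoises a Gaussian sample conditioned on that prior. Everything below is our illustration, not the released code; `rasterize_prior` and `denoiser` are stand-ins, and the linear noise schedule is a toy choice.

```python
# Minimal sketch of the 4DIFF inference flow (our naming and toy schedule,
# not the authors' code): rasterize an egocentric prior from the exo view,
# then run a deterministic DDIM-style denoising loop conditioned on it.
import torch

def rasterize_prior(x_exo, depth, pose):           # stand-in for Sec. 3.3
    return torch.zeros_like(x_exo)                 # egocentric prior H^prior

def denoiser(y_t, prior, t):                       # stand-in for the 3D-aware DiT
    return torch.zeros_like(y_t)                   # predicted noise eps_hat

@torch.no_grad()
def sample_ego(x_exo, depth, pose, steps=20, T=1000):
    prior = rasterize_prior(x_exo, depth, pose)
    alpha_bar = torch.linspace(0.9999, 1e-4, T)    # toy, monotone alpha-bar
    y = torch.randn_like(x_exo)                    # y_T ~ N(0, I)
    ts = torch.linspace(T - 1, 0, steps).long()
    for i, t in enumerate(ts):
        ab_t = alpha_bar[t]
        eps = denoiser(y, prior, t)
        y0_hat = (y - (1 - ab_t).sqrt() * eps) / ab_t.sqrt()
        ab_prev = alpha_bar[ts[i + 1]] if i + 1 < steps else torch.tensor(1.0)
        y = ab_prev.sqrt() * y0_hat + (1 - ab_prev).sqrt() * eps  # DDIM, eta = 0
    return y

ego = sample_ego(torch.randn(1, 3, 256, 256),
                 torch.rand(1, 1, 256, 256), torch.eye(4))
```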
This functionality aims to improve feature similarities and 3D spatial similarities between ego and exo views, allowing the diffusion feature maps to incorporate information from the semantic features encoded in the exocentric image more effectively.", + "bbox": [ + 212, + 479, + 787, + 662 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our method 4DIFF surpasses prior state-of-the-art techniques on the challenging Ego-Exo4D viewpoint translation benchmark, achieving a $3.6\\%$ improvement in LPIPS. Furthermore, leveraging the extensive scale of Ego-Exo4D data, our approach demonstrates robust generalization to novel environments not encountered during training.", + "bbox": [ + 212, + 662, + 787, + 738 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 215, + 761, + 387, + 777 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Exo-to-Ego Viewpoint Translation. Prior methods [28, 44, 62] tackled this problem predominantly via GAN-based models [11]. Specifically, [43] proposed the X-Fork and X-Seq GAN-based architecture using an additional semantic map", + "bbox": [ + 212, + 794, + 787, + 840 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation", + "bbox": [ + 228, + 114, + 732, + 128 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "for enhanced generation. [29] introduced STA-GAN, which focuses on learning spatial and temporal information to generate egocentric videos from exocentric views. [32] focuses on hand-object interactions, proposing to decouple hand layout generation and ego frame generation with a diffusion model. None of these methods develop an explicit geometry-aware generative framework. In contrast, our work introduces two effective mechanisms to incorporate 3D geometric priors into the diffusion model, specifically tailored to address the challenges posed by the Ego-Exo4D-VT benchmark.", + "bbox": [ + 212, + 146, + 787, + 267 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Novel View Synthesis (NVS). Our exo-to-ego viewpoint translation task represents a distinct facet of the NVS task, which aims to generate a target image with an arbitrary target camera pose from given source images and their camera poses. Previous works in NVS can be categorized into geometry-based [15,16,31,46,47,56,64,72], regression-based methods [25,35,54,63-65,69,72] and generative models [24,45,48,50,66,67]. Recently, several geometry-aware generative models [7,10] have explored ways to integrate NeRF with diffusion models. For instance, GeNVS [7] incorporates geometry priors into their diffusion model using a variant of pixelNeRF [69], which renders a target feature map from a 3D feature field. SSDNeRF [10] proposes a unified approach that employs an expressive diffusion model to learn a generalizable prior of neural radiance field (NeRF). However, these geometry-based models, typically implemented as NeRFs, often struggle to provide meaningful geometry priors to the diffusion model, especially in the challenging Ego-Exo4D-VT benchmark. This is because complex geometry methods require strong supervision (e.g., many densely sampled views of the same scene), which Ego-Exo4D does not provide. 
In contrast, our method uses simple point-cloud rasterization that relies solely on accurate depth estimation, avoiding the modeling of occluded and unobserved areas in the exocentric view. This approach shows better generalization and benefits from existing large-scale pretrained depth estimators.", + "bbox": [ + 212, + 270, + 787, + 573 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Diffusion Models [12, 19, 49] have made significant strides in producing photorealistic images and videos. They excel in modeling conditional distributions, including scenarios where conditioning is based on text [49, 52] or another image [20, 53]. Prior work has demonstrated a wide range of successful applications of diffusion models, including human pose generation [27] and depth estimation [14]. In our work, we employ a transformer-based diffusion model [39] to model the distribution of egocentric images conditioned on exocentric images.", + "bbox": [ + 212, + 575, + 787, + 683 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Methodology", + "text_level": 1, + "bbox": [ + 215, + 707, + 380, + 724 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Problem Setup", + "text_level": 1, + "bbox": [ + 215, + 739, + 387, + 756 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given an exocentric image $x \in \mathbb{R}^{h \times w \times 3}$ and the relative camera pose $P \in \mathbb{R}^{4 \times 4}$ from the exo camera to the ego camera of the person of interest, our goal is to synthesize an egocentric image $y \in \mathbb{R}^{h \times w \times 3}$ from the conditional distribution:", + "bbox": [ + 212, + 765, + 785, + 813 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\np(y \mid x, P) \tag{1}\n$$\n", + "text_format": "latex", + "bbox": [ + 467, + 825, + 785, + 840 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "F. Cheng and M. Luo et al.", + "bbox": [ + 271, + 114, + 455, + 128 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/3d01e8a4ec309dc78928e926a18b1c3bf62b89e64d64c070db82d54a7c62a693.jpg", + "image_caption": [ + "Fig. 3: We propose 4DIFF, a 3D-Aware Diffusion model for exocentric to egocentric viewpoint translation. Our framework uses a point cloud rasterization scheme first to compute an egocentric prior, which captures egocentric layout cues. Afterward, the egocentric prior is fed into the diffusion model augmented with the proposed 3D-aware rotary cross-attention for egocentric image generation. The proposed 3D-aware rotary cross-attention guides the attention to consider geometric relationships between the egocentric and exocentric diffusion feature maps." + ], + "image_footnote": [], + "bbox": [ + 222, + 151, + 782, + 300 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We assume the relative camera pose $(P)$ is known, similar to the standard NVS tasks [40, 61, 69].", + "bbox": [ + 212, + 429, + 785, + 460 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Relation to the Official Ego-Exo4D Translation Benchmark. Ego-Exo4D [18] introduced an exo-to-ego translation benchmark, with the primary emphasis on object-level synthesis, i.e., generating an object at the correct location in the ego view based on an exo image and an exo segmentation mask of the object of interest.
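Eq. (1) treats the 4x4 relative pose $P$ as given. For concreteness, here is one common way such a pose is composed from per-camera world-to-camera extrinsics; the helper name and the toy values are ours, and the dataset's own pose conventions may differ.

```python
# Sketch: composing the exo-to-ego relative pose P of Eq. (1) from 4x4
# world-to-camera extrinsics (our illustration; the paper assumes P is given).
import numpy as np

def relative_pose(world_to_exo: np.ndarray, world_to_ego: np.ndarray) -> np.ndarray:
    """Matrix mapping exo-camera coordinates to ego-camera coordinates."""
    return world_to_ego @ np.linalg.inv(world_to_exo)

world_to_exo = np.eye(4)
world_to_ego = np.eye(4)
world_to_ego[:3, 3] = [0.0, 0.0, 2.0]          # toy ego extrinsic: 2 m along z
P = relative_pose(world_to_exo, world_to_ego)  # 4x4 pose used throughout Sec. 3
```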
This approach is particularly valuable for precise object placement and detailed object-level interactions. In contrast, we focus on full-image synthesis — allowing for the generation of entire scenes, and enhancing the richness and diversity of generated viewpoints. Both are complementary; while Ego-Exo4D excels in object-specific scenarios, our method expands the scope to full-scene synthesis and can be seen as a new specialized NVS task.", + "bbox": [ + 212, + 460, + 787, + 612 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 Our Framework", + "text_level": 1, + "bbox": [ + 215, + 642, + 393, + 655 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Due to the inherent complexity and dynamism present in diverse scenes, we use an expressive transformer-based diffusion model to model the conditional distribution in Equation 1. However, due to the inability to explicitly model 3D cues, the standard diffusion model may struggle to generate geometry-consistent images. Thus, we propose two techniques to incorporate geometry into our diffusion model: (i) egocentric point cloud rasterization and (ii) 3D-aware rotary cross-attention. As shown in Figure 3, the point cloud rasterization first renders an egocentric prior from the input exocentric view, which is then fed into the diffusion model. Afterward, the conditioned diffusion model is augmented with the proposed 3D-aware rotary cross-attention to generate the target egocentric image. We now describe each module in more detail.", + "bbox": [ + 212, + 672, + 787, + 840 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation", + "bbox": [ + 228, + 114, + 730, + 128 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 Egocentric Point Cloud Rasterization", + "text_level": 1, + "bbox": [ + 215, + 146, + 571, + 162 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As a first step in our framework, we render an egocentric prior via point cloud rasterization from an exocentric view. Specifically, we first use a depth estimator to convert the exocentric 2D image $x$ and a feature map $F^{\mathrm{exo}}$ into a feature point cloud. Then, a differentiable renderer [67] projects this point cloud into an egocentric prior $H^{\mathrm{prior}}$:", + "bbox": [ + 212, + 172, + 787, + 247 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nH^{\text{prior}} = \left[ x^{\text{prior}}, F^{\text{prior}} \right] = \operatorname{render}\left(\left[ x, F^{\text{exo}} \right], D, P\right) \tag{2}\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 260, + 785, + 277 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Here, $F^{\mathrm{exo}}$ denotes the semantic features of the exocentric image encoded by a feature encoder $f$, while $x^{\mathrm{prior}}$ and $F^{\mathrm{prior}}$ are the egocentric prior image and feature map, rendered from the exocentric image $x$ and the feature map $F^{\mathrm{exo}}$ respectively. $D$ denotes the depth map predicted by a depth estimator, and $P$ represents the relative camera pose.", + "bbox": [ + 214, + 287, + 787, + 363 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Depth Estimator. We construct the depth estimator based on the pretrained MiDaS [4].
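To make Eq. (2) concrete, here is a minimal, non-differentiable z-buffer version of the warp: unproject exo pixels with their estimated depths, transform them by $P$, and splat them into the ego view. The paper itself uses the differentiable point renderer of [67]; the pinhole intrinsics `K` shared between ego and exo views below are our simplifying assumption.

```python
# Hard z-buffer splat illustrating the geometry behind Eq. (2); 4DIFF uses
# the differentiable renderer of [67] instead. Assumes a shared pinhole
# intrinsic matrix K for both views (our simplification).
import torch

def warp_exo_to_ego(x_exo, depth, P, K):
    _, h, w = x_exo.shape                                   # x_exo: (3, h, w)
    v, u = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")
    pix = torch.stack([u, v, torch.ones_like(u)]).float().reshape(3, -1)
    pts = torch.linalg.inv(K) @ pix * depth.reshape(1, -1)  # exo-frame 3D points
    pts = P[:3, :3] @ pts + P[:3, 3:]                       # exo frame -> ego frame
    proj = K @ pts
    z = proj[2].clamp(min=1e-6)
    u2, v2 = (proj[0] / z).round().long(), (proj[1] / z).round().long()
    ok = (proj[2] > 0) & (u2 >= 0) & (u2 < w) & (v2 >= 0) & (v2 < h)
    prior = torch.zeros_like(x_exo)                         # holes stay empty
    zbuf = torch.full((h, w), float("inf"))
    src = x_exo.reshape(3, -1)
    for i in ok.nonzero().flatten().tolist():               # nearest point wins
        if proj[2, i] < zbuf[v2[i], u2[i]]:
            zbuf[v2[i], u2[i]] = proj[2, i]
            prior[:, v2[i], u2[i]] = src[:, i]
    return prior                                            # x^prior of Eq. (2)

K = torch.tensor([[50.0, 0.0, 32.0], [0.0, 50.0, 32.0], [0.0, 0.0, 1.0]])
prior = warp_exo_to_ego(torch.rand(3, 64, 64), torch.ones(64, 64), torch.eye(4), K)
```

With $P = I$ the splat reproduces the input, and missing pixels appear exactly where the rendered prior leaves holes for the diffusion model to inpaint.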
Since MiDaS predicts relative disparity (the inverse of depth), we introduce two learnable scalars $s$ and $t$ for dataset-specific calibration. The depth map $D$ is predicted using the formula:", + "bbox": [ + 214, + 369, + 789, + 431 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nD = 1 / \left(s \cdot \operatorname{MiDaS}\left(x^{\mathrm{exo}}\right) + t\right). \tag{3}\n$$\n", + "text_format": "latex", + "bbox": [ + 400, + 444, + 785, + 460 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Rasterization. We employ the differentiable renderer [67] for our rasterization. This renderer splats 3D points onto the image plane and calculates pixel values by blending point features. In contrast to more intricate rendering techniques like NeRF [34,69] or Gaussian Splatting [23,60], our renderer is simpler to converge. It relies solely on depth estimation from 2D images, leveraging large-scale pretrained depth estimators. This design choice ensures robust generalization across diverse scenarios.", + "bbox": [ + 214, + 478, + 787, + 584 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4 3D-Aware Diffusion Image Transformer", + "text_level": 1, + "bbox": [ + 215, + 606, + 586, + 622 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our diffusion model uses a denoiser network to predict the added noise $\epsilon_{t}$ from the noisy target egocentric image $y_{t} = \sqrt{\bar{\alpha}_{t}} y + \sqrt{1 - \bar{\alpha}_{t}}\epsilon_{t}$, conditioned on the previously obtained egocentric prior $H^{\mathrm{prior}}$ and the exocentric semantic features $F^{\mathrm{exo}}$:", + "bbox": [ + 214, + 632, + 787, + 691 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\hat{\epsilon}_{t} = \epsilon_{\theta}([y_{t}, H^{\text{prior}}], F^{\text{exo}}). \tag{4}\n$$\n", + "text_format": "latex", + "bbox": [ + 411, + 691, + 785, + 709 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "During inference, the target egocentric image $y_0$ is generated from standard Gaussian noise $y_T$ by applying the denoiser network $\epsilon_{\theta}$ iteratively with a sampling strategy (e.g., DDIM [58]), i.e., $y_T \to y_{T - \delta} \to \ldots \to y_0$.", + "bbox": [ + 214, + 715, + 787, + 762 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Denoiser Network $\epsilon_{\theta}$. Our proposed 3D-aware Diffusion image Transformer serves as the denoiser network. As shown in Figure 3 and Equation 4, our Transformer network takes as input the concatenation of the egocentric prior $H^{\mathrm{prior}}$ and the noisy target egocentric image $y_{t}$ encoded via an off-the-shelf autoencoder from [49]. Following [39], the architecture of DiT is the same as ViT, consisting", + "bbox": [ + 214, + 763, + 787, + 842 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "F. Cheng and M. Luo et al.", + "bbox": [ + 271, + 114, + 455, + 128 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/8c2ea3fe833a7b406dce9ca37f22a3777b6854d9578326d3d82a2aa79ed62729.jpg", + "image_caption": [ + "Fig. 4: An illustration of the calculation of the rotation matrix $R_{m,n}$ in our 3D-aware rotary cross attention."
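Eq. (3) amounts to wrapping a frozen disparity predictor in two learnable scalars. A minimal sketch follows, assuming any callable MiDaS-like module; the module structure and the clamp are our additions for illustration and numerical safety, not part of the paper's formula.

```python
# Sketch of the calibration in Eq. (3): MiDaS outputs relative disparity,
# and learnable scalars s and t map it to depth. `midas` is any frozen
# disparity predictor; this module is our guess at a minimal implementation.
import torch
import torch.nn as nn

class CalibratedDepth(nn.Module):
    def __init__(self, midas: nn.Module):
        super().__init__()
        self.midas = midas
        self.s = nn.Parameter(torch.ones(1))    # learnable scale
        self.t = nn.Parameter(torch.ones(1))    # learnable shift

    def forward(self, x_exo: torch.Tensor) -> torch.Tensor:
        disparity = self.midas(x_exo)           # relative inverse depth
        # Eq. (3); the clamp (ours) keeps depth positive and finite
        return 1.0 / (self.s * disparity + self.t).clamp(min=1e-6)

depth_net = CalibratedDepth(midas=nn.Identity())   # stand-in predictor
D = depth_net(torch.rand(1, 1, 256, 256))          # depth map for Eq. (2)
```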
+ ], + "image_footnote": [], + "bbox": [ + 274, + 143, + 733, + 292 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "of $N$ transformer layers, each with a self-attention layer and a feedforward network. To further enhance the expressivity of our model and incorporate more geometric cues, we propose 3D-aware rotary cross-attention layers, which we describe next.", + "bbox": [ + 212, + 345, + 787, + 407 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3D-aware Rotary Cross-Attention. When conditioning the diffusion model on the exocentric feature map, we should consider similarities in both the semantic feature space and 3D space. Exocentric features similar in appearance (i.e., semantic feature space) and 3D location with respect to the query features should have higher attention values in the diffusion model. Motivated by RoPE [59], we achieve this by incorporating rotations during attention weight calculations. The degree of rotation between a query and a key is determined by the angle between their 3D coordinates, with the ego camera as the center. Consequently, the cosine similarity between the query and key features can incorporate their 3D spatial angle, effectively capturing the 3D relationships between corresponding points in the egocentric and exocentric views.", + "bbox": [ + 212, + 412, + 787, + 578 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Specifically, given a feature map $Z \in \mathbb{R}^{l \times c}$ in the diffusion model and the exocentric semantic feature map $F^{\mathrm{exo}} \in \mathbb{R}^{l \times c}$, the 3D-aware rotary cross-attention calculates the output $O \in \mathbb{R}^{l \times c}$ as:", + "bbox": [ + 212, + 579, + 789, + 625 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\na_{m,n} = \frac{\exp\left(\frac{q_{m}^{T} R_{m,n} k_{n}}{\sqrt{c}}\right)}{\sum_{j=1}^{l} \exp\left(\frac{q_{m}^{T} R_{m,j} k_{j}}{\sqrt{c}}\right)} \tag{5}\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 633, + 785, + 683 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nO_{m} = \sum_{n=1}^{l} a_{m,n} v_{n} \tag{6}\n$$\n", + "text_format": "latex", + "bbox": [ + 403, + 685, + 785, + 724 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Here, $q_{m} = Z_{m}W_{q}$ is the $m$-th query token, $k_{n} = F_{n}^{\mathrm{exo}}W_{k}$ is the $n$-th key token, and $v_{n} = F_{n}^{\mathrm{exo}}W_{v}$ is the $n$-th value token. $W_{q}, W_{k}, W_{v}$ are learnable projection matrices. $R_{m,n}$ is the rotation matrix that rotates the key token to align with the value token in 3D space, where the egocentric camera is used as the center. Since the query token is in the egocentric view, we map its coordinates to the exocentric view using the relative camera pose. The rotation matrix is computed in the exocentric view using the algorithm from [33]. When $R_{m,n}$ is an identity
We insert such 3D-aware cross-attention layers after each self-attention layer in DiT.", + "bbox": [ + 215, + 146, + 784, + 191 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.5 Training and Inference", + "text_level": 1, + "bbox": [ + 215, + 213, + 450, + 228 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Loss Function. Our model is trained with the diffusion denoising loss, which is the L2 loss between the predicted noise and the ground-truth added noise.", + "bbox": [ + 215, + 237, + 784, + 268 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Implementation Details. We employ DINOv2 [36] pretrained ViT-L/14 as our feature encoder $f$ and MiDaS [4] with DPT-L as our depth estimator. Our denoiser network is built on DiT-B/2 [38] augmented with the proposed 3D-aware rotary cross-attention layers. The image sizes are $256 \times 256$ for both egocentric and exocentric images. We freeze the feature encoder, as it is already well pretrained. The model is trained with the Adam optimizer, using a learning rate of $10^{-5}$ for the depth estimator and $10^{-4}$ for the other components. We employ a batch size of 4 per GPU and train the model across 32 V100 GPUs for 100 epochs, requiring approximately 48 hours. We set the diffusion steps $T$ to 1000 during training and sample 20 steps during inference using DDIM [58].", + "bbox": [ + 215, + 271, + 785, + 422 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 215, + 445, + 374, + 462 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.1 Experimental Setup", + "text_level": 1, + "bbox": [ + 215, + 476, + 426, + 491 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ego-Exo4D-VT Benchmark. Our benchmark is constructed based on the Ego-Exo4D dataset [18]. Adhering to the official splits, we use 2680/708/900 takes for training, validation, and testing, respectively. Each take is approximately 30 seconds to 5 minutes long and depicts a person performing a skilled activity, such as cooking a dish, with footage from 4 exocentric cameras and 1 egocentric camera. This benchmark encompasses five diverse, skilled human activities: basketball, bike repair, cooking, health, and music.", + "bbox": [ + 215, + 500, + 784, + 604 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The benchmark features 131 unique scenes, each characterized by complex backgrounds and numerous objects, demonstrating significant scale variation from 1 meter (e.g., a small kitchen) to 10 meters (e.g., a basketball court). These scenes are dynamic and depict subjects performing actions that involve interactions with objects. Additionally, the considerable viewpoint shift from exocentric to egocentric view causes objects to appear relatively small in the exocentric view compared to the egocentric view.", + "bbox": [ + 215, + 606, + 784, + 712 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Baselines. Since this is a new benchmark, we re-purpose a few state-of-the-art methods for image generation: (a) pix2pix [21], a GAN-based method, (b) GNT [61], a NeRF-based method, and (c) the diffusion models DiT [39] and 3DiM [66]. To tailor DiT for our task, we eliminate its original class label conditioning and condition it on the exocentric image through concatenation. Additionally, we implement 3DiM based on DiT since the code for 3DiM is unavailable.", + "bbox": [ + 215, + 715, + 784, + 805 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Metrics.
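Returning to Eqs. (5)-(6): they can be read as RoPE-style attention in which the rotation angle comes from the pairwise angle between 3D rays instead of 1D token positions. The sketch below rotates each 2D channel pair of the keys by that angle; this is one plausible simplification of $R_{m,n}$ (the paper builds full rotation matrices via [33]), not the authors' exact construction.

```python
# Simplified 3D-aware rotary cross-attention (Eqs. (5)-(6)). We apply a
# RoPE-style 2D rotation per channel pair, with the angle taken from the
# pairwise angle between unit rays; this reading of R_{m,n} is ours.
import torch
import torch.nn.functional as F

def pairwise_angle(rays_q, rays_k):                  # (m, 3), (n, 3) unit rays
    cos = (rays_q[:, None, :] * rays_k[None, :, :]).sum(-1).clamp(-1.0, 1.0)
    return torch.arccos(cos)                         # (m, n) angles

def rotary_cross_attention(Z, F_exo, rays_q, rays_k, Wq, Wk, Wv):
    q, k, v = Z @ Wq, F_exo @ Wk, F_exo @ Wv         # (l, c) each, c even
    l, c = q.shape
    theta = pairwise_angle(rays_q, rays_k)
    cos, sin = theta.cos(), theta.sin()
    q1, q2 = q[:, 0::2], q[:, 1::2]                  # 2D channel pairs
    k1, k2 = k[:, 0::2], k[:, 1::2]
    # q^T R(theta) k summed over channel pairs:
    #   cos(theta) * <q, k> + sin(theta) * (q2.k1 - q1.k2)
    dots = cos * (q @ k.T) + sin * (q2 @ k1.T - q1 @ k2.T)
    attn = F.softmax(dots / c ** 0.5, dim=-1)        # Eq. (5)
    return attn @ v                                  # Eq. (6)

l, c = 4, 8
out = rotary_cross_attention(
    torch.randn(l, c), torch.randn(l, c),
    F.normalize(torch.randn(l, 3), dim=-1), F.normalize(torch.randn(l, 3), dim=-1),
    torch.randn(c, c), torch.randn(c, c), torch.randn(c, c))
```

When all angles are zero the sin term vanishes, so the layer defaults to standard cross-attention, matching the identity-rotation remark in the text above.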
Following NVS methods [10, 69], we employ perceptual metrics, including LPIPS [71], DISTS [13] and CLIP score [42], to measure the structural", + "bbox": [ + 215, + 809, + 784, + 839 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "F. Cheng and M. Luo et al.", + "bbox": [ + 271, + 114, + 454, + 127 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/f63fa86229afb740274978461bc78c9a0ef806b298f2df9b2e37e3815589c5c2.jpg", + "table_caption": [ + "Table 1: Quantitative comparison on the test set of Ego-Exo4D-VT benchmark. ${}^{ \\dagger }$ we reimplement 3DiM based on DiT as their code is not publicly available. Our 4DIFF achieves the best results on all the metrics, outperforming the second best method 3DiM by ${3.6}\\%$ in LPIPS and ${1.9}\\%$ in DISTS." + ], + "table_footnote": [], + "table_body": "
Method | LPIPS ↓ | DISTS ↓ | CLIP ↑ | PSNR ↑ | SSIM ↑
pix2pix [21] | 0.372 | 0.262 | 68.85 | 15.80 | 0.515
GNT [61] | 0.482 | 0.392 | 63.75 | 14.61 | 0.538
DiT [39] | 0.412 | 0.231 | 77.98 | 15.47 | 0.564
3DiM† [66] | 0.385 | 0.226 | 78.22 | 15.91 | 0.575
4DIFF (ours) | 0.349 | 0.207 | 79.72 | 16.65 | 0.592
", + "bbox": [ + 264, + 212, + 738, + 321 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/6303e8e19a372c83144d6091b20e91e3bb6f0419e54c01cbc17bd8c5faf59a23.jpg", + "table_caption": [ + "Table 2: Comparison on the seen and unseen test sets of Ego-Exo4D-VT benchmark. $\\dagger$ we reimplement 3DiM based on DiT as their code is not publicly available." + ], + "table_footnote": [], + "table_body": "
Split Setting | Method | LPIPS ↓ | DISTS ↓ | CLIP ↑ | PSNR ↑ | SSIM ↑
Seen Scenes | pix2pix [21] | 0.371 | 0.260 | 68.68 | 15.90 | 0.519
Seen Scenes | GNT [61] | 0.479 | 0.390 | 63.44 | 14.71 | 0.542
Seen Scenes | DiT [39] | 0.406 | 0.226 | 78.74 | 15.64 | 0.570
Seen Scenes | 3DiM† [66] | 0.365 | 0.217 | 78.30 | 15.98 | 0.583
Seen Scenes | 4DIFF (ours) | 0.316 | 0.184 | 82.79 | 17.09 | 0.600
Unseen Scenes | pix2pix [21] | 0.376 | 0.272 | 69.87 | 15.23 | 0.491
Unseen Scenes | GNT [61] | 0.497 | 0.405 | 65.60 | 13.97 | 0.513
Unseen Scenes | DiT [39] | 0.440 | 0.256 | 73.67 | 14.86 | 0.528
Unseen Scenes | 3DiM† [66] | 0.436 | 0.269 | 73.26 | 14.90 | 0.542
Unseen Scenes | 4DIFF (ours) | 0.427 | 0.246 | 76.54 | 14.45 | 0.508
", + "bbox": [ + 225, + 373, + 777, + 550 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "and texture similarity between the synthesized egocentric image and the ground-truth image. Additionally, we include PSNR and SSIM for completeness, even though numerous existing works [7,51,53] have demonstrated that these metrics are suboptimal for evaluating image and video generation models, as they tend to favor conservative and blurry estimates.", + "bbox": [ + 212, + 579, + 787, + 654 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.2 Comparison with State-of-the-art Methods", + "text_level": 1, + "bbox": [ + 215, + 676, + 614, + 693 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In Table 1, we present the comparison of our method to various baselines. Notably, diffusion-based models—DiT [39], 3DiM [66], and our 4DIFF—outperform other approaches across all metrics by large margins, including the GAN-based pix2pix and NeRF-based GNT. The poor performance of the NeRF-based method GNT on our benchmark can be attributed to its limited capacity for modeling hundreds of different scenes.", + "bbox": [ + 212, + 703, + 787, + 792 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In Table 2, we present the results on seen scenes and unseen scenes, respectively, and show that our method achieves the best performance. Overall, our method surpasses the second-best performing diffusion-based 3DiM by $3.6\%$", + "bbox": [ + 212, + 795, + 787, + 840 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation", + "bbox": [ + 228, + 113, + 730, + 128 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 114, + 784, + 126 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/8b80d7e937c46b87f0cf87ef1766837fdafd717fd27fecc2e0cba73dcd7dc2aa.jpg", + "image_caption": [ + "Fig. 5: Generated samples from five scenarios: cooking, music, health, basketball, and bike repair. Our 4DIFF demonstrates the best performance across all examples in terms of geometry correctness and object quality. We brighten the images and exclude pix2pix and GNT in the scenario breakdown for a better visual experience." + ], + "image_footnote": [], + "bbox": [ + 225, + 148, + 785, + 657 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "in LPIPS and $1.9\%$ in DISTS, underscoring the effectiveness of our proposed geometry-based approach.", + "bbox": [ + 212, + 733, + 787, + 763 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Figure 5 presents qualitative comparisons with existing methods. GAN-based pix2pix [21] and NeRF-based GNT [61] exhibit challenges in producing photorealistic images, emphasizing the necessity of a robust generative model for the Ego-Exo4D-VT benchmark. Our 4DIFF demonstrates superior performance across various scenarios, excelling in both geometry correctness and object qual", + "bbox": [ + 212, + 763, + 787, + 839 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "F. Cheng and M. Luo et al.", + "bbox": [ + 271, + 114, + 455, + 128 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/c00a92bce599549a6f698586adccda63aa80e9b00bbf7472192bed31fd128aa3.jpg", + "image_caption": [ + "Exo Input", + "Fig.
6: We evaluate the effectiveness of our egocentric prior rendering module by visualizing the rendered prior image. Compared to NeRF-based rendering (GNT), our rendered prior image exhibits predominantly correct geometry, offering valuable egocentric cues to the diffusion model. Distortions and missing pixels arise from inaccurate depth estimation and occluded or unobserved regions in the exocentric view, which can be corrected by the diffusion model." + ], + "image_footnote": [], + "bbox": [ + 217, + 143, + 359, + 470 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/09ac5589ff26abb17deb1213cb52f061a5ab2b1e421424f6415d5f5fb8bb9eca.jpg", + "image_caption": [ + "Ego GT" + ], + "image_footnote": [], + "bbox": [ + 361, + 143, + 496, + 469 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/b8b5883e8904ab21488b61fdc9bf1cea87c301f4f878fff1da57837a46476205.jpg", + "image_caption": [ + "NeRF-based Rendering" + ], + "image_footnote": [], + "bbox": [ + 500, + 143, + 640, + 469 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/36995d5298cf2e4957d0a8f9a6b243beba9230043114f521b6abf018ba45bcf2.jpg", + "image_caption": [ + "Our Rasterization Module" + ], + "image_footnote": [], + "bbox": [ + 640, + 143, + 781, + 470 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "ity. Our 4DIFF is especially advantageous for view synthesis in complex scenes, such as the cooking scenario, where numerous objects exhibit intricate layouts. The qualitative results align well with our quantitative results in Table 1.", + "bbox": [ + 212, + 597, + 784, + 642 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.3 Qualitative Analysis", + "text_level": 1, + "bbox": [ + 214, + 662, + 429, + 678 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Investigating the visual results helps to gain a deeper insight into generative models. Thus, we perform a qualitative analysis below.", + "bbox": [ + 212, + 685, + 785, + 715 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Is the egocentric prior useful? We address this question by visualizing the rendered egocentric prior RGB image. In Figure 6, the NeRF-based renderer GNT [61] generates blurry images for all scenes, possibly due to its limited capacity to model many diverse scenes with limited views for supervision. In contrast, our rendered egocentric images produced by point cloud rasterization are mostly correct, offering valuable egocentric cues to the diffusion model. Despite distortions and missing pixels, our diffusion model demonstrates sufficient capacity to rectify these issues effectively.", + "bbox": [ + 212, + 719, + 787, + 839 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation", + "bbox": [ + 228, + 114, + 730, + 128 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 114, + 782, + 126 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/405f22d7102ceb3f786f727dfba23d882fc90c93d5871ca0cf818e8e96b4c7de.jpg", + "image_caption": [ + "Fig. 7: Results on the unseen scenes. When synthesizing views from the scenes not encountered during training, our 4DIFF exhibits slight hallucinations but consistently outperforms existing methods, producing significantly improved results." + ], + "image_footnote": [], + "bbox": [ + 228, + 148, + 774, + 588 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Generalization to unseen scenes. 
Figure 7 shows our generation results on the unseen scenes. We observe that our 4DIFF displays slight hallucinations, particularly noticeable in elements such as walls. Despite this, our method consistently outperforms existing methods. Such a robust performance can be attributed to the highly generalizable depth-based geometry priors used by our model.", + "bbox": [ + 212, + 656, + 787, + 744 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "What causes poor generation? We conduct an analysis to discern errors arising from the diffusion model or geometry priors. In Figure 8, we present two representative examples. The first showcases generation results in an unseen scene, where the egocentric prior image is reasonably good, but the diffusion model exhibits significant hallucinations, yielding an incorrectly generated image. We posit that this discrepancy arises because the diffusion model focuses", + "bbox": [ + 212, + 750, + 787, + 840 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "F. Cheng and M. Luo et al.", + "bbox": [ + 271, + 114, + 455, + 128 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/8d66151c18e681d7a70df2474aa4f366825c46b17d254df1c71448da6cf22f0f.jpg", + "image_caption": [ + "Fig. 8: Failure case examples of our method. Top: While the point cloud rasterization module performs effectively, the diffusion model produces errors when generating an egocentric view. Bottom: Although the diffusion model accurately predicts objects, the synthesized egocentric view appears more zoomed-out than the ground truth view. This can be attributed to suboptimal egocentric layout synthesis." + ], + "image_footnote": [], + "bbox": [ + 217, + 148, + 362, + 378 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/e3188bbeb7f6979856526fbfc09a8e3ced963ee0d148bf9fa6d5877ddcab80c7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 362, + 148, + 500, + 378 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/6b7038d3a6cbeb3d000e7185604536eb886025b4ef2008c04f6cb751a8a7d438.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 148, + 643, + 378 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/4a83e828a2639d07996a7812f789f9be8978a26b13fe095edc6f4dd1e0818e8a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 643, + 148, + 785, + 378 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "on modeling the conditional training distribution, limiting its generalization to substantially different scenes not present in the training data. This limitation can be mitigated by employing a large-scale pretrained diffusion model that has already acquired knowledge from diverse scenes and objects in 2D space.", + "bbox": [ + 212, + 479, + 787, + 539 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In the second example, we show that despite the incorrectly rendered egocentric prior image, the diffusion model can generate a photorealistic image, which is more zoomed-out than the ground-truth egocentric image. 
This observation suggests that the diffusion model can robustly handle inaccurately generated egocentric geometry priors.", + "bbox": [ + 212, + 541, + 787, + 614 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.4 Ablation Studies", + "text_level": 1, + "bbox": [ + 215, + 641, + 401, + 655 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "How important are our proposed modules? We study the importance of (i) 3D-aware rotary cross-attention and (ii) egocentric point cloud rasterization by sequentially removing them from our framework. As shown in Tab. 3a, removing the 3D cross-attention worsens the LPIPS by $2.4\%$. Additionally, removing the point cloud rasterization further degrades LPIPS by $3.9\%$. Moreover, as shown in Figure 5, our 4DIFF with the proposed geometry priors consistently outperforms the geometry-free diffusion models DiT and 3DiM in all scenarios. These results show the effectiveness of our proposed modules.", + "bbox": [ + 212, + 669, + 787, + 790 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Can we train the depth estimator from scratch? Tab. 3b shows that training our model without using a pretrained depth estimator results in a significant $4.3\%$ degradation in LPIPS. This suggests that an inaccurate depth", + "bbox": [ + 212, + 794, + 787, + 839 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation", + "bbox": [ + 228, + 114, + 730, + 128 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/9e4d48121a4fa428522c4f6081311b199de48c065816ded43548a28561642953.jpg", + "table_caption": [ + "Table 3: Ablation studies on various design choices. (a) We study the importance of each module by removing each module sequentially; (b) Using a pretrained depth estimator significantly improves the LPIPS by $4.3\%$; (c) DINOv2 outperforms CLIP by $1.7\%$ in LPIPS." + ], + "table_footnote": [], + "table_body": "
(a) Module ablation.
Model | LPIPS ↓
4DIFF | 0.349
- 3D Rotary CA | 0.373
- ego rasterization | 0.412
", + "bbox": [ + 233, + 205, + 431, + 297 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/854e0ff8063ceb46619854e375352c01367255cf9a7d1c6d829a0b102325d1ea.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
(b) Depth estimator.
Pretrained | LPIPS ↓
✓ | 0.349
✗ | 0.392
", + "bbox": [ + 457, + 205, + 602, + 282 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/69e10122c803e26b2051d7d9d6099c92ba0b60ca51c4bd1175681f910896973d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
(c) Feature encoder.
Feat. Enc. | LPIPS ↓
DINOv2 | 0.349
CLIP | 0.366
", + "bbox": [ + 609, + 205, + 751, + 282 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "estimation may lead to most points from the exocentric view being projected outside of the egocentric view. Consequently, these points will not receive sufficient gradient updates during training, leading to poor convergence. Thus, we conclude that a sufficiently accurate initial depth prediction is crucial for good performance.", + "bbox": [ + 212, + 306, + 782, + 380 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Which feature encoder should we use? We evaluate two strong feature encoders for obtaining a semantic representation for an exocentric RGB image: DINOv2 [36] and CLIP [42], both employing a ViT-L/14 backbone. The DINOv2 variant outperforms the CLIP variant by $1.7\%$ LPIPS. We conjecture that compared to CLIP's vision-language pretraining, DINOv2's self-supervised pretraining leads to higher quality lower-level visual features, which are important for the exocentric-to-egocentric image translation problem.", + "bbox": [ + 212, + 386, + 787, + 492 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Discussion and Conclusion", + "text_level": 1, + "bbox": [ + 215, + 512, + 508, + 527 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this work, we proposed 4DIFF, a 3D-aware transformer-based diffusion model that significantly outperforms prior approaches on the challenging Ego-Exo4D-VT benchmark. Our method demonstrates robust generalization to novel environments not encountered during training. Despite our excellent results, we also acknowledge a few limitations. Firstly, our method assumes known camera poses during training and inference, limiting its applicability to real-world scenarios. Integrating camera pose estimation via a head pose estimator could address this limitation, although head pose remains difficult to estimate automatically. Secondly, our method focuses on image-to-image translation, leaving room for video generation by incorporating spatial-temporal cues. Thirdly, enhancing the quality of generated objects and improving generalization to unseen environments could be achieved by leveraging a more powerful pretrained diffusion model (e.g., Stable Diffusion [49]). Lastly, extending our framework from frame-level synthesis to object-level synthesis, considering the locations and appearances of objects such as hands and interacted objects, would bring it closer to real-world applications like AR/VR coaching. We plan to explore these research directions in our future work.", + "bbox": [ + 212, + 540, + 787, + 797 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "F. Cheng and M. Luo et al.", + "bbox": [ + 271, + 114, + 455, + 128 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgment. We thank Hanwen Jiang, Yan-Bo Lin, Md Mohaiminul Islam, Ce Zhang, Yue Yang, and Soumitri Chattopadhyay for their helpful discussions. UT Austin is supported by NSF Grants AF 1901292, CNS 2148141, Tripods CCF 1934932, IFML CCF 2019844 and research gifts by Western Digital, Amazon, WNCG IAP, UT Austin Machine Learning Lab (MLL), Cisco, the Stanly P. Finch Centennial Professorship in Engineering. UNC is supported by Sony Faculty Innovation Award, Laboratory for Analytic Sciences via NC State University, ONR Award N00014-23-1-2356. K.G.
is paid as a research scientist at Meta.", + "bbox": [ + 212, + 146, + 787, + 282 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 215, + 306, + 323, + 321 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Ardeshir, S., Borji, A.: Ego2top: Matching viewers in egocentric and top-view videos. In: Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V 14. pp. 253-268. Springer (2016)", + "2. Barron, J.T., Mildenhall, B., Tancik, M., Hedman, P., Martin-Brualla, R., Srinivasan, P.P.: Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5855-5864 (2021)", + "3. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5470-5479 (2022)", + "4. Birkl, R., Wofk, D., Müller, M.: Midas v3.1 - a model zoo for robust monocular relative depth estimation. arXiv preprint arXiv:2307.14460 (2023)", + "5. Cao, A., Rockwell, C., Johnson, J.: Fwd: Real-time novel view synthesis with forward warping and depth. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15713-15724 (2022)", + "6. Chan, E.R., Lin, C.Z., Chan, M.A., Nagano, K., Pan, B., De Mello, S., Gallo, O., Guibas, L.J., Tremblay, J., Khamis, S., et al.: Efficient geometry-aware 3d generative adversarial networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 16123-16133 (2022)", + "7. Chan, E.R., Nagano, K., Chan, M.A., Bergman, A.W., Park, J.J., Levy, A., Aittala, M., De Mello, S., Karras, T., Wetzstein, G.: Generative novel view synthesis with 3d-aware diffusion models. arXiv preprint arXiv:2304.02602 (2023)", + "8. Chang, A., Dai, A., Funkhouser, T., Halber, M., Niessner, M., Savva, M., Song, S., Zeng, A., Zhang, Y.: Matterport3d: Learning from rgb-d data in indoor environments. arXiv preprint arXiv:1709.06158 (2017)", + "9. Chang, A.X., Funkhouser, T., Guibas, L., Hanrahan, P., Huang, Q., Li, Z., Savarese, S., Savva, M., Song, S., Su, H., et al.: Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012 (2015)", + "10. Chen, H., Gu, J., Chen, A., Tian, W., Tu, Z., Liu, L., Su, H.: Single-stage diffusion nerf: A unified approach to 3d generation and reconstruction. arXiv preprint arXiv:2304.06714 (2023)", + "11. Creswell, A., White, T., Dumoulin, V., Arulkumaran, K., Sengupta, B., Bharath, A.A.: Generative adversarial networks: An overview. IEEE signal processing magazine 35(1), 53-65 (2018)" + ], + "bbox": [ + 217, + 339, + 785, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation", + "bbox": [ + 228, + 114, + 730, + 128 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "12. Dhariwal, P., Nichol, A.: Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems 34, 8780-8794 (2021)", + "13. Ding, K., Ma, K., Wang, S., Simoncelli, E.P.: Image quality assessment: Unifying structure and texture similarity.
IEEE transactions on pattern analysis and machine intelligence 44(5), 2567-2581 (2020)", + "14. Duan, Y., Guo, X., Zhu, Z.: Diffusiondepth: Diffusion denoising approach for monocular depth estimation. arXiv preprint arXiv:2303.05021 (2023)", + "15. Flynn, J., Broxton, M., Debevec, P., DuVall, M., Fyffe, G., Overbeck, R., Snavely, N., Tucker, R.: Deepview: View synthesis with learned gradient descent. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2367-2376 (2019)", + "16. Flynn, J., Neulander, I., Philbin, J., Snavely, N.: Deepstereo: Learning to predict new views from the world's imagery. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 5515-5524 (2016)", + "17. Geiger, A., Lenz, P., Stiller, C., Urtasun, R.: The kitti vision benchmark suite. URL http://www.cvlibs.net/datasets/kitti 2(5) (2015)", + "18. Grauman, K., Westbury, A., Torresani, L., Kitani, K., Malik, J., Afouras, T., Ashutosh, K., Baiyya, V., Bansal, S., Boote, B., et al.: Ego-exo4d: Understanding skilled human activity from first-and third-person perspectives. arXiv preprint arXiv:2311.18259 (2023)", + "19. Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. Advances in Neural Information Processing Systems 33, 6840-6851 (2020)", + "20. Ho, J., Saharia, C., Chan, W., Fleet, D.J., Norouzi, M., Salimans, T.: Cascaded diffusion models for high fidelity image generation. The Journal of Machine Learning Research 23(1), 2249-2281 (2022)", + "21. Isola, P., Zhu, J.Y., Zhou, T., Efros, A.A.: Image-to-image translation with conditional adversarial networks. CVPR (2017)", + "22. Johnson, J., Hariharan, B., Van Der Maaten, L., Fei-Fei, L., Lawrence Zitnick, C., Girshick, R.: Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2901-2910 (2017)", + "23. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics 42(4) (2023)", + "24. Koh, J.Y., Lee, H., Yang, Y., Baldridge, J., Anderson, P.: Pathdreamer: A world model for indoor navigation. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 14738-14748 (2021)", + "25. Kulhánek, J., Derner, E., Sattler, T., Babuška, R.: Viewformer: Nerf-free neural rendering from few images using transformers. In: European Conference on Computer Vision. pp. 198-216. Springer (2022)", + "26. Kwon, T., Tekin, B., Stühmer, J., Bogo, F., Pollefeys, M.: H2o: Two hands manipulating objects for first person interaction recognition. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10138-10148 (2021)", + "27. Li, J., Liu, K., Wu, J.: Ego-body pose estimation via ego-head pose estimation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 17142-17151 (2023)", + "28. Liu, G., Tang, H., Latapie, H., Yan, Y.: Exocentric to egocentric image generation via parallel generative adversarial network. In: ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). pp. 1843-1847. IEEE (2020)" + ], + "bbox": [ + 217, + 147, + 784, + 839 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "F. Cheng and M.
Luo et al.", + "bbox": [ + 271, + 114, + 454, + 127 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "29. Liu, G., Tang, H., Latapie, H.M., Corso, J.J., Yan, Y.: Cross-view exocentric to egocentric video synthesis. In: Proceedings of the 29th ACM International Conference on Multimedia. pp. 974-982 (2021)", + "30. Liu, R., Wu, R., Van Hoorick, B., Tokmakov, P., Zakharov, S., Vondrick, C.: Zero-1-to-3: Zero-shot one image to 3d object. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 9298-9309 (2023)", + "31. Lombardi, S., Simon, T., Saragih, J., Schwartz, G., Lehrmann, A., Sheikh, Y.: Neural volumes: Learning dynamic renderable volumes from images. arXiv preprint arXiv:1906.07751 (2019)", + "32. Luo, M., Xue, Z., Dimakis, A., Grauman, K.: Put myself in your shoes: Lifting the egocentric perspective from exocentric videos. In: ECCV (2024)", + "33. Mathews, J.: Coordinate-free rotation formalism. American Journal of Physics 44(12), 1210-1210 (1976)", + "34. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM 65(1), 99-106 (2021)", + "35. Niklaus, S., Mai, L., Yang, J., Liu, F.: 3d ken burns effect from a single image. ACM Transactions on Graphics (ToG) 38(6), 1-15 (2019)", + "36. Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., et al.: Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193 (2023)", + "37. Park, K., Sinha, U., Barron, J.T., Bouaziz, S., Goldman, D.B., Seitz, S.M., Martin-Brualla, R.: Nerfies: Deformable neural radiance fields. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5865-5874 (2021)", + "38. Peebles, W., Xie, S.: Scalable diffusion models with transformers. arXiv preprint arXiv:2212.09748 (2022)", + "39. Peebles, W., Xie, S.: Scalable diffusion models with transformers. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4195-4205 (2023)", + "40. Popov, S., Bauszat, P., Ferrari, V.: Corenet: Coherent 3d scene reconstruction from a single rgb image. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part II 16. pp. 366-383. Springer (2020)", + "41. Pumarola, A., Corona, E., Pons-Moll, G., Moreno-Noguer, F.: D-nerf: Neural radiance fields for dynamic scenes. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 10318-10327 (2021)", + "42. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International conference on machine learning. pp. 8748-8763. PMLR (2021)", + "43. Regmi, K., Borji, A.: Cross-view image synthesis using conditional gans. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition. pp. 3501-3510 (2018)", + "44. Ren, B., Tang, H., Sebe, N.: Cascaded cross mlp-mixer gans for cross-view image translation. arXiv preprint arXiv:2110.10183 (2021)", + "45. Ren, X., Wang, X.: Look outside the room: Synthesizing a consistent long-term 3d scene video from a single image. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3563-3573 (2022)", + "46. 
Riegler, G., Koltun, V.: Free view synthesis. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XIX 16. pp. 623-640. Springer (2020)" + ], + "bbox": [ + 215, + 146, + 784, + 840 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation", + "bbox": [ + 228, + 114, + 730, + 128 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "47. Riegler, G., Koltun, V.: Stable view synthesis. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 12216-12225 (2021)", + "48. Rockwell, C., Fouhey, D.F., Johnson, J.: Pixelsynth: Generating a 3d-consistent experience from a single image. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 14104-14113 (2021)", + "49. Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 10684-10695 (2022)", + "50. Rombach, R., Esser, P., Ommer, B.: Geometry-free view synthesis: Transformers and no 3d priors. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 14356-14366 (2021)", + "51. Saharia, C., Chan, W., Chang, H., Lee, C., Ho, J., Salimans, T., Fleet, D., Norouzi, M.: Palette: Image-to-image diffusion models. In: ACM SIGGRAPH 2022 Conference Proceedings. pp. 1-10 (2022)", + "52. Saharia, C., Chan, W., Saxena, S., Li, L., Whang, J., Denton, E.L., Ghasemipour, K., Gontijo Lopes, R., Karagol Ayan, B., Salimans, T., et al.: Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems 35, 36479-36494 (2022)", + "53. Saharia, C., Ho, J., Chan, W., Salimans, T., Fleet, D.J., Norouzi, M.: Image super-resolution via iterative refinement. IEEE Transactions on Pattern Analysis and Machine Intelligence 45(4), 4713-4726 (2022)", + "54. Sajjadi, M.S., Meyer, H., Pot, E., Bergmann, U., Greff, K., Radwan, N., Vora, S., Lucic, M., Duckworth, D., Dosovitskiy, A., et al.: Scene representation transformer: Geometry-free novel view synthesis through set-latent scene representations. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 6229-6238 (2022)", + "55. Sener, F., Chatterjee, D., Shelepov, D., He, K., Singhania, D., Wang, R., Yao, A.: Assembly101: A large-scale multi-view video dataset for understanding procedural activities. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21096-21106 (2022)", + "56. Sitzmann, V., Thies, J., Heide, F., Nießner, M., Wetzstein, G., Zollhofer, M.: Deepvoxels: Learning persistent 3d feature embeddings. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2437-2446 (2019)", + "57. Sitzmann, V., Zollhöfer, M., Wetzstein, G.: Scene representation networks: Continuous 3d-structure-aware neural scene representations. Advances in Neural Information Processing Systems 32 (2019)", + "58. Song, J., Meng, C., Ermon, S.: Denoising diffusion implicit models. arXiv:2010.02502 (October 2020), https://arxiv.org/abs/2010.02502", + "59. Su, J., Ahmed, M., Lu, Y., Pan, S., Bo, W., Liu, Y.: Roformer: Enhanced transformer with rotary position embedding.
Neurocomputing 568, 127063 (2024)", + "60. Szymanowicz, S., Rupprecht, C., Vedaldi, A.: Splatter image: Ultra-fast single-view 3d reconstruction. arXiv preprint arXiv:2312.13150 (2023)", + "61. T, M.V., Wang, P., Chen, X., Chen, T., Venugopalan, S., Wang, Z.: Is attention all that neRF needs? In: The Eleventh International Conference on Learning Representations (2023), https://openreview.net/forum?id=xE-LtsE-xx", + "62. Tang, H., Xu, D., Sebe, N., Wang, Y., Corso, J.J., Yan, Y.: Multi-channel attention selection gan with cascaded semantic guidance for cross-view image translation. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 2417-2426 (2019)" + ], + "bbox": [ + 215, + 147, + 784, + 839 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "F. Cheng and M. Luo et al.", + "bbox": [ + 271, + 114, + 454, + 127 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "63. Trevithick, A., Yang, B.: Grf: Learning a general radiance field for 3d scene representation and rendering (2020)", + "64. Tucker, R., Snavely, N.: Single-view view synthesis with multiplane images. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 551-560 (2020)", + "65. Wang, Q., Wang, Z., Genova, K., Srinivasan, P.P., Zhou, H., Barron, J.T., MartinBrualla, R., Snavely, N., Funkhouser, T.: Ibrnet: Learning multi-view image-based rendering. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4690-4699 (2021)", + "66. Watson, D., Chan, W., Martin-Brualla, R., Ho, J., Tagliasacchi, A., Norouzi, M.: Novel view synthesis with diffusion models. arXiv preprint arXiv:2210.04628 (2022)", + "67. Wiles, O., Gkioxari, G., Szeliski, R., Johnson, J.: Synsin: End-to-end view synthesis from a single image. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7467-7477 (2020)", + "68. Yang, L., Kang, B., Huang, Z., Xu, X., Feng, J., Zhao, H.: Depth anything: Unleashing the power of large-scale unlabeled data. arXiv preprint arXiv:2401.10891 (2024)", + "69. Yu, A., Ye, V., Tancik, M., Kanazawa, A.: pixelnerf: Neural radiance fields from one or few images. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4578-4587 (2021)", + "70. Zhai, M., Bessinger, Z., Workman, S., Jacobs, N.: Predicting ground-level scene layout from aerial imagery. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 867-875 (2017)", + "71. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 586-595 (2018)", + "72. Zhou, T., Tucker, R., Flynn, J., Fyffe, G., Snavely, N.: Stereo magnification: Learning view synthesis using multiplane images. 
arXiv preprint arXiv:1805.09817 (2018)" + ], + "bbox": [ + 215, + 146, + 784, + 535 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation", + "bbox": [ + 228, + 114, + 730, + 128 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 18 + } +] \ No newline at end of file diff --git a/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/26bdb530-b8d4-43d7-9337-55a1116b4a83_model.json b/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/26bdb530-b8d4-43d7-9337-55a1116b4a83_model.json new file mode 100644 index 0000000000000000000000000000000000000000..cd7fa06ccaa9adf30f8987f14469039660d97441 --- /dev/null +++ b/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/26bdb530-b8d4-43d7-9337-55a1116b4a83_model.json @@ -0,0 +1,2504 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.289, + 0.141, + 0.717, + 0.187 + ], + "angle": 0, + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.212, + 0.782, + 0.244 + ], + "angle": 0, + "content": "Feng Cheng\\(^{1,3*}\\), Mi Luo\\(^{2*}\\), Huiyu Wang\\(^{1}\\), Alex Dimakis\\(^{2}\\), Lorenzo Torresani\\(^{1}\\), Gedas Bertasius\\(^{3\\dagger}\\), and Kristen Grauman\\(^{1,2\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.346, + 0.254, + 0.655, + 0.314 + ], + "angle": 0, + "content": "\\(^{1}\\) FAIR, Meta AI \n\\(^{2}\\) The University of Texas at Austin \n\\(^{3}\\) University of North Carolina at Chapel Hill \n* Equal contribution, † Co-lead the project" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.35, + 0.744, + 0.573 + ], + "angle": 0, + "content": "Abstract. We present 4DIFF, a 3D-aware diffusion model addressing the exo-to-ego viewpoint translation task — generating first-person (ego-centric) view images from the corresponding third-person (exocentric) images. Building on the diffusion model's ability to generate photorealistic images, we propose a transformer-based diffusion model that incorporates geometry priors through two mechanisms: (i) egocentric point cloud rasterization and (ii) 3D-aware rotary cross-attention. Egocentric point cloud rasterization converts the input exocentric image into an egocentric layout, which is subsequently used by a diffusion image transformer. As a component of the diffusion transformer's denoiser block, the 3D-aware rotary cross-attention further incorporates 3D information and semantic features from the source exocentric view. Our 4DIFF achieves state-of-the-art results on the challenging and diverse Ego-Exo4D multiview dataset and exhibits robust generalization to novel environments not encountered during training. Our code, processed data, and pretrained models are publicly available at https://klauscc.github.io/4diff." 
+ }, + { + "type": "text", + "bbox": [ + 0.263, + 0.585, + 0.587, + 0.6 + ], + "angle": 0, + "content": "Keywords: Egocentric Vision \\(\\cdot\\) View Synthesis" + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.627, + 0.376, + 0.643 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.659, + 0.788, + 0.809 + ], + "angle": 0, + "content": "From early developmental stages, humans adeptly observe external actions (exo) and seamlessly integrate them into their own repertoire (ego), forming the cornerstone of visual learning. This actor-observer translation mechanism not only shapes individual development but also holds profound implications for technological advancements. Imagine the ability to immerse yourself in the first-person perspective of renowned athletes like Messi or glean intricate piano techniques from online tutorials converted to a first-person viewpoint. Such experiences hinge on seamless translation from third-person to first-person perspectives, highlighting the pivotal role of cross-view translation in facilitating immersive and enriching experiences across diverse domains." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.81, + 0.787, + 0.84 + ], + "angle": 0, + "content": "We leverage the recently released Ego-Exo4D dataset [18] to explore the third-person (exocentric) to first-person (egocentric) viewpoint translation task." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.456, + 0.129 + ], + "angle": 0, + "content": "F. Cheng and M. Luo et al." + }, + { + "type": "image", + "bbox": [ + 0.23, + 0.148, + 0.782, + 0.249 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.266, + 0.788, + 0.31 + ], + "angle": 0, + "content": "Fig. 1: Given exocentric images of an egocentric camera wearer engaged in daily activities and the corresponding camera trajectories, we aim to synthesize the corresponding egocentric view that captures the scene from the wearer's first-person perspective." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.32, + 0.789, + 0.562 + ], + "angle": 0, + "content": "As illustrated in Figure 1, our focus is on transforming the exocentrically observed images containing a designated individual into images depicting the same scene from the individual's first-person perspective. Our task is a specific instance of the Novel View Synthesis (NVS) task, which aims to generate new views conditioned on a few given views of a scene. However, the Ego-Exo4D dataset presents a formidable challenge compared to traditional novel view synthesis datasets [8, 9, 17, 22, 57, 72] and multiview datasets [1, 26, 55, 70]. As illustrated in Figure 2, the scenes in the Ego-Exo4D dataset are characterized by numerous objects and dynamic actions performed by the participants. The dataset encompasses diverse scenes, ranging from indoor to outdoor activities such as cooking and basketball. Furthermore, the visual differences between exocentric and egocentric images are pronounced due to sharp viewpoint changes. Besides, unlike numerous NVS datasets that use 3D data for arbitrary viewpoint sampling during training, Ego-Exo4D dataset only provides several views (e.g., four exo and one ego view) for each dynamic scene, which presents a challenge for convergence of prior geometry-based methods that regress the entire scene." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.565, + 0.789, + 0.732 + ], + "angle": 0, + "content": "Due to the challenges mentioned above, existing methods exhibit unsatisfactory performance in the exo-to-ego view translation task. Geometry-free generative models, including GAN-based [6,21] and diffusion-based [30,38,66] methods, face challenges in generating geometrically-correct images due to high complexity of the scenes. In contrast, geometry-based approaches, exemplified by NeRF-based methods [2,3,34,37,41,69], encounter limitations in achieving photorealistic images. Recent attempts [7, 10] aim to reconcile this dilemma by integrating a strong geometry-based method (e.g. NeRF-based) into diffusion models. However, these models are typically difficult to optimize on the extremely diverse scenes in the Ego-Exo4D benchmark, as we show in Sec. 4.2. Thus, they often fail to provide constructive geometry priors to the subsequent diffusion model." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.735, + 0.789, + 0.842 + ], + "angle": 0, + "content": "Motivated by these observations, we propose 4DIFF, a 3D-Aware Diffusion model for exocentric to egocentric viewpoint translation. We propose two mechanisms to incorporate 3D geometry into the diffusion model: (i) egocentric point cloud rasterization, and (ii) 3D-aware rotary cross-attention layers. Rather than relying on a complex geometry model like NeRF, we render an egocentric prior image using a lightweight rasterization technique [5, 67]. As a result, our approach is both easy to train and adaptable, allowing it to incorporate existing" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.23, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.145, + 0.784, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.42, + 0.785, + 0.476 + ], + "angle": 0, + "content": "Fig. 2: Comparison of the Ego-Exo4D viewpoint translation (Ego-Exo4D-VT) benchmark, which we build on the Ego-Exo4D dataset [18], with existing novel view synthesis and cross-view translation benchmarks. Ego-Exo4D-VT presents numerous challenges that require fundamental advances in generative modeling to address." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.481, + 0.788, + 0.663 + ], + "angle": 0, + "content": "open-source pretrained depth estimators. These estimators have demonstrated effectiveness in processing images from previously unseen environments [4, 68]. Solely rendering the egocentric prior feature map through point cloud rasterization can be problematic, as the source exo view often contains occluded and unobserved regions. To address this, we seamlessly integrate rasterization into the diffusion model, leveraging its substantial capacity for extrapolation and generating high-quality images. We further enhance the expressivity of our diffusion model by introducing 3D-aware rotary cross-attention, which is integrated into each denoising block of the model. This functionality aims to improve feature similarities and 3D spatial similarities between ego and exo views, allowing the diffusion feature maps to incorporate information from the semantic features encoded in the exocentric image more effectively." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.663, + 0.788, + 0.739 + ], + "angle": 0, + "content": "Our method 4DIFF surpasses prior state-of-the-art techniques on the challenging Ego-Exo4D viewpoint translation benchmark, achieving a \\(3.6\\%\\) improvement in LPIPS. Furthermore, leveraging the extensive scale of Ego-Exo4D data, our approach demonstrates robust generalization to novel environments not encountered during training." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.762, + 0.388, + 0.779 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Exo-to-Ego Viewpoint Translation. Prior methods [28, 44, 62] tackled this problem predominantly via GAN-based models [11]. Specifically, [43] proposed the X-Fork and X-Seq GAN-based architecture using an additional semantic map" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.456, + 0.129 + ], + "angle": 0, + "content": "F. Cheng and M. Luo et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.268 + ], + "angle": 0, + "content": "for enhanced generation. [29] introduced STA-GAN, which focuses on learning spatial and temporal information to generate egocentric videos from exocentric views. [32] focuses on hand-object interactions, proposing to decouple hand layout generation and ego frame generation with a diffusion model. None of these methods develop an explicit geometry-aware generative framework. In contrast, our work introduces two effective mechanisms to incorporate 3D geometric priors into the diffusion model, specifically tailored to address the challenges posed by the Ego-Exo4D-VT benchmark." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.271, + 0.788, + 0.574 + ], + "angle": 0, + "content": "Novel View Synthesis (NVS). Our exo-to-ego viewpoint translation task represents a distinct facet of the NVS task, which aims to generate a target image with an arbitrary target camera pose from given source images and their camera poses. Previous works in NVS can be categorized into geometry-based [15,16,31,46,47,56,64,72], regression-based methods [25,35,54,63-65,69,72] and generative models [24,45,48,50,66,67]. Recently, several geometry-aware generative models [7,10] have explored ways to integrate NeRF with diffusion models. For instance, GeNVS [7] incorporates geometry priors into their diffusion model using a variant of pixelNeRF [69], which renders a target feature map from a 3D feature field. SSDNeRF [10] proposes a unified approach that employs an expressive diffusion model to learn a generalizable prior of neural radiance field (NeRF). However, these geometry-based models, typically implemented as NeRFs, often struggle to provide meaningful geometry priors to the diffusion model, especially in the challenging Ego-Exo4D-VT benchmark. This is because complex geometry methods require strong supervision (e.g., many densely sampled views of the same scene), which Ego-Exo4D does not provide. In contrast, our method uses simple point-cloud rasterization that relies solely on accurate depth estimation, avoiding the modeling of occluded and unobserved areas in the exocentric view. This approach shows better generalization and benefits from existing large-scale pretrained depth estimators." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.577, + 0.788, + 0.684 + ], + "angle": 0, + "content": "Diffusion Models [12, 19, 49] have made significant strides in producing photorealistic images and videos. They excel in modeling conditional distributions, including scenarios where conditioning is based on text [49, 52] or another image [20, 53]. Prior work has demonstrated a wide range of successful applications of diffusion models, including human pose generation [27] and depth estimation [14]. In our work, we employ a transformer-based diffusion model [39] to model the distribution of egocentric images conditioned on exocentric images." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.708, + 0.381, + 0.725 + ], + "angle": 0, + "content": "3 Methodology" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.74, + 0.388, + 0.757 + ], + "angle": 0, + "content": "3.1 Problem Setup" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.766, + 0.787, + 0.814 + ], + "angle": 0, + "content": "Given an exocentric image \\( x \\in \\mathbb{R}^{h \\times w \\times 3} \\) and the relative camera pose \\( P \\in \\mathbb{R}^{4 \\times 4} \\) from exo camera to the ego camera of the person of interest, our goal is to synthesize an egocentric image \\( y \\in \\mathbb{R}^{h \\times w \\times 3} \\) from the conditional distribution:" + }, + { + "type": "equation", + "bbox": [ + 0.468, + 0.826, + 0.786, + 0.842 + ], + "angle": 0, + "content": "\\[\np (y | x, P) \\tag {1}\n\\]" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.23, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.152, + 0.784, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.313, + 0.788, + 0.412 + ], + "angle": 0, + "content": "Fig. 3: We propose 4DIFF, a 3D-Aware Diffusion model for exocentric to egocentric viewpoint translation. Our framework uses a point cloud rasterization scheme first to compute an egocentric prior, which captures egocentric layout cues. Afterward, the egocentric prior is fed into the diffusion model augmented with the proposed 3D-aware rotary cross-attention for egocentric image generation. The proposed 3D-aware rotary cross-attention guides the attention to consider geometric relationships between the egocentric and exocentric diffusion feature maps." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.43, + 0.787, + 0.461 + ], + "angle": 0, + "content": "We assume the relative camera pose \\((P)\\) is known, similar to the standard NVs tasks [40, 61, 69]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.462, + 0.788, + 0.613 + ], + "angle": 0, + "content": "Relation to the Official Ego-Exo4D Translation Benchmark. Ego-Exo4D [18] introduced an exo-to-ego translation benchmark, with the primary emphasis on object-level synthesis, i.e., generating an object at the correct location in the ego view based on an exo image and an exo segmentation mask of the object of interest. This approach is particularly valuable for precise object placement and detailed object-level interactions. In contrast, we focus on full-image synthesis — allowing for the generation of entire scenes, and enhancing the richness and diversity of generated viewpoints. 
Both are complementary; while Ego-Exo4D excels in object-specific scenarios, our method expands the scope to full-scene synthesis and can be seen as a new specialized NVS task." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.643, + 0.395, + 0.656 + ], + "angle": 0, + "content": "3.2 Our Framework" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.674, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Due to the inherent complexity and dynamism present in diverse scenes, we use an expressive transformer-based diffusion model to model the conditional distribution in Equation 1. However, due to the inability to explicitly model 3D cues, the standard diffusion model may struggle to generate geometry-consistent images. Thus, we propose two techniques to incorporate geometry into our diffusion model: (i) egocentric point cloud rasterization and (ii) 3D-aware rotary cross-attention. As shown in Figure 3, the point cloud rasterization first renders an egocentric prior from the input exocentric view, which is then fed into the diffusion model. Afterward, the conditioned diffusion model is augmented with the proposed 3D-aware rotary cross-attention to generate the target egocentric image. We now describe each module in more detail." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.456, + 0.129 + ], + "angle": 0, + "content": "F. Cheng and M. Luo et al." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.147, + 0.573, + 0.163 + ], + "angle": 0, + "content": "3.3 Egocentric Point Cloud Rasterization" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.173, + 0.788, + 0.248 + ], + "angle": 0, + "content": "As a first step in our framework, we render an egocentric prior via the point cloud rasterization from an exocentric view. Specifically, we first use a depth estimator to convert the exocentric 2D image \\( x \\) and a feature map \\( F^{\\mathrm{exo}} \\) into a feature point cloud. Then, a differential renderer [67] projects this point cloud into an egocentric prior \\( H^{\\mathrm{prior}} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.333, + 0.261, + 0.786, + 0.278 + ], + "angle": 0, + "content": "\\[\nH ^ {\\text {p r i o r}} = \\left[ x ^ {\\text {p r i o r}}, F ^ {\\text {p r i o r}} \\right] = \\operatorname {r e n d e r} \\left(\\left[ x, F ^ {\\text {e x o}} \\right], D, P\\right) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.288, + 0.789, + 0.364 + ], + "angle": 0, + "content": "Here, \\( F^{\\mathrm{exo}} \\) is the semantic features of the exocentric image encoded by a feature encoder \\( f \\), \\( x^{\\mathrm{prior}} \\) and \\( F^{\\mathrm{prior}} \\) are the egocentric prior image and a feature map, rendered from the exocentric image \\( x \\) and a feature map \\( F^{\\mathrm{exo}} \\) respectively. \\( D \\) denotes the depth map predicted by a depth estimator, and \\( P \\) represents the relative camera pose." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.371, + 0.79, + 0.432 + ], + "angle": 0, + "content": "Depth Estimator. We construct the depth estimator based on the pretrained MiDaS [4]. Since MiDaS predicts relative disparity (the inverse of depth), we introduce two learnable scalars \\( s \\) and \\( t \\) for dataset-specific calibration. 
The depth map \\( D \\) is predicted using the formula:" + }, + { + "type": "equation", + "bbox": [ + 0.401, + 0.445, + 0.786, + 0.461 + ], + "angle": 0, + "content": "\\[\nD = 1 / \\left(s \\cdot \\operatorname {M i D a S} \\left(x ^ {\\mathrm {e x o}}\\right) + t\\right). \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.479, + 0.789, + 0.585 + ], + "angle": 0, + "content": "Rasterization. We employ the differentiable renderer [67] for our rasterization. This renderer splats 3D points onto the image plane and calculates pixel values by blending point features. In contrast to more intricate rendering techniques like NeRF [34,69] or Gaussian Splatting [23,60], our renderer is simpler to converge. It relies solely on depth estimation from 2D images, leveraging large-scale pretrained depth estimators. This design choice ensures robust generalization across diverse scenarios." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.607, + 0.587, + 0.623 + ], + "angle": 0, + "content": "3.4 3D-Aware Diffusion Image Transformer" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.633, + 0.788, + 0.692 + ], + "angle": 0, + "content": "Our diffusion model uses a denoiser network to predict added noise \\(\\epsilon_{t}\\) from the noisy target egocentric image \\(y_{t} = \\sqrt{\\bar{\\alpha}_{t}} y + \\sqrt{1 - \\bar{\\alpha}_{t}}\\epsilon_{t}\\), conditioned on the previously obtained egocentric prior \\(H^{\\mathrm{prior}}\\) and the exocentric semantic features \\(F^{\\mathrm{exo}}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.413, + 0.693, + 0.786, + 0.71 + ], + "angle": 0, + "content": "\\[\n\\hat {\\epsilon} _ {t} = \\epsilon_ {\\theta} ([ y _ {t}, H ^ {\\text {p r i o r}} ], F ^ {\\text {e x o}}). \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.716, + 0.788, + 0.763 + ], + "angle": 0, + "content": "During inference, the target egocentric image \\( y_0 \\) is generated from a standard Gaussian noise \\( y_T \\) by applying the denoiser network \\( \\epsilon_{\\theta} \\) iteratively with a sampling strategy (e.g. DDIM [58]), i.e. \\( y_T \\to y_{T - \\delta} \\to \\ldots \\to y_0 \\)." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.765, + 0.789, + 0.843 + ], + "angle": 0, + "content": "Denoiser Network \\(\\epsilon_{\\theta}\\). Our proposed 3D-aware Diffusion image Transformer serves as the denoiser network. As shown in Figure 3 and Equation 4, our Transformer network takes as input the concatenation of the egocentric prior \\(H^{\\mathrm{prior}}\\) and the noisy target egocentric image \\(y_{t}\\) encoded via an off-the-shelf autoencoder from [49]. Following [39], the architecture of DiT is the same as ViT, consisting" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.229, + 0.114, + 0.733, + 0.129 + ], + "angle": 0, + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.787, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "image", + "bbox": [ + 0.276, + 0.144, + 0.734, + 0.294 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.304, + 0.789, + 0.334 + ], + "angle": 0, + "content": "Fig. 4: An illustration of the calculation of the rotation matrix \\( R_{m,n} \\) in our 3D-aware rotary cross attention." 
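For concreteness, the egocentric prior rendering of Sec. 3.3 (Eqs. 2-3) can be sketched as follows. This is a minimal illustration, not the released implementation: it assumes pinhole intrinsics K_exo and K_ego (not part of the notation above), shows the relative pose P composed from world-to-camera extrinsics only as an example, and replaces the differentiable point renderer of [67] with a naive nearest-point z-buffer splat. All helper names (calibrate_depth, splat_to_ego, feats, ego_hw) are hypothetical.

```python
import torch

def calibrate_depth(disparity, s, t, eps=1e-6):
    # Eq. (3): D = 1 / (s * MiDaS(x_exo) + t), with learnable scalars s, t.
    return 1.0 / (s * disparity + t).clamp(min=eps)

def splat_to_ego(feats, depth, K_exo, K_ego, P, ego_hw):
    """Sketch of Eq. (2): lift exo pixels to 3D with the calibrated depth,
    move them into the ego frame with the relative pose P (4x4, exo -> ego,
    e.g. T_world_to_ego @ inverse(T_world_to_exo) under a world-to-camera
    convention), and splat the concatenated [x, F_exo] features.
    feats: (h, w, c) exo image and encoded features laid out on the exo grid.
    depth: (h, w). Returns the egocentric prior H_prior of shape (H, W, c)."""
    h, w = depth.shape
    ys, xs = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")
    pix = torch.stack([xs, ys, torch.ones_like(xs)], dim=-1).float().reshape(-1, 3)
    rays = pix @ torch.inverse(K_exo).T              # unproject pixel grid to camera rays
    pts_exo = rays * depth.reshape(-1, 1)            # 3D points in the exo camera frame
    pts_h = torch.cat([pts_exo, torch.ones(h * w, 1)], dim=-1)
    pts_ego = (pts_h @ P.T)[:, :3]                   # transform into the ego camera frame
    uvz = pts_ego @ K_ego.T                          # project with the ego intrinsics
    z = uvz[:, 2]
    u = (uvz[:, 0] / z.clamp(min=1e-6)).round().long()
    v = (uvz[:, 1] / z.clamp(min=1e-6)).round().long()
    H, W = ego_hw
    c = feats.shape[-1]
    prior = torch.zeros(H * W, c)
    zbuf = torch.full((H * W,), float("inf"))
    flat_feats = feats.reshape(-1, c)
    keep = (z > 0) & (u >= 0) & (u < W) & (v >= 0) & (v < H)
    for i in torch.nonzero(keep).flatten().tolist():  # naive z-buffer; [67] blends points instead
        j = (v[i] * W + u[i]).item()
        if z[i] < zbuf[j]:
            zbuf[j] = z[i]
            prior[j] = flat_feats[i]
    return prior.reshape(H, W, c)                     # H_prior, concatenated with y_t for the DiT
```

In 4DIFF the rendering step uses the differentiable renderer of [67], which blends point features and lets gradients reach the calibration scalars s and t; the explicit loop above is only meant to make the geometry of Eq. (2) readable.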
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.346, + 0.789, + 0.409 + ], + "angle": 0, + "content": "of \\( N \\) transformer layers, each with a self-attention layer and a feedforward network. To further enhance the expressivity of our model and incorporate more geometric cues, we propose 3D-aware rotary cross-attention layers, which we describe next." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.414, + 0.789, + 0.579 + ], + "angle": 0, + "content": "3D-aware Rotary Cross-Attention. When conditioning the diffusion model on the exocentric feature map, we should consider similarities in the semantic feature and spatial 3D space. Exocentric features similar in appearance (i.e., semantic feature space) and 3D location with respect to the query features should have higher attention values in the diffusion model. Motivated by RoPE [59], we achieve this by incorporating rotations during attention weight calculations. The degree of rotation between a query and a key is determined by the angle between their 3D coordinates, with the ego camera as the center. Consequently, the cosine similarity between the query and key features can incorporate their 3D spatial angle, effectively capturing the 3D relationships between corresponding points in the egocentric and exocentric views." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.58, + 0.79, + 0.626 + ], + "angle": 0, + "content": "Specifically, given a feature map \\( Z \\in \\mathbb{R}^{l \\times c} \\) in the diffusion model and the exocentric semantic feature map \\( F^{\\mathrm{exo}} \\in \\mathbb{R}^{l \\times c} \\), the 3D-aware rotary cross-attention calculates the output \\( O \\in \\mathbb{R}^{l \\times c} \\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.397, + 0.634, + 0.787, + 0.684 + ], + "angle": 0, + "content": "\\[\na _ {m, n} = \\frac {\\exp \\left(\\frac {q _ {m} ^ {T} R _ {m , n} k _ {n}}{\\sqrt {c}}\\right)}{\\sum_ {j = 1} ^ {l} \\exp \\left(\\frac {q _ {m} ^ {T} R _ {m , j} k _ {j}}{\\sqrt {c}}\\right)} \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.405, + 0.686, + 0.787, + 0.726 + ], + "angle": 0, + "content": "\\[\nO _ {m} = \\sum_ {n = 1} ^ {l} a _ {m, n} v _ {n} \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.734, + 0.789, + 0.844 + ], + "angle": 0, + "content": "Here, \\( q_{m} = Z_{m}W_{q} \\) is the \\( m \\)-th query token, \\( k_{n} = F_{n}^{\\mathrm{exo}}W_{k} \\) is the \\( n \\)-th key token and \\( v_{n} = F_{n}^{\\mathrm{exo}}W_{v} \\) is the \\( n \\)-th value token. \\( W_{q}, W_{k}, W_{v} \\) are learnable project matrices. \\( R_{m,n} \\) is the rotation matrix that rotates the key token to align with the value token in 3D space, where the egocentric camera is used as the center. Since the query token is in the egocentric view, we map its coordinates to the exocentric view using the relative camera pose. The rotation matrix is computed in the exocentric view using the algorithm from [33]. When \\( R_{m,n} \\) is an identity" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.455, + 0.128 + ], + "angle": 0, + "content": "F. Cheng and M. Luo et al." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.147, + 0.785, + 0.193 + ], + "angle": 0, + "content": "matrix, our 3D-aware rotary cross-attention defaults to standard cross-attention. Figure 4 shows an illustration of this process. 
We insert such 3D-aware cross-attention layers after each self-attention layer in DiT." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.214, + 0.451, + 0.229 + ], + "angle": 0, + "content": "3.5 Training and Inference" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.238, + 0.785, + 0.269 + ], + "angle": 0, + "content": "Loss Function. Our model is trained with the diffusion denoising loss, which is the L2 loss between the predicted noise and the ground-truth added noise." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.272, + 0.787, + 0.424 + ], + "angle": 0, + "content": "Implementation Details We employ DINOv2 [36] pretrained ViT-L/14 as our feature encoder \\( f \\) and MiDaS [4] with DPT-L as our depth estimator. Our denoiser network is built on DiT-B/2 [38] augmented with the proposed 3D-aware rotary cross-attention layers. The image sizes are \\( 256 \\times 256 \\) for both egocentric and exocentric images. We freeze the feature encoder, as it is already well pretrained. The model is trained with the Adam optimizer, using a learning rate of \\( 1e - 5 \\) for the depth estimator and \\( 1e - 4 \\) for the other components. We employ a batch size of 4 per GPU and train the model across 32 V100 GPUs for 100 epochs, requiring approximately 48 hours. We set the diffusion steps \\( T \\) to 1000 during training and sample 20 steps during inference using DDIM [58]." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.446, + 0.375, + 0.463 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.477, + 0.427, + 0.492 + ], + "angle": 0, + "content": "4.1 Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.501, + 0.785, + 0.606 + ], + "angle": 0, + "content": "Ego-Exo4D-VT Benchmark. Our benchmark is constructed based on the Ego-Exo4D dataset [18]. Adhering to the official splits, we use 2680/708/900 takes for training, validation, and testing, respectively. Each take is approximately 30 seconds to 5 minutes long and depicts a person performing a skilled activity, such as cooking a dish, with footage from 4 exocentric cameras and 1 egocentric camera. This benchmark encompasses five diverse, skilled human activities: basketball, bike repair, cooking, health, and music." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.607, + 0.785, + 0.713 + ], + "angle": 0, + "content": "The benchmark features 131 unique scenes, each characterized by complex backgrounds and numerous objects, demonstrating significant scale variation from 1 meter (e.g., a small kitchen) to 10 meters (e.g., a basketball court). These scenes are dynamic and depict subjects performing actions that involve interactions with objects. Additionally, the considerable viewpoint shift from exocentric to egocentric view causes objects to appear relatively small in the exocentric view compared to the egocentric view." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.716, + 0.785, + 0.806 + ], + "angle": 0, + "content": "Baselines. Since this is a new benchmark, we re-purpose a few state-of-the-art methods for image generation: (a) pix2pix [21], a GAN-based method, (b) GNT [61], a NeRF-based method, (c) diffusion model DiT [39] and 3DiM [66]. To tailor DiT for our task, we eliminate its original class label conditioning and condition it on the exocentric image through concatenation. Additionally, we implement 3DiM based on DiT since the code for 3DiM is unavailable." 
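As a recap of the 3D-aware rotary cross-attention of Sec. 3.4 (Eqs. 5-6), a dense reference sketch is given below. The pairwise rotation matrices R_{m,n}, built from the 3D angle between query and key positions with the ego camera as the center following [33], are assumed to be precomputed; the (l, l, c, c) layout and the function name rotary_cross_attention are illustrative and chosen for readability rather than efficiency.

```python
import torch

def rotary_cross_attention(Z, F_exo, R, W_q, W_k, W_v):
    """Z: (l, c) ego-side diffusion tokens; F_exo: (l, c) exocentric features
    from the frozen encoder; R: (l, l, c, c) pairwise rotation matrices R_{m,n};
    W_q, W_k, W_v: (c, c) learnable projections. Returns O of shape (l, c)."""
    l, c = Z.shape
    q = Z @ W_q                                   # queries from the diffusion feature map
    k = F_exo @ W_k                               # keys from the exocentric features
    v = F_exo @ W_v                               # values from the exocentric features
    # Eq. (5): a_{m,n} = softmax_n( q_m^T R_{m,n} k_n / sqrt(c) )
    rk = torch.einsum("mncd,nd->mnc", R, k)       # rotate key n toward query m in 3D
    logits = torch.einsum("mc,mnc->mn", q, rk) / c ** 0.5
    attn = logits.softmax(dim=-1)
    # Eq. (6): O_m = sum_n a_{m,n} v_n
    return attn @ v
```

When every R_{m,n} is the identity this reduces to standard cross-attention, matching the text above; in practice the rotation can be applied as RoPE-style block-diagonal 2x2 rotations indexed by the 3D angle [59], so the dense pairwise tensor never needs to be materialized.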
+ }, + { + "type": "text", + "bbox": [ + 0.217, + 0.81, + 0.785, + 0.84 + ], + "angle": 0, + "content": "Metrics. Following NVS methods [10, 69], we employ perceptual metrics, including LPIPS [71], DISTS [13] and CLIP score [42], to measure the structural" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.23, + 0.114, + 0.732, + 0.129 + ], + "angle": 0, + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.145, + 0.788, + 0.201 + ], + "angle": 0, + "content": "Table 1: Quantitative comparison on the test set of Ego-Exo4D-VT benchmark. \\( {}^{ \\dagger } \\) we reimplement 3DiM based on DiT as their code is not publicly available. Our 4DIFF achieves the best results on all the metrics, outperforming the second best method 3DiM by \\( {3.6}\\% \\) in LPIPS and \\( {1.9}\\% \\) in DISTS." + }, + { + "type": "table", + "bbox": [ + 0.266, + 0.213, + 0.74, + 0.322 + ], + "angle": 0, + "content": "
Method | LPIPS ↓ | DISTS ↓ | CLIP ↑ | PSNR ↑ | SSIM ↑
pix2pix [21] | 0.372 | 0.262 | 68.85 | 15.80 | 0.515
GNT [61] | 0.482 | 0.392 | 63.75 | 14.61 | 0.538
DiT [39] | 0.412 | 0.231 | 77.98 | 15.47 | 0.564
3DiM† [66] | 0.385 | 0.226 | 78.22 | 15.91 | 0.575
4DIFF (ours) | 0.349 | 0.207 | 79.72 | 16.65 | 0.592
" + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.333, + 0.784, + 0.362 + ], + "angle": 0, + "content": "Table 2: Comparison on the seen and unseen test sets of Ego-Exo4D-VT benchmark. \\(\\dagger\\) we reimplement 3DiM based on DiT as their code is not publicly available." + }, + { + "type": "table", + "bbox": [ + 0.226, + 0.374, + 0.779, + 0.551 + ], + "angle": 0, + "content": "
Split Setting | Method | LPIPS ↓ | DISTS ↓ | CLIP ↑ | PSNR ↑ | SSIM ↑
Seen Scenes | pix2pix [21] | 0.371 | 0.260 | 68.68 | 15.90 | 0.519
GNT [61] | 0.479 | 0.390 | 63.44 | 14.71 | 0.542
DiT [39] | 0.406 | 0.226 | 78.74 | 15.64 | 0.570
3DiM† [66] | 0.365 | 0.217 | 78.30 | 15.98 | 0.583
4DIFF (ours) | 0.316 | 0.184 | 82.79 | 17.09 | 0.600
Unseen Scenes | pix2pix [21] | 0.376 | 0.272 | 69.87 | 15.23 | 0.491
GNT [61] | 0.497 | 0.405 | 65.60 | 13.97 | 0.513
DiT [39] | 0.440 | 0.256 | 73.67 | 14.86 | 0.528
3DiM† [66] | 0.436 | 0.269 | 73.26 | 14.90 | 0.542
4DIFF (ours) | 0.427 | 0.246 | 76.54 | 14.45 | 0.508
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.58, + 0.788, + 0.655 + ], + "angle": 0, + "content": "and texture similarity between the synthesized egocentric image and the ground-truth image. Additionally, we include PSNR and SSIM for completeness, even though numerous existing works [7,51,53] have demonstrated that these metrics are suboptimal for evaluating image and video generation models, as they tend to favor conservative and blurry estimates." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.678, + 0.616, + 0.694 + ], + "angle": 0, + "content": "4.2 Comparison with State-of-the-art Methods" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.704, + 0.788, + 0.794 + ], + "angle": 0, + "content": "In Table 1, we present the comparison of our method to various baselines. Notably, diffusion-based models—DiT [39], 3DiM [66], and our 4DIFF—outperform other approaches across all metrics by large margins, including the GAN-based pix2pix and NeRF-based GNT. The poor performance of the NeRF-based method GNT on our benchmark can be attributed to itsslimited capacity for modeling hundreds of different scenes." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.796, + 0.788, + 0.842 + ], + "angle": 0, + "content": "In Table 2, we present the results on seen scenes and unseen scenes respectively and show that our method achieves the best performance. Overall, our method surpasses the second-best performing diffusion-based 3DiM by \\(3.6\\%\\)" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.456, + 0.129 + ], + "angle": 0, + "content": "F. Cheng and M. Luo et al." + }, + { + "type": "image", + "bbox": [ + 0.226, + 0.149, + 0.787, + 0.658 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.673, + 0.788, + 0.729 + ], + "angle": 0, + "content": "Fig. 5: Generated samples from five scenarios: cooking, music, health, basketball, and bike repair. Our 4DIFF demonstrates the best performance across all examples in terms of geometry correctness and object quality. We brighten the images and exclude pix2pix and GNT in the scenario breakdown for a better visual experience." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.734, + 0.788, + 0.765 + ], + "angle": 0, + "content": "in LPIPS and \\(1.9\\%\\) in DISTS, underscoring the effectiveness of our proposed geometry-based approach." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.788, + 0.84 + ], + "angle": 0, + "content": "Figure 5 presents qualitative comparisons with existing methods. GAN-based pix2pix [21] and NeRF-based GNT [61] exhibit challenges in producing photorealistic images, emphasizing the necessity of a robust generative model for the Ego-Exo4D-VT benchmark. 
Our 4DIFF demonstrates superior performance across various scenarios, excelling in both geometry correctness and object qual" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.23, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.145, + 0.361, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.266, + 0.474, + 0.323, + 0.486 + ], + "angle": 0, + "content": "Exo Input" + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.145, + 0.498, + 0.47 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.407, + 0.473, + 0.454, + 0.485 + ], + "angle": 0, + "content": "Ego GT" + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.145, + 0.642, + 0.47 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.495, + 0.473, + 0.626, + 0.487 + ], + "angle": 0, + "content": "NeRF-based Rendering" + }, + { + "type": "image", + "bbox": [ + 0.642, + 0.145, + 0.782, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.633, + 0.473, + 0.777, + 0.486 + ], + "angle": 0, + "content": "Our Rasterization Module" + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.504, + 0.788, + 0.587 + ], + "angle": 0, + "content": "Fig. 6: We evaluate the effectiveness of our egocentric prior rendering module by visualizing the rendered prior image. Compared to NeRF-based rendering (GNT), our rendered prior image exhibits predominantly correct geometry, offering valuable egocentric cues to the diffusion model. Distortions and missing pixels arise from inaccurate depth estimation and occluded or unobserved regions in the exocentric view, which can be corrected by the diffusion model." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.598, + 0.785, + 0.643 + ], + "angle": 0, + "content": "ity. Our 4DIFF is especially advantageous for view synthesis in complex scenes, such as the cooking scenario, where numerous objects exhibit intricate layouts. The qualitative results align well with our quantitative results in Table 1." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.663, + 0.431, + 0.679 + ], + "angle": 0, + "content": "4.3 Qualitative Analysis" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.686, + 0.787, + 0.716 + ], + "angle": 0, + "content": "Investigating the visual results helps to gain a deeper insight into generative models. Thus, we perform a qualitative analysis below." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.72, + 0.788, + 0.84 + ], + "angle": 0, + "content": "Is the egocentric prior useful? We address this question by visualizing the rendered egocentric prior RGB image. In Figure 6, the NeRF-based renderer GNT [61] generates blurry images for all scenes, possibly due to its limited capacity to model many diverse scenes with limited views for supervision. In contrast, our rendered egocentric images produced by point cloud rasterization are mostly correct, offering valuable egocentric cues to the diffusion model. Despite distortions and missing pixels, our diffusion model demonstrates sufficient capacity to rectify these issues effectively." 
+ } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.456, + 0.129 + ], + "angle": 0, + "content": "F. Cheng and M. Luo et al." + }, + { + "type": "image", + "bbox": [ + 0.23, + 0.149, + 0.776, + 0.589 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.6, + 0.788, + 0.644 + ], + "angle": 0, + "content": "Fig. 7: Results on the unseen scenes. When synthesizing views from the scenes not encountered during training, our 4DIFF exhibits slight hallucinations but consistently outperforms existing methods, producing significantly improved results." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.657, + 0.788, + 0.746 + ], + "angle": 0, + "content": "Generalization to unseen scenes. Figure 7 shows our generation results on the unseen scenes. We observe that our 4DIFF displays slight hallucinations, particularly noticeable in elements such as walls. Despite this, our method consistently outperforms existing methods. Such a robust performance can be attributed to the highly generalizable depth-based geometry priors used by our model." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.788, + 0.841 + ], + "angle": 0, + "content": "What causes poor generation? We conduct an analysis to discern errors arising from the diffusion model or geometry priors. In Figure 8, we present two representative examples. The first showcases generation results in an unseen scene, where the egocentric prior image is reasonably good, but the diffusion model exhibits significant hallucinations, yielding an incorrectly generated image. We posit that this discrepancy arises because the diffusion model focuses" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.23, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.149, + 0.363, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.364, + 0.149, + 0.501, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.149, + 0.644, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.644, + 0.15, + 0.787, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.391, + 0.788, + 0.462 + ], + "angle": 0, + "content": "Fig. 8: Failure case examples of our method. Top: While the point cloud rasterization module performs effectively, the diffusion model produces errors when generating an egocentric view. Bottom: Although the diffusion model accurately predicts objects, the synthesized egocentric view appears more zoomed-out than the ground truth view. This can be attributed to suboptimal egocentric layout synthesis." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.48, + 0.788, + 0.54 + ], + "angle": 0, + "content": "on modeling the conditional training distribution, limiting its generalization to substantially different scenes not present in the training data. This limitation can be mitigated by employing a large-scale pretrained diffusion model that has already acquired knowledge from diverse scenes and objects in 2D space." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.542, + 0.788, + 0.616 + ], + "angle": 0, + "content": "In the second example, we show that despite the incorrectly rendered egocentric prior image, the diffusion model can generate a photorealistic image, which is more zoomed-out than the ground-truth egocentric image. This observation suggests that the diffusion model can robustly handle inaccurately generated egocentric geometry priors." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.642, + 0.403, + 0.656 + ], + "angle": 0, + "content": "4.4 Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.67, + 0.788, + 0.791 + ], + "angle": 0, + "content": "How important are our proposed modules? We study the importance of (i) 3D-aware rotary cross-attentions and (ii) egocentric point cloud rasterization by sequentially removing them from our framework. As shown in Tab. 3a, removing the 3D cross-attention worsens the LPIPS by \\(2.4\\%\\). Additionally, removing the point cloud rasterization further degrades LPIPS by \\(3.9\\%\\). Moreover, as shown in Figure 5, our 4DIFF with the proposed geometry priors consistently outperforms geometry-free diffusion models DiT and 3DiM in all scenarios. These results show the effectiveness of our proposed modules." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.788, + 0.84 + ], + "angle": 0, + "content": "Can we pretrain the depth estimator from scratch? Tab. 3b shows that training our model without using a pretrained depth estimator results in a significant \\(4.3\\%\\) degradation in LPIPS. This suggests that an inaccurate depth" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.456, + 0.129 + ], + "angle": 0, + "content": "F. Cheng and M. Luo et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.145, + 0.788, + 0.201 + ], + "angle": 0, + "content": "Table 3: Ablation studies on various design choices. (a) We study the importance of each module by removing each module sequentially; (b) Using a pretrained depth estimator significantly improves the LPIPS by \\(4.3\\%\\); (c) DINOV2 outperforms CLIP by \\(1.7\\%\\) in LPIPS." + }, + { + "type": "table", + "bbox": [ + 0.234, + 0.206, + 0.432, + 0.298 + ], + "angle": 0, + "content": "
(a) Module ablation.
Model | LPIPS ↓
4DIFF | 0.349
- 3D Rotary CA | 0.373
- ego rasterization | 0.412
" + }, + { + "type": "table", + "bbox": [ + 0.458, + 0.206, + 0.603, + 0.284 + ], + "angle": 0, + "content": "
(b) Depth estimator.
Pretrained | LPIPS ↓
✓ | 0.349
X | 0.392
" + }, + { + "type": "table", + "bbox": [ + 0.61, + 0.206, + 0.753, + 0.284 + ], + "angle": 0, + "content": "
(c) Feature encoder.
Feat. Enc. | LPIPS ↓
DINOv2 | 0.349
CLIP | 0.366
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.308, + 0.784, + 0.381 + ], + "angle": 0, + "content": "estimation may lead to most points from the exocentric view projected outside of the egocentric view. Consequently, these points will not receive sufficient gradient updates during training, leading to poor convergence. Thus, we conclude that a sufficiently accurate initial depth prediction is crucial for good performance." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.387, + 0.788, + 0.493 + ], + "angle": 0, + "content": "Which feature encoder should we use? We evaluate two strong feature encoders for obtaining a semantic representation for an exocentric RGB image: DINOv2 [36], and CLIP [42], both employing a ViT-L/14 backbone. The DINOv2 variant outperforms the CLIP variant by \\(1.7\\%\\) LPIPS. We conjecture that compared to CLIP's vision-language pretraining, DINOv2's self-supervised pretraining leads to higher quality lower-level visual features which are important for exocentric to egocentric image translation problem." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.513, + 0.509, + 0.529 + ], + "angle": 0, + "content": "5 Discussion and Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.541, + 0.789, + 0.798 + ], + "angle": 0, + "content": "In this work, we proposed 4DIFF, a 3D-aware transformer-based diffusion model that significantly outperforms prior approaches on the challenging Ego-Exo4D-VT benchmark. Our method demonstrates robust generalization to novel environments not encountered during training. Despite our excellent results, we also acknowledge a few limitations. Firstly, our method assumes known camera poses during training and inference, limiting its applicability to real-world scenarios. Integrating camera pose estimation via a head pose estimator could address this limitation, while remains difficult to estimate automatically. Secondly, our method focuses on image-to-image translation, leaving room for video generation by incorporating spatial-temporal cues. Thirdly, enhancing the quality of generated objects and improving generalization to unseen environments could be achieved by leveraging a more powerful pretrained diffusion model (e.g., Stable Diffusion [49]). Lastly, extending our framework from frame-level synthesis to object-level synthesis, considering the locations and appearances of objects such as hands and interacted objects, would bring it closer to real-world applications like AR/VR coaching. We plan to explore these research directions in our future work." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.23, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.283 + ], + "angle": 0, + "content": "Acknowledgment We thank Hanwen Jiang, Yan-Bo Lin, Md Mohaiminul Islam, Ce Zhang, Yue Yang, and Soumitri Chattopadhyay for their helpful discussions. UT Austin is supported by NSF Grants AF 1901292, CNS 2148141, Tripods CCF 1934932, IFML CCF 2019844 and research gifts by Western Digital, Amazon, WNCG IAP, UT Austin Machine Learning Lab (MLL), Cisco, the Stanly P. Finch Centennial Professorship in Engineering. UNC is supported by Sony Faculty Innovation Award, Laboratory for Analytic Sciences via NC State University, ONR Award N00014-23-1-2356. K.G. 
is paid as a research scientist at Meta." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.307, + 0.325, + 0.323 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.34, + 0.787, + 0.395 + ], + "angle": 0, + "content": "1. Ardeshir, S., Borji, A.: Ego2top: Matching viewers in egocentric and top-view videos. In: Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V 14. pp. 253-268. Springer (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.396, + 0.787, + 0.451 + ], + "angle": 0, + "content": "2. Barron, J.T., Mildenhall, B., Tancik, M., Hedman, P., Martin-Brualla, R., Srinivasan, P.P.: Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5855-5864 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.452, + 0.787, + 0.506 + ], + "angle": 0, + "content": "3. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mipnerf 360: Unbounded anti-aliased neural radiance fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5470-5479 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.507, + 0.787, + 0.535 + ], + "angle": 0, + "content": "4. Birkl, R., Wofk, D., Müller, M.: Midas v3.1 - a model zoo for robust monocular relative depth estimation. arXiv preprint arXiv:2307.14460 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.535, + 0.787, + 0.576 + ], + "angle": 0, + "content": "5. Cao, A., Rockwell, C., Johnson, J.: Fwd: Real-time novel view synthesis with forward warping and depth. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15713-15724 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.577, + 0.787, + 0.632 + ], + "angle": 0, + "content": "6. Chan, E.R., Lin, C.Z., Chan, M.A., Nagano, K., Pan, B., De Mello, S., Gallo, O., Guibas, L.J., Tremblay, J., Khamis, S., et al.: Efficient geometry-aware 3d generative adversarial networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 16123-16133 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.633, + 0.787, + 0.673 + ], + "angle": 0, + "content": "7. Chan, E.R., Nagano, K., Chan, M.A., Bergman, A.W., Park, J.J., Levy, A., Aittala, M., De Mello, S., Karras, T., Wetzstein, G.: Generative novel view synthesis with 3d-aware diffusion models. arXiv preprint arXiv:2304.02602 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.674, + 0.787, + 0.715 + ], + "angle": 0, + "content": "8. Chang, A., Dai, A., Funkhouser, T., Halber, M., Niessner, M., Savva, M., Song, S., Zeng, A., Zhang, Y.: Matterport3d: Learning from rgb-d data in indoor environments. arXiv preprint arXiv:1709.06158 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.223, + 0.715, + 0.787, + 0.757 + ], + "angle": 0, + "content": "9. Chang, A.X., Funkhouser, T., Guibas, L., Hanrahan, P., Huang, Q., Li, Z., Savarese, S., Savva, M., Song, S., Su, H., et al.: Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012 (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.758, + 0.786, + 0.799 + ], + "angle": 0, + "content": "10. Chen, H., Gu, J., Chen, A., Tian, W., Tu, Z., Liu, L., Su, H.: Single-stage diffusion nef: A unified approach to 3d generation and reconstruction. 
arXiv preprint arXiv:2304.06714 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.799, + 0.787, + 0.84 + ], + "angle": 0, + "content": "11. Creswell, A., White, T., Dumoulin, V., Arulkumaran, K., Sengupta, B., Bharath, A.A.: Generative adversarial networks: An overview. IEEE signal processing magazine 35(1), 53-65 (2018)" + }, + { + "type": "list", + "bbox": [ + 0.218, + 0.34, + 0.787, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.455, + 0.128 + ], + "angle": 0, + "content": "F. Cheng and M. Luo et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.148, + 0.785, + 0.175 + ], + "angle": 0, + "content": "12. Dhariwal, P., Nichol, A.: Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems 34, 8780-8794 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.177, + 0.785, + 0.218 + ], + "angle": 0, + "content": "13. Ding, K., Ma, K., Wang, S., Simoncelli, E.P.: Image quality assessment: Unifying structure and texture similarity. IEEE transactions on pattern analysis and machine intelligence 44(5), 2567-2581 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.219, + 0.785, + 0.247 + ], + "angle": 0, + "content": "14. Duan, Y., Guo, X., Zhu, Z.: Diffusiondepth: Diffusion denoising approach for monocular depth estimation. arXiv preprint arXiv:2303.05021 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.248, + 0.785, + 0.303 + ], + "angle": 0, + "content": "15. Flynn, J., Broxton, M., Debevec, P., DuVall, M., Fyffe, G., Overbeck, R., Snavely, N., Tucker, R.: Deepview: View synthesis with learned gradient descent. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2367-2376 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.304, + 0.785, + 0.346 + ], + "angle": 0, + "content": "16. Flynn, J., Neulander, I., Philbin, J., Snively, N.: Deepstereo: Learning to predict new views from the world's imagery. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 5515-5524 (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.346, + 0.785, + 0.374 + ], + "angle": 0, + "content": "17. Geiger, A., Lenz, P., Stiller, C., Urtasun, R.: The kitti vision benchmark suite. URL http://www.cvlibs.net/datasets/kitti2(5) (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.375, + 0.785, + 0.43 + ], + "angle": 0, + "content": "18. Grauman, K., Westbury, A., Torresani, L., Kitani, K., Malik, J., Afouras, T., Ashutosh, K., Baiyya, V., Bansal, S., Boote, B., et al.: Ego-exo4d: Understanding skilled human activity from first-and third-person perspectives. arXiv preprint arXiv:2311.18259 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.219, + 0.431, + 0.785, + 0.459 + ], + "angle": 0, + "content": "19. Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. Advances in Neural Information Processing Systems 33, 6840-6851 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.46, + 0.785, + 0.501 + ], + "angle": 0, + "content": "20. Ho, J., Sahara, C., Chan, W., Fleet, D.J., Norouzi, M., Salimans, T.: Cascaded diffusion models for high fidelity image generation. 
The Journal of Machine Learning Research 23(1), 2249-2281 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.502, + 0.785, + 0.529 + ], + "angle": 0, + "content": "21. Isola, P., Zhu, J.Y., Zhou, T., Efros, A.A.: Image-to-image translation with conditional adversarial networks. CVPR (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.531, + 0.785, + 0.586 + ], + "angle": 0, + "content": "22. Johnson, J., Hariharan, B., Van Der Maaten, L., Fei-Fei, L., Lawrence Zitnick, C., Girshick, R.: Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2901–2910 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.587, + 0.785, + 0.614 + ], + "angle": 0, + "content": "23. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics 42(4) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.615, + 0.785, + 0.657 + ], + "angle": 0, + "content": "24. Koh, J.Y., Lee, H., Yang, Y., Baldridge, J., Anderson, P.: Pathdreamer: A world model for indoor navigation. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 14738-14748 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.658, + 0.785, + 0.699 + ], + "angle": 0, + "content": "25. Kulhánek, J., Derner, E., Sattler, T., Babuška, R.: Viewformer: Nerf-free neural rendering from few images using transformers. In: European Conference on Computer Vision. pp. 198-216. Springer (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.7, + 0.785, + 0.742 + ], + "angle": 0, + "content": "26. Kwon, T., Tekin, B., Stühmer, J., Bogo, F., Pollefeys, M.: H2o: Two hands manipulating objects for first person interaction recognition. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10138-10148 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.743, + 0.785, + 0.784 + ], + "angle": 0, + "content": "27. Li, J., Liu, K., Wu, J.: Ego-body pose estimation via ego-head pose estimation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 17142-17151 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.785, + 0.785, + 0.84 + ], + "angle": 0, + "content": "28. Liu, G., Tang, H., Latapie, H., Yan, Y.: Exocentric to egocentric image generation via parallel generative adversarial network. In: ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). pp. 1843-1847. IEEE (2020)" + }, + { + "type": "list", + "bbox": [ + 0.218, + 0.148, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.23, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.147, + 0.785, + 0.189 + ], + "angle": 0, + "content": "29. Liu, G., Tang, H., Latapie, H.M., Corso, J.J., Yan, Y.: Cross-view exocentric to egocentric video synthesis. In: Proceedings of the 29th ACM International Conference on Multimedia. pp. 974-982 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.19, + 0.785, + 0.231 + ], + "angle": 0, + "content": "30. 
Liu, R., Wu, R., Van Hoorick, B., Tokmakov, P., Zakharov, S., Vondrick, C.: Zero-1-to-3: Zero-shot one image to 3d object. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 9298-9309 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.231, + 0.785, + 0.271 + ], + "angle": 0, + "content": "31. Lombardi, S., Simon, T., Saragih, J., Schwartz, G., Lehrmann, A., Sheikh, Y.: Neural volumes: Learning dynamic renderable volumes from images. arXiv preprint arXiv:1906.07751 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.271, + 0.785, + 0.298 + ], + "angle": 0, + "content": "32. Luo, M., Xue, Z., Dimakis, A., Grauman, K.: Put myself in your shoes: Lifting the egocentric perspective from exocentric videos. In: ECCV (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.298, + 0.785, + 0.325 + ], + "angle": 0, + "content": "33. Mathews, J.: Coordinate-free rotation formalism. American Journal of Physics 44(12), 1210-1210 (1976)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.325, + 0.785, + 0.365 + ], + "angle": 0, + "content": "34. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM 65(1), 99-106 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.365, + 0.785, + 0.393 + ], + "angle": 0, + "content": "35. Niklaus, S., Mai, L., Yang, J., Liu, F.: 3d ken burns effect from a single image. ACM Transactions on Graphics (ToG) 38(6), 1-15 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.393, + 0.785, + 0.433 + ], + "angle": 0, + "content": "36. Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., et al.: Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.433, + 0.785, + 0.473 + ], + "angle": 0, + "content": "37. Park, K., Sinha, U., Barron, J.T., Bouaziz, S., Goldman, D.B., Seitz, S.M., Martin-Brualla, R.: Nerfies: Deformable neural radiance fields. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5865-5874 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.473, + 0.785, + 0.5 + ], + "angle": 0, + "content": "38. Peebles, W., Xie, S.: Scalable diffusion models with transformers. arXiv preprint arXiv:2212.09748 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.501, + 0.785, + 0.541 + ], + "angle": 0, + "content": "39. Peebles, W., Xie, S.: Scalable diffusion models with transformers. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4195-4205 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.541, + 0.785, + 0.595 + ], + "angle": 0, + "content": "40. Popov, S., Bauszat, P., Ferrari, V.: Corenet: Coherent 3d scene reconstruction from a single rgb image. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part II 16. pp. 366-383. Springer (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.596, + 0.785, + 0.637 + ], + "angle": 0, + "content": "41. Pumarola, A., Corona, E., Pons-Moll, G., Moreno-Noguer, F.: D-nerf: Neural radiance fields for dynamic scenes. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
10318-10327 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.637, + 0.785, + 0.691 + ], + "angle": 0, + "content": "42. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International conference on machine learning. pp. 8748-8763. PMLR (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.691, + 0.785, + 0.732 + ], + "angle": 0, + "content": "43. Regmi, K., Borji, A.: Cross-view image synthesis using conditional gans. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition. pp. 3501-3510 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.732, + 0.785, + 0.759 + ], + "angle": 0, + "content": "44. Ren, B., Tang, H., Sebe, N.: Cascaded cross mlp-mixer gans for cross-view image translation. arXiv preprint arXiv:2110.10183 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.759, + 0.785, + 0.8 + ], + "angle": 0, + "content": "45. Ren, X., Wang, X.: Look outside the room: Synthesizing a consistent long-term 3d scene video from a single image. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3563-3573 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.8, + 0.785, + 0.841 + ], + "angle": 0, + "content": "46. Riegler, G., Koltun, V.: Free view synthesis. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XIX 16. pp. 623-640. Springer (2020)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.785, + 0.841 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "18" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.455, + 0.128 + ], + "angle": 0, + "content": "F. Cheng and M. Luo et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.148, + 0.785, + 0.175 + ], + "angle": 0, + "content": "47. Riegler, G., Koltun, V.: Stable view synthesis. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 12216-12225 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.177, + 0.785, + 0.217 + ], + "angle": 0, + "content": "48. Rockwell, C., Fouhey, D.F., Johnson, J.: Pixelsynth: Generating a 3d-consistent experience from a single image. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 14104-14113 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.218, + 0.785, + 0.259 + ], + "angle": 0, + "content": "49. Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 10684-10695 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.26, + 0.785, + 0.3 + ], + "angle": 0, + "content": "50. Rombach, R., Esser, P., Ommer, B.: Geometry-free view synthesis: Transformers and no 3d priors. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 14356-14366 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.301, + 0.785, + 0.342 + ], + "angle": 0, + "content": "51. Sahara, C., Chan, W., Chang, H., Lee, C., Ho, J., Salimans, T., Fleet, D., Norouzi, M.: Palette: Image-to-image diffusion models. In: ACM SIGGRAPH 2022 Conference Proceedings. pp. 
1-10 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.343, + 0.785, + 0.397 + ], + "angle": 0, + "content": "52. Sahara, C., Chan, W., Saxena, S., Li, L., Whang, J., Denton, E.L., Ghasemipour, K., Gontijo Lopes, R., Karagol Ayan, B., Salimans, T., et al.: Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems 35, 36479-36494 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.398, + 0.785, + 0.438 + ], + "angle": 0, + "content": "53. Sahara, C., Ho, J., Chan, W., Salimans, T., Fleet, D.J., Norouzi, M.: Image superresolution via iterative refinement. IEEE Transactions on Pattern Analysis and Machine Intelligence 45(4), 4713-4726 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.439, + 0.785, + 0.508 + ], + "angle": 0, + "content": "54. Sajjadi, M.S., Meyer, H., Pot, E., Bergmann, U., Greff, K., Radwan, N., Vora, S., Lucic, M., Duckworth, D., Dosovitskiy, A., et al.: Scene representation transformer: Geometry-free novel view synthesis through set-latent scene representations. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 6229-6238 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.509, + 0.785, + 0.563 + ], + "angle": 0, + "content": "55. Sener, F., Chatterjee, D., Shelepov, D., He, K., Singhania, D., Wang, R., Yao, A.: Assembly101: A large-scale multi-view video dataset for understanding procedural activities. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21096-21106 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.565, + 0.785, + 0.618 + ], + "angle": 0, + "content": "56. Sitzmann, V., Thies, J., Heide, F., Nießner, M., Wetzstein, G., Zollhofer, M.: Deepvoxels: Learning persistent 3d feature embeddings. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2437-2446 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.62, + 0.785, + 0.66 + ], + "angle": 0, + "content": "57. Sitzmann, V., Zollhöfer, M., Wetzstein, G.: Scene representation networks: Continuous 3d-structure-aware neural scene representations. Advances in Neural Information Processing Systems 32 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.661, + 0.785, + 0.688 + ], + "angle": 0, + "content": "58. Song, J., Meng, C., Ermon, S.: Denoising diffusion implicit models. arXiv:2010.02502 (October 2020), https://arxiv.org/abs/2010.02502" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.689, + 0.785, + 0.716 + ], + "angle": 0, + "content": "59. Su, J., Ahmed, M., Lu, Y., Pan, S., Bo, W., Liu, Y.: Roformer: Enhanced transformer with rotary position embedding. Neurocomputing 568, 127063 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.717, + 0.785, + 0.743 + ], + "angle": 0, + "content": "60. Szymanowicz, S., Rupprecht, C., Vedaldi, A.: Splatter image: Ultra-fast single-view 3d reconstruction. arXiv preprint arXiv:2312.13150 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.744, + 0.785, + 0.785 + ], + "angle": 0, + "content": "61. T, M.V., Wang, P., Chen, X., Chen, T., Venugopalan, S., Wang, Z.: Is attention all that neRF needs? In: The Eleventh International Conference on Learning Representations (2023), https://openreview.net/forum?id=xE-LtsE-xx" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.786, + 0.785, + 0.84 + ], + "angle": 0, + "content": "62. 
Tang, H., Xu, D., Sebe, N., Wang, Y., Corso, J.J., Yan, Y.: Multi-channel attention selection gan with cascaded semantic guidance for cross-view image translation. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 2417-2426 (2019)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.148, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.23, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + }, + { + "type": "page_number", + "bbox": [ + 0.768, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "19" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.147, + 0.785, + 0.175 + ], + "angle": 0, + "content": "63. Trevithick, A., Yang, B.: Grf: Learning a general radiance field for 3d scene representation and rendering (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.176, + 0.785, + 0.217 + ], + "angle": 0, + "content": "64. Tucker, R., Snavely, N.: Single-view view synthesis with multiplane images. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 551-560 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.218, + 0.785, + 0.272 + ], + "angle": 0, + "content": "65. Wang, Q., Wang, Z., Genova, K., Srinivasan, P.P., Zhou, H., Barron, J.T., MartinBrualla, R., Snavely, N., Funkhouser, T.: Ibrnet: Learning multi-view image-based rendering. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4690-4699 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.273, + 0.785, + 0.3 + ], + "angle": 0, + "content": "66. Watson, D., Chan, W., Martin-Brualla, R., Ho, J., Tagliasacchi, A., Norouzi, M.: Novel view synthesis with diffusion models. arXiv preprint arXiv:2210.04628 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.3, + 0.785, + 0.342 + ], + "angle": 0, + "content": "67. Wiles, O., Gkioxari, G., Szeliski, R., Johnson, J.: Synsin: End-to-end view synthesis from a single image. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7467-7477 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.342, + 0.785, + 0.383 + ], + "angle": 0, + "content": "68. Yang, L., Kang, B., Huang, Z., Xu, X., Feng, J., Zhao, H.: Depth anything: Unleashing the power of large-scale unlabeled data. arXiv preprint arXiv:2401.10891 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.383, + 0.785, + 0.425 + ], + "angle": 0, + "content": "69. Yu, A., Ye, V., Tancik, M., Kanazawa, A.: pixelnerf: Neural radiance fields from one or few images. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4578-4587 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.425, + 0.785, + 0.467 + ], + "angle": 0, + "content": "70. Zhai, M., Bessinger, Z., Workman, S., Jacobs, N.: Predicting ground-level scene layout from aerial imagery. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 867-875 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.467, + 0.785, + 0.508 + ], + "angle": 0, + "content": "71. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 
586-595 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.508, + 0.785, + 0.536 + ], + "angle": 0, + "content": "72. Zhou, T., Tucker, R., Flynn, J., Fyffe, G., Snavely, N.: Stereo magnification: Learning view synthesis using multiplane images. arXiv preprint arXiv:1805.09817 (2018)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.785, + 0.536 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/26bdb530-b8d4-43d7-9337-55a1116b4a83_origin.pdf b/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/26bdb530-b8d4-43d7-9337-55a1116b4a83_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..73d14155a5ee6693362debed42c85aacd9ceb75d --- /dev/null +++ b/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/26bdb530-b8d4-43d7-9337-55a1116b4a83_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26043462628d4089bfa9196a690eff26d88b5ecf549f616ae3629f49fd99a4fc +size 10930879 diff --git a/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/full.md b/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/full.md new file mode 100644 index 0000000000000000000000000000000000000000..08675f3f49ca88b94ea470787c5b70d9703016b3 --- /dev/null +++ b/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/full.md @@ -0,0 +1,303 @@ +# 4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation + +Feng Cheng $^{1,3*}$ , Mi Luo $^{2*}$ , Huiyu Wang $^{1}$ , Alex Dimakis $^{2}$ , Lorenzo Torresani $^{1}$ , Gedas Bertasius $^{3\dagger}$ , and Kristen Grauman $^{1,2\dagger}$ + +$^{1}$ FAIR, Meta AI + $^{2}$ The University of Texas at Austin + $^{3}$ University of North Carolina at Chapel Hill +* Equal contribution, † Co-lead the project + +Abstract. We present 4DIFF, a 3D-aware diffusion model addressing the exo-to-ego viewpoint translation task — generating first-person (ego-centric) view images from the corresponding third-person (exocentric) images. Building on the diffusion model's ability to generate photorealistic images, we propose a transformer-based diffusion model that incorporates geometry priors through two mechanisms: (i) egocentric point cloud rasterization and (ii) 3D-aware rotary cross-attention. Egocentric point cloud rasterization converts the input exocentric image into an egocentric layout, which is subsequently used by a diffusion image transformer. As a component of the diffusion transformer's denoiser block, the 3D-aware rotary cross-attention further incorporates 3D information and semantic features from the source exocentric view. Our 4DIFF achieves state-of-the-art results on the challenging and diverse Ego-Exo4D multiview dataset and exhibits robust generalization to novel environments not encountered during training. Our code, processed data, and pretrained models are publicly available at https://klauscc.github.io/4diff. + +Keywords: Egocentric Vision $\cdot$ View Synthesis + +# 1 Introduction + +From early developmental stages, humans adeptly observe external actions (exo) and seamlessly integrate them into their own repertoire (ego), forming the cornerstone of visual learning. This actor-observer translation mechanism not only shapes individual development but also holds profound implications for technological advancements. 
Imagine the ability to immerse yourself in the first-person perspective of renowned athletes like Messi or glean intricate piano techniques from online tutorials converted to a first-person viewpoint. Such experiences hinge on seamless translation from third-person to first-person perspectives, highlighting the pivotal role of cross-view translation in facilitating immersive and enriching experiences across diverse domains. + +We leverage the recently released Ego-Exo4D dataset [18] to explore the third-person (exocentric) to first-person (egocentric) viewpoint translation task. + +![](images/a38f371d4df030816d70b53bd9ff161ee79bd243c814920455fc7f8bcd1ed286.jpg) +Fig. 1: Given exocentric images of an egocentric camera wearer engaged in daily activities and the corresponding camera trajectories, we aim to synthesize the corresponding egocentric view that captures the scene from the wearer's first-person perspective. + +As illustrated in Figure 1, our focus is on transforming the exocentrically observed images containing a designated individual into images depicting the same scene from the individual's first-person perspective. Our task is a specific instance of the Novel View Synthesis (NVS) task, which aims to generate new views conditioned on a few given views of a scene. However, the Ego-Exo4D dataset presents a formidable challenge compared to traditional novel view synthesis datasets [8, 9, 17, 22, 57, 72] and multiview datasets [1, 26, 55, 70]. As illustrated in Figure 2, the scenes in the Ego-Exo4D dataset are characterized by numerous objects and dynamic actions performed by the participants. The dataset encompasses diverse scenes, ranging from indoor to outdoor activities such as cooking and basketball. Furthermore, the visual differences between exocentric and egocentric images are pronounced due to sharp viewpoint changes. Besides, unlike numerous NVS datasets that use 3D data for arbitrary viewpoint sampling during training, Ego-Exo4D dataset only provides several views (e.g., four exo and one ego view) for each dynamic scene, which presents a challenge for convergence of prior geometry-based methods that regress the entire scene. + +Due to the challenges mentioned above, existing methods exhibit unsatisfactory performance in the exo-to-ego view translation task. Geometry-free generative models, including GAN-based [6,21] and diffusion-based [30,38,66] methods, face challenges in generating geometrically-correct images due to high complexity of the scenes. In contrast, geometry-based approaches, exemplified by NeRF-based methods [2,3,34,37,41,69], encounter limitations in achieving photorealistic images. Recent attempts [7, 10] aim to reconcile this dilemma by integrating a strong geometry-based method (e.g. NeRF-based) into diffusion models. However, these models are typically difficult to optimize on the extremely diverse scenes in the Ego-Exo4D benchmark, as we show in Sec. 4.2. Thus, they often fail to provide constructive geometry priors to the subsequent diffusion model. + +Motivated by these observations, we propose 4DIFF, a 3D-Aware Diffusion model for exocentric to egocentric viewpoint translation. We propose two mechanisms to incorporate 3D geometry into the diffusion model: (i) egocentric point cloud rasterization, and (ii) 3D-aware rotary cross-attention layers. Rather than relying on a complex geometry model like NeRF, we render an egocentric prior image using a lightweight rasterization technique [5, 67]. 
As a result, our approach is both easy to train and adaptable, allowing it to incorporate existing + +![](images/9d391c59f0a1e04d3b2bf0aab09cffa0258fa9eeaed4a8728f942276bcac850a.jpg) +Fig. 2: Comparison of the Ego-Exo4D viewpoint translation (Ego-Exo4D-VT) benchmark, which we build on the Ego-Exo4D dataset [18], with existing novel view synthesis and cross-view translation benchmarks. Ego-Exo4D-VT presents numerous challenges that require fundamental advances in generative modeling to address. + +open-source pretrained depth estimators. These estimators have demonstrated effectiveness in processing images from previously unseen environments [4, 68]. Solely rendering the egocentric prior feature map through point cloud rasterization can be problematic, as the source exo view often contains occluded and unobserved regions. To address this, we seamlessly integrate rasterization into the diffusion model, leveraging its substantial capacity for extrapolation and generating high-quality images. We further enhance the expressivity of our diffusion model by introducing 3D-aware rotary cross-attention, which is integrated into each denoising block of the model. This functionality aims to improve feature similarities and 3D spatial similarities between ego and exo views, allowing the diffusion feature maps to incorporate information from the semantic features encoded in the exocentric image more effectively. + +Our method 4DIFF surpasses prior state-of-the-art techniques on the challenging Ego-Exo4D viewpoint translation benchmark, achieving a $3.6\%$ improvement in LPIPS. Furthermore, leveraging the extensive scale of Ego-Exo4D data, our approach demonstrates robust generalization to novel environments not encountered during training. + +# 2 Related Work + +Exo-to-Ego Viewpoint Translation. Prior methods [28, 44, 62] tackled this problem predominantly via GAN-based models [11]. Specifically, [43] proposed the X-Fork and X-Seq GAN-based architecture using an additional semantic map + +for enhanced generation. [29] introduced STA-GAN, which focuses on learning spatial and temporal information to generate egocentric videos from exocentric views. [32] focuses on hand-object interactions, proposing to decouple hand layout generation and ego frame generation with a diffusion model. None of these methods develop an explicit geometry-aware generative framework. In contrast, our work introduces two effective mechanisms to incorporate 3D geometric priors into the diffusion model, specifically tailored to address the challenges posed by the Ego-Exo4D-VT benchmark. + +Novel View Synthesis (NVS). Our exo-to-ego viewpoint translation task represents a distinct facet of the NVS task, which aims to generate a target image with an arbitrary target camera pose from given source images and their camera poses. Previous works in NVS can be categorized into geometry-based [15,16,31,46,47,56,64,72], regression-based methods [25,35,54,63-65,69,72] and generative models [24,45,48,50,66,67]. Recently, several geometry-aware generative models [7,10] have explored ways to integrate NeRF with diffusion models. For instance, GeNVS [7] incorporates geometry priors into their diffusion model using a variant of pixelNeRF [69], which renders a target feature map from a 3D feature field. SSDNeRF [10] proposes a unified approach that employs an expressive diffusion model to learn a generalizable prior of neural radiance field (NeRF). 
However, these geometry-based models, typically implemented as NeRFs, often struggle to provide meaningful geometry priors to the diffusion model, especially in the challenging Ego-Exo4D-VT benchmark. This is because complex geometry methods require strong supervision (e.g., many densely sampled views of the same scene), which Ego-Exo4D does not provide. In contrast, our method uses simple point-cloud rasterization that relies solely on accurate depth estimation, avoiding the modeling of occluded and unobserved areas in the exocentric view. This approach shows better generalization and benefits from existing large-scale pretrained depth estimators. + +Diffusion Models [12, 19, 49] have made significant strides in producing photorealistic images and videos. They excel in modeling conditional distributions, including scenarios where conditioning is based on text [49, 52] or another image [20, 53]. Prior work has demonstrated a wide range of successful applications of diffusion models, including human pose generation [27] and depth estimation [14]. In our work, we employ a transformer-based diffusion model [39] to model the distribution of egocentric images conditioned on exocentric images. + +# 3 Methodology + +# 3.1 Problem Setup + +Given an exocentric image $x \in \mathbb{R}^{h \times w \times 3}$ and the relative camera pose $P \in \mathbb{R}^{4 \times 4}$ from exo camera to the ego camera of the person of interest, our goal is to synthesize an egocentric image $y \in \mathbb{R}^{h \times w \times 3}$ from the conditional distribution: + +$$ +p (y | x, P) \tag {1} +$$ + +![](images/3d01e8a4ec309dc78928e926a18b1c3bf62b89e64d64c070db82d54a7c62a693.jpg) +Fig. 3: We propose 4DIFF, a 3D-Aware Diffusion model for exocentric to egocentric viewpoint translation. Our framework uses a point cloud rasterization scheme first to compute an egocentric prior, which captures egocentric layout cues. Afterward, the egocentric prior is fed into the diffusion model augmented with the proposed 3D-aware rotary cross-attention for egocentric image generation. The proposed 3D-aware rotary cross-attention guides the attention to consider geometric relationships between the egocentric and exocentric diffusion feature maps. + +We assume the relative camera pose $(P)$ is known, similar to the standard NVs tasks [40, 61, 69]. + +Relation to the Official Ego-Exo4D Translation Benchmark. Ego-Exo4D [18] introduced an exo-to-ego translation benchmark, with the primary emphasis on object-level synthesis, i.e., generating an object at the correct location in the ego view based on an exo image and an exo segmentation mask of the object of interest. This approach is particularly valuable for precise object placement and detailed object-level interactions. In contrast, we focus on full-image synthesis — allowing for the generation of entire scenes, and enhancing the richness and diversity of generated viewpoints. Both are complementary; while Ego-Exo4D excels in object-specific scenarios, our method expands the scope to full-scene synthesis and can be seen as a new specialized NVS task. + +# 3.2 Our Framework + +Due to the inherent complexity and dynamism present in diverse scenes, we use an expressive transformer-based diffusion model to model the conditional distribution in Equation 1. However, due to the inability to explicitly model 3D cues, the standard diffusion model may struggle to generate geometry-consistent images. 
Thus, we propose two techniques to incorporate geometry into our diffusion model: (i) egocentric point cloud rasterization and (ii) 3D-aware rotary cross-attention. As shown in Figure 3, the point cloud rasterization first renders an egocentric prior from the input exocentric view, which is then fed into the diffusion model. Afterward, the conditioned diffusion model is augmented with the proposed 3D-aware rotary cross-attention to generate the target egocentric image. We now describe each module in more detail. + +# 3.3 Egocentric Point Cloud Rasterization + +As a first step in our framework, we render an egocentric prior via the point cloud rasterization from an exocentric view. Specifically, we first use a depth estimator to convert the exocentric 2D image $x$ and a feature map $F^{\mathrm{exo}}$ into a feature point cloud. Then, a differential renderer [67] projects this point cloud into an egocentric prior $H^{\mathrm{prior}}$ : + +$$ +H ^ {\text {p r i o r}} = \left[ x ^ {\text {p r i o r}}, F ^ {\text {p r i o r}} \right] = \operatorname {r e n d e r} \left(\left[ x, F ^ {\text {e x o}} \right], D, P\right) \tag {2} +$$ + +Here, $F^{\mathrm{exo}}$ is the semantic features of the exocentric image encoded by a feature encoder $f$ , $x^{\mathrm{prior}}$ and $F^{\mathrm{prior}}$ are the egocentric prior image and a feature map, rendered from the exocentric image $x$ and a feature map $F^{\mathrm{exo}}$ respectively. $D$ denotes the depth map predicted by a depth estimator, and $P$ represents the relative camera pose. + +Depth Estimator. We construct the depth estimator based on the pretrained MiDaS [4]. Since MiDaS predicts relative disparity (the inverse of depth), we introduce two learnable scalars $s$ and $t$ for dataset-specific calibration. The depth map $D$ is predicted using the formula: + +$$ +D = 1 / \left(s \cdot \operatorname {M i D a S} \left(x ^ {\mathrm {e x o}}\right) + t\right). \tag {3} +$$ + +Rasterization. We employ the differentiable renderer [67] for our rasterization. This renderer splats 3D points onto the image plane and calculates pixel values by blending point features. In contrast to more intricate rendering techniques like NeRF [34,69] or Gaussian Splatting [23,60], our renderer is simpler to converge. It relies solely on depth estimation from 2D images, leveraging large-scale pretrained depth estimators. This design choice ensures robust generalization across diverse scenarios. + +# 3.4 3D-Aware Diffusion Image Transformer + +Our diffusion model uses a denoiser network to predict added noise $\epsilon_{t}$ from the noisy target egocentric image $y_{t} = \sqrt{\bar{\alpha}_{t}} y + \sqrt{1 - \bar{\alpha}_{t}}\epsilon_{t}$ , conditioned on the previously obtained egocentric prior $H^{\mathrm{prior}}$ and the exocentric semantic features $F^{\mathrm{exo}}$ : + +$$ +\hat {\epsilon} _ {t} = \epsilon_ {\theta} ([ y _ {t}, H ^ {\text {p r i o r}} ], F ^ {\text {e x o}}). \tag {4} +$$ + +During inference, the target egocentric image $y_0$ is generated from a standard Gaussian noise $y_T$ by applying the denoiser network $\epsilon_{\theta}$ iteratively with a sampling strategy (e.g. DDIM [58]), i.e. $y_T \to y_{T - \delta} \to \ldots \to y_0$ . + +Denoiser Network $\epsilon_{\theta}$ . Our proposed 3D-aware Diffusion image Transformer serves as the denoiser network. 
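To make Equation 4 and the denoising objective concrete, the following is a minimal PyTorch-style sketch of one training step. The names `denoiser`, `encode`, and `render_prior` are illustrative placeholders for the diffusion transformer, the exocentric feature encoder, and the point cloud rasterization of Equation 2; the latent autoencoding of the target image is omitted, so this is a simplified sketch under those assumptions rather than the released implementation.

```python
import torch
import torch.nn.functional as F


def denoising_training_step(denoiser, encode, render_prior,
                            x_exo, y_ego, pose, alphas_cumprod):
    """One epsilon-prediction step for p(y | x, P); alphas_cumprod is a (T,) tensor."""
    F_exo = encode(x_exo)                               # exocentric semantic features
    H_prior = render_prior(x_exo, F_exo, pose)          # egocentric prior (Equation 2);
                                                        # depth via Equation 3: D = 1/(s*MiDaS(x)+t)
    b = y_ego.shape[0]
    t = torch.randint(0, alphas_cumprod.numel(), (b,), device=y_ego.device)
    a_bar = alphas_cumprod[t].view(b, 1, 1, 1)
    eps = torch.randn_like(y_ego)
    y_t = a_bar.sqrt() * y_ego + (1.0 - a_bar).sqrt() * eps         # forward diffusion
    eps_hat = denoiser(torch.cat([y_t, H_prior], dim=1), F_exo, t)  # Equation 4
    return F.mse_loss(eps_hat, eps)                     # L2 denoising loss (Sec. 3.5)
```

At inference, the same `denoiser` call is applied iteratively inside a DDIM sampling loop, starting from Gaussian noise $y_T$.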
As shown in Figure 3 and Equation 4, our Transformer network takes as input the concatenation of the egocentric prior $H^{\mathrm{prior}}$ and the noisy target egocentric image $y_{t}$ encoded via an off-the-shelf autoencoder from [49]. Following [39], the architecture of DiT is the same as ViT, consisting + +![](images/8c2ea3fe833a7b406dce9ca37f22a3777b6854d9578326d3d82a2aa79ed62729.jpg) +Fig. 4: An illustration of the calculation of the rotation matrix $R_{m,n}$ in our 3D-aware rotary cross attention. + +of $N$ transformer layers, each with a self-attention layer and a feedforward network. To further enhance the expressivity of our model and incorporate more geometric cues, we propose 3D-aware rotary cross-attention layers, which we describe next. + +3D-aware Rotary Cross-Attention. When conditioning the diffusion model on the exocentric feature map, we should consider similarities in the semantic feature and spatial 3D space. Exocentric features similar in appearance (i.e., semantic feature space) and 3D location with respect to the query features should have higher attention values in the diffusion model. Motivated by RoPE [59], we achieve this by incorporating rotations during attention weight calculations. The degree of rotation between a query and a key is determined by the angle between their 3D coordinates, with the ego camera as the center. Consequently, the cosine similarity between the query and key features can incorporate their 3D spatial angle, effectively capturing the 3D relationships between corresponding points in the egocentric and exocentric views. + +Specifically, given a feature map $Z \in \mathbb{R}^{l \times c}$ in the diffusion model and the exocentric semantic feature map $F^{\mathrm{exo}} \in \mathbb{R}^{l \times c}$ , the 3D-aware rotary cross-attention calculates the output $O \in \mathbb{R}^{l \times c}$ as: + +$$ +a _ {m, n} = \frac {\exp \left(\frac {q _ {m} ^ {T} R _ {m , n} k _ {n}}{\sqrt {c}}\right)}{\sum_ {j = 1} ^ {l} \exp \left(\frac {q _ {m} ^ {T} R _ {m , j} k _ {j}}{\sqrt {c}}\right)} \tag {5} +$$ + +$$ +O _ {m} = \sum_ {n = 1} ^ {l} a _ {m, n} v _ {n} \tag {6} +$$ + +Here, $q_{m} = Z_{m}W_{q}$ is the $m$ -th query token, $k_{n} = F_{n}^{\mathrm{exo}}W_{k}$ is the $n$ -th key token and $v_{n} = F_{n}^{\mathrm{exo}}W_{v}$ is the $n$ -th value token. $W_{q}, W_{k}, W_{v}$ are learnable project matrices. $R_{m,n}$ is the rotation matrix that rotates the key token to align with the value token in 3D space, where the egocentric camera is used as the center. Since the query token is in the egocentric view, we map its coordinates to the exocentric view using the relative camera pose. The rotation matrix is computed in the exocentric view using the algorithm from [33]. When $R_{m,n}$ is an identity + +matrix, our 3D-aware rotary cross-attention defaults to standard cross-attention. Figure 4 shows an illustration of this process. We insert such 3D-aware cross-attention layers after each self-attention layer in DiT. + +# 3.5 Training and Inference + +Loss Function. Our model is trained with the diffusion denoising loss, which is the L2 loss between the predicted noise and the ground-truth added noise. + +Implementation Details We employ DINOv2 [36] pretrained ViT-L/14 as our feature encoder $f$ and MiDaS [4] with DPT-L as our depth estimator. Our denoiser network is built on DiT-B/2 [38] augmented with the proposed 3D-aware rotary cross-attention layers. The image sizes are $256 \times 256$ for both egocentric and exocentric images. 
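For reference, a naive single-head sketch of the 3D-aware rotary cross-attention of Equations 5 and 6 is given below. The unit direction inputs `dirs_q` and `dirs_k` (token directions about the egocentric camera center, with the query directions mapped into the exocentric frame by the relative pose), the Rodrigues construction of the rotation $R_{m,n}$, and the $O(l^2)$ pairwise loop are simplifying assumptions for illustration; they are not the exact implementation, which would batch these operations.

```python
import torch
import torch.nn.functional as F


def rotation_aligning(v, u, eps=1e-8):
    """3x3 rotation taking unit vector v onto unit vector u (Rodrigues' formula).

    The degenerate near-parallel case is not handled in this sketch."""
    axis = torch.linalg.cross(v, u)
    s = axis.norm().clamp(min=eps)                      # sin of the angle between v and u
    c = torch.dot(v, u)                                 # cos of the angle
    ax, ay, az = axis.tolist()
    K = torch.tensor([[0.0, -az, ay],
                      [az, 0.0, -ax],
                      [-ay, ax, 0.0]], dtype=v.dtype, device=v.device)
    return torch.eye(3, dtype=v.dtype, device=v.device) + K + K @ K * ((1.0 - c) / (s * s))


def rotary_cross_attention(Z, F_exo, dirs_q, dirs_k, Wq, Wk, Wv):
    """Equations 5 and 6 with logits q_m^T R_{m,n} k_n / sqrt(c).

    Z, F_exo: (l, c) ego-side and exo-side token features, c divisible by 3.
    dirs_q, dirs_k: (l, 3) unit token directions used to build R_{m,n}."""
    l, c = Z.shape
    q, k, v = Z @ Wq, F_exo @ Wk, F_exo @ Wv
    logits = torch.empty(l, l, dtype=Z.dtype, device=Z.device)
    for m in range(l):
        for n in range(l):
            R = rotation_aligning(dirs_k[n], dirs_q[m])       # align key direction to query direction
            k_rot = (k[n].view(-1, 3) @ R.T).reshape(-1)      # rotate the key in 3-dim groups
            logits[m, n] = q[m] @ k_rot / (c ** 0.5)          # Equation 5 logit
    return F.softmax(logits, dim=-1) @ v                      # Equation 6: O = A V
```

When the rotation is the identity, the sketch reduces to standard cross-attention, matching the behavior noted above.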
We freeze the feature encoder, as it is already well pretrained. The model is trained with the Adam optimizer, using a learning rate of $1e - 5$ for the depth estimator and $1e - 4$ for the other components. We employ a batch size of 4 per GPU and train the model across 32 V100 GPUs for 100 epochs, requiring approximately 48 hours. We set the diffusion steps $T$ to 1000 during training and sample 20 steps during inference using DDIM [58]. + +# 4 Experiments + +# 4.1 Experimental Setup + +Ego-Exo4D-VT Benchmark. Our benchmark is constructed based on the Ego-Exo4D dataset [18]. Adhering to the official splits, we use 2680/708/900 takes for training, validation, and testing, respectively. Each take is approximately 30 seconds to 5 minutes long and depicts a person performing a skilled activity, such as cooking a dish, with footage from 4 exocentric cameras and 1 egocentric camera. This benchmark encompasses five diverse, skilled human activities: basketball, bike repair, cooking, health, and music. + +The benchmark features 131 unique scenes, each characterized by complex backgrounds and numerous objects, demonstrating significant scale variation from 1 meter (e.g., a small kitchen) to 10 meters (e.g., a basketball court). These scenes are dynamic and depict subjects performing actions that involve interactions with objects. Additionally, the considerable viewpoint shift from exocentric to egocentric view causes objects to appear relatively small in the exocentric view compared to the egocentric view. + +Baselines. Since this is a new benchmark, we re-purpose a few state-of-the-art methods for image generation: (a) pix2pix [21], a GAN-based method, (b) GNT [61], a NeRF-based method, (c) diffusion model DiT [39] and 3DiM [66]. To tailor DiT for our task, we eliminate its original class label conditioning and condition it on the exocentric image through concatenation. Additionally, we implement 3DiM based on DiT since the code for 3DiM is unavailable. + +Metrics. Following NVS methods [10, 69], we employ perceptual metrics, including LPIPS [71], DISTS [13] and CLIP score [42], to measure the structural + +Table 1: Quantitative comparison on the test set of Ego-Exo4D-VT benchmark. ${}^{ \dagger }$ we reimplement 3DiM based on DiT as their code is not publicly available. Our 4DIFF achieves the best results on all the metrics, outperforming the second best method 3DiM by ${3.6}\%$ in LPIPS and ${1.9}\%$ in DISTS. + +
| Method | LPIPS ↓ | DISTS ↓ | CLIP ↑ | PSNR ↑ | SSIM ↑ |
| --- | --- | --- | --- | --- | --- |
| pix2pix [21] | 0.372 | 0.262 | 68.85 | 15.80 | 0.515 |
| GNT [61] | 0.482 | 0.392 | 63.75 | 14.61 | 0.538 |
| DiT [39] | 0.412 | 0.231 | 77.98 | 15.47 | 0.564 |
| 3DiM† [66] | 0.385 | 0.226 | 78.22 | 15.91 | 0.575 |
| 4DIFF (ours) | 0.349 | 0.207 | 79.72 | 16.65 | 0.592 |
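The perceptual scores in Tables 1 and 2 can be approximated with off-the-shelf packages; below is a minimal sketch using the `lpips` package together with a hand-rolled PSNR. The AlexNet LPIPS backbone and the $[0,1]$ input range are assumptions made for illustration, not a specification of the exact evaluation protocol used here.

```python
import torch
import lpips  # pip install lpips

lpips_fn = lpips.LPIPS(net="alex")  # assumed backbone choice


def psnr(pred, target, max_val=1.0):
    """PSNR in dB for image tensors in [0, max_val]."""
    mse = torch.mean((pred - target) ** 2)
    return 10.0 * torch.log10(max_val ** 2 / mse)


def evaluate_pair(pred, target):
    """pred, target: (1, 3, H, W) tensors in [0, 1] (generated vs. ground-truth ego view)."""
    d_lpips = lpips_fn(pred * 2 - 1, target * 2 - 1).item()  # LPIPS expects inputs in [-1, 1]
    return {"lpips": d_lpips, "psnr": psnr(pred, target).item()}
```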
+ +Table 2: Comparison on the seen and unseen test sets of Ego-Exo4D-VT benchmark. $\dagger$ we reimplement 3DiM based on DiT as their code is not publicly available. + +
| Split | Method | LPIPS ↓ | DISTS ↓ | CLIP ↑ | PSNR ↑ | SSIM ↑ |
| --- | --- | --- | --- | --- | --- | --- |
| Seen Scenes | pix2pix [21] | 0.371 | 0.260 | 68.68 | 15.90 | 0.519 |
| Seen Scenes | GNT [61] | 0.479 | 0.390 | 63.44 | 14.71 | 0.542 |
| Seen Scenes | DiT [39] | 0.406 | 0.226 | 78.74 | 15.64 | 0.570 |
| Seen Scenes | 3DiM† [66] | 0.365 | 0.217 | 78.30 | 15.98 | 0.583 |
| Seen Scenes | 4DIFF (ours) | 0.316 | 0.184 | 82.79 | 17.09 | 0.600 |
| Unseen Scenes | pix2pix [21] | 0.376 | 0.272 | 69.87 | 15.23 | 0.491 |
| Unseen Scenes | GNT [61] | 0.497 | 0.405 | 65.60 | 13.97 | 0.513 |
| Unseen Scenes | DiT [39] | 0.440 | 0.256 | 73.67 | 14.86 | 0.528 |
| Unseen Scenes | 3DiM† [66] | 0.436 | 0.269 | 73.26 | 14.90 | 0.542 |
| Unseen Scenes | 4DIFF (ours) | 0.427 | 0.246 | 76.54 | 14.45 | 0.508 |
+ +and texture similarity between the synthesized egocentric image and the ground-truth image. Additionally, we include PSNR and SSIM for completeness, even though numerous existing works [7,51,53] have demonstrated that these metrics are suboptimal for evaluating image and video generation models, as they tend to favor conservative and blurry estimates. + +# 4.2 Comparison with State-of-the-art Methods + +In Table 1, we present the comparison of our method to various baselines. Notably, diffusion-based models—DiT [39], 3DiM [66], and our 4DIFF—outperform other approaches across all metrics by large margins, including the GAN-based pix2pix and NeRF-based GNT. The poor performance of the NeRF-based method GNT on our benchmark can be attributed to itsslimited capacity for modeling hundreds of different scenes. + +In Table 2, we present the results on seen scenes and unseen scenes respectively and show that our method achieves the best performance. Overall, our method surpasses the second-best performing diffusion-based 3DiM by $3.6\%$ + +![](images/8b80d7e937c46b87f0cf87ef1766837fdafd717fd27fecc2e0cba73dcd7dc2aa.jpg) +Fig. 5: Generated samples from five scenarios: cooking, music, health, basketball, and bike repair. Our 4DIFF demonstrates the best performance across all examples in terms of geometry correctness and object quality. We brighten the images and exclude pix2pix and GNT in the scenario breakdown for a better visual experience. + +in LPIPS and $1.9\%$ in DISTS, underscoring the effectiveness of our proposed geometry-based approach. + +Figure 5 presents qualitative comparisons with existing methods. GAN-based pix2pix [21] and NeRF-based GNT [61] exhibit challenges in producing photorealistic images, emphasizing the necessity of a robust generative model for the Ego-Exo4D-VT benchmark. Our 4DIFF demonstrates superior performance across various scenarios, excelling in both geometry correctness and object qual + +![](images/c00a92bce599549a6f698586adccda63aa80e9b00bbf7472192bed31fd128aa3.jpg) +Exo Input +Fig. 6: We evaluate the effectiveness of our egocentric prior rendering module by visualizing the rendered prior image. Compared to NeRF-based rendering (GNT), our rendered prior image exhibits predominantly correct geometry, offering valuable egocentric cues to the diffusion model. Distortions and missing pixels arise from inaccurate depth estimation and occluded or unobserved regions in the exocentric view, which can be corrected by the diffusion model. + +![](images/09ac5589ff26abb17deb1213cb52f061a5ab2b1e421424f6415d5f5fb8bb9eca.jpg) +Ego GT + +![](images/b8b5883e8904ab21488b61fdc9bf1cea87c301f4f878fff1da57837a46476205.jpg) +NeRF-based Rendering + +![](images/36995d5298cf2e4957d0a8f9a6b243beba9230043114f521b6abf018ba45bcf2.jpg) +Our Rasterization Module + +ity. Our 4DIFF is especially advantageous for view synthesis in complex scenes, such as the cooking scenario, where numerous objects exhibit intricate layouts. The qualitative results align well with our quantitative results in Table 1. + +# 4.3 Qualitative Analysis + +Investigating the visual results helps to gain a deeper insight into generative models. Thus, we perform a qualitative analysis below. + +Is the egocentric prior useful? We address this question by visualizing the rendered egocentric prior RGB image. In Figure 6, the NeRF-based renderer GNT [61] generates blurry images for all scenes, possibly due to its limited capacity to model many diverse scenes with limited views for supervision. 
In contrast, our rendered egocentric images produced by point cloud rasterization are mostly correct, offering valuable egocentric cues to the diffusion model. Despite distortions and missing pixels, our diffusion model demonstrates sufficient capacity to rectify these issues effectively. + +![](images/405f22d7102ceb3f786f727dfba23d882fc90c93d5871ca0cf818e8e96b4c7de.jpg) +Fig. 7: Results on the unseen scenes. When synthesizing views from the scenes not encountered during training, our 4DIFF exhibits slight hallucinations but consistently outperforms existing methods, producing significantly improved results. + +Generalization to unseen scenes. Figure 7 shows our generation results on the unseen scenes. We observe that our 4DIFF displays slight hallucinations, particularly noticeable in elements such as walls. Despite this, our method consistently outperforms existing methods. Such a robust performance can be attributed to the highly generalizable depth-based geometry priors used by our model. + +What causes poor generation? We conduct an analysis to discern errors arising from the diffusion model or geometry priors. In Figure 8, we present two representative examples. The first showcases generation results in an unseen scene, where the egocentric prior image is reasonably good, but the diffusion model exhibits significant hallucinations, yielding an incorrectly generated image. We posit that this discrepancy arises because the diffusion model focuses + +![](images/8d66151c18e681d7a70df2474aa4f366825c46b17d254df1c71448da6cf22f0f.jpg) +Fig. 8: Failure case examples of our method. Top: While the point cloud rasterization module performs effectively, the diffusion model produces errors when generating an egocentric view. Bottom: Although the diffusion model accurately predicts objects, the synthesized egocentric view appears more zoomed-out than the ground truth view. This can be attributed to suboptimal egocentric layout synthesis. + +![](images/e3188bbeb7f6979856526fbfc09a8e3ced963ee0d148bf9fa6d5877ddcab80c7.jpg) + +![](images/6b7038d3a6cbeb3d000e7185604536eb886025b4ef2008c04f6cb751a8a7d438.jpg) + +![](images/4a83e828a2639d07996a7812f789f9be8978a26b13fe095edc6f4dd1e0818e8a.jpg) + +on modeling the conditional training distribution, limiting its generalization to substantially different scenes not present in the training data. This limitation can be mitigated by employing a large-scale pretrained diffusion model that has already acquired knowledge from diverse scenes and objects in 2D space. + +In the second example, we show that despite the incorrectly rendered egocentric prior image, the diffusion model can generate a photorealistic image, which is more zoomed-out than the ground-truth egocentric image. This observation suggests that the diffusion model can robustly handle inaccurately generated egocentric geometry priors. + +# 4.4 Ablation Studies + +How important are our proposed modules? We study the importance of (i) 3D-aware rotary cross-attentions and (ii) egocentric point cloud rasterization by sequentially removing them from our framework. As shown in Tab. 3a, removing the 3D cross-attention worsens the LPIPS by $2.4\%$ . Additionally, removing the point cloud rasterization further degrades LPIPS by $3.9\%$ . Moreover, as shown in Figure 5, our 4DIFF with the proposed geometry priors consistently outperforms geometry-free diffusion models DiT and 3DiM in all scenarios. These results show the effectiveness of our proposed modules. + +Can we pretrain the depth estimator from scratch? 
Tab. 3b shows that training our model without using a pretrained depth estimator results in a significant $4.3\%$ degradation in LPIPS. This suggests that an inaccurate depth + +Table 3: Ablation studies on various design choices. (a) We study the importance of each module by removing each module sequentially; (b) Using a pretrained depth estimator significantly improves the LPIPS by $4.3\%$ ; (c) DINOV2 outperforms CLIP by $1.7\%$ in LPIPS. + +
(a) Module ablation.

| Model | LPIPS ↓ |
| --- | --- |
| 4DIFF | 0.349 |
| - 3D Rotary CA | 0.373 |
| - ego rasterization | 0.412 |
+ +
(b) Depth estimator.

| Pretrained | LPIPS ↓ |
| --- | --- |
| ✓ | 0.349 |
| ✗ | 0.392 |
+ +
(c) Feature encoder.

| Feat. Enc. | LPIPS ↓ |
| --- | --- |
| DINOv2 | 0.349 |
| CLIP | 0.366 |
+ +estimation may lead to most points from the exocentric view projected outside of the egocentric view. Consequently, these points will not receive sufficient gradient updates during training, leading to poor convergence. Thus, we conclude that a sufficiently accurate initial depth prediction is crucial for good performance. + +Which feature encoder should we use? We evaluate two strong feature encoders for obtaining a semantic representation for an exocentric RGB image: DINOv2 [36], and CLIP [42], both employing a ViT-L/14 backbone. The DINOv2 variant outperforms the CLIP variant by $1.7\%$ LPIPS. We conjecture that compared to CLIP's vision-language pretraining, DINOv2's self-supervised pretraining leads to higher quality lower-level visual features which are important for exocentric to egocentric image translation problem. + +# 5 Discussion and Conclusion + +In this work, we proposed 4DIFF, a 3D-aware transformer-based diffusion model that significantly outperforms prior approaches on the challenging Ego-Exo4D-VT benchmark. Our method demonstrates robust generalization to novel environments not encountered during training. Despite our excellent results, we also acknowledge a few limitations. Firstly, our method assumes known camera poses during training and inference, limiting its applicability to real-world scenarios. Integrating camera pose estimation via a head pose estimator could address this limitation, while remains difficult to estimate automatically. Secondly, our method focuses on image-to-image translation, leaving room for video generation by incorporating spatial-temporal cues. Thirdly, enhancing the quality of generated objects and improving generalization to unseen environments could be achieved by leveraging a more powerful pretrained diffusion model (e.g., Stable Diffusion [49]). Lastly, extending our framework from frame-level synthesis to object-level synthesis, considering the locations and appearances of objects such as hands and interacted objects, would bring it closer to real-world applications like AR/VR coaching. We plan to explore these research directions in our future work. + +Acknowledgment We thank Hanwen Jiang, Yan-Bo Lin, Md Mohaiminul Islam, Ce Zhang, Yue Yang, and Soumitri Chattopadhyay for their helpful discussions. UT Austin is supported by NSF Grants AF 1901292, CNS 2148141, Tripods CCF 1934932, IFML CCF 2019844 and research gifts by Western Digital, Amazon, WNCG IAP, UT Austin Machine Learning Lab (MLL), Cisco, the Stanly P. Finch Centennial Professorship in Engineering. UNC is supported by Sony Faculty Innovation Award, Laboratory for Analytic Sciences via NC State University, ONR Award N00014-23-1-2356. K.G. is paid as a research scientist at Meta. + +# References + +1. Ardeshir, S., Borji, A.: Ego2top: Matching viewers in egocentric and top-view videos. In: Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V 14. pp. 253-268. Springer (2016) +2. Barron, J.T., Mildenhall, B., Tancik, M., Hedman, P., Martin-Brualla, R., Srinivasan, P.P.: Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5855-5864 (2021) +3. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mipnerf 360: Unbounded anti-aliased neural radiance fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5470-5479 (2022) +4. 
Birkl, R., Wofk, D., Müller, M.: Midas v3.1 - a model zoo for robust monocular relative depth estimation. arXiv preprint arXiv:2307.14460 (2023) +5. Cao, A., Rockwell, C., Johnson, J.: Fwd: Real-time novel view synthesis with forward warping and depth. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15713-15724 (2022) +6. Chan, E.R., Lin, C.Z., Chan, M.A., Nagano, K., Pan, B., De Mello, S., Gallo, O., Guibas, L.J., Tremblay, J., Khamis, S., et al.: Efficient geometry-aware 3d generative adversarial networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 16123-16133 (2022) +7. Chan, E.R., Nagano, K., Chan, M.A., Bergman, A.W., Park, J.J., Levy, A., Aittala, M., De Mello, S., Karras, T., Wetzstein, G.: Generative novel view synthesis with 3d-aware diffusion models. arXiv preprint arXiv:2304.02602 (2023) +8. Chang, A., Dai, A., Funkhouser, T., Halber, M., Niessner, M., Savva, M., Song, S., Zeng, A., Zhang, Y.: Matterport3d: Learning from rgb-d data in indoor environments. arXiv preprint arXiv:1709.06158 (2017) +9. Chang, A.X., Funkhouser, T., Guibas, L., Hanrahan, P., Huang, Q., Li, Z., Savarese, S., Savva, M., Song, S., Su, H., et al.: Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012 (2015) +10. Chen, H., Gu, J., Chen, A., Tian, W., Tu, Z., Liu, L., Su, H.: Single-stage diffusion nef: A unified approach to 3d generation and reconstruction. arXiv preprint arXiv:2304.06714 (2023) +11. Creswell, A., White, T., Dumoulin, V., Arulkumaran, K., Sengupta, B., Bharath, A.A.: Generative adversarial networks: An overview. IEEE signal processing magazine 35(1), 53-65 (2018) + +12. Dhariwal, P., Nichol, A.: Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems 34, 8780-8794 (2021) +13. Ding, K., Ma, K., Wang, S., Simoncelli, E.P.: Image quality assessment: Unifying structure and texture similarity. IEEE transactions on pattern analysis and machine intelligence 44(5), 2567-2581 (2020) +14. Duan, Y., Guo, X., Zhu, Z.: Diffusiondepth: Diffusion denoising approach for monocular depth estimation. arXiv preprint arXiv:2303.05021 (2023) +15. Flynn, J., Broxton, M., Debevec, P., DuVall, M., Fyffe, G., Overbeck, R., Snavely, N., Tucker, R.: Deepview: View synthesis with learned gradient descent. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2367-2376 (2019) +16. Flynn, J., Neulander, I., Philbin, J., Snively, N.: Deepstereo: Learning to predict new views from the world's imagery. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 5515-5524 (2016) +17. Geiger, A., Lenz, P., Stiller, C., Urtasun, R.: The kitti vision benchmark suite. URL http://www.cvlibs.net/datasets/kitti2(5) (2015) +18. Grauman, K., Westbury, A., Torresani, L., Kitani, K., Malik, J., Afouras, T., Ashutosh, K., Baiyya, V., Bansal, S., Boote, B., et al.: Ego-exo4d: Understanding skilled human activity from first-and third-person perspectives. arXiv preprint arXiv:2311.18259 (2023) +19. Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. Advances in Neural Information Processing Systems 33, 6840-6851 (2020) +20. Ho, J., Sahara, C., Chan, W., Fleet, D.J., Norouzi, M., Salimans, T.: Cascaded diffusion models for high fidelity image generation. The Journal of Machine Learning Research 23(1), 2249-2281 (2022) +21. 
Isola, P., Zhu, J.Y., Zhou, T., Efros, A.A.: Image-to-image translation with conditional adversarial networks. CVPR (2017) +22. Johnson, J., Hariharan, B., Van Der Maaten, L., Fei-Fei, L., Lawrence Zitnick, C., Girshick, R.: Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2901–2910 (2017) +23. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics 42(4) (2023) +24. Koh, J.Y., Lee, H., Yang, Y., Baldridge, J., Anderson, P.: Pathdreamer: A world model for indoor navigation. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 14738-14748 (2021) +25. Kulhánek, J., Derner, E., Sattler, T., Babuška, R.: Viewformer: Nerf-free neural rendering from few images using transformers. In: European Conference on Computer Vision. pp. 198-216. Springer (2022) +26. Kwon, T., Tekin, B., Stühmer, J., Bogo, F., Pollefeys, M.: H2o: Two hands manipulating objects for first person interaction recognition. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10138-10148 (2021) +27. Li, J., Liu, K., Wu, J.: Ego-body pose estimation via ego-head pose estimation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 17142-17151 (2023) +28. Liu, G., Tang, H., Latapie, H., Yan, Y.: Exocentric to egocentric image generation via parallel generative adversarial network. In: ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). pp. 1843-1847. IEEE (2020) + +29. Liu, G., Tang, H., Latapie, H.M., Corso, J.J., Yan, Y.: Cross-view exocentric to egocentric video synthesis. In: Proceedings of the 29th ACM International Conference on Multimedia. pp. 974-982 (2021) +30. Liu, R., Wu, R., Van Hoorick, B., Tokmakov, P., Zakharov, S., Vondrick, C.: Zero-1-to-3: Zero-shot one image to 3d object. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 9298-9309 (2023) +31. Lombardi, S., Simon, T., Saragih, J., Schwartz, G., Lehrmann, A., Sheikh, Y.: Neural volumes: Learning dynamic renderable volumes from images. arXiv preprint arXiv:1906.07751 (2019) +32. Luo, M., Xue, Z., Dimakis, A., Grauman, K.: Put myself in your shoes: Lifting the egocentric perspective from exocentric videos. In: ECCV (2024) +33. Mathews, J.: Coordinate-free rotation formalism. American Journal of Physics 44(12), 1210-1210 (1976) +34. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM 65(1), 99-106 (2021) +35. Niklaus, S., Mai, L., Yang, J., Liu, F.: 3d ken burns effect from a single image. ACM Transactions on Graphics (ToG) 38(6), 1-15 (2019) +36. Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., et al.: Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193 (2023) +37. Park, K., Sinha, U., Barron, J.T., Bouaziz, S., Goldman, D.B., Seitz, S.M., Martin-Brualla, R.: Nerfies: Deformable neural radiance fields. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5865-5874 (2021) +38. Peebles, W., Xie, S.: Scalable diffusion models with transformers. arXiv preprint arXiv:2212.09748 (2022) +39. 
Peebles, W., Xie, S.: Scalable diffusion models with transformers. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4195-4205 (2023) +40. Popov, S., Bauszat, P., Ferrari, V.: Corenet: Coherent 3d scene reconstruction from a single rgb image. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part II 16. pp. 366-383. Springer (2020) +41. Pumarola, A., Corona, E., Pons-Moll, G., Moreno-Noguer, F.: D-nerf: Neural radiance fields for dynamic scenes. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 10318-10327 (2021) +42. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International conference on machine learning. pp. 8748-8763. PMLR (2021) +43. Regmi, K., Borji, A.: Cross-view image synthesis using conditional gans. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition. pp. 3501-3510 (2018) +44. Ren, B., Tang, H., Sebe, N.: Cascaded cross mlp-mixer gans for cross-view image translation. arXiv preprint arXiv:2110.10183 (2021) +45. Ren, X., Wang, X.: Look outside the room: Synthesizing a consistent long-term 3d scene video from a single image. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3563-3573 (2022) +46. Riegler, G., Koltun, V.: Free view synthesis. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XIX 16. pp. 623-640. Springer (2020) + +47. Riegler, G., Koltun, V.: Stable view synthesis. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 12216-12225 (2021) +48. Rockwell, C., Fouhey, D.F., Johnson, J.: Pixelsynth: Generating a 3d-consistent experience from a single image. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 14104-14113 (2021) +49. Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 10684-10695 (2022) +50. Rombach, R., Esser, P., Ommer, B.: Geometry-free view synthesis: Transformers and no 3d priors. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 14356-14366 (2021) +51. Saharia, C., Chan, W., Chang, H., Lee, C., Ho, J., Salimans, T., Fleet, D., Norouzi, M.: Palette: Image-to-image diffusion models. In: ACM SIGGRAPH 2022 Conference Proceedings. pp. 1-10 (2022) +52. Saharia, C., Chan, W., Saxena, S., Li, L., Whang, J., Denton, E.L., Ghasemipour, K., Gontijo Lopes, R., Karagol Ayan, B., Salimans, T., et al.: Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems 35, 36479-36494 (2022) +53. Saharia, C., Ho, J., Chan, W., Salimans, T., Fleet, D.J., Norouzi, M.: Image super-resolution via iterative refinement. IEEE Transactions on Pattern Analysis and Machine Intelligence 45(4), 4713-4726 (2022) +54. Sajjadi, M.S., Meyer, H., Pot, E., Bergmann, U., Greff, K., Radwan, N., Vora, S., Lucic, M., Duckworth, D., Dosovitskiy, A., et al.: Scene representation transformer: Geometry-free novel view synthesis through set-latent scene representations. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
6229-6238 (2022) +55. Sener, F., Chatterjee, D., Shelepov, D., He, K., Singhania, D., Wang, R., Yao, A.: Assembly101: A large-scale multi-view video dataset for understanding procedural activities. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21096-21106 (2022) +56. Sitzmann, V., Thies, J., Heide, F., Nießner, M., Wetzstein, G., Zollhofer, M.: Deepvoxels: Learning persistent 3d feature embeddings. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2437-2446 (2019) +57. Sitzmann, V., Zollhöfer, M., Wetzstein, G.: Scene representation networks: Continuous 3d-structure-aware neural scene representations. Advances in Neural Information Processing Systems 32 (2019) +58. Song, J., Meng, C., Ermon, S.: Denoising diffusion implicit models. arXiv:2010.02502 (October 2020), https://arxiv.org/abs/2010.02502 +59. Su, J., Ahmed, M., Lu, Y., Pan, S., Bo, W., Liu, Y.: Roformer: Enhanced transformer with rotary position embedding. Neurocomputing 568, 127063 (2024) +60. Szymanowicz, S., Rupprecht, C., Vedaldi, A.: Splatter image: Ultra-fast single-view 3d reconstruction. arXiv preprint arXiv:2312.13150 (2023) +61. T, M.V., Wang, P., Chen, X., Chen, T., Venugopalan, S., Wang, Z.: Is attention all that NeRF needs? In: The Eleventh International Conference on Learning Representations (2023), https://openreview.net/forum?id=xE-LtsE-xx +62. Tang, H., Xu, D., Sebe, N., Wang, Y., Corso, J.J., Yan, Y.: Multi-channel attention selection gan with cascaded semantic guidance for cross-view image translation. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 2417-2426 (2019) + +63. Trevithick, A., Yang, B.: Grf: Learning a general radiance field for 3d scene representation and rendering (2020) +64. Tucker, R., Snavely, N.: Single-view view synthesis with multiplane images. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 551-560 (2020) +65. Wang, Q., Wang, Z., Genova, K., Srinivasan, P.P., Zhou, H., Barron, J.T., Martin-Brualla, R., Snavely, N., Funkhouser, T.: Ibrnet: Learning multi-view image-based rendering. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4690-4699 (2021) +66. Watson, D., Chan, W., Martin-Brualla, R., Ho, J., Tagliasacchi, A., Norouzi, M.: Novel view synthesis with diffusion models. arXiv preprint arXiv:2210.04628 (2022) +67. Wiles, O., Gkioxari, G., Szeliski, R., Johnson, J.: Synsin: End-to-end view synthesis from a single image. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 7467-7477 (2020) +68. Yang, L., Kang, B., Huang, Z., Xu, X., Feng, J., Zhao, H.: Depth anything: Unleashing the power of large-scale unlabeled data. arXiv preprint arXiv:2401.10891 (2024) +69. Yu, A., Ye, V., Tancik, M., Kanazawa, A.: pixelnerf: Neural radiance fields from one or few images. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4578-4587 (2021) +70. Zhai, M., Bessinger, Z., Workman, S., Jacobs, N.: Predicting ground-level scene layout from aerial imagery. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 867-875 (2017) +71. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 586-595 (2018) +72. 
Zhou, T., Tucker, R., Flynn, J., Fyffe, G., Snavely, N.: Stereo magnification: Learning view synthesis using multiplane images. arXiv preprint arXiv:1805.09817 (2018) \ No newline at end of file diff --git a/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/images.zip b/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..23e8676a01461f4695fdef26c63db01cbf87a1f9 --- /dev/null +++ b/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe3ffaf9cef24de8f35ee02e9c6c6668a6dcdb2049938dbc51e368fbdea7fe04 +size 833505 diff --git a/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/layout.json b/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..a79f78dd29f17b8ec55a706b3de7b4f5015a098a --- /dev/null +++ b/2024/4Diff_ 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation/layout.json @@ -0,0 +1,9160 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 176, + 111, + 438, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 111, + 438, + 148 + ], + "spans": [ + { + "bbox": [ + 176, + 111, + 438, + 148 + ], + "type": "text", + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 135, + 167, + 478, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 167, + 478, + 193 + ], + "spans": [ + { + "bbox": [ + 135, + 167, + 478, + 193 + ], + "type": "text", + "content": "Feng Cheng" + }, + { + "bbox": [ + 135, + 167, + 478, + 193 + ], + "type": "inline_equation", + "content": "^{1,3*}" + }, + { + "bbox": [ + 135, + 167, + 478, + 193 + ], + "type": "text", + "content": ", Mi Luo" + }, + { + "bbox": [ + 135, + 167, + 478, + 193 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 135, + 167, + 478, + 193 + ], + "type": "text", + "content": ", Huiyu Wang" + }, + { + "bbox": [ + 135, + 167, + 478, + 193 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 135, + 167, + 478, + 193 + ], + "type": "text", + "content": ", Alex Dimakis" + }, + { + "bbox": [ + 135, + 167, + 478, + 193 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 135, + 167, + 478, + 193 + ], + "type": "text", + "content": ", Lorenzo Torresani" + }, + { + "bbox": [ + 135, + 167, + 478, + 193 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 135, + 167, + 478, + 193 + ], + "type": "text", + "content": ", Gedas Bertasius" + }, + { + "bbox": [ + 135, + 167, + 478, + 193 + ], + "type": "inline_equation", + "content": "^{3\\dagger}" + }, + { + "bbox": [ + 135, + 167, + 478, + 193 + ], + "type": "text", + "content": ", and Kristen Grauman" + }, + { + "bbox": [ + 135, + 167, + 478, + 193 + ], + "type": "inline_equation", + "content": "^{1,2\\dagger}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 211, + 201, + 400, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 201, + 400, + 248 + ], + "spans": [ + { + "bbox": [ + 211, + 201, + 400, + 248 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 211, + 201, + 400, + 248 + ], + "type": "text", + "content": " FAIR, Meta 
AI \n" + }, + { + "bbox": [ + 211, + 201, + 400, + 248 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 211, + 201, + 400, + 248 + ], + "type": "text", + "content": " The University of Texas at Austin \n" + }, + { + "bbox": [ + 211, + 201, + 400, + 248 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 211, + 201, + 400, + 248 + ], + "type": "text", + "content": " University of North Carolina at Chapel Hill \n* Equal contribution, † Co-lead the project" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 160, + 277, + 455, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 277, + 455, + 453 + ], + "spans": [ + { + "bbox": [ + 160, + 277, + 455, + 453 + ], + "type": "text", + "content": "Abstract. We present 4DIFF, a 3D-aware diffusion model addressing the exo-to-ego viewpoint translation task — generating first-person (ego-centric) view images from the corresponding third-person (exocentric) images. Building on the diffusion model's ability to generate photorealistic images, we propose a transformer-based diffusion model that incorporates geometry priors through two mechanisms: (i) egocentric point cloud rasterization and (ii) 3D-aware rotary cross-attention. Egocentric point cloud rasterization converts the input exocentric image into an egocentric layout, which is subsequently used by a diffusion image transformer. As a component of the diffusion transformer's denoiser block, the 3D-aware rotary cross-attention further incorporates 3D information and semantic features from the source exocentric view. Our 4DIFF achieves state-of-the-art results on the challenging and diverse Ego-Exo4D multiview dataset and exhibits robust generalization to novel environments not encountered during training. Our code, processed data, and pretrained models are publicly available at https://klauscc.github.io/4diff." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 160, + 463, + 359, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 463, + 359, + 475 + ], + "spans": [ + { + "bbox": [ + 160, + 463, + 359, + 475 + ], + "type": "text", + "content": "Keywords: Egocentric Vision " + }, + { + "bbox": [ + 160, + 463, + 359, + 475 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 463, + 359, + 475 + ], + "type": "text", + "content": " View Synthesis" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 496, + 230, + 509 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 496, + 230, + 509 + ], + "spans": [ + { + "bbox": [ + 132, + 496, + 230, + 509 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 521, + 482, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 521, + 482, + 640 + ], + "spans": [ + { + "bbox": [ + 130, + 521, + 482, + 640 + ], + "type": "text", + "content": "From early developmental stages, humans adeptly observe external actions (exo) and seamlessly integrate them into their own repertoire (ego), forming the cornerstone of visual learning. This actor-observer translation mechanism not only shapes individual development but also holds profound implications for technological advancements. Imagine the ability to immerse yourself in the first-person perspective of renowned athletes like Messi or glean intricate piano techniques from online tutorials converted to a first-person viewpoint. 
Such experiences hinge on seamless translation from third-person to first-person perspectives, highlighting the pivotal role of cross-view translation in facilitating immersive and enriching experiences across diverse domains." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 641, + 481, + 665 + ], + "type": "text", + "content": "We leverage the recently released Ego-Exo4D dataset [18] to explore the third-person (exocentric) to first-person (egocentric) viewpoint translation task." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 140, + 117, + 478, + 197 + ], + "blocks": [ + { + "bbox": [ + 140, + 117, + 478, + 197 + ], + "lines": [ + { + "bbox": [ + 140, + 117, + 478, + 197 + ], + "spans": [ + { + "bbox": [ + 140, + 117, + 478, + 197 + ], + "type": "image", + "image_path": "a38f371d4df030816d70b53bd9ff161ee79bd243c814920455fc7f8bcd1ed286.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 210, + 482, + 245 + ], + "lines": [ + { + "bbox": [ + 130, + 210, + 482, + 245 + ], + "spans": [ + { + "bbox": [ + 130, + 210, + 482, + 245 + ], + "type": "text", + "content": "Fig. 1: Given exocentric images of an egocentric camera wearer engaged in daily activities and the corresponding camera trajectories, we aim to synthesize the corresponding egocentric view that captures the scene from the wearer's first-person perspective." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 253, + 482, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 253, + 482, + 445 + ], + "spans": [ + { + "bbox": [ + 130, + 253, + 482, + 445 + ], + "type": "text", + "content": "As illustrated in Figure 1, our focus is on transforming the exocentrically observed images containing a designated individual into images depicting the same scene from the individual's first-person perspective. Our task is a specific instance of the Novel View Synthesis (NVS) task, which aims to generate new views conditioned on a few given views of a scene. However, the Ego-Exo4D dataset presents a formidable challenge compared to traditional novel view synthesis datasets [8, 9, 17, 22, 57, 72] and multiview datasets [1, 26, 55, 70]. As illustrated in Figure 2, the scenes in the Ego-Exo4D dataset are characterized by numerous objects and dynamic actions performed by the participants. The dataset encompasses diverse scenes, ranging from indoor to outdoor activities such as cooking and basketball. Furthermore, the visual differences between exocentric and egocentric images are pronounced due to sharp viewpoint changes. Besides, unlike numerous NVS datasets that use 3D data for arbitrary viewpoint sampling during training, Ego-Exo4D dataset only provides several views (e.g., four exo and one ego view) for each dynamic scene, which presents a challenge for convergence of prior geometry-based methods that regress the entire scene." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 447, + 482, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 447, + 482, + 579 + ], + "spans": [ + { + "bbox": [ + 130, + 447, + 482, + 579 + ], + "type": "text", + "content": "Due to the challenges mentioned above, existing methods exhibit unsatisfactory performance in the exo-to-ego view translation task. Geometry-free generative models, including GAN-based [6,21] and diffusion-based [30,38,66] methods, face challenges in generating geometrically-correct images due to high complexity of the scenes. In contrast, geometry-based approaches, exemplified by NeRF-based methods [2,3,34,37,41,69], encounter limitations in achieving photorealistic images. Recent attempts [7, 10] aim to reconcile this dilemma by integrating a strong geometry-based method (e.g. NeRF-based) into diffusion models. However, these models are typically difficult to optimize on the extremely diverse scenes in the Ego-Exo4D benchmark, as we show in Sec. 4.2. Thus, they often fail to provide constructive geometry priors to the subsequent diffusion model." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 582, + 482, + 666 + ], + "type": "text", + "content": "Motivated by these observations, we propose 4DIFF, a 3D-Aware Diffusion model for exocentric to egocentric viewpoint translation. We propose two mechanisms to incorporate 3D geometry into the diffusion model: (i) egocentric point cloud rasterization, and (ii) 3D-aware rotary cross-attention layers. Rather than relying on a complex geometry model like NeRF, we render an egocentric prior image using a lightweight rasterization technique [5, 67]. As a result, our approach is both easy to train and adaptable, allowing it to incorporate existing" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "type": "text", + "content": "F. Cheng and M. Luo et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 114, + 479, + 323 + ], + "blocks": [ + { + "bbox": [ + 133, + 114, + 479, + 323 + ], + "lines": [ + { + "bbox": [ + 133, + 114, + 479, + 323 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 479, + 323 + ], + "type": "image", + "image_path": "9d391c59f0a1e04d3b2bf0aab09cffa0258fa9eeaed4a8728f942276bcac850a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 332, + 480, + 376 + ], + "lines": [ + { + "bbox": [ + 130, + 332, + 480, + 376 + ], + "spans": [ + { + "bbox": [ + 130, + 332, + 480, + 376 + ], + "type": "text", + "content": "Fig. 2: Comparison of the Ego-Exo4D viewpoint translation (Ego-Exo4D-VT) benchmark, which we build on the Ego-Exo4D dataset [18], with existing novel view synthesis and cross-view translation benchmarks. 
Ego-Exo4D-VT presents numerous challenges that require fundamental advances in generative modeling to address." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 380, + 482, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 380, + 482, + 525 + ], + "spans": [ + { + "bbox": [ + 130, + 380, + 482, + 525 + ], + "type": "text", + "content": "open-source pretrained depth estimators. These estimators have demonstrated effectiveness in processing images from previously unseen environments [4, 68]. Solely rendering the egocentric prior feature map through point cloud rasterization can be problematic, as the source exo view often contains occluded and unobserved regions. To address this, we seamlessly integrate rasterization into the diffusion model, leveraging its substantial capacity for extrapolation and generating high-quality images. We further enhance the expressivity of our diffusion model by introducing 3D-aware rotary cross-attention, which is integrated into each denoising block of the model. This functionality aims to improve feature similarities and 3D spatial similarities between ego and exo views, allowing the diffusion feature maps to incorporate information from the semantic features encoded in the exocentric image more effectively." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 525, + 482, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 525, + 482, + 585 + ], + "spans": [ + { + "bbox": [ + 130, + 525, + 482, + 585 + ], + "type": "text", + "content": "Our method 4DIFF surpasses prior state-of-the-art techniques on the challenging Ego-Exo4D viewpoint translation benchmark, achieving a " + }, + { + "bbox": [ + 130, + 525, + 482, + 585 + ], + "type": "inline_equation", + "content": "3.6\\%" + }, + { + "bbox": [ + 130, + 525, + 482, + 585 + ], + "type": "text", + "content": " improvement in LPIPS. Furthermore, leveraging the extensive scale of Ego-Exo4D data, our approach demonstrates robust generalization to novel environments not encountered during training." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 603, + 237, + 616 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 603, + 237, + 616 + ], + "spans": [ + { + "bbox": [ + 132, + 603, + 237, + 616 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "content": "Exo-to-Ego Viewpoint Translation. Prior methods [28, 44, 62] tackled this problem predominantly via GAN-based models [11]. 
Specifically, [43] proposed the X-Fork and X-Seq GAN-based architecture using an additional semantic map" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 140, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 140, + 91, + 448, + 102 + ], + "type": "text", + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 212 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 212 + ], + "type": "text", + "content": "for enhanced generation. [29] introduced STA-GAN, which focuses on learning spatial and temporal information to generate egocentric videos from exocentric views. [32] focuses on hand-object interactions, proposing to decouple hand layout generation and ego frame generation with a diffusion model. None of these methods develop an explicit geometry-aware generative framework. In contrast, our work introduces two effective mechanisms to incorporate 3D geometric priors into the diffusion model, specifically tailored to address the challenges posed by the Ego-Exo4D-VT benchmark." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 214, + 482, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 214, + 482, + 454 + ], + "spans": [ + { + "bbox": [ + 130, + 214, + 482, + 454 + ], + "type": "text", + "content": "Novel View Synthesis (NVS). Our exo-to-ego viewpoint translation task represents a distinct facet of the NVS task, which aims to generate a target image with an arbitrary target camera pose from given source images and their camera poses. Previous works in NVS can be categorized into geometry-based [15,16,31,46,47,56,64,72], regression-based methods [25,35,54,63-65,69,72] and generative models [24,45,48,50,66,67]. Recently, several geometry-aware generative models [7,10] have explored ways to integrate NeRF with diffusion models. For instance, GeNVS [7] incorporates geometry priors into their diffusion model using a variant of pixelNeRF [69], which renders a target feature map from a 3D feature field. SSDNeRF [10] proposes a unified approach that employs an expressive diffusion model to learn a generalizable prior of neural radiance field (NeRF). However, these geometry-based models, typically implemented as NeRFs, often struggle to provide meaningful geometry priors to the diffusion model, especially in the challenging Ego-Exo4D-VT benchmark. This is because complex geometry methods require strong supervision (e.g., many densely sampled views of the same scene), which Ego-Exo4D does not provide. In contrast, our method uses simple point-cloud rasterization that relies solely on accurate depth estimation, avoiding the modeling of occluded and unobserved areas in the exocentric view. This approach shows better generalization and benefits from existing large-scale pretrained depth estimators." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 456, + 482, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 456, + 482, + 541 + ], + "spans": [ + { + "bbox": [ + 130, + 456, + 482, + 541 + ], + "type": "text", + "content": "Diffusion Models [12, 19, 49] have made significant strides in producing photorealistic images and videos. They excel in modeling conditional distributions, including scenarios where conditioning is based on text [49, 52] or another image [20, 53]. Prior work has demonstrated a wide range of successful applications of diffusion models, including human pose generation [27] and depth estimation [14]. In our work, we employ a transformer-based diffusion model [39] to model the distribution of egocentric images conditioned on exocentric images." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 560, + 233, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 560, + 233, + 574 + ], + "spans": [ + { + "bbox": [ + 132, + 560, + 233, + 574 + ], + "type": "text", + "content": "3 Methodology" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 586, + 237, + 599 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 586, + 237, + 599 + ], + "spans": [ + { + "bbox": [ + 132, + 586, + 237, + 599 + ], + "type": "text", + "content": "3.1 Problem Setup" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 606, + 481, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 606, + 481, + 644 + ], + "spans": [ + { + "bbox": [ + 130, + 606, + 481, + 644 + ], + "type": "text", + "content": "Given an exocentric image " + }, + { + "bbox": [ + 130, + 606, + 481, + 644 + ], + "type": "inline_equation", + "content": "x \\in \\mathbb{R}^{h \\times w \\times 3}" + }, + { + "bbox": [ + 130, + 606, + 481, + 644 + ], + "type": "text", + "content": " and the relative camera pose " + }, + { + "bbox": [ + 130, + 606, + 481, + 644 + ], + "type": "inline_equation", + "content": "P \\in \\mathbb{R}^{4 \\times 4}" + }, + { + "bbox": [ + 130, + 606, + 481, + 644 + ], + "type": "text", + "content": " from exo camera to the ego camera of the person of interest, our goal is to synthesize an egocentric image " + }, + { + "bbox": [ + 130, + 606, + 481, + 644 + ], + "type": "inline_equation", + "content": "y \\in \\mathbb{R}^{h \\times w \\times 3}" + }, + { + "bbox": [ + 130, + 606, + 481, + 644 + ], + "type": "text", + "content": " from the conditional distribution:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 286, + 654, + 481, + 666 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 654, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 286, + 654, + 481, + 666 + ], + "type": "interline_equation", + "content": "p (y | x, P) \\tag {1}", + "image_path": "654a768052611fab1604d18157adfdcc78923da633bfd36411740a19b081c448.jpg" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "type": "text", + "content": "F. Cheng and M. Luo et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 136, + 120, + 479, + 238 + ], + "blocks": [ + { + "bbox": [ + 136, + 120, + 479, + 238 + ], + "lines": [ + { + "bbox": [ + 136, + 120, + 479, + 238 + ], + "spans": [ + { + "bbox": [ + 136, + 120, + 479, + 238 + ], + "type": "image", + "image_path": "3d01e8a4ec309dc78928e926a18b1c3bf62b89e64d64c070db82d54a7c62a693.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 247, + 482, + 326 + ], + "lines": [ + { + "bbox": [ + 130, + 247, + 482, + 326 + ], + "spans": [ + { + "bbox": [ + 130, + 247, + 482, + 326 + ], + "type": "text", + "content": "Fig. 3: We propose 4DIFF, a 3D-Aware Diffusion model for exocentric to egocentric viewpoint translation. Our framework uses a point cloud rasterization scheme first to compute an egocentric prior, which captures egocentric layout cues. Afterward, the egocentric prior is fed into the diffusion model augmented with the proposed 3D-aware rotary cross-attention for egocentric image generation. The proposed 3D-aware rotary cross-attention guides the attention to consider geometric relationships between the egocentric and exocentric diffusion feature maps." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 340, + 481, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 340, + 481, + 365 + ], + "spans": [ + { + "bbox": [ + 130, + 340, + 481, + 365 + ], + "type": "text", + "content": "We assume the relative camera pose " + }, + { + "bbox": [ + 130, + 340, + 481, + 365 + ], + "type": "inline_equation", + "content": "(P)" + }, + { + "bbox": [ + 130, + 340, + 481, + 365 + ], + "type": "text", + "content": " is known, similar to the standard NVs tasks [40, 61, 69]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 365, + 482, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 365, + 482, + 485 + ], + "spans": [ + { + "bbox": [ + 130, + 365, + 482, + 485 + ], + "type": "text", + "content": "Relation to the Official Ego-Exo4D Translation Benchmark. Ego-Exo4D [18] introduced an exo-to-ego translation benchmark, with the primary emphasis on object-level synthesis, i.e., generating an object at the correct location in the ego view based on an exo image and an exo segmentation mask of the object of interest. This approach is particularly valuable for precise object placement and detailed object-level interactions. In contrast, we focus on full-image synthesis — allowing for the generation of entire scenes, and enhancing the richness and diversity of generated viewpoints. Both are complementary; while Ego-Exo4D excels in object-specific scenarios, our method expands the scope to full-scene synthesis and can be seen as a new specialized NVS task." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 509, + 241, + 519 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 509, + 241, + 519 + ], + "spans": [ + { + "bbox": [ + 132, + 509, + 241, + 519 + ], + "type": "text", + "content": "3.2 Our Framework" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 666 + ], + "type": "text", + "content": "Due to the inherent complexity and dynamism present in diverse scenes, we use an expressive transformer-based diffusion model to model the conditional distribution in Equation 1. However, due to the inability to explicitly model 3D cues, the standard diffusion model may struggle to generate geometry-consistent images. Thus, we propose two techniques to incorporate geometry into our diffusion model: (i) egocentric point cloud rasterization and (ii) 3D-aware rotary cross-attention. As shown in Figure 3, the point cloud rasterization first renders an egocentric prior from the input exocentric view, which is then fed into the diffusion model. Afterward, the conditioned diffusion model is augmented with the proposed 3D-aware rotary cross-attention to generate the target egocentric image. We now describe each module in more detail." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "type": "text", + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 350, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 350, + 129 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 350, + 129 + ], + "type": "text", + "content": "3.3 Egocentric Point Cloud Rasterization" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 137, + 482, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 137, + 482, + 196 + ], + "spans": [ + { + "bbox": [ + 130, + 137, + 482, + 196 + ], + "type": "text", + "content": "As a first step in our framework, we render an egocentric prior via the point cloud rasterization from an exocentric view. Specifically, we first use a depth estimator to convert the exocentric 2D image " + }, + { + "bbox": [ + 130, + 137, + 482, + 196 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 130, + 137, + 482, + 196 + ], + "type": "text", + "content": " and a feature map " + }, + { + "bbox": [ + 130, + 137, + 482, + 196 + ], + "type": "inline_equation", + "content": "F^{\\mathrm{exo}}" + }, + { + "bbox": [ + 130, + 137, + 482, + 196 + ], + "type": "text", + "content": " into a feature point cloud. 
Then, a differential renderer [67] projects this point cloud into an egocentric prior " + }, + { + "bbox": [ + 130, + 137, + 482, + 196 + ], + "type": "inline_equation", + "content": "H^{\\mathrm{prior}}" + }, + { + "bbox": [ + 130, + 137, + 482, + 196 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 203, + 206, + 481, + 220 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 206, + 481, + 220 + ], + "spans": [ + { + "bbox": [ + 203, + 206, + 481, + 220 + ], + "type": "interline_equation", + "content": "H ^ {\\text {p r i o r}} = \\left[ x ^ {\\text {p r i o r}}, F ^ {\\text {p r i o r}} \\right] = \\operatorname {r e n d e r} \\left(\\left[ x, F ^ {\\text {e x o}} \\right], D, P\\right) \\tag {2}", + "image_path": "e7a62b17732ea5615a9431b34f4473f2aa032fd442aa0e73cb31f0ab5f6b1162.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "spans": [ + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "inline_equation", + "content": "F^{\\mathrm{exo}}" + }, + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "text", + "content": " is the semantic features of the exocentric image encoded by a feature encoder " + }, + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "inline_equation", + "content": "x^{\\mathrm{prior}}" + }, + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "inline_equation", + "content": "F^{\\mathrm{prior}}" + }, + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "text", + "content": " are the egocentric prior image and a feature map, rendered from the exocentric image " + }, + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "text", + "content": " and a feature map " + }, + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "inline_equation", + "content": "F^{\\mathrm{exo}}" + }, + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "text", + "content": " respectively. " + }, + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "text", + "content": " denotes the depth map predicted by a depth estimator, and " + }, + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 131, + 228, + 482, + 288 + ], + "type": "text", + "content": " represents the relative camera pose." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 131, + 293, + 483, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 293, + 483, + 342 + ], + "spans": [ + { + "bbox": [ + 131, + 293, + 483, + 342 + ], + "type": "text", + "content": "Depth Estimator. We construct the depth estimator based on the pretrained MiDaS [4]. 
Since MiDaS predicts relative disparity (the inverse of depth), we introduce two learnable scalars " + }, + { + "bbox": [ + 131, + 293, + 483, + 342 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 131, + 293, + 483, + 342 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 293, + 483, + 342 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 131, + 293, + 483, + 342 + ], + "type": "text", + "content": " for dataset-specific calibration. The depth map " + }, + { + "bbox": [ + 131, + 293, + 483, + 342 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 131, + 293, + 483, + 342 + ], + "type": "text", + "content": " is predicted using the formula:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 245, + 352, + 481, + 365 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 245, + 352, + 481, + 365 + ], + "spans": [ + { + "bbox": [ + 245, + 352, + 481, + 365 + ], + "type": "interline_equation", + "content": "D = 1 / \\left(s \\cdot \\operatorname {M i D a S} \\left(x ^ {\\mathrm {e x o}}\\right) + t\\right). \\tag {3}", + "image_path": "c487628839c32c005204359a0a95c24b96b833c365f0c3cc9ddb78cb3755f920.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 131, + 379, + 482, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 379, + 482, + 463 + ], + "spans": [ + { + "bbox": [ + 131, + 379, + 482, + 463 + ], + "type": "text", + "content": "Rasterization. We employ the differentiable renderer [67] for our rasterization. This renderer splats 3D points onto the image plane and calculates pixel values by blending point features. In contrast to more intricate rendering techniques like NeRF [34,69] or Gaussian Splatting [23,60], our renderer is simpler to converge. It relies solely on depth estimation from 2D images, leveraging large-scale pretrained depth estimators. This design choice ensures robust generalization across diverse scenarios." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 480, + 359, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 480, + 359, + 493 + ], + "spans": [ + { + "bbox": [ + 132, + 480, + 359, + 493 + ], + "type": "text", + "content": "3.4 3D-Aware Diffusion Image Transformer" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 501, + 482, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 501, + 482, + 548 + ], + "spans": [ + { + "bbox": [ + 131, + 501, + 482, + 548 + ], + "type": "text", + "content": "Our diffusion model uses a denoiser network to predict added noise " + }, + { + "bbox": [ + 131, + 501, + 482, + 548 + ], + "type": "inline_equation", + "content": "\\epsilon_{t}" + }, + { + "bbox": [ + 131, + 501, + 482, + 548 + ], + "type": "text", + "content": " from the noisy target egocentric image " + }, + { + "bbox": [ + 131, + 501, + 482, + 548 + ], + "type": "inline_equation", + "content": "y_{t} = \\sqrt{\\bar{\\alpha}_{t}} y + \\sqrt{1 - \\bar{\\alpha}_{t}}\\epsilon_{t}" + }, + { + "bbox": [ + 131, + 501, + 482, + 548 + ], + "type": "text", + "content": ", conditioned on the previously obtained egocentric prior " + }, + { + "bbox": [ + 131, + 501, + 482, + 548 + ], + "type": "inline_equation", + "content": "H^{\\mathrm{prior}}" + }, + { + "bbox": [ + 131, + 501, + 482, + 548 + ], + "type": "text", + "content": " and the exocentric semantic features " + }, + { + "bbox": [ + 131, + 501, + 482, + 548 + ], + "type": "inline_equation", + "content": "F^{\\mathrm{exo}}" + }, + { + "bbox": [ + 131, + 501, + 482, + 548 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 252, + 548, + 481, + 562 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 252, + 548, + 481, + 562 + ], + "spans": [ + { + "bbox": [ + 252, + 548, + 481, + 562 + ], + "type": "interline_equation", + "content": "\\hat {\\epsilon} _ {t} = \\epsilon_ {\\theta} ([ y _ {t}, H ^ {\\text {p r i o r}} ], F ^ {\\text {e x o}}). \\tag {4}", + "image_path": "3679f787f5e4f76e59b613a27d0101323a530087784ad28190b1b4bbbb154d40.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 131, + 567, + 482, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 567, + 482, + 604 + ], + "spans": [ + { + "bbox": [ + 131, + 567, + 482, + 604 + ], + "type": "text", + "content": "During inference, the target egocentric image " + }, + { + "bbox": [ + 131, + 567, + 482, + 604 + ], + "type": "inline_equation", + "content": "y_0" + }, + { + "bbox": [ + 131, + 567, + 482, + 604 + ], + "type": "text", + "content": " is generated from a standard Gaussian noise " + }, + { + "bbox": [ + 131, + 567, + 482, + 604 + ], + "type": "inline_equation", + "content": "y_T" + }, + { + "bbox": [ + 131, + 567, + 482, + 604 + ], + "type": "text", + "content": " by applying the denoiser network " + }, + { + "bbox": [ + 131, + 567, + 482, + 604 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta}" + }, + { + "bbox": [ + 131, + 567, + 482, + 604 + ], + "type": "text", + "content": " iteratively with a sampling strategy (e.g. DDIM [58]), i.e. " + }, + { + "bbox": [ + 131, + 567, + 482, + 604 + ], + "type": "inline_equation", + "content": "y_T \\to y_{T - \\delta} \\to \\ldots \\to y_0" + }, + { + "bbox": [ + 131, + 567, + 482, + 604 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 131, + 605, + 482, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 605, + 482, + 667 + ], + "spans": [ + { + "bbox": [ + 131, + 605, + 482, + 667 + ], + "type": "text", + "content": "Denoiser Network " + }, + { + "bbox": [ + 131, + 605, + 482, + 667 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta}" + }, + { + "bbox": [ + 131, + 605, + 482, + 667 + ], + "type": "text", + "content": ". Our proposed 3D-aware Diffusion image Transformer serves as the denoiser network. As shown in Figure 3 and Equation 4, our Transformer network takes as input the concatenation of the egocentric prior " + }, + { + "bbox": [ + 131, + 605, + 482, + 667 + ], + "type": "inline_equation", + "content": "H^{\\mathrm{prior}}" + }, + { + "bbox": [ + 131, + 605, + 482, + 667 + ], + "type": "text", + "content": " and the noisy target egocentric image " + }, + { + "bbox": [ + 131, + 605, + 482, + 667 + ], + "type": "inline_equation", + "content": "y_{t}" + }, + { + "bbox": [ + 131, + 605, + 482, + 667 + ], + "type": "text", + "content": " encoded via an off-the-shelf autoencoder from [49]. Following [39], the architecture of DiT is the same as ViT, consisting" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "type": "text", + "content": "F. Cheng and M. Luo et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 168, + 114, + 449, + 232 + ], + "blocks": [ + { + "bbox": [ + 168, + 114, + 449, + 232 + ], + "lines": [ + { + "bbox": [ + 168, + 114, + 449, + 232 + ], + "spans": [ + { + "bbox": [ + 168, + 114, + 449, + 232 + ], + "type": "image", + "image_path": "8c2ea3fe833a7b406dce9ca37f22a3777b6854d9578326d3d82a2aa79ed62729.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 240, + 482, + 264 + ], + "lines": [ + { + "bbox": [ + 131, + 240, + 482, + 264 + ], + "spans": [ + { + "bbox": [ + 131, + 240, + 482, + 264 + ], + "type": "text", + "content": "Fig. 4: An illustration of the calculation of the rotation matrix " + }, + { + "bbox": [ + 131, + 240, + 482, + 264 + ], + "type": "inline_equation", + "content": "R_{m,n}" + }, + { + "bbox": [ + 131, + 240, + 482, + 264 + ], + "type": "text", + "content": " in our 3D-aware rotary cross attention." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 274, + 482, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 274, + 482, + 323 + ], + "spans": [ + { + "bbox": [ + 130, + 274, + 482, + 323 + ], + "type": "text", + "content": "of " + }, + { + "bbox": [ + 130, + 274, + 482, + 323 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 130, + 274, + 482, + 323 + ], + "type": "text", + "content": " transformer layers, each with a self-attention layer and a feedforward network. 
To further enhance the expressivity of our model and incorporate more geometric cues, we propose 3D-aware rotary cross-attention layers, which we describe next." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 327, + 482, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 327, + 482, + 458 + ], + "spans": [ + { + "bbox": [ + 130, + 327, + 482, + 458 + ], + "type": "text", + "content": "3D-aware Rotary Cross-Attention. When conditioning the diffusion model on the exocentric feature map, we should consider similarities in the semantic feature and spatial 3D space. Exocentric features similar in appearance (i.e., semantic feature space) and 3D location with respect to the query features should have higher attention values in the diffusion model. Motivated by RoPE [59], we achieve this by incorporating rotations during attention weight calculations. The degree of rotation between a query and a key is determined by the angle between their 3D coordinates, with the ego camera as the center. Consequently, the cosine similarity between the query and key features can incorporate their 3D spatial angle, effectively capturing the 3D relationships between corresponding points in the egocentric and exocentric views." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 459, + 483, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 459, + 483, + 495 + ], + "spans": [ + { + "bbox": [ + 130, + 459, + 483, + 495 + ], + "type": "text", + "content": "Specifically, given a feature map " + }, + { + "bbox": [ + 130, + 459, + 483, + 495 + ], + "type": "inline_equation", + "content": "Z \\in \\mathbb{R}^{l \\times c}" + }, + { + "bbox": [ + 130, + 459, + 483, + 495 + ], + "type": "text", + "content": " in the diffusion model and the exocentric semantic feature map " + }, + { + "bbox": [ + 130, + 459, + 483, + 495 + ], + "type": "inline_equation", + "content": "F^{\\mathrm{exo}} \\in \\mathbb{R}^{l \\times c}" + }, + { + "bbox": [ + 130, + 459, + 483, + 495 + ], + "type": "text", + "content": ", the 3D-aware rotary cross-attention calculates the output " + }, + { + "bbox": [ + 130, + 459, + 483, + 495 + ], + "type": "inline_equation", + "content": "O \\in \\mathbb{R}^{l \\times c}" + }, + { + "bbox": [ + 130, + 459, + 483, + 495 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 242, + 502, + 481, + 541 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 502, + 481, + 541 + ], + "spans": [ + { + "bbox": [ + 242, + 502, + 481, + 541 + ], + "type": "interline_equation", + "content": "a _ {m, n} = \\frac {\\exp \\left(\\frac {q _ {m} ^ {T} R _ {m , n} k _ {n}}{\\sqrt {c}}\\right)}{\\sum_ {j = 1} ^ {l} \\exp \\left(\\frac {q _ {m} ^ {T} R _ {m , j} k _ {j}}{\\sqrt {c}}\\right)} \\tag {5}", + "image_path": "415da34b5a2561beaa542fcca10b44365bb4c106661cb69c424e391f29b3defe.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 247, + 543, + 481, + 574 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 543, + 481, + 574 + ], + "spans": [ + { + "bbox": [ + 247, + 543, + 481, + 574 + ], + "type": "interline_equation", + "content": "O _ {m} = \\sum_ {n = 1} ^ {l} a _ {m, n} v _ {n} \\tag {6}", + "image_path": "a28346fc5041e6ab64da07fe66def0515258d7f22e8388321baef46c76ce4820.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, 
+ 581, + 482, + 668 + ], + "spans": [ + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "inline_equation", + "content": "q_{m} = Z_{m}W_{q}" + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "text", + "content": "-th query token, " + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "inline_equation", + "content": "k_{n} = F_{n}^{\\mathrm{exo}}W_{k}" + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "text", + "content": "-th key token and " + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "inline_equation", + "content": "v_{n} = F_{n}^{\\mathrm{exo}}W_{v}" + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "text", + "content": "-th value token. " + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "inline_equation", + "content": "W_{q}, W_{k}, W_{v}" + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "text", + "content": " are learnable project matrices. " + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "inline_equation", + "content": "R_{m,n}" + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "text", + "content": " is the rotation matrix that rotates the key token to align with the value token in 3D space, where the egocentric camera is used as the center. Since the query token is in the egocentric view, we map its coordinates to the exocentric view using the relative camera pose. The rotation matrix is computed in the exocentric view using the algorithm from [33]. When " + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "inline_equation", + "content": "R_{m,n}" + }, + { + "bbox": [ + 130, + 581, + 482, + 668 + ], + "type": "text", + "content": " is an identity" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 140, + 90, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 90, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 140, + 90, + 448, + 102 + ], + "type": "text", + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 480, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 480, + 152 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 480, + 152 + ], + "type": "text", + "content": "matrix, our 3D-aware rotary cross-attention defaults to standard cross-attention. Figure 4 shows an illustration of this process. 
We insert such 3D-aware cross-attention layers after each self-attention layer in DiT." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 169, + 276, + 181 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 169, + 276, + 181 + ], + "spans": [ + { + "bbox": [ + 132, + 169, + 276, + 181 + ], + "type": "text", + "content": "3.5 Training and Inference" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 188, + 480, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 188, + 480, + 213 + ], + "spans": [ + { + "bbox": [ + 132, + 188, + 480, + 213 + ], + "type": "text", + "content": "Loss Function. Our model is trained with the diffusion denoising loss, which is the L2 loss between the predicted noise and the ground-truth added noise." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 215, + 481, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 215, + 481, + 335 + ], + "spans": [ + { + "bbox": [ + 132, + 215, + 481, + 335 + ], + "type": "text", + "content": "Implementation Details We employ DINOv2 [36] pretrained ViT-L/14 as our feature encoder " + }, + { + "bbox": [ + 132, + 215, + 481, + 335 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 132, + 215, + 481, + 335 + ], + "type": "text", + "content": " and MiDaS [4] with DPT-L as our depth estimator. Our denoiser network is built on DiT-B/2 [38] augmented with the proposed 3D-aware rotary cross-attention layers. The image sizes are " + }, + { + "bbox": [ + 132, + 215, + 481, + 335 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 132, + 215, + 481, + 335 + ], + "type": "text", + "content": " for both egocentric and exocentric images. We freeze the feature encoder, as it is already well pretrained. The model is trained with the Adam optimizer, using a learning rate of " + }, + { + "bbox": [ + 132, + 215, + 481, + 335 + ], + "type": "inline_equation", + "content": "1e - 5" + }, + { + "bbox": [ + 132, + 215, + 481, + 335 + ], + "type": "text", + "content": " for the depth estimator and " + }, + { + "bbox": [ + 132, + 215, + 481, + 335 + ], + "type": "inline_equation", + "content": "1e - 4" + }, + { + "bbox": [ + 132, + 215, + 481, + 335 + ], + "type": "text", + "content": " for the other components. We employ a batch size of 4 per GPU and train the model across 32 V100 GPUs for 100 epochs, requiring approximately 48 hours. We set the diffusion steps " + }, + { + "bbox": [ + 132, + 215, + 481, + 335 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 132, + 215, + 481, + 335 + ], + "type": "text", + "content": " to 1000 during training and sample 20 steps during inference using DDIM [58]." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 353, + 229, + 366 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 353, + 229, + 366 + ], + "spans": [ + { + "bbox": [ + 132, + 353, + 229, + 366 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 377, + 261, + 389 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 377, + 261, + 389 + ], + "spans": [ + { + "bbox": [ + 132, + 377, + 261, + 389 + ], + "type": "text", + "content": "4.1 Experimental Setup" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 396, + 480, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 396, + 480, + 479 + ], + "spans": [ + { + "bbox": [ + 132, + 396, + 480, + 479 + ], + "type": "text", + "content": "Ego-Exo4D-VT Benchmark. Our benchmark is constructed based on the Ego-Exo4D dataset [18]. Adhering to the official splits, we use 2680/708/900 takes for training, validation, and testing, respectively. Each take is approximately 30 seconds to 5 minutes long and depicts a person performing a skilled activity, such as cooking a dish, with footage from 4 exocentric cameras and 1 egocentric camera. This benchmark encompasses five diverse, skilled human activities: basketball, bike repair, cooking, health, and music." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 480, + 480, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 480, + 480, + 564 + ], + "spans": [ + { + "bbox": [ + 132, + 480, + 480, + 564 + ], + "type": "text", + "content": "The benchmark features 131 unique scenes, each characterized by complex backgrounds and numerous objects, demonstrating significant scale variation from 1 meter (e.g., a small kitchen) to 10 meters (e.g., a basketball court). These scenes are dynamic and depict subjects performing actions that involve interactions with objects. Additionally, the considerable viewpoint shift from exocentric to egocentric view causes objects to appear relatively small in the exocentric view compared to the egocentric view." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 567, + 480, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 567, + 480, + 638 + ], + "spans": [ + { + "bbox": [ + 132, + 567, + 480, + 638 + ], + "type": "text", + "content": "Baselines. Since this is a new benchmark, we re-purpose a few state-of-the-art methods for image generation: (a) pix2pix [21], a GAN-based method, (b) GNT [61], a NeRF-based method, (c) diffusion model DiT [39] and 3DiM [66]. To tailor DiT for our task, we eliminate its original class label conditioning and condition it on the exocentric image through concatenation. Additionally, we implement 3DiM based on DiT since the code for 3DiM is unavailable." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 641, + 480, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 641, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 641, + 480, + 665 + ], + "type": "text", + "content": "Metrics. 
Following NVS methods [10, 69], we employ perceptual metrics, including LPIPS [71], DISTS [13] and CLIP score [42], to measure the structural" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 278, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 278, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 278, + 101 + ], + "type": "text", + "content": "F. Cheng and M. Luo et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 162, + 168, + 452, + 255 + ], + "blocks": [ + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "lines": [ + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "spans": [ + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "type": "text", + "content": "Table 1: Quantitative comparison on the test set of Ego-Exo4D-VT benchmark. " + }, + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "type": "inline_equation", + "content": "{}^{ \\dagger }" + }, + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "type": "text", + "content": " we reimplement 3DiM based on DiT as their code is not publicly available. Our 4DIFF achieves the best results on all the metrics, outperforming the second best method 3DiM by " + }, + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "type": "inline_equation", + "content": "{3.6}\\%" + }, + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "type": "text", + "content": " in LPIPS and " + }, + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "type": "inline_equation", + "content": "{1.9}\\%" + }, + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "type": "text", + "content": " in DISTS." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 162, + 168, + 452, + 255 + ], + "lines": [ + { + "bbox": [ + 162, + 168, + 452, + 255 + ], + "spans": [ + { + "bbox": [ + 162, + 168, + 452, + 255 + ], + "type": "table", + "html": "
<table><tr><th>Method</th><th>LPIPS ↓</th><th>DISTS ↓</th><th>CLIP ↑</th><th>PSNR ↑</th><th>SSIM ↑</th></tr>
<tr><td>pix2pix [21]</td><td>0.372</td><td>0.262</td><td>68.85</td><td>15.80</td><td>0.515</td></tr>
<tr><td>GNT [61]</td><td>0.482</td><td>0.392</td><td>63.75</td><td>14.61</td><td>0.538</td></tr>
<tr><td>DiT [39]</td><td>0.412</td><td>0.231</td><td>77.98</td><td>15.47</td><td>0.564</td></tr>
<tr><td>3DiM† [66]</td><td>0.385</td><td>0.226</td><td>78.22</td><td>15.91</td><td>0.575</td></tr>
<tr><td>4DIFF (ours)</td><td>0.349</td><td>0.207</td><td>79.72</td><td>16.65</td><td>0.592</td></tr></table>
", + "image_path": "f63fa86229afb740274978461bc78c9a0ef806b298f2df9b2e37e3815589c5c2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 138, + 296, + 476, + 436 + ], + "blocks": [ + { + "bbox": [ + 132, + 263, + 479, + 286 + ], + "lines": [ + { + "bbox": [ + 132, + 263, + 479, + 286 + ], + "spans": [ + { + "bbox": [ + 132, + 263, + 479, + 286 + ], + "type": "text", + "content": "Table 2: Comparison on the seen and unseen test sets of Ego-Exo4D-VT benchmark. " + }, + { + "bbox": [ + 132, + 263, + 479, + 286 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 132, + 263, + 479, + 286 + ], + "type": "text", + "content": " we reimplement 3DiM based on DiT as their code is not publicly available." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 138, + 296, + 476, + 436 + ], + "lines": [ + { + "bbox": [ + 138, + 296, + 476, + 436 + ], + "spans": [ + { + "bbox": [ + 138, + 296, + 476, + 436 + ], + "type": "table", + "html": "
<table><tr><th>Split Setting</th><th>Method</th><th>LPIPS ↓</th><th>DISTS ↓</th><th>CLIP ↑</th><th>PSNR ↑</th><th>SSIM ↑</th></tr>
<tr><td rowspan="5">Seen Scenes</td><td>pix2pix [21]</td><td>0.371</td><td>0.260</td><td>68.68</td><td>15.90</td><td>0.519</td></tr>
<tr><td>GNT [61]</td><td>0.479</td><td>0.390</td><td>63.44</td><td>14.71</td><td>0.542</td></tr>
<tr><td>DiT [39]</td><td>0.406</td><td>0.226</td><td>78.74</td><td>15.64</td><td>0.570</td></tr>
<tr><td>3DiM† [66]</td><td>0.365</td><td>0.217</td><td>78.30</td><td>15.98</td><td>0.583</td></tr>
<tr><td>4DIFF (ours)</td><td>0.316</td><td>0.184</td><td>82.79</td><td>17.09</td><td>0.600</td></tr>
<tr><td rowspan="5">Unseen Scenes</td><td>pix2pix [21]</td><td>0.376</td><td>0.272</td><td>69.87</td><td>15.23</td><td>0.491</td></tr>
<tr><td>GNT [61]</td><td>0.497</td><td>0.405</td><td>65.60</td><td>13.97</td><td>0.513</td></tr>
<tr><td>DiT [39]</td><td>0.440</td><td>0.256</td><td>73.67</td><td>14.86</td><td>0.528</td></tr>
<tr><td>3DiM† [66]</td><td>0.436</td><td>0.269</td><td>73.26</td><td>14.90</td><td>0.542</td></tr>
<tr><td>4DIFF (ours)</td><td>0.427</td><td>0.246</td><td>76.54</td><td>14.45</td><td>0.508</td></tr></table>
", + "image_path": "6303e8e19a372c83144d6091b20e91e3bb6f0419e54c01cbc17bd8c5faf59a23.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 459, + 482, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 459, + 482, + 518 + ], + "spans": [ + { + "bbox": [ + 130, + 459, + 482, + 518 + ], + "type": "text", + "content": "and texture similarity between the synthesized egocentric image and the ground-truth image. Additionally, we include PSNR and SSIM for completeness, even though numerous existing works [7,51,53] have demonstrated that these metrics are suboptimal for evaluating image and video generation models, as they tend to favor conservative and blurry estimates." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 536, + 376, + 549 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 536, + 376, + 549 + ], + "spans": [ + { + "bbox": [ + 132, + 536, + 376, + 549 + ], + "type": "text", + "content": "4.2 Comparison with State-of-the-art Methods" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 557, + 482, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 557, + 482, + 628 + ], + "spans": [ + { + "bbox": [ + 130, + 557, + 482, + 628 + ], + "type": "text", + "content": "In Table 1, we present the comparison of our method to various baselines. Notably, diffusion-based models—DiT [39], 3DiM [66], and our 4DIFF—outperform other approaches across all metrics by large margins, including the GAN-based pix2pix and NeRF-based GNT. The poor performance of the NeRF-based method GNT on our benchmark can be attributed to itsslimited capacity for modeling hundreds of different scenes." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 630, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 630, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 630, + 482, + 666 + ], + "type": "text", + "content": "In Table 2, we present the results on seen scenes and unseen scenes respectively and show that our method achieves the best performance. 
Overall, our method surpasses the second-best performing diffusion-based 3DiM by " + }, + { + "bbox": [ + 130, + 630, + 482, + 666 + ], + "type": "inline_equation", + "content": "3.6\\%" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 140, + 90, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 90, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 140, + 90, + 447, + 102 + ], + "type": "text", + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 138, + 118, + 481, + 521 + ], + "blocks": [ + { + "bbox": [ + 138, + 118, + 481, + 521 + ], + "lines": [ + { + "bbox": [ + 138, + 118, + 481, + 521 + ], + "spans": [ + { + "bbox": [ + 138, + 118, + 481, + 521 + ], + "type": "image", + "image_path": "8b80d7e937c46b87f0cf87ef1766837fdafd717fd27fecc2e0cba73dcd7dc2aa.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 533, + 482, + 577 + ], + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 577 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 577 + ], + "type": "text", + "content": "Fig. 5: Generated samples from five scenarios: cooking, music, health, basketball, and bike repair. Our 4DIFF demonstrates the best performance across all examples in terms of geometry correctness and object quality. We brighten the images and exclude pix2pix and GNT in the scenario breakdown for a better visual experience." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 581, + 482, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 581, + 482, + 605 + ], + "spans": [ + { + "bbox": [ + 130, + 581, + 482, + 605 + ], + "type": "text", + "content": "in LPIPS and " + }, + { + "bbox": [ + 130, + 581, + 482, + 605 + ], + "type": "inline_equation", + "content": "1.9\\%" + }, + { + "bbox": [ + 130, + 581, + 482, + 605 + ], + "type": "text", + "content": " in DISTS, underscoring the effectiveness of our proposed geometry-based approach." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 605, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 482, + 665 + ], + "type": "text", + "content": "Figure 5 presents qualitative comparisons with existing methods. GAN-based pix2pix [21] and NeRF-based GNT [61] exhibit challenges in producing photorealistic images, emphasizing the necessity of a robust generative model for the Ego-Exo4D-VT benchmark. 
Our 4DIFF demonstrates superior performance across various scenarios, excelling in both geometry correctness and object qual" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "type": "text", + "content": "F. Cheng and M. Luo et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 114, + 220, + 373 + ], + "blocks": [ + { + "bbox": [ + 133, + 114, + 220, + 373 + ], + "lines": [ + { + "bbox": [ + 133, + 114, + 220, + 373 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 220, + 373 + ], + "type": "image", + "image_path": "c00a92bce599549a6f698586adccda63aa80e9b00bbf7472192bed31fd128aa3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 162, + 375, + 197, + 384 + ], + "lines": [ + { + "bbox": [ + 162, + 375, + 197, + 384 + ], + "spans": [ + { + "bbox": [ + 162, + 375, + 197, + 384 + ], + "type": "text", + "content": "Exo Input" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 130, + 399, + 482, + 464 + ], + "lines": [ + { + "bbox": [ + 130, + 399, + 482, + 464 + ], + "spans": [ + { + "bbox": [ + 130, + 399, + 482, + 464 + ], + "type": "text", + "content": "Fig. 6: We evaluate the effectiveness of our egocentric prior rendering module by visualizing the rendered prior image. Compared to NeRF-based rendering (GNT), our rendered prior image exhibits predominantly correct geometry, offering valuable egocentric cues to the diffusion model. Distortions and missing pixels arise from inaccurate depth estimation and occluded or unobserved regions in the exocentric view, which can be corrected by the diffusion model." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 221, + 114, + 304, + 372 + ], + "blocks": [ + { + "bbox": [ + 221, + 114, + 304, + 372 + ], + "lines": [ + { + "bbox": [ + 221, + 114, + 304, + 372 + ], + "spans": [ + { + "bbox": [ + 221, + 114, + 304, + 372 + ], + "type": "image", + "image_path": "09ac5589ff26abb17deb1213cb52f061a5ab2b1e421424f6415d5f5fb8bb9eca.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 249, + 374, + 277, + 384 + ], + "lines": [ + { + "bbox": [ + 249, + 374, + 277, + 384 + ], + "spans": [ + { + "bbox": [ + 249, + 374, + 277, + 384 + ], + "type": "text", + "content": "Ego GT" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 306, + 114, + 392, + 372 + ], + "blocks": [ + { + "bbox": [ + 306, + 114, + 392, + 372 + ], + "lines": [ + { + "bbox": [ + 306, + 114, + 392, + 372 + ], + "spans": [ + { + "bbox": [ + 306, + 114, + 392, + 372 + ], + "type": "image", + "image_path": "b8b5883e8904ab21488b61fdc9bf1cea87c301f4f878fff1da57837a46476205.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 374, + 383, + 385 + ], + "lines": [ + { + "bbox": [ + 302, + 374, + 383, + 385 + ], + "spans": [ + { + "bbox": [ + 302, + 374, + 383, + 385 + ], + "type": "text", + "content": "NeRF-based Rendering" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 392, + 114, + 478, + 373 + ], + "blocks": [ + { + "bbox": [ + 392, + 114, + 478, + 373 + ], + "lines": [ + { + "bbox": [ + 392, + 114, + 478, + 373 + ], + "spans": [ + { + "bbox": [ + 392, + 114, + 478, + 373 + ], + "type": "image", + "image_path": "36995d5298cf2e4957d0a8f9a6b243beba9230043114f521b6abf018ba45bcf2.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 387, + 374, + 475, + 384 + ], + "lines": [ + { + "bbox": [ + 387, + 374, + 475, + 384 + ], + "spans": [ + { + "bbox": [ + 387, + 374, + 475, + 384 + ], + "type": "text", + "content": "Our Rasterization Module" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 473, + 480, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 473, + 480, + 509 + ], + "spans": [ + { + "bbox": [ + 130, + 473, + 480, + 509 + ], + "type": "text", + "content": "ity. Our 4DIFF is especially advantageous for view synthesis in complex scenes, such as the cooking scenario, where numerous objects exhibit intricate layouts. The qualitative results align well with our quantitative results in Table 1." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 131, + 525, + 263, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 525, + 263, + 537 + ], + "spans": [ + { + "bbox": [ + 131, + 525, + 263, + 537 + ], + "type": "text", + "content": "4.3 Qualitative Analysis" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 543, + 481, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 543, + 481, + 567 + ], + "spans": [ + { + "bbox": [ + 130, + 543, + 481, + 567 + ], + "type": "text", + "content": "Investigating the visual results helps to gain a deeper insight into generative models. Thus, we perform a qualitative analysis below." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 130, + 570, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 570, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 570, + 482, + 665 + ], + "type": "text", + "content": "Is the egocentric prior useful? We address this question by visualizing the rendered egocentric prior RGB image. In Figure 6, the NeRF-based renderer GNT [61] generates blurry images for all scenes, possibly due to its limited capacity to model many diverse scenes with limited views for supervision. In contrast, our rendered egocentric images produced by point cloud rasterization are mostly correct, offering valuable egocentric cues to the diffusion model. Despite distortions and missing pixels, our diffusion model demonstrates sufficient capacity to rectify these issues effectively." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "type": "text", + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 140, + 118, + 474, + 466 + ], + "blocks": [ + { + "bbox": [ + 140, + 118, + 474, + 466 + ], + "lines": [ + { + "bbox": [ + 140, + 118, + 474, + 466 + ], + "spans": [ + { + "bbox": [ + 140, + 118, + 474, + 466 + ], + "type": "image", + "image_path": "405f22d7102ceb3f786f727dfba23d882fc90c93d5871ca0cf818e8e96b4c7de.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 475, + 482, + 510 + ], + "lines": [ + { + "bbox": [ + 130, + 475, + 482, + 510 + ], + "spans": [ + { + "bbox": [ + 130, + 475, + 482, + 510 + ], + "type": "text", + "content": "Fig. 7: Results on the unseen scenes. When synthesizing views from the scenes not encountered during training, our 4DIFF exhibits slight hallucinations but consistently outperforms existing methods, producing significantly improved results." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 520, + 482, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 520, + 482, + 590 + ], + "spans": [ + { + "bbox": [ + 130, + 520, + 482, + 590 + ], + "type": "text", + "content": "Generalization to unseen scenes. Figure 7 shows our generation results on the unseen scenes. We observe that our 4DIFF displays slight hallucinations, particularly noticeable in elements such as walls. Despite this, our method consistently outperforms existing methods. Such a robust performance can be attributed to the highly generalizable depth-based geometry priors used by our model." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 482, + 666 + ], + "type": "text", + "content": "What causes poor generation? 
We conduct an analysis to discern errors arising from the diffusion model or geometry priors. In Figure 8, we present two representative examples. The first showcases generation results in an unseen scene, where the egocentric prior image is reasonably good, but the diffusion model exhibits significant hallucinations, yielding an incorrectly generated image. We posit that this discrepancy arises because the diffusion model focuses" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "type": "text", + "content": "F. Cheng and M. Luo et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 118, + 222, + 300 + ], + "blocks": [ + { + "bbox": [ + 133, + 118, + 222, + 300 + ], + "lines": [ + { + "bbox": [ + 133, + 118, + 222, + 300 + ], + "spans": [ + { + "bbox": [ + 133, + 118, + 222, + 300 + ], + "type": "image", + "image_path": "8d66151c18e681d7a70df2474aa4f366825c46b17d254df1c71448da6cf22f0f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 309, + 482, + 365 + ], + "lines": [ + { + "bbox": [ + 130, + 309, + 482, + 365 + ], + "spans": [ + { + "bbox": [ + 130, + 309, + 482, + 365 + ], + "type": "text", + "content": "Fig. 8: Failure case examples of our method. Top: While the point cloud rasterization module performs effectively, the diffusion model produces errors when generating an egocentric view. Bottom: Although the diffusion model accurately predicts objects, the synthesized egocentric view appears more zoomed-out than the ground truth view. This can be attributed to suboptimal egocentric layout synthesis." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 222, + 118, + 306, + 300 + ], + "blocks": [ + { + "bbox": [ + 222, + 118, + 306, + 300 + ], + "lines": [ + { + "bbox": [ + 222, + 118, + 306, + 300 + ], + "spans": [ + { + "bbox": [ + 222, + 118, + 306, + 300 + ], + "type": "image", + "image_path": "e3188bbeb7f6979856526fbfc09a8e3ced963ee0d148bf9fa6d5877ddcab80c7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 306, + 118, + 394, + 300 + ], + "blocks": [ + { + "bbox": [ + 306, + 118, + 394, + 300 + ], + "lines": [ + { + "bbox": [ + 306, + 118, + 394, + 300 + ], + "spans": [ + { + "bbox": [ + 306, + 118, + 394, + 300 + ], + "type": "image", + "image_path": "6b7038d3a6cbeb3d000e7185604536eb886025b4ef2008c04f6cb751a8a7d438.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 394, + 118, + 481, + 300 + ], + "blocks": [ + { + "bbox": [ + 394, + 118, + 481, + 300 + ], + "lines": [ + { + "bbox": [ + 394, + 118, + 481, + 300 + ], + "spans": [ + { + "bbox": [ + 394, + 118, + 481, + 300 + ], + "type": "image", + "image_path": "4a83e828a2639d07996a7812f789f9be8978a26b13fe095edc6f4dd1e0818e8a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 380, + 482, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 380, + 482, + 427 + ], + "spans": [ + { + "bbox": [ + 130, + 380, + 482, + 427 + ], + "type": "text", + "content": "on modeling the conditional training distribution, limiting its generalization to substantially different scenes not present in the training data. This limitation can be mitigated by employing a large-scale pretrained diffusion model that has already acquired knowledge from diverse scenes and objects in 2D space." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 429, + 482, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 429, + 482, + 487 + ], + "spans": [ + { + "bbox": [ + 130, + 429, + 482, + 487 + ], + "type": "text", + "content": "In the second example, we show that despite the incorrectly rendered egocentric prior image, the diffusion model can generate a photorealistic image, which is more zoomed-out than the ground-truth egocentric image. This observation suggests that the diffusion model can robustly handle inaccurately generated egocentric geometry priors." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 508, + 246, + 519 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 508, + 246, + 519 + ], + "spans": [ + { + "bbox": [ + 132, + 508, + 246, + 519 + ], + "type": "text", + "content": "4.4 Ablation Studies" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 530, + 482, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 530, + 482, + 626 + ], + "spans": [ + { + "bbox": [ + 130, + 530, + 482, + 626 + ], + "type": "text", + "content": "How important are our proposed modules? We study the importance of (i) 3D-aware rotary cross-attentions and (ii) egocentric point cloud rasterization by sequentially removing them from our framework. As shown in Tab. 
3a, removing the 3D cross-attention worsens the LPIPS by " + }, + { + "bbox": [ + 130, + 530, + 482, + 626 + ], + "type": "inline_equation", + "content": "2.4\\%" + }, + { + "bbox": [ + 130, + 530, + 482, + 626 + ], + "type": "text", + "content": ". Additionally, removing the point cloud rasterization further degrades LPIPS by " + }, + { + "bbox": [ + 130, + 530, + 482, + 626 + ], + "type": "inline_equation", + "content": "3.9\\%" + }, + { + "bbox": [ + 130, + 530, + 482, + 626 + ], + "type": "text", + "content": ". Moreover, as shown in Figure 5, our 4DIFF with the proposed geometry priors consistently outperforms geometry-free diffusion models DiT and 3DiM in all scenarios. These results show the effectiveness of our proposed modules." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "type": "text", + "content": "Can we pretrain the depth estimator from scratch? Tab. 3b shows that training our model without using a pretrained depth estimator results in a significant " + }, + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "type": "inline_equation", + "content": "4.3\\%" + }, + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "type": "text", + "content": " degradation in LPIPS. This suggests that an inaccurate depth" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "type": "text", + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 143, + 163, + 264, + 236 + ], + "blocks": [ + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "lines": [ + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "spans": [ + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "type": "text", + "content": "Table 3: Ablation studies on various design choices. (a) We study the importance of each module by removing each module sequentially; (b) Using a pretrained depth estimator significantly improves the LPIPS by " + }, + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "type": "inline_equation", + "content": "4.3\\%" + }, + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "type": "text", + "content": "; (c) DINOV2 outperforms CLIP by " + }, + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "type": "inline_equation", + "content": "1.7\\%" + }, + { + "bbox": [ + 130, + 114, + 482, + 159 + ], + "type": "text", + "content": " in LPIPS." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 143, + 163, + 264, + 236 + ], + "lines": [ + { + "bbox": [ + 143, + 163, + 264, + 236 + ], + "spans": [ + { + "bbox": [ + 143, + 163, + 264, + 236 + ], + "type": "table", + "html": "
<table><tr><th colspan="2">(a) Module ablation.</th></tr>
<tr><th>Model</th><th>LPIPS ↓</th></tr>
<tr><td>4DIFF</td><td>0.349</td></tr>
<tr><td>- 3D Rotary CA</td><td>0.373</td></tr>
<tr><td>- ego rasterization</td><td>0.412</td></tr></table>
", + "image_path": "9e4d48121a4fa428522c4f6081311b199de48c065816ded43548a28561642953.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 280, + 163, + 369, + 224 + ], + "blocks": [ + { + "bbox": [ + 280, + 163, + 369, + 224 + ], + "lines": [ + { + "bbox": [ + 280, + 163, + 369, + 224 + ], + "spans": [ + { + "bbox": [ + 280, + 163, + 369, + 224 + ], + "type": "table", + "html": "
<table><tr><th colspan="2">(b) Depth estimator.</th></tr>
<tr><th>Pretrained</th><th>LPIPS ↓</th></tr>
<tr><td>✓</td><td>0.349</td></tr>
<tr><td>✗</td><td>0.392</td></tr></table>
", + "image_path": "854e0ff8063ceb46619854e375352c01367255cf9a7d1c6d829a0b102325d1ea.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 373, + 163, + 460, + 224 + ], + "blocks": [ + { + "bbox": [ + 373, + 163, + 460, + 224 + ], + "lines": [ + { + "bbox": [ + 373, + 163, + 460, + 224 + ], + "spans": [ + { + "bbox": [ + 373, + 163, + 460, + 224 + ], + "type": "table", + "html": "
<table><tr><th colspan="2">(c) Feature encoder.</th></tr>
<tr><th>Feat. Enc.</th><th>LPIPS ↓</th></tr>
<tr><td>DINOv2</td><td>0.349</td></tr>
<tr><td>CLIP</td><td>0.366</td></tr></table>
", + "image_path": "69e10122c803e26b2051d7d9d6099c92ba0b60ca51c4bd1175681f910896973d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 243, + 479, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 243, + 479, + 301 + ], + "spans": [ + { + "bbox": [ + 130, + 243, + 479, + 301 + ], + "type": "text", + "content": "estimation may lead to most points from the exocentric view projected outside of the egocentric view. Consequently, these points will not receive sufficient gradient updates during training, leading to poor convergence. Thus, we conclude that a sufficiently accurate initial depth prediction is crucial for good performance." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 306, + 482, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 306, + 482, + 390 + ], + "spans": [ + { + "bbox": [ + 130, + 306, + 482, + 390 + ], + "type": "text", + "content": "Which feature encoder should we use? We evaluate two strong feature encoders for obtaining a semantic representation for an exocentric RGB image: DINOv2 [36], and CLIP [42], both employing a ViT-L/14 backbone. The DINOv2 variant outperforms the CLIP variant by " + }, + { + "bbox": [ + 130, + 306, + 482, + 390 + ], + "type": "inline_equation", + "content": "1.7\\%" + }, + { + "bbox": [ + 130, + 306, + 482, + 390 + ], + "type": "text", + "content": " LPIPS. We conjecture that compared to CLIP's vision-language pretraining, DINOv2's self-supervised pretraining leads to higher quality lower-level visual features which are important for exocentric to egocentric image translation problem." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 406, + 311, + 418 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 406, + 311, + 418 + ], + "spans": [ + { + "bbox": [ + 132, + 406, + 311, + 418 + ], + "type": "text", + "content": "5 Discussion and Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 428, + 482, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 428, + 482, + 632 + ], + "spans": [ + { + "bbox": [ + 130, + 428, + 482, + 632 + ], + "type": "text", + "content": "In this work, we proposed 4DIFF, a 3D-aware transformer-based diffusion model that significantly outperforms prior approaches on the challenging Ego-Exo4D-VT benchmark. Our method demonstrates robust generalization to novel environments not encountered during training. Despite our excellent results, we also acknowledge a few limitations. Firstly, our method assumes known camera poses during training and inference, limiting its applicability to real-world scenarios. Integrating camera pose estimation via a head pose estimator could address this limitation, while remains difficult to estimate automatically. Secondly, our method focuses on image-to-image translation, leaving room for video generation by incorporating spatial-temporal cues. Thirdly, enhancing the quality of generated objects and improving generalization to unseen environments could be achieved by leveraging a more powerful pretrained diffusion model (e.g., Stable Diffusion [49]). Lastly, extending our framework from frame-level synthesis to object-level synthesis, considering the locations and appearances of objects such as hands and interacted objects, would bring it closer to real-world applications like AR/VR coaching. We plan to explore these research directions in our future work." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 279, + 102 + ], + "type": "text", + "content": "F. Cheng and M. Luo et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "type": "text", + "content": "Acknowledgment We thank Hanwen Jiang, Yan-Bo Lin, Md Mohaiminul Islam, Ce Zhang, Yue Yang, and Soumitri Chattopadhyay for their helpful discussions. UT Austin is supported by NSF Grants AF 1901292, CNS 2148141, Tripods CCF 1934932, IFML CCF 2019844 and research gifts by Western Digital, Amazon, WNCG IAP, UT Austin Machine Learning Lab (MLL), Cisco, the Stanly P. Finch Centennial Professorship in Engineering. UNC is supported by Sony Faculty Innovation Award, Laboratory for Analytic Sciences via NC State University, ONR Award N00014-23-1-2356. K.G. is paid as a research scientist at Meta." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 243, + 198, + 255 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 243, + 198, + 255 + ], + "spans": [ + { + "bbox": [ + 132, + 243, + 198, + 255 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 269, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 136, + 269, + 481, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 269, + 481, + 312 + ], + "spans": [ + { + "bbox": [ + 136, + 269, + 481, + 312 + ], + "type": "text", + "content": "1. Ardeshir, S., Borji, A.: Ego2top: Matching viewers in egocentric and top-view videos. In: Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V 14. pp. 253-268. Springer (2016)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 136, + 313, + 481, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 313, + 481, + 357 + ], + "spans": [ + { + "bbox": [ + 136, + 313, + 481, + 357 + ], + "type": "text", + "content": "2. Barron, J.T., Mildenhall, B., Tancik, M., Hedman, P., Martin-Brualla, R., Srinivasan, P.P.: Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5855-5864 (2021)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 136, + 357, + 481, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 357, + 481, + 400 + ], + "spans": [ + { + "bbox": [ + 136, + 357, + 481, + 400 + ], + "type": "text", + "content": "3. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mipnerf 360: Unbounded anti-aliased neural radiance fields. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
5470-5479 (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 136, + 401, + 481, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 401, + 481, + 423 + ], + "spans": [ + { + "bbox": [ + 136, + 401, + 481, + 423 + ], + "type": "text", + "content": "4. Birkl, R., Wofk, D., Müller, M.: Midas v3.1 - a model zoo for robust monocular relative depth estimation. arXiv preprint arXiv:2307.14460 (2023)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 136, + 423, + 481, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 423, + 481, + 456 + ], + "spans": [ + { + "bbox": [ + 136, + 423, + 481, + 456 + ], + "type": "text", + "content": "5. Cao, A., Rockwell, C., Johnson, J.: Fwd: Real-time novel view synthesis with forward warping and depth. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 15713-15724 (2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 136, + 456, + 481, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 456, + 481, + 500 + ], + "spans": [ + { + "bbox": [ + 136, + 456, + 481, + 500 + ], + "type": "text", + "content": "6. Chan, E.R., Lin, C.Z., Chan, M.A., Nagano, K., Pan, B., De Mello, S., Gallo, O., Guibas, L.J., Tremblay, J., Khamis, S., et al.: Efficient geometry-aware 3d generative adversarial networks. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 16123-16133 (2022)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 136, + 501, + 481, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 501, + 481, + 533 + ], + "spans": [ + { + "bbox": [ + 136, + 501, + 481, + 533 + ], + "type": "text", + "content": "7. Chan, E.R., Nagano, K., Chan, M.A., Bergman, A.W., Park, J.J., Levy, A., Aittala, M., De Mello, S., Karras, T., Wetzstein, G.: Generative novel view synthesis with 3d-aware diffusion models. arXiv preprint arXiv:2304.02602 (2023)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 136, + 533, + 481, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 533, + 481, + 566 + ], + "spans": [ + { + "bbox": [ + 136, + 533, + 481, + 566 + ], + "type": "text", + "content": "8. Chang, A., Dai, A., Funkhouser, T., Halber, M., Niessner, M., Savva, M., Song, S., Zeng, A., Zhang, Y.: Matterport3d: Learning from rgb-d data in indoor environments. arXiv preprint arXiv:1709.06158 (2017)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 136, + 566, + 481, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 566, + 481, + 599 + ], + "spans": [ + { + "bbox": [ + 136, + 566, + 481, + 599 + ], + "type": "text", + "content": "9. Chang, A.X., Funkhouser, T., Guibas, L., Hanrahan, P., Huang, Q., Li, Z., Savarese, S., Savva, M., Song, S., Su, H., et al.: Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012 (2015)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 133, + 600, + 481, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 600, + 481, + 632 + ], + "spans": [ + { + "bbox": [ + 133, + 600, + 481, + 632 + ], + "type": "text", + "content": "10. Chen, H., Gu, J., Chen, A., Tian, W., Tu, Z., Liu, L., Su, H.: Single-stage diffusion nef: A unified approach to 3d generation and reconstruction. 
arXiv preprint arXiv:2304.06714 (2023)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 133, + 632, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 632, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 133, + 632, + 481, + 665 + ], + "type": "text", + "content": "11. Creswell, A., White, T., Dumoulin, V., Arulkumaran, K., Sengupta, B., Bharath, A.A.: Generative adversarial networks: An overview. IEEE signal processing magazine 35(1), 53-65 (2018)" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "type": "text", + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 117, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 133, + 117, + 480, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 117, + 480, + 138 + ], + "spans": [ + { + "bbox": [ + 133, + 117, + 480, + 138 + ], + "type": "text", + "content": "12. Dhariwal, P., Nichol, A.: Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems 34, 8780-8794 (2021)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 140, + 480, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 140, + 480, + 172 + ], + "spans": [ + { + "bbox": [ + 133, + 140, + 480, + 172 + ], + "type": "text", + "content": "13. Ding, K., Ma, K., Wang, S., Simoncelli, E.P.: Image quality assessment: Unifying structure and texture similarity. IEEE transactions on pattern analysis and machine intelligence 44(5), 2567-2581 (2020)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 173, + 480, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 173, + 480, + 195 + ], + "spans": [ + { + "bbox": [ + 133, + 173, + 480, + 195 + ], + "type": "text", + "content": "14. Duan, Y., Guo, X., Zhu, Z.: Diffusiondepth: Diffusion denoising approach for monocular depth estimation. arXiv preprint arXiv:2303.05021 (2023)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 134, + 196, + 480, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 196, + 480, + 239 + ], + "spans": [ + { + "bbox": [ + 134, + 196, + 480, + 239 + ], + "type": "text", + "content": "15. Flynn, J., Broxton, M., Debevec, P., DuVall, M., Fyffe, G., Overbeck, R., Snavely, N., Tucker, R.: Deepview: View synthesis with learned gradient descent. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2367-2376 (2019)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 134, + 240, + 480, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 240, + 480, + 274 + ], + "spans": [ + { + "bbox": [ + 134, + 240, + 480, + 274 + ], + "type": "text", + "content": "16. 
Flynn, J., Neulander, I., Philbin, J., Snively, N.: Deepstereo: Learning to predict new views from the world's imagery. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 5515-5524 (2016)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 134, + 274, + 480, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 274, + 480, + 296 + ], + "spans": [ + { + "bbox": [ + 134, + 274, + 480, + 296 + ], + "type": "text", + "content": "17. Geiger, A., Lenz, P., Stiller, C., Urtasun, R.: The kitti vision benchmark suite. URL http://www.cvlibs.net/datasets/kitti2(5) (2015)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 134, + 297, + 480, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 297, + 480, + 340 + ], + "spans": [ + { + "bbox": [ + 134, + 297, + 480, + 340 + ], + "type": "text", + "content": "18. Grauman, K., Westbury, A., Torresani, L., Kitani, K., Malik, J., Afouras, T., Ashutosh, K., Baiyya, V., Bansal, S., Boote, B., et al.: Ego-exo4d: Understanding skilled human activity from first-and third-person perspectives. arXiv preprint arXiv:2311.18259 (2023)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 134, + 341, + 480, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 341, + 480, + 363 + ], + "spans": [ + { + "bbox": [ + 134, + 341, + 480, + 363 + ], + "type": "text", + "content": "19. Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. Advances in Neural Information Processing Systems 33, 6840-6851 (2020)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 133, + 364, + 480, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 364, + 480, + 396 + ], + "spans": [ + { + "bbox": [ + 133, + 364, + 480, + 396 + ], + "type": "text", + "content": "20. Ho, J., Sahara, C., Chan, W., Fleet, D.J., Norouzi, M., Salimans, T.: Cascaded diffusion models for high fidelity image generation. The Journal of Machine Learning Research 23(1), 2249-2281 (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 133, + 397, + 480, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 397, + 480, + 418 + ], + "spans": [ + { + "bbox": [ + 133, + 397, + 480, + 418 + ], + "type": "text", + "content": "21. Isola, P., Zhu, J.Y., Zhou, T., Efros, A.A.: Image-to-image translation with conditional adversarial networks. CVPR (2017)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 133, + 420, + 480, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 420, + 480, + 464 + ], + "spans": [ + { + "bbox": [ + 133, + 420, + 480, + 464 + ], + "type": "text", + "content": "22. Johnson, J., Hariharan, B., Van Der Maaten, L., Fei-Fei, L., Lawrence Zitnick, C., Girshick, R.: Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2901–2910 (2017)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 133, + 464, + 480, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 464, + 480, + 486 + ], + "spans": [ + { + "bbox": [ + 133, + 464, + 480, + 486 + ], + "type": "text", + "content": "23. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. 
ACM Transactions on Graphics 42(4) (2023)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 133, + 487, + 480, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 487, + 480, + 520 + ], + "spans": [ + { + "bbox": [ + 133, + 487, + 480, + 520 + ], + "type": "text", + "content": "24. Koh, J.Y., Lee, H., Yang, Y., Baldridge, J., Anderson, P.: Pathdreamer: A world model for indoor navigation. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 14738-14748 (2021)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 133, + 521, + 480, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 521, + 480, + 553 + ], + "spans": [ + { + "bbox": [ + 133, + 521, + 480, + 553 + ], + "type": "text", + "content": "25. Kulhánek, J., Derner, E., Sattler, T., Babuška, R.: Viewformer: Nerf-free neural rendering from few images using transformers. In: European Conference on Computer Vision. pp. 198-216. Springer (2022)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 133, + 554, + 480, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 554, + 480, + 587 + ], + "spans": [ + { + "bbox": [ + 133, + 554, + 480, + 587 + ], + "type": "text", + "content": "26. Kwon, T., Tekin, B., Stühmer, J., Bogo, F., Pollefeys, M.: H2o: Two hands manipulating objects for first person interaction recognition. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10138-10148 (2021)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 133, + 588, + 480, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 588, + 480, + 620 + ], + "spans": [ + { + "bbox": [ + 133, + 588, + 480, + 620 + ], + "type": "text", + "content": "27. Li, J., Liu, K., Wu, J.: Ego-body pose estimation via ego-head pose estimation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 17142-17151 (2023)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 133, + 621, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 621, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 133, + 621, + 480, + 665 + ], + "type": "text", + "content": "28. Liu, G., Tang, H., Latapie, H., Yan, Y.: Exocentric to egocentric image generation via parallel generative adversarial network. In: ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). pp. 1843-1847. IEEE (2020)" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 278, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 278, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 278, + 101 + ], + "type": "text", + "content": "F. Cheng and M. Luo et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 480, + 666 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 132, + 116, + 480, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 480, + 149 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 480, + 149 + ], + "type": "text", + "content": "29. Liu, G., Tang, H., Latapie, H.M., Corso, J.J., Yan, Y.: Cross-view exocentric to egocentric video synthesis. In: Proceedings of the 29th ACM International Conference on Multimedia. pp. 974-982 (2021)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 150, + 480, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 150, + 480, + 182 + ], + "spans": [ + { + "bbox": [ + 132, + 150, + 480, + 182 + ], + "type": "text", + "content": "30. Liu, R., Wu, R., Van Hoorick, B., Tokmakov, P., Zakharov, S., Vondrick, C.: Zero-1-to-3: Zero-shot one image to 3d object. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 9298-9309 (2023)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 182, + 480, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 182, + 480, + 214 + ], + "spans": [ + { + "bbox": [ + 132, + 182, + 480, + 214 + ], + "type": "text", + "content": "31. Lombardi, S., Simon, T., Saragih, J., Schwartz, G., Lehrmann, A., Sheikh, Y.: Neural volumes: Learning dynamic renderable volumes from images. arXiv preprint arXiv:1906.07751 (2019)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 214, + 480, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 214, + 480, + 236 + ], + "spans": [ + { + "bbox": [ + 132, + 214, + 480, + 236 + ], + "type": "text", + "content": "32. Luo, M., Xue, Z., Dimakis, A., Grauman, K.: Put myself in your shoes: Lifting the egocentric perspective from exocentric videos. In: ECCV (2024)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 236, + 480, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 236, + 480, + 257 + ], + "spans": [ + { + "bbox": [ + 132, + 236, + 480, + 257 + ], + "type": "text", + "content": "33. Mathews, J.: Coordinate-free rotation formalism. American Journal of Physics 44(12), 1210-1210 (1976)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 257, + 480, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 257, + 480, + 289 + ], + "spans": [ + { + "bbox": [ + 132, + 257, + 480, + 289 + ], + "type": "text", + "content": "34. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM 65(1), 99-106 (2021)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 289, + 480, + 311 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 289, + 480, + 311 + ], + "spans": [ + { + "bbox": [ + 132, + 289, + 480, + 311 + ], + "type": "text", + "content": "35. Niklaus, S., Mai, L., Yang, J., Liu, F.: 3d ken burns effect from a single image. 
ACM Transactions on Graphics (ToG) 38(6), 1-15 (2019)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 311, + 480, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 311, + 480, + 342 + ], + "spans": [ + { + "bbox": [ + 132, + 311, + 480, + 342 + ], + "type": "text", + "content": "36. Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., et al.: Dinov2: Learning robust visual features without supervision. arXiv preprint arXiv:2304.07193 (2023)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 342, + 480, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 342, + 480, + 374 + ], + "spans": [ + { + "bbox": [ + 132, + 342, + 480, + 374 + ], + "type": "text", + "content": "37. Park, K., Sinha, U., Barron, J.T., Bouaziz, S., Goldman, D.B., Seitz, S.M., Martin-Brualla, R.: Nerfies: Deformable neural radiance fields. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5865-5874 (2021)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 374, + 480, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 374, + 480, + 396 + ], + "spans": [ + { + "bbox": [ + 132, + 374, + 480, + 396 + ], + "type": "text", + "content": "38. Peebles, W., Xie, S.: Scalable diffusion models with transformers. arXiv preprint arXiv:2212.09748 (2022)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 396, + 480, + 428 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 396, + 480, + 428 + ], + "spans": [ + { + "bbox": [ + 132, + 396, + 480, + 428 + ], + "type": "text", + "content": "39. Peebles, W., Xie, S.: Scalable diffusion models with transformers. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4195-4205 (2023)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 428, + 480, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 428, + 480, + 471 + ], + "spans": [ + { + "bbox": [ + 132, + 428, + 480, + 471 + ], + "type": "text", + "content": "40. Popov, S., Bauszat, P., Ferrari, V.: Corenet: Coherent 3d scene reconstruction from a single rgb image. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part II 16. pp. 366-383. Springer (2020)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 472, + 480, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 472, + 480, + 504 + ], + "spans": [ + { + "bbox": [ + 132, + 472, + 480, + 504 + ], + "type": "text", + "content": "41. Pumarola, A., Corona, E., Pons-Moll, G., Moreno-Noguer, F.: D-nerf: Neural radiance fields for dynamic scenes. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 10318-10327 (2021)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 504, + 480, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 504, + 480, + 547 + ], + "spans": [ + { + "bbox": [ + 132, + 504, + 480, + 547 + ], + "type": "text", + "content": "42. Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International conference on machine learning. pp. 8748-8763. 
PMLR (2021)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 547, + 480, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 547, + 480, + 579 + ], + "spans": [ + { + "bbox": [ + 132, + 547, + 480, + 579 + ], + "type": "text", + "content": "43. Regmi, K., Borji, A.: Cross-view image synthesis using conditional gans. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition. pp. 3501-3510 (2018)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 579, + 480, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 579, + 480, + 601 + ], + "spans": [ + { + "bbox": [ + 132, + 579, + 480, + 601 + ], + "type": "text", + "content": "44. Ren, B., Tang, H., Sebe, N.: Cascaded cross mlp-mixer gans for cross-view image translation. arXiv preprint arXiv:2110.10183 (2021)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 601, + 480, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 601, + 480, + 633 + ], + "spans": [ + { + "bbox": [ + 132, + 601, + 480, + 633 + ], + "type": "text", + "content": "45. Ren, X., Wang, X.: Look outside the room: Synthesizing a consistent long-term 3d scene video from a single image. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3563-3573 (2022)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 132, + 633, + 480, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 633, + 480, + 666 + ], + "spans": [ + { + "bbox": [ + 132, + 633, + 480, + 666 + ], + "type": "text", + "content": "46. Riegler, G., Koltun, V.: Free view synthesis. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XIX 16. pp. 623-640. Springer (2020)" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "type": "text", + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 117, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 133, + 117, + 480, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 117, + 480, + 138 + ], + "spans": [ + { + "bbox": [ + 133, + 117, + 480, + 138 + ], + "type": "text", + "content": "47. Riegler, G., Koltun, V.: Stable view synthesis. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 12216-12225 (2021)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 140, + 480, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 140, + 480, + 171 + ], + "spans": [ + { + "bbox": [ + 133, + 140, + 480, + 171 + ], + "type": "text", + "content": "48. Rockwell, C., Fouhey, D.F., Johnson, J.: Pixelsynth: Generating a 3d-consistent experience from a single image. 
In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 14104-14113 (2021)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 172, + 480, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 172, + 480, + 205 + ], + "spans": [ + { + "bbox": [ + 133, + 172, + 480, + 205 + ], + "type": "text", + "content": "49. Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 10684-10695 (2022)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 205, + 480, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 205, + 480, + 237 + ], + "spans": [ + { + "bbox": [ + 132, + 205, + 480, + 237 + ], + "type": "text", + "content": "50. Rombach, R., Esser, P., Ommer, B.: Geometry-free view synthesis: Transformers and no 3d priors. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 14356-14366 (2021)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 238, + 480, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 238, + 480, + 270 + ], + "spans": [ + { + "bbox": [ + 132, + 238, + 480, + 270 + ], + "type": "text", + "content": "51. Sahara, C., Chan, W., Chang, H., Lee, C., Ho, J., Salimans, T., Fleet, D., Norouzi, M.: Palette: Image-to-image diffusion models. In: ACM SIGGRAPH 2022 Conference Proceedings. pp. 1-10 (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 271, + 480, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 271, + 480, + 314 + ], + "spans": [ + { + "bbox": [ + 132, + 271, + 480, + 314 + ], + "type": "text", + "content": "52. Sahara, C., Chan, W., Saxena, S., Li, L., Whang, J., Denton, E.L., Ghasemipour, K., Gontijo Lopes, R., Karagol Ayan, B., Salimans, T., et al.: Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems 35, 36479-36494 (2022)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 315, + 480, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 315, + 480, + 346 + ], + "spans": [ + { + "bbox": [ + 132, + 315, + 480, + 346 + ], + "type": "text", + "content": "53. Sahara, C., Ho, J., Chan, W., Salimans, T., Fleet, D.J., Norouzi, M.: Image superresolution via iterative refinement. IEEE Transactions on Pattern Analysis and Machine Intelligence 45(4), 4713-4726 (2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 347, + 480, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 347, + 480, + 402 + ], + "spans": [ + { + "bbox": [ + 132, + 347, + 480, + 402 + ], + "type": "text", + "content": "54. Sajjadi, M.S., Meyer, H., Pot, E., Bergmann, U., Greff, K., Radwan, N., Vora, S., Lucic, M., Duckworth, D., Dosovitskiy, A., et al.: Scene representation transformer: Geometry-free novel view synthesis through set-latent scene representations. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 6229-6238 (2022)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 403, + 480, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 403, + 480, + 445 + ], + "spans": [ + { + "bbox": [ + 132, + 403, + 480, + 445 + ], + "type": "text", + "content": "55. 
Sener, F., Chatterjee, D., Shelepov, D., He, K., Singhania, D., Wang, R., Yao, A.: Assembly101: A large-scale multi-view video dataset for understanding procedural activities. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 21096-21106 (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 447, + 480, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 447, + 480, + 489 + ], + "spans": [ + { + "bbox": [ + 132, + 447, + 480, + 489 + ], + "type": "text", + "content": "56. Sitzmann, V., Thies, J., Heide, F., Nießner, M., Wetzstein, G., Zollhofer, M.: Deepvoxels: Learning persistent 3d feature embeddings. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2437-2446 (2019)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 491, + 480, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 491, + 480, + 522 + ], + "spans": [ + { + "bbox": [ + 132, + 491, + 480, + 522 + ], + "type": "text", + "content": "57. Sitzmann, V., Zollhöfer, M., Wetzstein, G.: Scene representation networks: Continuous 3d-structure-aware neural scene representations. Advances in Neural Information Processing Systems 32 (2019)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 523, + 480, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 523, + 480, + 544 + ], + "spans": [ + { + "bbox": [ + 132, + 523, + 480, + 544 + ], + "type": "text", + "content": "58. Song, J., Meng, C., Ermon, S.: Denoising diffusion implicit models. arXiv:2010.02502 (October 2020), https://arxiv.org/abs/2010.02502" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 545, + 480, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 545, + 480, + 567 + ], + "spans": [ + { + "bbox": [ + 132, + 545, + 480, + 567 + ], + "type": "text", + "content": "59. Su, J., Ahmed, M., Lu, Y., Pan, S., Bo, W., Liu, Y.: Roformer: Enhanced transformer with rotary position embedding. Neurocomputing 568, 127063 (2024)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 567, + 480, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 567, + 480, + 588 + ], + "spans": [ + { + "bbox": [ + 132, + 567, + 480, + 588 + ], + "type": "text", + "content": "60. Szymanowicz, S., Rupprecht, C., Vedaldi, A.: Splatter image: Ultra-fast single-view 3d reconstruction. arXiv preprint arXiv:2312.13150 (2023)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 589, + 480, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 589, + 480, + 621 + ], + "spans": [ + { + "bbox": [ + 132, + 589, + 480, + 621 + ], + "type": "text", + "content": "61. T, M.V., Wang, P., Chen, X., Chen, T., Venugopalan, S., Wang, Z.: Is attention all that neRF needs? In: The Eleventh International Conference on Learning Representations (2023), https://openreview.net/forum?id=xE-LtsE-xx" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 622, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 622, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 622, + 480, + 665 + ], + "type": "text", + "content": "62. Tang, H., Xu, D., Sebe, N., Wang, Y., Corso, J.J., Yan, Y.: Multi-channel attention selection gan with cascaded semantic guidance for cross-view image translation. 
In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 2417-2426 (2019)" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 278, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 278, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 278, + 101 + ], + "type": "text", + "content": "F. Cheng and M. Luo et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 480, + 424 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 132, + 116, + 480, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 480, + 138 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 480, + 138 + ], + "type": "text", + "content": "63. Trevithick, A., Yang, B.: Grf: Learning a general radiance field for 3d scene representation and rendering (2020)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 139, + 480, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 139, + 480, + 171 + ], + "spans": [ + { + "bbox": [ + 132, + 139, + 480, + 171 + ], + "type": "text", + "content": "64. Tucker, R., Snavely, N.: Single-view view synthesis with multiplane images. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 551-560 (2020)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 172, + 480, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 172, + 480, + 215 + ], + "spans": [ + { + "bbox": [ + 132, + 172, + 480, + 215 + ], + "type": "text", + "content": "65. Wang, Q., Wang, Z., Genova, K., Srinivasan, P.P., Zhou, H., Barron, J.T., MartinBrualla, R., Snavely, N., Funkhouser, T.: Ibrnet: Learning multi-view image-based rendering. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4690-4699 (2021)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 216, + 480, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 216, + 480, + 237 + ], + "spans": [ + { + "bbox": [ + 132, + 216, + 480, + 237 + ], + "type": "text", + "content": "66. Watson, D., Chan, W., Martin-Brualla, R., Ho, J., Tagliasacchi, A., Norouzi, M.: Novel view synthesis with diffusion models. arXiv preprint arXiv:2210.04628 (2022)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 237, + 480, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 237, + 480, + 270 + ], + "spans": [ + { + "bbox": [ + 132, + 237, + 480, + 270 + ], + "type": "text", + "content": "67. Wiles, O., Gkioxari, G., Szeliski, R., Johnson, J.: Synsin: End-to-end view synthesis from a single image. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
7467-7477 (2020)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 270, + 480, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 270, + 480, + 303 + ], + "spans": [ + { + "bbox": [ + 132, + 270, + 480, + 303 + ], + "type": "text", + "content": "68. Yang, L., Kang, B., Huang, Z., Xu, X., Feng, J., Zhao, H.: Depth anything: Unleashing the power of large-scale unlabeled data. arXiv preprint arXiv:2401.10891 (2024)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 303, + 480, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 303, + 480, + 336 + ], + "spans": [ + { + "bbox": [ + 132, + 303, + 480, + 336 + ], + "type": "text", + "content": "69. Yu, A., Ye, V., Tancik, M., Kanazawa, A.: pixelnerf: Neural radiance fields from one or few images. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4578-4587 (2021)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 336, + 480, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 336, + 480, + 369 + ], + "spans": [ + { + "bbox": [ + 132, + 336, + 480, + 369 + ], + "type": "text", + "content": "70. Zhai, M., Bessinger, Z., Workman, S., Jacobs, N.: Predicting ground-level scene layout from aerial imagery. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 867-875 (2017)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 369, + 480, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 369, + 480, + 402 + ], + "spans": [ + { + "bbox": [ + 132, + 369, + 480, + 402 + ], + "type": "text", + "content": "71. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 586-595 (2018)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 402, + 480, + 424 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 402, + 480, + 424 + ], + "spans": [ + { + "bbox": [ + 132, + 402, + 480, + 424 + ], + "type": "text", + "content": "72. Zhou, T., Tucker, R., Flynn, J., Fyffe, G., Snavely, N.: Stereo magnification: Learning view synthesis using multiplane images. 
arXiv preprint arXiv:1805.09817 (2018)" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 140, + 91, + 447, + 102 + ], + "type": "text", + "content": "4DIFF: 3D-Aware Diffusion Model for Third-to-First Viewpoint Translation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/76913771-7094-44e1-8b30-8ea4e2210b42_content_list.json b/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/76913771-7094-44e1-8b30-8ea4e2210b42_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..af2ed2c289bac745fc37b99398ea6508215cec0b --- /dev/null +++ b/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/76913771-7094-44e1-8b30-8ea4e2210b42_content_list.json @@ -0,0 +1,2131 @@ +[ + { + "type": "text", + "text": "6DGS: 6D Pose Estimation from a Single Image and a 3D Gaussian Splitting Model", + "text_level": 1, + "bbox": [ + 256, + 140, + 746, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Bortolon Matteo $^{1,2,3}$ , Theodore Tsesmelis $^{1}$ , Stuart James $^{1,4}$ , Fabio Poiesi $^{2}$ , and Alessio Del Bue $^{1}$", + "bbox": [ + 267, + 210, + 735, + 246 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ PAVIS, Fondazione Istituto Italiano di Tecnologia (IIT), Genoa, IT $^{2}$ TeV, Fondazione Bruno Kessler (FBK), Trento, IT", + "3 Università di Trento, Trento, IT", + "4 Durham University, Durham, UK" + ], + "bbox": [ + 269, + 253, + 733, + 310 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. We propose 6DGS to estimate the camera pose of a target RGB image given a 3D Gaussian Splatting (3DGS) model representing the scene. 6DGS avoids the iterative process typical of analysis-by-synthesis methods (e.g. iNeRF) that also require an initialization of the camera pose in order to converge. Instead, our method estimates a 6DoF pose by inverting the 3DGS rendering process. Starting from the object surface, we define a radiant Ellicell that uniformly generates rays departing from each ellipsoid that parameterize the 3DGS model. Each Ellicell ray is associated with the rendering parameters of each ellipsoid, which in turn is used to obtain the best bindings between the target image pixels and the cast rays. These pixel-ray bindings are then ranked to select the best scoring bundle of rays, which their intersection provides the camera center and, in turn, the camera rotation. The proposed solution obviates the necessity of an \"a priori\" pose for initialization, and it solves 6DoF pose estimation in closed form, without the need for iterations. 
Moreover, compared to the existing Novel View Synthesis (NVS) baselines for pose estimation, 6DGS can improve the overall average rotational accuracy by $12\\%$ and translation accuracy by $22\\%$ on real scenes, despite not requiring any initialization pose. At the same time, our method operates near real-time, reaching 15fps on consumer hardware.", + "bbox": [ + 261, + 347, + 743, + 625 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 648, + 375, + 666 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Neural and geometrical 3D representations for Novel View Synthesis (NVS) have recently surged in popularity [18,33], and they have been quickly integrated into daily applications, e.g. mapping services [1]. The change in 3D representation creates new challenges on how to solve classical problems, such as 6D pose estimation, and on how to leverage NVS implicit advantages [25,29,34,44,46].", + "bbox": [ + 212, + 680, + 787, + 757 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The method of iNeRF [46] pioneered 6D pose estimation using an NVS model by proposing an iterative analysis-by-synthesis, as illustrated in the left panel of Fig. [1]. Given a nearby pose initialization (iteration $\\# 1$ ), the NVS model", + "bbox": [ + 212, + 757, + 787, + 805 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Project page: https://mbortolon97.github.io/6dns/", + "bbox": [ + 230, + 811, + 602, + 825 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Corresponding author: mbortolon@fbk.eu", + "bbox": [ + 230, + 825, + 514, + 839 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ffacea6ed92893f6bd426a80d4c0a448fb631637e9ed44f41c233b4e1d2efc9f.jpg", + "image_caption": [ + "Fig. 1: Our 6DGS method introduces a novel approach to 6DoF pose estimation, departing from conventional analysis-by-synthesis methodologies. Standard NeRF-based methods (left) employ an iterative process, rendering candidate poses and comparing them with the target image before updating the pose, which often results in slow performance and limited precision. In contrast, 6DGS (right) estimates the camera pose by selecting a bundle of rays projected from the ellipsoid surface (a radiant Ellicell) and learning an attention map to output ray/image pixel correspondences (based on DINOv2). The optimal bundle of rays should intersect the optical center of the camera and then are used to estimate the camera rotation in closed-form. Our 6GDS method offers significantly improved accuracy and speed, enabling the recovery of the pose within a one-shot estimate." + ], + "image_footnote": [], + "bbox": [ + 217, + 146, + 785, + 313 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "is used to render the image related to the initial pose. Then iteratively, the rendered image is compared with the target image using a photometric loss, and the initial pose guess is updated so that the two views achieve the best image overlap at the final step (iteration $\\# N$ ). The authors in iNeRF [46] use the popular NeRF [33] NVS model where backpropagation updates every new pose guess. This procedure leverages the remarkable NeRF capabilities in synthesizing realistic novel views, however, at the computational expense of synthesizing a newly rendered image at each iteration. 
This limitation restricts iNeRF to offline use while requiring a close initial pose estimate for a successful convergence.", + "bbox": [ + 212, + 502, + 787, + 638 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recent works in 3D Gaussian Splatting (3DGS) [18,28,45] are an alternative to Neural NVs models, providing fast rendering speed through the use of explicit geometric primitives that do not require the optimization of a neural network. 3DGS represents a 3D scene as a set of ellipsoids paired with photometric information, such as color and opacity. The ellipsoids are first initialized using Structure from Motion (SfM), and then they are optimized to reduce the photometric error between the rasterized ellipsoids and a set of known images. During the rasterization stage, the 3DGS model is projected onto the image plane as ellipses and for each pixel the algorithm computes its photometric contribution.", + "bbox": [ + 212, + 640, + 787, + 777 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "By leveraging the 3DGS model properties, we design a novel 6DoF pose estimation method (6DGS) that surpasses the limitations of NeRF-based iterative approaches. 6DGS does not require any pose initialization, and it estimates the camera translation and rotation without an iterating analysis-by-synthesis walk", + "bbox": [ + 212, + 779, + 787, + 839 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "M. Bortolon et al.", + "bbox": [ + 271, + 114, + 393, + 126 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "through. This is a key factor for achieving near real-time performance (15fps), also due to the quick rendering capabilities of 3DGS. The right panel of Fig. 1 presents the gist of our approach for 6DoF pose estimation. If we knew the camera pose, the first NVS step of 3DGS would be to project the ellipsoid centers onto the image plane. Practically, this is a ray casting through the camera's optical center. Our 6DGS attempts to invert this process and, by doing so, to estimate the camera pose. If the target image camera pose is unknown, and thus neither where the optical center is, we are unable to cast the single ray from each ellipsoid that passes through the correct target image pixels. For this reason, instead, we radiate uniformly distributed rays from each ellipsoid through the introduction of a novel casting procedure named Ellicell. Only one radiated ray per ellipsoid would be accurate, i.e., the one that renders the pixel photometrically by projecting the correct ellipse onto the target image plane.", + "bbox": [ + 212, + 146, + 787, + 343 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Now, the 6DGS problem is to select, given all the casted rays from the Ellicells, the correct bundle of rays that can generate most of the target image pixels with high confidence. This selection stage is addressed by binding pixels and rays through the learning of an attention map. Notice that this step is also unsupervised, as it leverages the known camera poses and images used to compute the 3DGS model to obtain the pixel and ray pairs used for training. After the bundle of rays is selected, the intersection of these rays identifies the camera center, which is solved using weighted Least Squares (wLS), with the weights being the scores from the previous selection stage. 
After the optical center is estimated, the optical axis can be used to obtain the camera rotation degrees of freedom from the rays bundle, thus solving the 6DoF pose. By design, 6DGS eliminates the need for an initial camera pose, which is one of the limitations of analysis-by-synthesis pose estimation methods [34,44,46], as well as the tendency to converge to local minima during the iteration procedure, especially if the initial pose is initialized far from the optimal position.", + "bbox": [ + 212, + 345, + 787, + 571 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We evaluate 6DGS on datasets featuring real-world objects and scenes, comparing against the current NVS state-of-the-art approaches such as iNeRF [46], Parallel iNeRF [25] and NeMO + VoGE [44]. Our experimental results show that 6DGS is competitive, especially if the initial pose is not provided \"a priori\". Finally, we achieve near real-time 6DoF pose estimation on consumer hardware, which is one rather challenging limitation in the practical application of NVS-based approaches for camera pose estimation. To summarize, 6DGS contributions are threefold:", + "bbox": [ + 212, + 574, + 787, + 693 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Our approach for 6DoF camera pose estimation eliminates the need for an initial camera pose and iterations to converge, which is typically required in analysis-by-synthesis approaches;", + "- 6DGS employs a novel ray casting pipeline, i.e. Ellicell, and an attention-based mechanism that efficiently matches pixel-level image information with 3DGS ellipsoids:", + "- The proposed method is state-of-the-art in the NVS benchmarks for camera pose estimation both for accuracy and real-time performance." + ], + "bbox": [ + 223, + 714, + 782, + 837 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model", + "bbox": [ + 277, + 114, + 732, + 128 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 785, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related works", + "text_level": 1, + "bbox": [ + 215, + 143, + 390, + 160 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We review relevant works on 6DoF camera pose estimation based on Neural Radiance Fields (NeRF) models, ellipsoid-based approaches, and correspondence matching methods that are related to key components of 6DGS.", + "bbox": [ + 212, + 175, + 785, + 220 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Pose estimation from neural radiance fields. iNeRF [46] pioneered NeRF-based 6D camera pose estimation, using iterative alignment of target and rendered images based on photometric error. However, iNeRF is prone to local minima in the optimization function, leading to recent developments like Parallel iNeRF [25], which employs parallel optimization of multiple candidate poses. While these approaches rely on NeRF-based models, $\\mathrm{NeMo + VoGe}$ [43,44] have explored 6D camera pose estimation using object models based on volumetric Gaussian reconstruction kernels as geometric primitives. The rendering strategy (VoGE) differs from 3DGS as it is based on ray marching. Therefore, $\\mathrm{NeMo + VoGe}$ iteratively aligns learned features from target and rendered images. Notably, $\\mathrm{NeMo + VoGe}$ 's training requires multiple objects, in contrast to our method, which leverages a single object 3DGS model. 
Alternatively, CROSS-FIRE [34] addresses the local minima issue by integrating learned local features, which describes not only the visual content but also the 3D location of the scene in the NeRF model. Despite these advancements, analysis-by-synthesis approaches often struggle with inefficient pose updates due to the nature of the optimization refinement and the dependence on accurate initial pose priors. These factors can limit their real-world applicability. Recently, IFFNeRF [6] utilized a method that inverts the NeRF model to re-render an image to match a target one. However, unlike our approach, it does not consider the specificities of 3DGS, which include ellipsoid elongation and rotation, and their non-uniform distribution across the scene surface.", + "bbox": [ + 212, + 220, + 787, + 551 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Pose estimation from ellipsoids. Recovery of the camera pose from ellipsoids has been explored for both SfM [8,9,12-14,37] and SLAM [11,16,21,24,32,47] scenarios, where methods frequently recover the object's ellipsoid representation as well as the camera 6DoF. Such approaches typically solve linear systems to recover the pose, most commonly minimizing a loss of the projection to and from an object detection. However, this methodological framework often presents limitations when confronted with large numbers of ellipsoids, as they are more indicated for handling few large ellipsoids that model a single object occupancy, 3D position and orientation.", + "bbox": [ + 212, + 553, + 787, + 686 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Correspondences Matching. In traditional 6DoF image matching, feature-based approaches are used, which often rely on hand-crafted features, e.g., SIFT [27] or more recent deep approaches such as SuperGlue [36] and Transformer [19]. SuperGlue utilizes a Graph Neural Network (GNN) for feature attention and Sinkhorn [39] for matching, while LightGlue replaces the GNN with a lightweight transformer. Unlike these, Transformer [19] performs global match-to-match attention, allowing for accurate match localization. In addition, there is a body of work around feature equivariance [22,23] for improving the robustness of matching. However, these methods rely on the hypothesis that both feature sets exist in a homogeneous feature space, i.e. extracted from the image,", + "bbox": [ + 212, + 688, + 787, + 840 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "M. Bortolon et al.", + "bbox": [ + 271, + 114, + 393, + 126 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "while in 6DGS we have the specific problem to match pixel to rays emitted from the Ellicells. Therefore, we rely on the proposed attention model to handle these ray-to-pixel bindings. OnePose++ [15] instead adopts a multi-modal approach matching a point cloud with an image. Another proposed alternative is to regress directly the pose parameters, as in CamNet [10]. Nevertheless, these approaches require a large amount of training data ( $\\approx$ 500 or more images), sometimes across multiple scenes and, like with CamNet, these need to be available also at inference time. 
6DGS however, requires only $\\approx$ 100 or less images, which are utilized only once during training.", + "bbox": [ + 212, + 146, + 787, + 282 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 Preliminaries", + "text_level": 1, + "bbox": [ + 215, + 305, + 382, + 320 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We first review 3D Gaussian Splatting (3DGS) [18] to understand the underlying principles and provide the mathematical formalization of the model. 3DGS objective is to synthesize novel views of a scene by optimizing the position, the orientation and the color of a set of 3D Gaussians approximated as ellipsoids $\\mathcal{Q} = \\{\\mathbf{Q}\\}_{i=1}^{K}$ from a given set of input images $\\mathcal{I} = \\{\\mathbf{I}\\}_{i=1}^{J}$ and their corresponding camera projection matrices $\\mathcal{P} = \\{\\mathbf{P}\\}_{i=1}^{J} \\in \\mathbb{R}^{3 \\times 4}$ . A point $\\mathbf{d}$ for being on the surface of an ellipsoid must satisfy the equation $(\\mathbf{d} - \\mathbf{x}) \\boldsymbol{\\Sigma} (\\mathbf{d} - \\mathbf{x})^T = 1$ , where $\\mathbf{x} \\in \\mathbb{R}^3$ is the ellipsoid center and $\\boldsymbol{\\Sigma} \\in \\mathbb{R}^{3 \\times 3}$ its covariance matrix. We can further decompose the covariance of the ellipsoid $\\boldsymbol{\\Sigma}$ as:", + "bbox": [ + 212, + 335, + 787, + 472 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\Sigma} = \\mathbf {R} \\mathbf {U} \\mathbf {U} ^ {T} \\mathbf {R} ^ {T}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 442, + 483, + 784, + 500 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathbf{R} \\in \\mathbb{R}^{3 \\times 3}$ is the ellipsoid rotation matrix and $\\mathbf{U}^{3 \\times 3}$ denotes the scaling matrix. The projection matrix $\\mathbf{P} \\in \\mathbb{R}^{3 \\times 4}$ allows the projection of the ellipsoid $\\mathbf{Q}$ onto the image plane generating the corresponding ellipse representation:", + "bbox": [ + 212, + 510, + 787, + 556 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\check {\\mathbf {y}} = \\mathbf {P} \\check {\\mathbf {x}} ^ {T}, \\check {\\mathbf {E}} = \\mathbf {P} \\boldsymbol {\\Sigma} \\mathbf {P} ^ {T}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 418, + 566, + 784, + 583 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\mathbf{y} \\in \\mathbb{R}^2$ and $\\breve{\\mathbf{y}} \\in \\mathbb{R}^3$ correspond to the Euclidean and homogeneous coordinates of the ellipse center point. The homogeneous coordinates $\\breve{\\mathbf{y}}$ originate from the projection of the corresponding ellipsoid center in the homogeneous coordinates $\\breve{\\mathbf{x}} \\in \\mathbb{R}^4$ . The matrix $\\breve{\\mathbf{E}} \\in \\mathbb{R}^{3 \\times 3}$ is the ellipse covariance in homogeneous space. The covariance of the ellipse $\\mathbf{E} \\in \\mathbb{R}^{2 \\times 2}$ , is derived by selecting only the first two rows and columns of $\\breve{\\mathbf{E}}$ and dividing by the last element on $\\breve{\\mathbf{E}}$ diagonal.", + "bbox": [ + 212, + 594, + 787, + 685 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The splatted ellipses, denoted as $\\mathcal{B} = \\{\\langle \\mathbf{y},\\mathbf{E}\\rangle \\}_{i = 1}^{K}$ , generate a pixel color with the rendering function $\\phi$ using rasterization techniques [2,18]. The function $\\phi$ acts independently on every single pixel of the image $\\mathbf{p}$ . 
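To make Eqs. (1)-(2) concrete, the following is a minimal numpy sketch of the ellipsoid covariance and its splat onto the image plane. The 4x4 homogeneous embedding of Sigma (so that the 3x4 projection matrix can be applied on both sides) and all variable names are illustrative assumptions, not the 3DGS reference implementation.

```python
import numpy as np

def ellipsoid_covariance(R, scales):
    """Eq. (1): Sigma = R U U^T R^T, with U = diag(scales)."""
    U = np.diag(scales)
    return R @ U @ U.T @ R.T

def splat_ellipsoid(P, x, Sigma):
    """Eq. (2): project an ellipsoid (center x, covariance Sigma) to an image ellipse."""
    x_h = np.append(x, 1.0)            # homogeneous center
    y_h = P @ x_h                      # projected center in homogeneous coordinates
    y = y_h[:2] / y_h[2]               # Euclidean ellipse center

    Sigma_h = np.zeros((4, 4))
    Sigma_h[:3, :3] = Sigma            # homogeneous embedding so the 3x4 projection applies
    E_h = P @ Sigma_h @ P.T            # ellipse covariance in homogeneous space
    E = E_h[:2, :2] / E_h[2, 2]        # de-homogenised 2x2 covariance, as in the text
    return y, E
```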
The pixel value depends on the neighboring projected ellipses, taking into account their center points' distances to the pixel coordinates, as well as their orientations and scales. $\\phi$ assumes that the ellipses are ordered based on the depth, so they should be sorted. Formally, $\\phi$ can be expressed as:", + "bbox": [ + 212, + 685, + 787, + 792 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\phi (\\mathcal {B}, \\mathbf {p}) = \\sum_ {i = 1} ^ {K} \\rho_ {i} \\alpha_ {i} e ^ {- \\tau (\\mathcal {B} _ {i}, \\mathbf {p})} \\gamma (i, \\mathbf {p}), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 801, + 784, + 842 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model", + "bbox": [ + 277, + 113, + 732, + 128 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 785, + 126 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/354bcc52e0ce0e23e9c07e9fe0d38c9d9a85d1f2bf00ba1e98cb62059859d6d2.jpg", + "image_caption": [ + "Fig. 2: The figure illustrates the pipeline of our 6DGS methodology. The image is encoded using a visual backbone (a). Concurrently, rays are uniformly projected from the center of the 3DGS ellipsoids (b), and their corresponding color is estimated. Subsequently, an attention map mechanism is employed to compare the encoded ray and image features (c). Following this comparison, the $N_{top}$ matches are selected via attenuation, and the camera location is estimated (d) as the solution of a weighted Least Squares problem, resulting in a distinct 6DoF pose for the image." + ], + "image_footnote": [], + "bbox": [ + 236, + 143, + 767, + 332 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\rho$ and $\\alpha$ represent the color and opacity attributes associated with the ellipsoid, which are inherited by the splatted ellipse. Similar to the volumetric rendering equation in NeRF, $\\gamma$ denotes the inverse of the volume density accumulated up to the $i^{th}$ ellipse on pixel $\\mathbf{p}$ and is defined as:", + "bbox": [ + 212, + 450, + 787, + 510 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\gamma (i, \\mathbf {p}) = \\prod_ {j = 1} ^ {i - 1} \\left(1 - \\alpha_ {j} e ^ {- \\tau \\left(\\mathcal {B} _ {i}, \\mathbf {p}\\right)}\\right). \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 521, + 785, + 564 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The purpose of $\\tau$ is to determine the light absorption by the ellipse when represented as a 2D Gaussian. Light absorption depends on the orientation and distance between the ellipse center, denoted as $\\mathbf{y}$ , and the pixel location, expressed as $\\mathbf{d} = \\mathbf{p} - \\mathbf{y}$ . Consequently, we can formally define $\\tau$ as:", + "bbox": [ + 212, + 574, + 787, + 636 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\tau (\\mathbf {B}, \\mathbf {p}) = \\frac {1}{2} \\left(\\mathbf {1} _ {2} \\mathbf {d} ^ {T} \\mathbf {E} \\mathbf {d} \\mathbf {1} _ {2} ^ {T}\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 643, + 785, + 674 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\mathbf{1}_2\\in \\mathbb{R}^2$ denotes a vector filled with ones. 
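Below is a hedged sketch of the per-pixel rendering function phi of Eqs. (3)-(5): front-to-back alpha compositing over depth-sorted splatted ellipses. The (center, covariance, color, opacity) tuple layout is assumed for illustration, and tau is transcribed as printed in Eq. (5).

```python
import numpy as np

def tau(y, E, p):
    """Eq. (5): quadratic falloff of a splatted ellipse (center y, covariance E) at pixel p."""
    d = p - y
    return 0.5 * d @ E @ d

def render_pixel(ellipses, p):
    """Eqs. (3)-(4): front-to-back compositing; `ellipses` must already be depth-sorted."""
    color = np.zeros(3)
    transmittance = 1.0                          # gamma(i, p): product of (1 - alpha_j e^{-tau})
    for y, E, rho, alpha in ellipses:
        a = alpha * np.exp(-tau(y, E, p))
        color += rho * a * transmittance         # rho_i alpha_i e^{-tau} gamma(i, p)
        transmittance *= 1.0 - a
    return color
```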
Following the processing of all pixels onto the image plane, the rendering function $\\phi$ generates an image $\\hat{\\mathbf{l}}\\in \\mathbb{R}_{+}^{H\\times W}$ , where $W$ and $H$ represent the width and height of the image.", + "bbox": [ + 212, + 684, + 787, + 732 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 Our approach", + "text_level": 1, + "bbox": [ + 214, + 753, + 387, + 771 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Overview", + "text_level": 1, + "bbox": [ + 214, + 784, + 341, + 797 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6DGS estimates the camera pose $\\hat{\\mathbf{P}}\\in \\mathbb{R}^{3\\times 4}$ , given a target image $\\mathbf{I}_t$ and a set of ellipsoids $\\mathcal{Q}$ from a pre-computed 3DGS model (Fig. 2). To solve for the", + "bbox": [ + 212, + 808, + 785, + 842 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 116, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "M. Bortolon et al.", + "bbox": [ + 271, + 114, + 393, + 126 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/1aca5089144560451b0ae3c65e1a0197a3a157fd7bffac263bc1dc30722b0845.jpg", + "image_caption": [ + "(a) Ellicell components" + ], + "image_footnote": [], + "bbox": [ + 261, + 146, + 431, + 229 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/860e5082e321dc375ebb1202e6057a515c6539940ee47e212935a4d91ba13347.jpg", + "image_caption": [ + "(b) 3D Ellicell grid" + ], + "image_footnote": [], + "bbox": [ + 450, + 162, + 576, + 231 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/084ec0cf5a3cc2c050e24018fe7040088ba4d4b8c8a6480d161b864c025bc86a.jpg", + "image_caption": [ + "(c) 3D radiant Ellicell", + "Fig. 3: The illustration depicts the three primary stages involved in the radiant Ellicell generation. Firstly, (a) delineates the formulation of components required to compute the geometric information for each cell. Secondly, (b) shows the resulting Ellicell grid positioned on the surface of the ellipsoid along with their respective center points. Finally, (c) demonstrates the generation of rays originating from the center point of the ellipsoid going through the Ellicell center." + ], + "image_footnote": [], + "bbox": [ + 607, + 159, + 741, + 233 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "camera pose, we propose a casting method from the ellipsoid's surface, called Ellicell, that divides it in equal area cells (Sec. 4.2). The ellipsoids cast a set of $N$ rays, denoted as $\\mathcal{V} = \\{\\langle \\mathbf{v}_o,\\mathbf{v}_d,\\mathbf{v}_c\\rangle \\}_{i = 1}^N$ , one for each of the generated cell (Fig. 3c). Each ray is identified by $i$ the origin $\\mathbf{v}_o\\in \\mathbb{R}^3$ , $ii$ the center point of each ellipsoid, $iii$ the direction $\\mathbf{v}_d\\in \\mathbb{R}^3$ originating from the ellipsoid center to the cell center and through the space, and $iv$ the color information $\\mathbf{v}_c\\in \\mathbb{R}^3$ as RGB values. We synthesize the rays' color using the 3DGS rendering function $\\phi$ (Eq. 3). A subset of these rays, depending on the view perspective, may intersect the camera's optical center. For binding the rays to the image pixels we compute the target image pixels features $\\psi (\\mathbf{I}_t)$ (Fig. 2a) and the rays features $\\psi (\\mathcal{V})$ (Fig. 2b). These features are used to identify the intersecting rays by using an attention map $\\mathcal{A}$ (Fig. 2c), see Sec. 4.4. 
The higher the attention value for a ray-pixel pair is, the more likely the intersection on the image plane is a valid one. Lastly, we determine $\\hat{\\mathbf{P}}_t$ (Fig. 2d) by computing the intersection point of rays using the weighted Least Squares algorithm (Sec. 4.5).", + "bbox": [ + 212, + 363, + 787, + 592 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 Radiant Ellicell", + "text_level": 1, + "bbox": [ + 215, + 609, + 390, + 625 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We create rays spanning in every direction allowing 6DGS to recover the camera pose. We introduce the concept of radiant Ellicell for generating rays that uniformly emanate from the ellipsoid surface, as illustrated in Fig. 3. Ellicell generation is deterministic [5,31] and achieves higher precision with fewer rays [17,42] compared to other sampling methods like Monte-Carlo [30].", + "bbox": [ + 212, + 633, + 787, + 710 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "First, we compute the area of each Ellicell. This is achieved by calculating the ellipsoid surface area, using a computationally efficient approach, namely Ramanujan approximation 3:", + "bbox": [ + 212, + 709, + 787, + 758 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nh = 4 \\pi \\left(\\frac {(a b) ^ {1 . 6} + (a c) ^ {1 . 6} + (b c) ^ {1 . 6}}{3}\\right) ^ {\\frac {1}{1 . 6}}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 354, + 763, + 785, + 804 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $a, b, c = \\text{diag}(\\mathcal{S})$ are the ellipsoid axis scales. Each Ellicell cell's target area equals $\\mu = h / G$ , with $G$ being the number of cells dividing each ellipsoid.", + "bbox": [ + 214, + 810, + 785, + 840 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model", + "bbox": [ + 277, + 114, + 732, + 128 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 116, + 785, + 126 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Approximating each cell as a square with side $z = \\sqrt{\\mu}$ we slice the ellipsoids along the major axis into ribbons, each as wide as $z$ (Fig. 3a). The extremity of each ribbon is called a ring. The total number of rings is $e = \\lfloor \\kappa(a, b) / (2z) \\rfloor \\in \\mathbb{N}$ , where $\\kappa(a, b)$ computes the ring perimeter. Ignoring ellipsoid's rotation, we compute the ring perimeter by treating them as 2D ellipses, thus defining $\\kappa(a, b)$ as:", + "bbox": [ + 215, + 146, + 787, + 218 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\kappa (a, b) = \\pi \\left((a + b) + \\frac {3 (a - b) ^ {2}}{1 0 (a + b) + \\sqrt {a ^ {2} + 1 4 a b + b ^ {2}}}\\right). \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 323, + 228, + 785, + 260 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Given the total number of rings $e$ it is possible to compute the ribbon's centerline geometric parameters. In particular, we compute the scale parameter of each ribbon as:", + "bbox": [ + 214, + 268, + 787, + 313 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\varrho (n, \\Delta r, a, b) = \\sqrt {1 - \\frac {(0 . 
5 \\Delta r + n \\Delta r - a) ^ {2}}{b ^ {2}}} \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 349, + 324, + 785, + 357 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\varDelta r=a/e$ is the distance between two consecutive rings. This equation derives from the manipulation of the standard ellipse equation. While ribbon size $z$ should be equal to $\\varDelta r$ , these two values will likely differ due to the need for the number of rings being a natural number. Eq. 8 is also used to compute the other ribbon scaling parameter by replacing $b$ with $c$ . $\\varrho$ is then used to compute the number of cells inside each ribbon as:", + "bbox": [ + 214, + 367, + 787, + 457 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\xi (n, e, a, b, c) = \\left\\lfloor \\frac {\\kappa (\\varrho (n , e , a , b) , \\varrho (n , e , a , c))}{z} \\right\\rfloor , \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 338, + 467, + 785, + 501 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\xi$ is the number of cells inside the ring. We compute the center of each cell, equally spaced along the ribbon's centerline, by sampling $\\xi$ points along it. This is challenging as the perimeter distance does not linearly correlate with the $x$ and $y$ variations. However, we can solve this by using a statistical method. Knowing a distribution's Cumulative Distribution Function (CDF) allows us to sample uniformly between 0 and 1 and then use the CDF inverse to map the sample to the distribution space. This approach applies to our case, where samples are distributed as follows:", + "bbox": [ + 214, + 510, + 787, + 628 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nd s ^ {2} = d x ^ {2} + d y ^ {2}, \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 442, + 631, + 784, + 648 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "and, by taking its inverse, we can retrieve the coordinates of each cell center. To simplify the equations, we define $r = \\varrho(n, e, a, b)$ and $w = \\varrho(n, e, a, c)$ to indicate the scale of the ellipse under consideration. Then we express Eq. [10] in polar coordinates to simplify the differentiation:", + "bbox": [ + 214, + 655, + 787, + 715 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\frac {d s}{d \\theta} = \\sqrt {r ^ {2} \\sin^ {2} \\theta + w ^ {2} \\cos^ {2} \\theta}, \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 728, + 785, + 756 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "then, we can express the set of points on the perimeter of the ribbon centerline as an angular position in the polar coordinate system as:", + "bbox": [ + 214, + 766, + 787, + 796 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\theta^ {\\prime} = \\left(\\frac {d s}{d \\theta}\\right) ^ {- 1} \\left(g \\cdot \\frac {1}{\\xi (n , e , a , b , c)}\\right), \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 379, + 808, + 785, + 844 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "M. Bortolon et al.", + "bbox": [ + 271, + 114, + 393, + 127 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "with $g$ being the cell identifier. 
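The Ellicell bookkeeping of Eqs. (6)-(9) can be sketched as follows. The formulas are transcribed as printed; the zero clamps are numerical guards added here for degenerate ribbons, and G is the free per-ellipsoid cell budget mentioned in the text.

```python
import numpy as np

def ellipsoid_area(a, b, c, p=1.6):
    """Eq. (6): Ramanujan-style approximation of the ellipsoid surface area."""
    return 4.0 * np.pi * (((a * b) ** p + (a * c) ** p + (b * c) ** p) / 3.0) ** (1.0 / p)

def ellipse_perimeter(a, b):
    """Eq. (7): Ramanujan approximation of an ellipse perimeter (0 for degenerate ribbons)."""
    if a + b == 0.0:
        return 0.0
    return np.pi * ((a + b) + 3.0 * (a - b) ** 2 /
                    (10.0 * (a + b) + np.sqrt(a ** 2 + 14.0 * a * b + b ** 2)))

def ribbon_scale(n, dr, a, axis):
    """Eq. (8) as printed, clamped at zero as a numerical guard."""
    return np.sqrt(max(1.0 - (0.5 * dr + n * dr - a) ** 2 / axis ** 2, 0.0))

def cells_per_ring(n, e, a, b, c, z):
    """Eq. (9): number of cells of side z along the n-th ribbon centerline."""
    dr = a / e
    return int(ellipse_perimeter(ribbon_scale(n, dr, a, b), ribbon_scale(n, dr, a, c)) // z)

# Usage: split one ellipsoid with semi-axes (a, b, c) into roughly G cells.
a, b, c, G = 2.0, 1.0, 0.5, 100
z = np.sqrt(ellipsoid_area(a, b, c) / G)         # target cell side, z = sqrt(mu)
e = int(ellipse_perimeter(a, b) // (2.0 * z))    # number of rings, e = floor(kappa / 2z)
counts = [cells_per_ring(n, e, a, b, c, z) for n in range(e)]
```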
Given $\\theta'$ we can use it inside the ellipse equation in polar coordinates to obtain the 3D position of each cell center:", + "bbox": [ + 212, + 146, + 782, + 176 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {u} = \\left( \\begin{array}{c} w \\cos \\left(\\theta^ {\\prime}\\right) \\\\ g \\sin \\left(\\theta^ {\\prime}\\right) \\\\ - a + n \\Delta r \\end{array} \\right). \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 431, + 188, + 785, + 234 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.3 Ray generation", + "text_level": 1, + "bbox": [ + 215, + 253, + 388, + 268 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Once we have divided each ellipsoid of the 3DGS model into equidistant cells, we cast the rays originating from the center point of the ellipsoid i.e. $\\mathbf{v}_o = \\mathbf{x}$ and oriented towards the Ellicell center $\\mathbf{v}_d = \\mathbf{u} - \\mathbf{x}$ . We reduce the number of potential rays cast from each ellipsoid by considering only the rays oriented in the same hemisphere as the estimated surface normal of the ellipsoid. We obtain the surface normals by treating the ellipsoid centroids as a point cloud, and the surface normal is estimated using the nearby points [41].", + "bbox": [ + 212, + 277, + 787, + 383 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Finally, each ray has also been associated with the color information $\\mathbf{v}_c$ , which we compute through the same pixel-level approach of 3DGS (Eq. 5). We note that the application of the volumetric rendering function of Eq. 5 produces a single pixel for each ray. The generated rays represent a collection of potential hypotheses, meaning that a subset of them will intersect the target image $\\mathbf{I}_t$ .", + "bbox": [ + 212, + 383, + 787, + 460 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.4 Binding by attenuation of rays to image", + "text_level": 1, + "bbox": [ + 215, + 481, + 591, + 498 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Given all the cast rays $\\mathbf{v}$ , we identify a subset of $\\mathbf{v}$ correlating with the target image $\\mathbf{I}_t$ . A learned attention map $\\mathcal{A}$ assigns scores $\\hat{\\mathbf{s}}$ based on the highest correlation to image pixels; higher similarity results in higher scores. Based on scores $\\hat{\\mathbf{s}}$ , we select the top candidate's rays $(N_{top})$ that present maximal association and use them to recover the pose $(\\hat{\\mathbf{P}}_t)$ .", + "bbox": [ + 212, + 506, + 787, + 583 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To select rays with similar appearance and position, we use a Multi-Layer Perceptron (MLP) defined as $\\mathbf{V} = \\psi (\\mathbf{v})$ , where $\\mathbf{V}\\in \\mathbb{R}^{N\\times C}$ with $C$ being the feature size and $N$ the overall number of rays. The MLP input is enriched by incorporating Positional Encoding that maps the data in the Fourier domain [40] to better distinguish between similar data.", + "bbox": [ + 212, + 583, + 787, + 657 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We generate features from $\\mathbf{I}_t$ using DINOv2 [35] as a pre-trained backbone feature extractor. This results in a set of features $\\mathbf{F}_t \\in \\mathbb{R}^{M \\times C}$ , where $M = W \\times H$ . Both the image and ray features sets are processed by a single attention module $\\mathcal{A}(\\mathbf{V}_f, \\mathbf{F}_t) \\in \\mathbb{R}^{M \\times N}$ producing a set of scores. 
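A minimal PyTorch sketch of the ray-to-pixel binding is shown below, with ray features as queries and DINOv2 pixel features as keys. Layer widths, the MLP depth, and the omission of the Fourier positional encoding are simplifications on our side; the softmax axis (over rays, per pixel) is our reading of the normalisation that makes the per-ray score comparable with Eq. (14).

```python
import torch
import torch.nn as nn

class RayPixelAttention(nn.Module):
    """Scores each candidate ray against the target image, as in Sec. 4.4."""

    def __init__(self, ray_dim, img_dim, c=256):
        super().__init__()
        # psi(v): encodes <origin, direction, color>; the Fourier positional encoding
        # mentioned in the text is omitted here for brevity.
        self.ray_mlp = nn.Sequential(nn.Linear(ray_dim, c), nn.ReLU(), nn.Linear(c, c))
        self.q = nn.Linear(c, c)        # queries from ray features
        self.k = nn.Linear(img_dim, c)  # keys from image (e.g. DINOv2) features

    def forward(self, rays, img_feats):
        """rays: (N, ray_dim); img_feats: (M, img_dim) -> per-ray scores s_hat of shape (N,)."""
        V = self.ray_mlp(rays)
        logits = self.k(img_feats) @ self.q(V).T                 # (M, N) pixel-to-ray affinities
        A = torch.softmax(logits / V.shape[-1] ** 0.5, dim=-1)   # each pixel spreads one unit over rays
        return A.sum(dim=0)                                      # s_hat_j = sum_i A_ij
```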
Inside the attention module the ray features, $\\mathbf{V}$ , are used as queries and the image features, $\\mathbf{F}_t$ , as a key. We optimize the attention map by summing along the rows and converting it into a per-ray correlation score as follows $\\hat{\\mathbf{s}} = \\sum_{i=1}^{M} \\mathcal{A}_i$ . The higher the score value given by $\\hat{\\mathbf{s}}$ , the better the association between the rays and image pixels. At test-time we select the $N_{top}$ rays with the highest ranking scores.", + "bbox": [ + 212, + 657, + 787, + 794 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Because a ray and an image pixel should be associated with each other based on the distance between the camera origin and its projection onto the corresponding ray, we supervise the predicted scores $\\hat{\\mathbf{s}}$ using the same images used to", + "bbox": [ + 212, + 795, + 787, + 840 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model", + "bbox": [ + 277, + 113, + 732, + 128 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 114, + 785, + 126 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "estimate the 3DGS model at training time. We compute the projection of the point on the line as $l = \\max((\\mathbf{O} - \\mathbf{v}_o)\\mathbf{v}_d, 0)$ , where $\\mathbf{O}$ is the camera position, $\\mathbf{v}_o$ the generated ray origin and $\\mathbf{v}_d$ the corresponding direction. Rays are infinite only in one direction, so we restrict $l \\in \\mathbb{R}^+$ using the max operator. Then, we can compute the distance between the camera origin and its projection on the ray as $\\mathbf{h} = \\| (\\mathbf{v}_o + l\\mathbf{v}_d) - \\mathbf{O}\\|_2$ . The value $\\mathbf{h}$ can span from 0 to $+\\infty$ , with 0 indicating a ray that passes through the camera's optical center. We map distances to the attention map score using:", + "bbox": [ + 212, + 146, + 787, + 268 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\delta = 1 - \\tanh \\left(\\frac {\\mathbf {h}}{\\lambda}\\right), \\mathbf {s} = \\delta \\frac {M}{\\sum \\delta}, \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 273, + 785, + 310 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "where $\\lambda$ regulates the number of rays to assign to a specific camera. Lastly, the softmax inside the attention map computation requires we normalize the ground truth scores. We use the $L2$ loss to minimize the difference between the predicted $\\hat{\\mathbf{s}}$ and the computed ground truth $\\mathbf{s}$ scores as:", + "bbox": [ + 212, + 314, + 787, + 376 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\frac {1}{M N} \\sum_ {i = 1} ^ {M} \\sum_ {j = 1} ^ {N} \\| \\hat {\\mathbf {s}} _ {i, j} - \\mathbf {s} _ {i, j} \\| _ {2}, \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 381, + 785, + 425 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "where $M, N$ are the size of the attention map $\\mathcal{A}$ . 
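The training signal of Eqs. (14)-(15) can be sketched as follows; lam is the spread hyper-parameter lambda from Eq. (14), and the tensor shapes are assumptions for illustration.

```python
import torch

def ground_truth_scores(ray_o, ray_d, O, M, lam=1.0):
    """Eq. (14): ray_o, ray_d are (N, 3) with unit directions, O the known camera center,
    M the number of image features, lam the spread hyper-parameter."""
    l = torch.clamp(((O - ray_o) * ray_d).sum(-1), min=0.0)         # projection length, l >= 0
    h = torch.linalg.norm(ray_o + l[:, None] * ray_d - O, dim=-1)   # camera-to-ray distance
    delta = 1.0 - torch.tanh(h / lam)
    return delta * M / delta.sum()                                  # normalised scores s

def score_loss(pred_s, gt_s):
    """Eq. (15): mean squared difference between predicted and ground-truth scores."""
    return torch.mean((pred_s - gt_s) ** 2)
```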
During each training iteration, we sample an image and its pose from those utilized for estimating the 3DGS model.", + "bbox": [ + 214, + 431, + 785, + 463 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.5 Test-time pose estimation", + "text_level": 1, + "bbox": [ + 215, + 483, + 475, + 498 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "During the test phase, the predicted scores $\\hat{\\mathbf{s}}$ are used to select the top $N_{top}$ rays, identified as the most relevant, and constrained to choose at most one ray per ellipsoid. Note that only a small set of rays is sufficient to estimate the camera pose. Based on an ablation study, we set $N_{top} = 100$, see Tab. 3a.", + "bbox": [ + 212, + 505, + 785, + 566 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The camera position is found at the intersection of selected rays, solved as a weighted Least Squares problem. Since 3D lines usually do not intersect at a single point due to discretization noise introduced by the Ellicell, we minimize the sum of squared perpendicular distances instead.", + "bbox": [ + 212, + 566, + 785, + 625 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "For each selected ray $\\mathbf{v}_f$ with $f = 1\\ldots N_{top}$ , the error is given by the squared distance from the camera position to be predicted, $\\hat{\\mathbf{O}}$ , to its projection on $\\mathbf{v}_f$ :", + "bbox": [ + 212, + 626, + 785, + 659 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\sum_ {f = 1} ^ {N _ {t o p}} \\left(\\left(\\hat {\\mathbf {O}} - \\mathbf {v} _ {o, f}\\right) ^ {T} \\left(\\hat {\\mathbf {O}} - \\mathbf {v} _ {o, f}\\right) - \\left(\\left(\\hat {\\mathbf {O}} - \\mathbf {v} _ {o, f}\\right) ^ {T} \\mathbf {v} _ {d, f}\\right) ^ {2}\\right), \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 313, + 666, + 785, + 710 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "where $\\mathbf{v}_{o,f}$ indicates the origin of the $f$ -th ray and $\\mathbf{v}_{d,f}$ the respective direction. To minimize Eq. 16 we differentiate it with respect to $\\hat{\\mathbf{O}}$ , resulting in", + "bbox": [ + 214, + 718, + 784, + 751 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\mathbf {O}} = \\sum_ {f = 1} ^ {N _ {t o p}} \\hat {\\mathbf {s}} _ {f} \\left(\\mathbb {I} - \\mathbf {v} _ {d, f} \\mathbf {v} _ {d, f} ^ {T}\\right) \\mathbf {v} _ {o, f}, \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 392, + 758, + 785, + 801 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "where $\\mathbb{I}$ is the identity matrix and $\\hat{\\mathbf{s}}_f$ are the predicted ray scores. This expression can be solved as a weighted system of linear equations.", + "bbox": [ + 214, + 809, + 785, + 839 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "M. Bortolon et al.", + "bbox": [ + 271, + 114, + 393, + 127 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/a2e9d3a579b184a61ff9612da0b6735f1eaa85d3cf3fb1991033710b046a579e.jpg", + "table_caption": [ + "Table 1: Evaluation of 6DoF pose estimation on the Mip-NeRF $360^{\\circ}$ dataset. We report results in terms of Mean Angular Error (MAE) and Mean Translation Error (MTE), expressed in degrees and units, $u$, respectively, where $1u$ is equal to the object's largest dimension. For both metrics lower is better. 
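The weighted Least Squares intersection behind Eqs. 16-17 above can be solved in closed form by assembling the normal equations; the NumPy sketch below is one standard way to do so and is not taken verbatim from the paper's implementation (the remaining rotation component, which the method recovers from the selected rays, is not shown). Function names and the toy data are illustrative.

```python
import numpy as np

def weighted_ray_intersection(v_o, v_d, s):
    """Estimate the camera center as the point minimizing the weighted sum of
    squared perpendicular distances to the selected rays (cf. Eqs. 16-17).
    v_o (N, 3) ray origins, v_d (N, 3) unit directions, s (N,) ray scores."""
    eye = np.eye(3)
    proj = eye[None] - v_d[:, :, None] * v_d[:, None, :]     # per-ray (I - d d^T)
    A = np.einsum("n,nij->ij", s, proj)                      # sum_f s_f (I - d d^T)
    b = np.einsum("n,nij,nj->i", s, proj, v_o)               # sum_f s_f (I - d d^T) o_f
    return np.linalg.solve(A, b)                             # normal equations A O = b

# toy check: rays that pass close to a known point recover it
rng = np.random.default_rng(0)
gt = np.array([0.5, -1.0, 2.0])
v_o = rng.normal(size=(100, 3))
v_d = gt - v_o + 0.01 * rng.normal(size=(100, 3))
v_d /= np.linalg.norm(v_d, axis=-1, keepdims=True)
scores = rng.uniform(0.5, 1.0, size=100)
print(weighted_ray_intersection(v_o, v_d, scores))           # approx. [0.5, -1.0, 2.0]
```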
Best-performing results are highlighted in bold and green, while second best values are highlighted in orange." + ], + "table_footnote": [], + "table_body": "
Fixed pose prior (eval. protocol by 46)Random pose priorNo pose prior
iNeRFiNeRF46NeMo + VoGE44Parallel iNeRF25iNeRF46NeMo + VoGE44Parallel iNeRF256DGS (Ours)
MAE ↓MTE ↓MAE ↓MTE ↓MAE ↓MTE ↓MAE ↓MTE ↓MAE ↓MTE ↓MAE ↓MTE ↓MAE ↓MTE ↓MAE ↓MTE ↓
Bicycle39.50.11643.80.01535.90.11676.60.217111.80.03844.40.15012.10.010
Bonsai51.30.22852.50.03641.10.22396.70.38598.90.07358.20.29810.50.038
Counter40.70.32445.60.07224.70.21270.30.48798.10.13942.10.43519.60.043
Garden31.00.12131.80.02618.20.09072.80.21089.20.03860.00.14437.80.015
Kitchen38.20.11341.60.04237.30.109100.20.266122.20.08265.00.19323.20.018
Room38.80.27444.90.04530.70.25791.60.444110.00.01063.50.27138.30.019
Stump21.40.03026.30.01614.80.01686.90.03596.30.02572.60.03328.30.009
Avg.37.30.17240.90.03628.90.14685.00.292103.80.05858.00.21824.30.022
", + "bbox": [ + 217, + 223, + 787, + 323 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "5 Results", + "text_level": 1, + "bbox": [ + 215, + 343, + 323, + 359 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "5.1 Experimental setup", + "text_level": 1, + "bbox": [ + 215, + 375, + 423, + 390 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We evaluate 6DGS and compare with other analysis-by-synthesis methods for 6D pose estimation, including iNeRF [46], Parallel iNeRF [25], and NeMo+VoGE [43, 44]. We reproduce the results using their published code. We follow iNeRF's evaluation protocol and test on two real-world datasets: Tanks & Temples [20] and Mip-NeRF $360^{\\circ}$ [4]. For each dataset, we use the predefined training-test splits and evaluate them with two pose initialization pipelines: $i)$ the original iNeRF initialization, where the starting pose is sampled uniformly between $[-40^{\\circ}, +40^{\\circ}]$ degrees of errors and $[-0.1, +0.1]$ units of translation error from the ground-truth target pose; $ii)$ by randomly choosing an initialization pose from the ones used to create the 3DGS mode. Although analysis-by-synthesis methods were tested with a prior, in reality it is rarely available, so we present a second scenario to assess them under more realistic conditions. We perform multiple ablation studies to assess the sensitivity of 6DGS to different hyperparameters and settings. We quantify pose estimation results in terms of mean angular (MAE) and translation (MTE) errors (see Tab. 1 and Tab. 2) and measure the inference time.", + "bbox": [ + 212, + 398, + 787, + 641 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Implementation Details. 6DGS is implemented in PyTorch and the attention map was trained for 1.5K iterations ( $\\sim$ 45mins) with an NVIDIA GeForce RTX 3090. We use the Adafactor optimizer [38] with weight decay of $10^{-3}$ . For speedup training, we uniformly sample 2000 3DGS ellipsoids at each iteration.", + "bbox": [ + 212, + 641, + 787, + 702 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "5.2 Datasets", + "text_level": 1, + "bbox": [ + 215, + 724, + 333, + 738 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "To demonstrate the applicability of 6DGS, we test on two datasets featuring real world challenges. **Tanks&Temples** [20] was created to evaluate 3D reconstruction methods with challenging real-world objects of varying sizes, acquired from human-like viewpoints and with difficult conditions (illumination, shadows, and reflections). We use the five scenes (Barn, Caterpillar, Family, Ignatius, Truck) and the train test splits given in [7,26]. The splits are object dependent, having", + "bbox": [ + 212, + 750, + 787, + 842 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model", + "bbox": [ + 277, + 114, + 732, + 128 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 116, + 782, + 126 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/3012aa449ba9918baf363c515df23ce67a411d53a3ad2ea18f243f23bbed3fa9.jpg", + "table_caption": [ + "Table 2: Evaluation of 6DoF pose estimation on the Tanks&Temples [20] dataset. We show the same metrics and analysis as in Table 1" + ], + "table_footnote": [], + "table_body": "
ObjectsFixed pose prior (eval. protocol by 46)Random pose priorNo pose prior
iNeRF 46NeMo + VoGE 44Parallel iNeRF 25iNeRF 46NeMo + VoGE 44Parallel iNeRF 256DGS (Ours)
MAE ↓MTE ↓MAE ↓MTE ↓MAE ↓MTE ↓MAE ↓MTE ↓MAE ↓MTE ↓MAE ↓MTE ↓MAE ↓MTE ↓
Barn26.50.20851.20.75222.90.13189.20.68292.50.68485.20.57230.30.162
Caterpillar42.90.16652.60.51625.20.13889.32.55990.52.55986.80.84314.50.027
Family42.80.79458.41.13022.90.50793.91.50597.01.50699.02.02820.60.468
Ignatius31.40.72351.21.19323.40.60484.11.48985.41.49186.91.32615.50.441
Truck31.60.37054.61.23629.40.35194.41.04297.71.04597.60.88327.50.242
Avg.35.00.45253.60.96524.70.34690.21.45592.61.45791.11.13021.70.268
", + "bbox": [ + 217, + 181, + 787, + 265 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "on average $\\approx 247$ training images $(87\\%)$ and $\\approx 35$ testing images $(12\\%)$ . Mip-NeRF $360^{\\circ}$ consists of seven scenes: two outdoors and four indoors, with a structured scenario and background. We use the original train-test splits at a ratio of 1:8. Following [25], we resize all the objects to fit inside a unit box. The translation error is relative to the object size, defined as a unit.", + "bbox": [ + 212, + 287, + 787, + 364 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5.3 Analysis", + "text_level": 1, + "bbox": [ + 215, + 388, + 333, + 404 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Quantitative Analysis: Tab. 1 and Tab. 2 present the results obtained across both datasets. 6DGS consistently outperforms baseline methods across all datasets and pose initialization pipelines. Notably, 6DGS achieves lower error rates than the second-best results, especially under identical comparison conditions (i.e., random pose prior). Even when initialized from a fixed pose proximal to the known camera, 6DGS still excels over baselines in most scenes. Parallel iNeRF demonstrates improvement over iNeRF across all tested scenarios, consistent with its reported enhancements, but both methods' performance drops with random initialization. Likewise, $\\mathrm{NeMo + VoGE}$ performs worst, especially with random pose prior due to the utilization of a smaller number of larger ellipsoids in their approach. In contrast, 6DGS leverages approximately 300,000 ellipsoids of varying sizes obtained via 3DGS, as opposed to their mesh-to-ellipsoid method, which utilizes only about 5,000 larger ellipsoids. This fundamental disparity in ellipsoid size and quantity is a crucial factor contributing to the performance difference. Additionally, 6DGS exhibits faster processing speeds, operating nearly in real-time at 15 frames per second (fps) compared to the 0.05fps of Parallel iNeRF and 0.16fps of iNeRF. Please refer to the supplementary material for the complete table on timings.", + "bbox": [ + 212, + 415, + 792, + 686 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Qualitative Analysis: Figure 4 illustrates qualitative findings revealing notable observations. Particularly, we notice that the estimated poses exhibit proximity to the object relative to ground truth, attributable to the quantization effect introduced by the Ellicell. The qualitative findings verify the quantitative outcomes, albeit occasional inconsistencies in results, such as in the Counter scene, with the analysis-by-synthesis approaches showcasing a total incoherent output in regards to the overall scene (notice how the estimated poses are completely off the target). Moreover, the performance of 6DGS demonstrates consistency across varied scenarios, encompassing single-object instances and indoor settings, despite substantial variations in the models utilized.", + "bbox": [ + 212, + 688, + 787, + 840 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "M. 
Bortolon et al.", + "bbox": [ + 271, + 114, + 393, + 126 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/13310f7eee6dc8d4b1bad5467d85fbff55008497deac449b82571e3f8d9c51fc.jpg", + "image_caption": [ + "Truck", + "Target image", + "Estimated NVS" + ], + "image_footnote": [], + "bbox": [ + 243, + 157, + 464, + 244 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/51049ae0156eb17b5a00d841750630afb800339bf63ae6bc11b1be9008aac152.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 254, + 253, + 349, + 296 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/50c6d3e729bc8ed1f50636857cc5361d711f4de464eb51e8131541dcb970d7be.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 377, + 253, + 449, + 296 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/13ec49e8e182d08ce184f3eb64387be68a5212b64deeb6419bb5644a5f663d81.jpg", + "image_caption": [ + "Family" + ], + "image_footnote": [], + "bbox": [ + 539, + 162, + 699, + 244 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/8136735097001b97815e0b8ac4f39707f6c490ca7d8360f39d3bc9ce3830fb80.jpg", + "image_caption": [ + "Target image" + ], + "image_footnote": [], + "bbox": [ + 545, + 255, + 642, + 296 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/39a8ed92b2fc622381ef0a80afe613e0ea5c6533e3aab3c4ed7082cfc9b5b8dc.jpg", + "image_caption": [ + "Estimated NVS" + ], + "image_footnote": [], + "bbox": [ + 650, + 253, + 743, + 296 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/fa23308f2b98af0b8ced5f9ed8a2a95eff6d216ad1fa09fce59f9ae9dc0d656d.jpg", + "image_caption": [ + "Counter", + "Target image", + "Estimated NVS" + ], + "image_footnote": [], + "bbox": [ + 254, + 316, + 473, + 404 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/6aa65dd389f7df608f16281eef0d4cbd6234809eb7a7951d334bc68520db6483.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 261, + 411, + 357, + 460 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/5ad4d8add8c2d8b4ef68e8887e708974801af9e44633d130bd164eb0352c42de.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 362, + 411, + 457, + 459 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/7f6a3a23ad870c0197c6411415fd8c0ee0ad55ad8f3ff31ce2f8da7438b3feb1.jpg", + "image_caption": [ + "Bonsai", + "Target image", + "Estimated NVS" + ], + "image_footnote": [], + "bbox": [ + 540, + 318, + 736, + 404 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/1b3c82462a897deaaf06b210b8778693ea46106f3d06a8016dea5ec849bcf72e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 537, + 411, + 633, + 460 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/3b6f0ae544df771fa6bce565993ea8a003cb284359d4d4a6f5d242b24f5b96e1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 638, + 411, + 733, + 460 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/eb872e69fea2e95491e7b9198300b6a6d3e301447ee3395d18f5f38325cace1a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 227, + 477, + 254, + 500 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/24fbd16aa5751e943e9bf8d21cbc5cc7c7e4da4bc1633952df7338ed9afd945b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 256, + 477, + 281, + 500 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": 
"images/bb4ff5db10655d3bca5c6394c0149a06a86f7717f4a651ba84bf97217aaa147b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 282, + 477, + 303, + 500 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/13f2b9b34dde64712afdf43686a02f00c7b42e4b5d1505576377925d6a815d18.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 305, + 478, + 331, + 489 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/2a0ec10c3ac048e38a42f3cdcdde3c5204ba0759ecaa3a936196eb720fa70631.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 307, + 489, + 331, + 498 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/27d37ab08d225afa573cdde677b4bc371d3ddeb06b7051c8ba454cb107eb115e.jpg", + "image_caption": [ + "#" + ], + "image_footnote": [], + "bbox": [ + 338, + 478, + 362, + 489 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/990a949f1333feeb91c3d837ff02c130b4db1c053ae6931abf01bb8a60de7e1e.jpg", + "image_caption": [ + "prior" + ], + "image_footnote": [], + "bbox": [ + 367, + 478, + 393, + 489 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/a8b5579e01770265e4875d125da47fedd52bf5131356aa4e4a100ec8bfc444e3.jpg", + "image_caption": [ + "D" + ], + "image_footnote": [], + "bbox": [ + 401, + 477, + 426, + 489 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/ba9a5a8d015e2c3432c3eec472695b604bebef4c913c3b8907ae242acc6dd5a6.jpg", + "image_caption": [ + "Parallel", + "jNeRFw/", + "north w" + ], + "image_footnote": [], + "bbox": [ + 475, + 478, + 501, + 500 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/d221bc754f3503497ac01adac637d1455865d72b47b42046e69e6fef60bf1071.jpg", + "image_caption": [ + "Parallel", + "prior" + ], + "image_footnote": [], + "bbox": [ + 539, + 478, + 565, + 500 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/2f3721800e1b80d25f6bc516aa30fb91044e9784d09b1702ab2dd418f2c32c8b.jpg", + "image_caption": [ + "NeRF w/c", + "NeM", + "VoGE w/ prior", + "Fig. 4: The illustration presents qualitative results from Tanks & Temple (upper row) and Mip-NeRF $360^{\\circ}$ (lower row) datasets. Each scene showcases the target images (bottom left) along with their corresponding Novel View Synthesis (NVS) outputs (bottom right), derived from the camera poses estimated by 6DGS (located on the top). Furthermore, the estimated camera poses from the comparative baselines are visualized, with distinct colors as indicated in the image legend. The NVS of each scene is rendered based on the provided 3DGS model. Please check the supplementary material for more qualitative results." + ], + "image_footnote": [], + "bbox": [ + 611, + 478, + 637, + 501 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/b0336dcb57af9db42df841c33466de04eba655c2efda0676255b98d3d490cc4e.jpg", + "image_caption": [ + "NeMo +", + "VoGE w/o", + "prior" + ], + "image_footnote": [], + "bbox": [ + 694, + 477, + 720, + 500 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5.4 Ablation studies", + "text_level": 1, + "bbox": [ + 215, + 648, + 397, + 662 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Our ablation studies involve the analysis of the number of rays selected for the pose estimation (Tab. 3a), the number of rays that we cast from a Ellicell (Tab. 3b) as well as the different feature size on the MLP channels (Tab. 3c). 
The supplementary material contains additional ablations that analyze 6DGS performance with low-quality 3DGS models.", + "bbox": [ + 212, + 672, + 787, + 748 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We find that the number of selected rays mainly affects the angular error, while the translation error remains relatively stable. Increasing the number of rays decreases the angular error but slightly increases the translation error, likely due to less confident rays contributing to the pose estimation. The optimal balance between translation and angular errors is achieved between 100 to 150 rays, with 100 being the best. The slight increase in error with more $N_{top}$ rays is due to", + "bbox": [ + 212, + 750, + 787, + 842 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model", + "bbox": [ + 277, + 114, + 732, + 128 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 3: Ablation study on the number of rays selected for pose estimation, on the rays cast from each ellipsoid and on the MLP channels using Mip-NeRF 360 [4]. Underline indicates the default values used.", + "bbox": [ + 215, + 143, + 787, + 186 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "(a) Number of rays used for pose estimation.", + "bbox": [ + 215, + 194, + 372, + 215 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "(b) Number of cast rays per ellipsoid.", + "bbox": [ + 375, + 194, + 576, + 215 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/f30f633f20ae79fba7749ed3982a17038a11267a53e9af76f2f3258761784557.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
\\( {N}_{\\text{top }} \\)MAE (°) \\( \\downarrow \\)MTE (u) \\( \\downarrow \\)Time (s)
2029.00.02350.03
5026.30.02270.04
10024.30.02170.06
15024.40.02190.9
20024.50.02220.11
", + "bbox": [ + 217, + 217, + 372, + 275 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/e0925e54eb1e7eb5a1051d421eefac774d30439fe1a8e077ef71ed1434bdd9d7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
# of cast raysMAE (°)↓MTE (u)↓Time (s)
2029.00.02350.04
3524.70.02200.04
5024.30.02170.06
6525.10.02180.09
8025.20.02210.15
", + "bbox": [ + 375, + 215, + 578, + 275 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "(c) MLP channel feature size.", + "bbox": [ + 593, + 194, + 763, + 205 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/a1c10670ff200d5ebc3a7bd1b9adc5668557af9d04517a2a2b10743b9e045689.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MLP channels MAE (°)↓ MTE (u)↓ Time (s)
25629.40.02730.04
51224.30.02170.06
102430.10.02280.27
", + "bbox": [ + 583, + 215, + 774, + 253 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "introducing rays not pointing precisely to the camera's optical center. Similar to what we observed in the qualitative examples, the noisy rays make the weighted Least Squares estimating the camera closer to the object than it actually is.", + "bbox": [ + 212, + 299, + 785, + 344 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Regarding the impact of the varying number of rays cast from the Ellicells, the angular error tends to remain relatively constant across different configurations. In contrast, the translation error decreases when 50 cast rays are used, and then increases again. This behavior is connected to network generalization capability. Increasing the number of rays allows the network to fit the training set better, but at test time, it makes the network more prone to noise and consequently selecting the wrong rays, thus increasing the error. We observe this generalization issue when increasing the MLP channels, see Tab. 3c, particularly given the limited and uneven distribution of training images ( $\\approx$ 150). Moreover, the processing time increases proportionally with the number of rays and the MLP channels; upon exceeding the default values for rays and feature size, a notable surge in processing time is observed, reaching approximately 10fps and 13fps, respectively.", + "bbox": [ + 212, + 345, + 787, + 541 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "6 Conclusions", + "text_level": 1, + "bbox": [ + 215, + 559, + 369, + 574 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this study, we proposed a novel ray sampling by attention method for estimating 6DoF camera poses from a single image and a 3DGS scene model. Our analytical evaluation demonstrates its robustness and efficiency without requiring initialization, up to $22\\%$ in accuracy and while being faster by a big margin, approx. $94\\mathrm{x}$ faster. Furthermore, the proposed method formulates and utilizes a novel ray generation methodology in order to explore diverse camera pose hypotheses in accordance to an effective attention mechanism. Our method exhibits enhanced robustness across real-world datasets and holds promise for real-time deployment in robotics and other fields. Future research endeavors will focus on improving accuracy and extending applicability to diverse scenes and objects.", + "bbox": [ + 212, + 590, + 787, + 741 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Limitations. The main constraint of 6DGS is the need for retraining with each new scene. This could be mitigated with meta-learning, particularly when similar objects or scenes are under consideration.", + "bbox": [ + 212, + 742, + 785, + 787 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "M. Bortolon et al.", + "bbox": [ + 271, + 114, + 393, + 126 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 217, + 143, + 392, + 162 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This work is part of the RePAIR project that has received funding from the European Union's Horizon 2020 research and innovation programme under grant agreement No. 964854. This work has also received funding from the European Union's Horizon Europe research and innovation programme under grant agreement No. 
101092043, project AGILEHAND (Smart Grading, Handling and Packaging Solutions for Soft and Deformable Products in Agile and Reconfigurable Lines). We thank S. Fiorini for the discussion on the optimizers.", + "bbox": [ + 212, + 175, + 787, + 282 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 217, + 303, + 323, + 318 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Google maps nerf integration. https://blog.google/products/maps/sustainable-immersive-maps-announcements/, accessed: 2024-03-07", + "2. Akenine-Möller, T., Haines, E., Hoffman, N., et al.: Real-time rendering. AK Peters/CRC Press (2018)", + "3. Almkvist, G., Berndt, B.: Gauss, landen, ramanujan, the arithmetic-geometric mean, ellipses, $\\pi$ , and the ladies diary. The American Mathematical Monthly 95(7), 585-608 (1988)", + "4. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In: CVPR (2022)", + "5. Beckers, B., Beckers, P.: Fast and accurate view factor generation. In: FICUP (2016)", + "6. Bortolon, M., Tsesmelis, T., James, S., Poiesi, F., Del Bue, A.: Iffnerf: Initialization free and fast 6 dof pose estimation from a single image and a nerf model. In: ICRA (2024)", + "7. Chen, A., Xu, Z., Geiger, A., Yu, J., Su, H.: Tensorf: Tensorial radiance fields. In: ECCV (2022)", + "8. Chen, S., Song, S., Zhao, J., Feng, T., Ye, C., Xiong, L., Li, D.: Robust dual quadric initialization for forward-translating camera movements. RAL 6(3), 4712-4719 (2021)", + "9. Crocco, M., Rubino, C., Del Bue, A.: Structure from motion with objects. In: CVPR (2016)", + "10. Ding, M., Wang, Z., Sun, J., Shi, J., Luo, P.: Camnet: Coarse-to-fine retrieval for camera re-localization. In: ICCV (2019)", + "11. Gaudilliere, V., Simon, G., Berger, M.O.: Camera relocalization with ellipsoidal abstraction of objects. In: ISMAR (2019)", + "12. Gaudilliere, V., Simon, G., Berger, M.O.: Perspective-2-ellipsoid: Bridging the gap between object detections and 6-dof camera pose. RAL 5(4), 5189-5196 (2020)", + "13. Gay, P., Rubino, C., Bansal, V., Del Bue, A.: Probabilistic structure from motion with objects (psfmo). In: ICCV", + "14. Gay, P., Stuart, J., Del Bue, A.: Visual graphs from motion (vgfm): Scene understanding with object geometry reasoning. In: ACCV (2019)", + "15. He, X., Sun, J., Wang, Y., Huang, D., Bao, H., Zhou, X.: Onepose++: Keypoint-free one-shot object pose estimation without cad models. In: NeurIPS (2022)", + "16. Hosseinzadeh, M., Latif, Y., Pham, T., Suenderhauf, N., Reid, I.: Structure aware slam using quadrics and planes. In: ACCV (2019)", + "17. Jacques, L., Masset, L., Kerschen, G.: Direction and surface sampling in ray tracing for spacecraft radiative heat transfer. Aerospace Science and Technology 47 (2015)" + ], + "bbox": [ + 225, + 333, + 787, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model", + "bbox": [ + 277, + 114, + 732, + 128 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 785, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "18. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. TCG 42(4) (2023)", + "19. Kim, S., Min, J., Cho, M.: Transformatcher: Match-to-match attention for semantic correspondence. 
In: CVPR (2022)", + "20. Knapitsch, A., Park, J., Zhou, Q.Y., Koltun, V.: Tanks and temples: Benchmarking large-scale scene reconstruction. TCG 36(4) (2017)", + "21. Laidlow, T., Davison, A.J.: Simultaneous localisation and mapping with quadric surfaces. In: 3DV (2022)", + "22. Lee, J., Kim, B., Cho, M.: Self-supervised equivariant learning for oriented keypoint detection. In: CVPR (2022)", + "23. Lee, J., Kim, B., Kim, S., Cho, M.: Learning rotation-equivariant features for visual correspondence. In: CVPR", + "24. Liao, Z., Hu, Y., Zhang, J., Qi, X., Zhang, X., Wang, W.: So-slam: Semantic object slam with scale proportional and symmetrical texture constraints. RAL 7(2), 4008-4015 (2022)", + "25. Lin, Y., Müller, T., Tremblay, J., Wen, B., Tyree, S., Evans, A., Vela, P.A., Birchfield, S.: Parallel inversion of neural radiance fields for robust pose estimation. In: ICRA (2023)", + "26. Liu, L., Gu, J., Lin, K.Z., Chua, T.S., Theobalt, C.: Neural sparse voxel fields. In: NeurIPS (2020)", + "27. Lowe, D.G.: Object recognition from local scale-invariant features. In: ICCV", + "28. Luiten, J., Kopanas, G., Leibe, B., Ramanan, D.: Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. In: 3DV (2024)", + "29. Maggio, D., Mario, C., Carlone, L.: Verf: Runtime monitoring of pose estimation with neural radiance fields. In: ICCV (2023)", + "30. Malley, T.: A shading method for computer generated images. Master's thesis, Dept. of Computer Science, University of Utah (1988)", + "31. Masset, L., Brüls, O., Kerschen, G.: Partition of the circle in cells of equal area and shape. Tech. rep., Structural Dynamics Research Group, Aerospace and Mechanical Engineering Department, University of Liege, 'Institut de Mecanique et G 'enie Civil (B52/3) (2011)", + "32. Meng, Y., Zhou, B.: Ellipsoid slam with novel object initialization. In: CASE (2022)", + "33. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. In: ECCV (2020)", + "34. Moreau, A., Piasco, N., Bennehar, M., Tsishkou, D., Stanciulescu, B., de La Fortelle, A.: Crossfire: Camera relocalization on self-supervised features from an implicit representation. In: ICCV (2023)", + "35. Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., Assran, M., Ballas, N., Galuba, W., Howes, R., Huang, P.Y., Li, S.W., Misra, I., Rabbat, M., Sharma, V., Synnaeve, G., Xu, H., Jegou, H., Mairal, J., Labatut, P., Joulin, A., Bojanowski, P.: Dinov2: Learning robust visual features without supervision (2023)", + "36. Sarlin, P.E., DeTone, D., Malisiewicz, T., Rabinovich, A.: Superglue: Learning feature matching with graph neural networks. In: CVPR (2020)", + "37. Shan, M., Feng, Q., Jau, Y.Y., Atanasov, N.: Ellipsdf: joint object pose and shape optimization with a bi-level ellipsoid and signed distance function description. In: ICCV (2021)", + "38. Shazeer, N., Stern, M.: Adafactor: Adaptive learning rates with sublinear memory cost. In: ICML (2018)" + ], + "bbox": [ + 215, + 147, + 784, + 839 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "M. Bortolon et al.", + "bbox": [ + 271, + 114, + 393, + 126 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "39. 
Sinkhorn, R.: A Relationship Between Arbitrary Positive Matrices and Doubly Stochastic Matrices. The Annals of Mathematical Statistics 35(2), 876-879 (1964)", + "40. Tancik, M., Srinivasan, P.P., Mildenhall, B., Fridovich-Keil, S., Raghavan, N., Singhal, U., Ramamoorthi, R., Barron, J.T., Ng, R.: Fourier features let networks learn high frequency functions in low dimensional domains. In: NeurIPS (2020)", + "41. Tombari, F., Salti, S., di Stefano, L.: Unique signatures of histograms for local surface description. In: ECCV (2010)", + "42. Tsesmelis, T., Hasan, I., Cristani, M., Bue, A.D., Galasso, F.: Rgbd2lux: Dense light intensity estimation with an rgbd sensor. In: WACV (2018)", + "43. Wang, A., Kortylewski, A., Yuille, A.: Nemo: Neural mesh models of contrastive features for robust 3d pose estimation. In: ICLR (2020)", + "44. Wang, A., Wang, P., Sun, J., Kortylewski, A., Yuille, A.: Voge: a differentiable volume renderer using gaussian ellipsoids for analysis-by-synthesis. In: ICLR (2022)", + "45. Xie, T., Zong, Z., Qiu, Y., Li, X., Feng, Y., Yang, Y., Jiang, C.: Physgaussian: Physics-integrated 3d gaussians for generative dynamics. In: CVPR (2024)", + "46. Yen-Chen, L., Florence, P., Barron, J.T., Rodriguez, A., Isola, P., Lin, T.Y.: iNeRF: Inverting neural radiance fields for pose estimation. In: IROS (2021)", + "47. Zins, M., Simon, G., Berger, M.O.: Oa-slam: Leveraging objects for camera localization in visual slam. In: ISMAR (2022)" + ], + "bbox": [ + 215, + 147, + 785, + 410 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model", + "bbox": [ + 277, + 114, + 730, + 128 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 116, + 785, + 126 + ], + "page_idx": 16 + } +] \ No newline at end of file diff --git a/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/76913771-7094-44e1-8b30-8ea4e2210b42_model.json b/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/76913771-7094-44e1-8b30-8ea4e2210b42_model.json new file mode 100644 index 0000000000000000000000000000000000000000..c795de449c350e1a6afba399ec68000aafbc34c9 --- /dev/null +++ b/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/76913771-7094-44e1-8b30-8ea4e2210b42_model.json @@ -0,0 +1,2907 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.258, + 0.141, + 0.747, + 0.187 + ], + "angle": 0, + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3D Gaussian Splitting Model" + }, + { + "type": "text", + "bbox": [ + 0.268, + 0.211, + 0.736, + 0.247 + ], + "angle": 0, + "content": "Bortolon Matteo\\(^{1,2,3}\\), Theodore Tsesmelis\\(^{1}\\), Stuart James\\(^{1,4}\\), Fabio Poiesi\\(^{2}\\), and Alessio Del Bue\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.27, + 0.255, + 0.734, + 0.282 + ], + "angle": 0, + "content": "\\(^{1}\\) PAVIS, Fondazione Istituto Italiano di Tecnologia (IIT), Genoa, IT \\(^{2}\\) TeV, Fondazione Bruno Kessler (FBK), Trento, IT" + }, + { + "type": "text", + "bbox": [ + 0.388, + 0.283, + 0.616, + 0.297 + ], + "angle": 0, + "content": "3 Università di Trento, Trento, IT" + }, + { + "type": "text", + "bbox": [ + 0.383, + 0.297, + 0.621, + 0.311 + ], + "angle": 0, + "content": "4 Durham University, Durham, UK" + }, + { + "type": "list", + "bbox": [ + 0.27, + 0.255, + 0.734, + 0.311 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.348, + 0.744, + 0.626 + 
], + "angle": 0, + "content": "Abstract. We propose 6DGS to estimate the camera pose of a target RGB image given a 3D Gaussian Splatting (3DGS) model representing the scene. 6DGS avoids the iterative process typical of analysis-by-synthesis methods (e.g. iNeRF) that also require an initialization of the camera pose in order to converge. Instead, our method estimates a 6DoF pose by inverting the 3DGS rendering process. Starting from the object surface, we define a radiant Ellicell that uniformly generates rays departing from each ellipsoid that parameterize the 3DGS model. Each Ellicell ray is associated with the rendering parameters of each ellipsoid, which in turn is used to obtain the best bindings between the target image pixels and the cast rays. These pixel-ray bindings are then ranked to select the best scoring bundle of rays, which their intersection provides the camera center and, in turn, the camera rotation. The proposed solution obviates the necessity of an \"a priori\" pose for initialization, and it solves 6DoF pose estimation in closed form, without the need for iterations. Moreover, compared to the existing Novel View Synthesis (NVS) baselines for pose estimation, 6DGS can improve the overall average rotational accuracy by \\(12\\%\\) and translation accuracy by \\(22\\%\\) on real scenes, despite not requiring any initialization pose. At the same time, our method operates near real-time, reaching 15fps on consumer hardware." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.65, + 0.376, + 0.667 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.681, + 0.788, + 0.758 + ], + "angle": 0, + "content": "Neural and geometrical 3D representations for Novel View Synthesis (NVS) have recently surged in popularity [18,33], and they have been quickly integrated into daily applications, e.g. mapping services [1]. The change in 3D representation creates new challenges on how to solve classical problems, such as 6D pose estimation, and on how to leverage NVS implicit advantages [25,29,34,44,46]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.758, + 0.788, + 0.806 + ], + "angle": 0, + "content": "The method of iNeRF [46] pioneered 6D pose estimation using an NVS model by proposing an iterative analysis-by-synthesis, as illustrated in the left panel of Fig. [1]. Given a nearby pose initialization (iteration \\(\\# 1\\)), the NVS model" + }, + { + "type": "page_footnote", + "bbox": [ + 0.232, + 0.812, + 0.603, + 0.827 + ], + "angle": 0, + "content": "Project page: https://mbortolon97.github.io/6dns/" + }, + { + "type": "page_footnote", + "bbox": [ + 0.232, + 0.827, + 0.516, + 0.84 + ], + "angle": 0, + "content": "Corresponding author: mbortolon@fbk.eu" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.395, + 0.127 + ], + "angle": 0, + "content": "M. Bortolon et al." + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.147, + 0.787, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.327, + 0.788, + 0.478 + ], + "angle": 0, + "content": "Fig. 1: Our 6DGS method introduces a novel approach to 6DoF pose estimation, departing from conventional analysis-by-synthesis methodologies. 
Standard NeRF-based methods (left) employ an iterative process, rendering candidate poses and comparing them with the target image before updating the pose, which often results in slow performance and limited precision. In contrast, 6DGS (right) estimates the camera pose by selecting a bundle of rays projected from the ellipsoid surface (a radiant Ellicell) and learning an attention map to output ray/image pixel correspondences (based on DINOv2). The optimal bundle of rays should intersect the optical center of the camera and then are used to estimate the camera rotation in closed-form. Our 6GDS method offers significantly improved accuracy and speed, enabling the recovery of the pose within a one-shot estimate." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.503, + 0.788, + 0.639 + ], + "angle": 0, + "content": "is used to render the image related to the initial pose. Then iteratively, the rendered image is compared with the target image using a photometric loss, and the initial pose guess is updated so that the two views achieve the best image overlap at the final step (iteration \\(\\# N\\)). The authors in iNeRF [46] use the popular NeRF [33] NVS model where backpropagation updates every new pose guess. This procedure leverages the remarkable NeRF capabilities in synthesizing realistic novel views, however, at the computational expense of synthesizing a newly rendered image at each iteration. This limitation restricts iNeRF to offline use while requiring a close initial pose estimate for a successful convergence." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.641, + 0.788, + 0.778 + ], + "angle": 0, + "content": "Recent works in 3D Gaussian Splatting (3DGS) [18,28,45] are an alternative to Neural NVs models, providing fast rendering speed through the use of explicit geometric primitives that do not require the optimization of a neural network. 3DGS represents a 3D scene as a set of ellipsoids paired with photometric information, such as color and opacity. The ellipsoids are first initialized using Structure from Motion (SfM), and then they are optimized to reduce the photometric error between the rasterized ellipsoids and a set of known images. During the rasterization stage, the 3DGS model is projected onto the image plane as ellipses and for each pixel the algorithm computes its photometric contribution." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.78, + 0.788, + 0.84 + ], + "angle": 0, + "content": "By leveraging the 3DGS model properties, we design a novel 6DoF pose estimation method (6DGS) that surpasses the limitations of NeRF-based iterative approaches. 6DGS does not require any pose initialization, and it estimates the camera translation and rotation without an iterating analysis-by-synthesis walk" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.787, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.213, + 0.147, + 0.788, + 0.344 + ], + "angle": 0, + "content": "through. This is a key factor for achieving near real-time performance (15fps), also due to the quick rendering capabilities of 3DGS. The right panel of Fig. 1 presents the gist of our approach for 6DoF pose estimation. If we knew the camera pose, the first NVS step of 3DGS would be to project the ellipsoid centers onto the image plane. 
Practically, this is a ray casting through the camera's optical center. Our 6DGS attempts to invert this process and, by doing so, to estimate the camera pose. If the target image camera pose is unknown, and thus neither where the optical center is, we are unable to cast the single ray from each ellipsoid that passes through the correct target image pixels. For this reason, instead, we radiate uniformly distributed rays from each ellipsoid through the introduction of a novel casting procedure named Ellicell. Only one radiated ray per ellipsoid would be accurate, i.e., the one that renders the pixel photometrically by projecting the correct ellipse onto the target image plane." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.346, + 0.789, + 0.572 + ], + "angle": 0, + "content": "Now, the 6DGS problem is to select, given all the casted rays from the Ellicells, the correct bundle of rays that can generate most of the target image pixels with high confidence. This selection stage is addressed by binding pixels and rays through the learning of an attention map. Notice that this step is also unsupervised, as it leverages the known camera poses and images used to compute the 3DGS model to obtain the pixel and ray pairs used for training. After the bundle of rays is selected, the intersection of these rays identifies the camera center, which is solved using weighted Least Squares (wLS), with the weights being the scores from the previous selection stage. After the optical center is estimated, the optical axis can be used to obtain the camera rotation degrees of freedom from the rays bundle, thus solving the 6DoF pose. By design, 6DGS eliminates the need for an initial camera pose, which is one of the limitations of analysis-by-synthesis pose estimation methods [34,44,46], as well as the tendency to converge to local minima during the iteration procedure, especially if the initial pose is initialized far from the optimal position." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.575, + 0.789, + 0.694 + ], + "angle": 0, + "content": "We evaluate 6DGS on datasets featuring real-world objects and scenes, comparing against the current NVS state-of-the-art approaches such as iNeRF [46], Parallel iNeRF [25] and NeMO + VoGE [44]. Our experimental results show that 6DGS is competitive, especially if the initial pose is not provided \"a priori\". Finally, we achieve near real-time 6DoF pose estimation on consumer hardware, which is one rather challenging limitation in the practical application of NVS-based approaches for camera pose estimation. To summarize, 6DGS contributions are threefold:" + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.715, + 0.784, + 0.758 + ], + "angle": 0, + "content": "- Our approach for 6DoF camera pose estimation eliminates the need for an initial camera pose and iterations to converge, which is typically required in analysis-by-synthesis approaches;" + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.762, + 0.784, + 0.804 + ], + "angle": 0, + "content": "- 6DGS employs a novel ray casting pipeline, i.e. Ellicell, and an attention-based mechanism that efficiently matches pixel-level image information with 3DGS ellipsoids:" + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.81, + 0.784, + 0.838 + ], + "angle": 0, + "content": "- The proposed method is state-of-the-art in the NVS benchmarks for camera pose estimation both for accuracy and real-time performance." 
+ }, + { + "type": "list", + "bbox": [ + 0.225, + 0.715, + 0.784, + 0.838 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.395, + 0.127 + ], + "angle": 0, + "content": "M. Bortolon et al." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.145, + 0.392, + 0.161 + ], + "angle": 0, + "content": "2 Related works" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.176, + 0.787, + 0.221 + ], + "angle": 0, + "content": "We review relevant works on 6DoF camera pose estimation based on Neural Radiance Fields (NeRF) models, ellipsoid-based approaches, and correspondence matching methods that are related to key components of 6DGS." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.221, + 0.789, + 0.552 + ], + "angle": 0, + "content": "Pose estimation from neural radiance fields. iNeRF [46] pioneered NeRF-based 6D camera pose estimation, using iterative alignment of target and rendered images based on photometric error. However, iNeRF is prone to local minima in the optimization function, leading to recent developments like Parallel iNeRF [25], which employs parallel optimization of multiple candidate poses. While these approaches rely on NeRF-based models, \\(\\mathrm{NeMo + VoGe}\\) [43,44] have explored 6D camera pose estimation using object models based on volumetric Gaussian reconstruction kernels as geometric primitives. The rendering strategy (VoGE) differs from 3DGS as it is based on ray marching. Therefore, \\(\\mathrm{NeMo + VoGe}\\) iteratively aligns learned features from target and rendered images. Notably, \\(\\mathrm{NeMo + VoGe}\\)'s training requires multiple objects, in contrast to our method, which leverages a single object 3DGS model. Alternatively, CROSS-FIRE [34] addresses the local minima issue by integrating learned local features, which describes not only the visual content but also the 3D location of the scene in the NeRF model. Despite these advancements, analysis-by-synthesis approaches often struggle with inefficient pose updates due to the nature of the optimization refinement and the dependence on accurate initial pose priors. These factors can limit their real-world applicability. Recently, IFFNeRF [6] utilized a method that inverts the NeRF model to re-render an image to match a target one. However, unlike our approach, it does not consider the specificities of 3DGS, which include ellipsoid elongation and rotation, and their non-uniform distribution across the scene surface." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.554, + 0.789, + 0.688 + ], + "angle": 0, + "content": "Pose estimation from ellipsoids. Recovery of the camera pose from ellipsoids has been explored for both SfM [8,9,12-14,37] and SLAM [11,16,21,24,32,47] scenarios, where methods frequently recover the object's ellipsoid representation as well as the camera 6DoF. Such approaches typically solve linear systems to recover the pose, most commonly minimizing a loss of the projection to and from an object detection. However, this methodological framework often presents limitations when confronted with large numbers of ellipsoids, as they are more indicated for handling few large ellipsoids that model a single object occupancy, 3D position and orientation." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.689, + 0.789, + 0.841 + ], + "angle": 0, + "content": "Correspondences Matching. 
In traditional 6DoF image matching, feature-based approaches are used, which often rely on hand-crafted features, e.g., SIFT [27] or more recent deep approaches such as SuperGlue [36] and Transformer [19]. SuperGlue utilizes a Graph Neural Network (GNN) for feature attention and Sinkhorn [39] for matching, while LightGlue replaces the GNN with a lightweight transformer. Unlike these, Transformer [19] performs global match-to-match attention, allowing for accurate match localization. In addition, there is a body of work around feature equivariance [22,23] for improving the robustness of matching. However, these methods rely on the hypothesis that both feature sets exist in a homogeneous feature space, i.e. extracted from the image," + } + ], + [ + { + "type": "header", + "bbox": [ + 0.278, + 0.114, + 0.733, + 0.129 + ], + "angle": 0, + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.284 + ], + "angle": 0, + "content": "while in 6DGS we have the specific problem to match pixel to rays emitted from the Ellicells. Therefore, we rely on the proposed attention model to handle these ray-to-pixel bindings. OnePose++ [15] instead adopts a multi-modal approach matching a point cloud with an image. Another proposed alternative is to regress directly the pose parameters, as in CamNet [10]. Nevertheless, these approaches require a large amount of training data (\\(\\approx\\) 500 or more images), sometimes across multiple scenes and, like with CamNet, these need to be available also at inference time. 6DGS however, requires only \\(\\approx\\) 100 or less images, which are utilized only once during training." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.306, + 0.383, + 0.321 + ], + "angle": 0, + "content": "3 Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.336, + 0.789, + 0.473 + ], + "angle": 0, + "content": "We first review 3D Gaussian Splatting (3DGS) [18] to understand the underlying principles and provide the mathematical formalization of the model. 3DGS objective is to synthesize novel views of a scene by optimizing the position, the orientation and the color of a set of 3D Gaussians approximated as ellipsoids \\(\\mathcal{Q} = \\{\\mathbf{Q}\\}_{i=1}^{K}\\) from a given set of input images \\(\\mathcal{I} = \\{\\mathbf{I}\\}_{i=1}^{J}\\) and their corresponding camera projection matrices \\(\\mathcal{P} = \\{\\mathbf{P}\\}_{i=1}^{J} \\in \\mathbb{R}^{3 \\times 4}\\). A point \\(\\mathbf{d}\\) for being on the surface of an ellipsoid must satisfy the equation \\((\\mathbf{d} - \\mathbf{x}) \\boldsymbol{\\Sigma} (\\mathbf{d} - \\mathbf{x})^T = 1\\), where \\(\\mathbf{x} \\in \\mathbb{R}^3\\) is the ellipsoid center and \\(\\boldsymbol{\\Sigma} \\in \\mathbb{R}^{3 \\times 3}\\) its covariance matrix. We can further decompose the covariance of the ellipsoid \\(\\boldsymbol{\\Sigma}\\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.444, + 0.484, + 0.785, + 0.5 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\Sigma} = \\mathbf {R} \\mathbf {U} \\mathbf {U} ^ {T} \\mathbf {R} ^ {T}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.511, + 0.788, + 0.558 + ], + "angle": 0, + "content": "where \\(\\mathbf{R} \\in \\mathbb{R}^{3 \\times 3}\\) is the ellipsoid rotation matrix and \\(\\mathbf{U}^{3 \\times 3}\\) denotes the scaling matrix. 
The projection matrix \\(\\mathbf{P} \\in \\mathbb{R}^{3 \\times 4}\\) allows the projection of the ellipsoid \\(\\mathbf{Q}\\) onto the image plane generating the corresponding ellipse representation:" + }, + { + "type": "equation", + "bbox": [ + 0.419, + 0.568, + 0.785, + 0.584 + ], + "angle": 0, + "content": "\\[\n\\check {\\mathbf {y}} = \\mathbf {P} \\check {\\mathbf {x}} ^ {T}, \\check {\\mathbf {E}} = \\mathbf {P} \\boldsymbol {\\Sigma} \\mathbf {P} ^ {T}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.595, + 0.788, + 0.686 + ], + "angle": 0, + "content": "where \\(\\mathbf{y} \\in \\mathbb{R}^2\\) and \\(\\breve{\\mathbf{y}} \\in \\mathbb{R}^3\\) correspond to the Euclidean and homogeneous coordinates of the ellipse center point. The homogeneous coordinates \\(\\breve{\\mathbf{y}}\\) originate from the projection of the corresponding ellipsoid center in the homogeneous coordinates \\(\\breve{\\mathbf{x}} \\in \\mathbb{R}^4\\). The matrix \\(\\breve{\\mathbf{E}} \\in \\mathbb{R}^{3 \\times 3}\\) is the ellipse covariance in homogeneous space. The covariance of the ellipse \\(\\mathbf{E} \\in \\mathbb{R}^{2 \\times 2}\\), is derived by selecting only the first two rows and columns of \\(\\breve{\\mathbf{E}}\\) and dividing by the last element on \\(\\breve{\\mathbf{E}}\\) diagonal." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.686, + 0.788, + 0.793 + ], + "angle": 0, + "content": "The splatted ellipses, denoted as \\(\\mathcal{B} = \\{\\langle \\mathbf{y},\\mathbf{E}\\rangle \\}_{i = 1}^{K}\\), generate a pixel color with the rendering function \\(\\phi\\) using rasterization techniques [2,18]. The function \\(\\phi\\) acts independently on every single pixel of the image \\(\\mathbf{p}\\). The pixel value depends on the neighboring projected ellipses, taking into account their center points' distances to the pixel coordinates, as well as their orientations and scales. \\(\\phi\\) assumes that the ellipses are ordered based on the depth, so they should be sorted. Formally, \\(\\phi\\) can be expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.381, + 0.803, + 0.785, + 0.843 + ], + "angle": 0, + "content": "\\[\n\\phi (\\mathcal {B}, \\mathbf {p}) = \\sum_ {i = 1} ^ {K} \\rho_ {i} \\alpha_ {i} e ^ {- \\tau (\\mathcal {B} _ {i}, \\mathbf {p})} \\gamma (i, \\mathbf {p}), \\tag {3}\n\\]" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.117, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.395, + 0.127 + ], + "angle": 0, + "content": "M. Bortolon et al." + }, + { + "type": "image", + "bbox": [ + 0.237, + 0.145, + 0.768, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.336, + 0.788, + 0.435 + ], + "angle": 0, + "content": "Fig. 2: The figure illustrates the pipeline of our 6DGS methodology. The image is encoded using a visual backbone (a). Concurrently, rays are uniformly projected from the center of the 3DGS ellipsoids (b), and their corresponding color is estimated. Subsequently, an attention map mechanism is employed to compare the encoded ray and image features (c). Following this comparison, the \\( N_{top} \\) matches are selected via attenuation, and the camera location is estimated (d) as the solution of a weighted Least Squares problem, resulting in a distinct 6DoF pose for the image." 
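A small NumPy sketch of Eqs. 1-2 as stated above. The homogeneous embedding of Sigma used to make the 3x4 projection dimensionally consistent is our reading of the text, not something it spells out; practical 3DGS implementations instead approximate the 2D covariance with the Jacobian of the perspective projection. Names and the toy values are illustrative.

```python
import numpy as np

def ellipsoid_covariance(R, scales):
    """Eq. 1: Sigma = R U U^T R^T with U = diag(scales)."""
    U = np.diag(scales)
    return R @ U @ U.T @ R.T

def project_center(P, x):
    """First part of Eq. 2: homogeneous projection of the ellipsoid center."""
    y_h = P @ np.append(x, 1.0)
    return y_h[:2] / y_h[2]

def ellipse_covariance(P, Sigma):
    """Second part of Eq. 2. Sigma is embedded into homogeneous coordinates so
    that the 3x4 projection is dimensionally consistent (our reading of the
    text); E is the top-left 2x2 block of E_breve divided by its last
    diagonal element, as described above."""
    Sigma_h = np.zeros((4, 4))
    Sigma_h[:3, :3] = Sigma
    E_breve = P @ Sigma_h @ P.T
    return E_breve[:2, :2] / E_breve[2, 2]

# toy usage with an axis-aligned ellipsoid and a canonical projection matrix
R, scales = np.eye(3), np.array([0.2, 0.1, 0.05])
P = np.hstack([np.eye(3), np.zeros((3, 1))])
Sigma = ellipsoid_covariance(R, scales)
print(project_center(P, np.array([0.0, 0.0, 2.0])), ellipse_covariance(P, Sigma))
```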
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.451, + 0.788, + 0.511 + ], + "angle": 0, + "content": "where \\(\\rho\\) and \\(\\alpha\\) represent the color and opacity attributes associated with the ellipsoid, which are inherited by the splatted ellipse. Similar to the volumetric rendering equation in NeRF, \\(\\gamma\\) denotes the inverse of the volume density accumulated up to the \\(i^{th}\\) ellipse on pixel \\(\\mathbf{p}\\) and is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.39, + 0.522, + 0.787, + 0.565 + ], + "angle": 0, + "content": "\\[\n\\gamma (i, \\mathbf {p}) = \\prod_ {j = 1} ^ {i - 1} \\left(1 - \\alpha_ {j} e ^ {- \\tau \\left(\\mathcal {B} _ {i}, \\mathbf {p}\\right)}\\right). \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.575, + 0.788, + 0.637 + ], + "angle": 0, + "content": "The purpose of \\(\\tau\\) is to determine the light absorption by the ellipse when represented as a 2D Gaussian. Light absorption depends on the orientation and distance between the ellipse center, denoted as \\(\\mathbf{y}\\), and the pixel location, expressed as \\(\\mathbf{d} = \\mathbf{p} - \\mathbf{y}\\). Consequently, we can formally define \\(\\tau\\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.402, + 0.645, + 0.787, + 0.675 + ], + "angle": 0, + "content": "\\[\n\\tau (\\mathbf {B}, \\mathbf {p}) = \\frac {1}{2} \\left(\\mathbf {1} _ {2} \\mathbf {d} ^ {T} \\mathbf {E} \\mathbf {d} \\mathbf {1} _ {2} ^ {T}\\right), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.685, + 0.788, + 0.733 + ], + "angle": 0, + "content": "where \\(\\mathbf{1}_2\\in \\mathbb{R}^2\\) denotes a vector filled with ones. Following the processing of all pixels onto the image plane, the rendering function \\(\\phi\\) generates an image \\(\\hat{\\mathbf{l}}\\in \\mathbb{R}_{+}^{H\\times W}\\), where \\(W\\) and \\(H\\) represent the width and height of the image." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.754, + 0.388, + 0.772 + ], + "angle": 0, + "content": "4 Our approach" + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.785, + 0.342, + 0.799 + ], + "angle": 0, + "content": "4.1 Overview" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.809, + 0.787, + 0.843 + ], + "angle": 0, + "content": "6DGS estimates the camera pose \\(\\hat{\\mathbf{P}}\\in \\mathbb{R}^{3\\times 4}\\), given a target image \\(\\mathbf{I}_t\\) and a set of ellipsoids \\(\\mathcal{Q}\\) from a pre-computed 3DGS model (Fig. 2). 
To solve for the" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "image", + "bbox": [ + 0.262, + 0.147, + 0.432, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.277, + 0.234, + 0.413, + 0.247 + ], + "angle": 0, + "content": "(a) Ellicell components" + }, + { + "type": "image", + "bbox": [ + 0.451, + 0.164, + 0.578, + 0.232 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.459, + 0.235, + 0.572, + 0.247 + ], + "angle": 0, + "content": "(b) 3D Ellicell grid" + }, + { + "type": "image", + "bbox": [ + 0.609, + 0.16, + 0.743, + 0.234 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.61, + 0.235, + 0.74, + 0.247 + ], + "angle": 0, + "content": "(c) 3D radiant Ellicell" + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.258, + 0.789, + 0.342 + ], + "angle": 0, + "content": "Fig. 3: The illustration depicts the three primary stages involved in the radiant Ellicell generation. Firstly, (a) delineates the formulation of the components required to compute the geometric information for each cell. Secondly, (b) shows the resulting Ellicell grid positioned on the surface of the ellipsoid along with the respective center points. Finally, (c) demonstrates the generation of rays originating from the center point of the ellipsoid and passing through the Ellicell centers." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.364, + 0.789, + 0.593 + ], + "angle": 0, + "content": "camera pose, we propose a casting method from the ellipsoid's surface, called Ellicell, that divides it into equal-area cells (Sec. 4.2). The ellipsoids cast a set of \(N\) rays, denoted as \(\mathcal{V} = \{\langle \mathbf{v}_o,\mathbf{v}_d,\mathbf{v}_c\rangle \}_{i = 1}^N\), one for each generated cell (Fig. 3c). Each ray is identified by \(i)\) the origin \(\mathbf{v}_o\in \mathbb{R}^3\), i.e., the center point of its ellipsoid, \(ii)\) the direction \(\mathbf{v}_d\in \mathbb{R}^3\) pointing from the ellipsoid center through the cell center and out into space, and \(iii)\) the color information \(\mathbf{v}_c\in \mathbb{R}^3\) as RGB values. We synthesize the rays' color using the 3DGS rendering function \(\phi\) (Eq. 3). A subset of these rays, depending on the view perspective, may intersect the camera's optical center. To bind the rays to the image pixels, we compute the target image pixel features \(\psi (\mathbf{I}_t)\) (Fig. 2a) and the ray features \(\psi (\mathcal{V})\) (Fig. 2b). These features are used to identify the intersecting rays through an attention map \(\mathcal{A}\) (Fig. 2c), see Sec. 4.4. The higher the attention value for a ray-pixel pair, the more likely the intersection on the image plane is a valid one. Lastly, we determine \(\hat{\mathbf{P}}_t\) (Fig. 2d) by computing the intersection point of the rays using the weighted Least Squares algorithm (Sec. 4.5)."
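To make the ray set \(\mathcal{V}\) described above concrete, here is a minimal Python sketch that assembles one ray per Ellicell cell; `cell_centers_of` and `render_color` are hypothetical placeholders standing in for the Ellicell construction of Sec. 4.2 and the rendering function \(\phi\) of Eq. 3.

```python
import numpy as np

def build_ray_set(ellipsoid_centers, cell_centers_of, render_color):
    """Assemble V = {<v_o, v_d, v_c>}: every ray starts at an ellipsoid
    center x and points through one Ellicell cell center u (Fig. 3c)."""
    rays = []
    for x in ellipsoid_centers:
        for u in cell_centers_of(x):        # Ellicell cells, Sec. 4.2
            d = u - x
            v_d = d / np.linalg.norm(d)     # unit ray direction
            v_c = render_color(x, v_d)      # RGB color via phi (Eq. 3)
            rays.append((x, v_d, v_c))
    return rays
```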
+ }, + { + "type": "title", + "bbox": [ + 0.216, + 0.611, + 0.391, + 0.625 + ], + "angle": 0, + "content": "4.2 Radiant Ellicell" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.635, + 0.788, + 0.711 + ], + "angle": 0, + "content": "We generate rays spanning every direction, allowing 6DGS to recover the camera pose. We introduce the concept of the radiant Ellicell for generating rays that uniformly emanate from the ellipsoid surface, as illustrated in Fig. 3. Ellicell generation is deterministic [5,31] and achieves higher precision with fewer rays [17,42] compared to other sampling methods like Monte-Carlo [30]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.71, + 0.788, + 0.759 + ], + "angle": 0, + "content": "First, we compute the area of each Ellicell. This is achieved by calculating the ellipsoid surface area using a computationally efficient approach, namely the Ramanujan approximation [3]:" + }, + { + "type": "equation", + "bbox": [ + 0.356, + 0.765, + 0.787, + 0.805 + ], + "angle": 0, + "content": "\[\nh = 4 \pi \left(\frac {(a b) ^ {1.6} + (a c) ^ {1.6} + (b c) ^ {1.6}}{3}\right) ^ {\frac {1}{1.6}}, \tag {6}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.811, + 0.787, + 0.842 + ], + "angle": 0, + "content": "where \(a, b, c = \text{diag}(\mathcal{S})\) are the ellipsoid axis scales. Each Ellicell cell's target area equals \(\mu = h / G\), with \(G\) being the number of cells dividing each ellipsoid." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.395, + 0.128 + ], + "angle": 0, + "content": "M. Bortolon et al." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.147, + 0.788, + 0.219 + ], + "angle": 0, + "content": "Approximating each cell as a square with side \( z = \sqrt{\mu} \), we slice the ellipsoids along the major axis into ribbons, each as wide as \( z \) (Fig. 3a). The extremity of each ribbon is called a ring. The total number of rings is \( e = \lfloor \kappa(a, b) / (2z) \rfloor \in \mathbb{N} \), where \( \kappa(a, b) \) computes the ring perimeter. Ignoring the ellipsoid's rotation, we compute the ring perimeters by treating the rings as 2D ellipses, thus defining \( \kappa(a, b) \) as:" + }, + { + "type": "equation", + "bbox": [ + 0.325, + 0.229, + 0.786, + 0.261 + ], + "angle": 0, + "content": "\[\n\kappa (a, b) = \pi \left((a + b) + \frac {3 (a - b) ^ {2}}{10 (a + b) + \sqrt {a ^ {2} + 14 a b + b ^ {2}}}\right). \tag {7}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.269, + 0.789, + 0.314 + ], + "angle": 0, + "content": "Given the total number of rings \( e \), it is possible to compute the ribbon's centerline geometric parameters. In particular, we compute the scale parameter of each ribbon as:" + }, + { + "type": "equation", + "bbox": [ + 0.35, + 0.325, + 0.786, + 0.358 + ], + "angle": 0, + "content": "\[\n\varrho (n, e, a, b) = \sqrt {1 - \frac {(0.5 \Delta r + n \Delta r - a) ^ {2}}{b ^ {2}}}, \tag {8}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.368, + 0.789, + 0.458 + ], + "angle": 0, + "content": "where \(\Delta r=a/e\) is the distance between two consecutive rings. This equation derives from a manipulation of the standard ellipse equation. While the ribbon size \(z\) should equal \(\Delta r\), the two values will likely differ because the number of rings must be a natural number. Eq.
8 is also used to compute the other ribbon scaling parameter by replacing \(b\) with \(c\). \(\varrho\) is then used to compute the number of cells inside each ribbon as:" + }, + { + "type": "equation", + "bbox": [ + 0.339, + 0.468, + 0.786, + 0.502 + ], + "angle": 0, + "content": "\[\n\xi (n, e, a, b, c) = \left\lfloor \frac {\kappa (\varrho (n, e, a, b), \varrho (n, e, a, c))}{z} \right\rfloor , \tag {9}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.511, + 0.788, + 0.629 + ], + "angle": 0, + "content": "where \(\xi\) is the number of cells inside the ring. We compute the center of each cell, equally spaced along the ribbon's centerline, by sampling \(\xi\) points along it. This is challenging because the perimeter distance does not linearly correlate with the variations in \(x\) and \(y\). However, we can solve this by using a statistical method. Knowing a distribution's Cumulative Distribution Function (CDF) allows us to sample uniformly between 0 and 1 and then use the CDF inverse to map the sample to the distribution space. This approach applies to our case, where samples are distributed as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.443, + 0.632, + 0.785, + 0.649 + ], + "angle": 0, + "content": "\[\nd s ^ {2} = d x ^ {2} + d y ^ {2}, \tag {10}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.656, + 0.788, + 0.716 + ], + "angle": 0, + "content": "and, by taking its inverse, we can retrieve the coordinates of each cell center. To simplify the equations, we define \( r = \varrho(n, e, a, b) \) and \( w = \varrho(n, e, a, c) \) to indicate the scales of the ellipse under consideration. Then we express Eq. 10 in polar coordinates to simplify the differentiation:" + }, + { + "type": "equation", + "bbox": [ + 0.403, + 0.729, + 0.786, + 0.757 + ], + "angle": 0, + "content": "\[\n\frac {d s}{d \theta} = \sqrt {r ^ {2} \sin^ {2} \theta + w ^ {2} \cos^ {2} \theta}, \tag {11}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.767, + 0.788, + 0.797 + ], + "angle": 0, + "content": "Then, we can express the set of points on the perimeter of the ribbon centerline as angular positions in the polar coordinate system:" + }, + { + "type": "equation", + "bbox": [ + 0.38, + 0.809, + 0.786, + 0.845 + ], + "angle": 0, + "content": "\[\n\theta^ {\prime} = \left(\frac {d s}{d \theta}\right) ^ {- 1} \left(g \cdot \frac {1}{\xi (n, e, a, b, c)}\right), \tag {12}\n\]" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.278, + 0.114, + 0.733, + 0.129 + ], + "angle": 0, + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.787, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.178 + ], + "angle": 0, + "content": "with \( g \) being the cell identifier. Given \( \theta' \), we can use it inside the ellipse equation in polar coordinates to obtain the 3D position of each cell center:" + }, + { + "type": "equation", + "bbox": [ + 0.433, + 0.189, + 0.786, + 0.236 + ], + "angle": 0, + "content": "\[\n\mathbf {u} = \left( \begin{array}{c} r \cos \left(\theta^ {\prime}\right) \\ w \sin \left(\theta^ {\prime}\right) \\ - a + n \Delta r \end{array} \right).
\tag {13}\n\]" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.255, + 0.389, + 0.27 + ], + "angle": 0, + "content": "4.3 Ray generation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.279, + 0.788, + 0.385 + ], + "angle": 0, + "content": "Once we have divided each ellipsoid of the 3DGS model into equidistant cells, we cast the rays originating from the center point of the ellipsoid, i.e., \(\mathbf{v}_o = \mathbf{x}\), and oriented towards the Ellicell center, \(\mathbf{v}_d = \mathbf{u} - \mathbf{x}\). We reduce the number of potential rays cast from each ellipsoid by considering only the rays oriented in the same hemisphere as the estimated surface normal of the ellipsoid. We obtain the surface normals by treating the ellipsoid centroids as a point cloud; the surface normal is estimated using the nearby points [41]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.385, + 0.788, + 0.462 + ], + "angle": 0, + "content": "Finally, each ray is also associated with the color information \(\mathbf{v}_c\), which we compute through the same pixel-level approach of 3DGS (Eq. 3). We note that the application of the volumetric rendering function of Eq. 3 produces a single pixel for each ray. The generated rays represent a collection of potential hypotheses, meaning that a subset of them will intersect the target image \(\mathbf{I}_t\)." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.482, + 0.593, + 0.499 + ], + "angle": 0, + "content": "4.4 Binding by attention of rays to image" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.507, + 0.788, + 0.584 + ], + "angle": 0, + "content": "Given all the cast rays \(\mathcal{V}\), we identify a subset of \(\mathcal{V}\) correlating with the target image \(\mathbf{I}_t\). A learned attention map \(\mathcal{A}\) assigns scores \(\hat{\mathbf{s}}\) based on the correlation to image pixels; higher similarity results in higher scores. Based on the scores \(\hat{\mathbf{s}}\), we select the top \(N_{top}\) candidate rays presenting the maximal association and use them to recover the pose \(\hat{\mathbf{P}}_t\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.584, + 0.788, + 0.658 + ], + "angle": 0, + "content": "To select rays with similar appearance and position, we use a Multi-Layer Perceptron (MLP) defined as \(\mathbf{V} = \psi (\mathcal{V})\), where \(\mathbf{V}\in \mathbb{R}^{N\times C}\) with \(C\) being the feature size and \(N\) the overall number of rays. The MLP input is enriched by incorporating Positional Encoding that maps the data into the Fourier domain [40] to better distinguish between similar data." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.659, + 0.788, + 0.795 + ], + "angle": 0, + "content": "We generate features from \(\mathbf{I}_t\) using DINOv2 [35] as a pre-trained backbone feature extractor. This results in a set of features \(\mathbf{F}_t \in \mathbb{R}^{M \times C}\), where \(M = W \times H\). Both the image and ray feature sets are processed by a single attention module \(\mathcal{A}(\mathbf{V}, \mathbf{F}_t) \in \mathbb{R}^{M \times N}\) producing a set of scores. Inside the attention module, the ray features, \(\mathbf{V}\), are used as queries and the image features, \(\mathbf{F}_t\), as keys. We convert the attention map into a per-ray correlation score by summing along its rows, \(\hat{\mathbf{s}} = \sum_{i=1}^{M} \mathcal{A}_i\), as in the sketch below.
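A minimal PyTorch sketch of this ray-to-pixel binding follows; the scaled dot-product form and the choice of normalizing the softmax over the ray dimension are assumptions of this sketch, since the text only specifies the query/key roles and the row-wise reduction.

```python
import torch

def ray_scores(V, F_t):
    """Score rays against image pixels (Sec. 4.4): V (N x C) ray features
    act as queries, F_t (M x C) image features as keys; the M x N map A
    is reduced to one correlation score per ray by summing over pixels."""
    C = V.shape[1]
    logits = F_t @ V.T / C ** 0.5       # M x N ray/pixel similarities
    A = torch.softmax(logits, dim=1)    # normalize over rays (assumption)
    return A.sum(dim=0)                 # s_hat: per-ray score, shape (N,)
```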
The higher the score value given by \(\hat{\mathbf{s}}\), the better the association between the rays and the image pixels. At test time, we select the \(N_{top}\) rays with the highest scores." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.796, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Because a ray and an image pixel should be associated with each other based on the distance between the camera origin and its projection onto the corresponding ray, we supervise the predicted scores \(\hat{\mathbf{s}}\) using the same images used to" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.395, + 0.128 + ], + "angle": 0, + "content": "M. Bortolon et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.269 + ], + "angle": 0, + "content": "estimate the 3DGS model at training time. We compute the projection of the point on the line as \( l = \max((\mathbf{O} - \mathbf{v}_o)^{T}\mathbf{v}_d, 0) \), where \( \mathbf{O} \) is the camera position, \( \mathbf{v}_o \) the generated ray origin and \( \mathbf{v}_d \) the corresponding direction. Rays are infinite only in one direction, so we restrict \( l \in \mathbb{R}^+ \) using the max operator. Then, we can compute the distance between the camera origin and its projection on the ray as \( \mathbf{h} = \| (\mathbf{v}_o + l\mathbf{v}_d) - \mathbf{O}\|_2 \). The value \( \mathbf{h} \) can span from 0 to \( +\infty \), with 0 indicating a ray that passes through the camera's optical center. We map distances to the attention map score using:" + }, + { + "type": "equation", + "bbox": [ + 0.389, + 0.274, + 0.786, + 0.311 + ], + "angle": 0, + "content": "\[\n\delta = 1 - \tanh \left(\frac {\mathbf {h}}{\lambda}\right), \quad \mathbf {s} = \delta \frac {M}{\sum \delta}, \tag {14}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.315, + 0.788, + 0.377 + ], + "angle": 0, + "content": "where \(\lambda\) regulates the number of rays to assign to a specific camera. Lastly, the softmax inside the attention map computation requires that we normalize the ground truth scores. We use the \(L2\) loss to minimize the difference between the predicted \(\hat{\mathbf{s}}\) and the computed ground truth \(\mathbf{s}\) scores as:" + }, + { + "type": "equation", + "bbox": [ + 0.39, + 0.382, + 0.786, + 0.426 + ], + "angle": 0, + "content": "\[\n\mathcal {L} = \frac {1}{M N} \sum_ {i = 1} ^ {M} \sum_ {j = 1} ^ {N} \| \hat {\mathbf {s}} _ {i, j} - \mathbf {s} _ {i, j} \| _ {2}, \tag {15}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.433, + 0.786, + 0.464 + ], + "angle": 0, + "content": "where \(M, N\) are the dimensions of the attention map \(\mathcal{A}\). During each training iteration, we use an image and a pose from those utilized for estimating the 3DGS model." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.484, + 0.476, + 0.499 + ], + "angle": 0, + "content": "4.5 Test-time pose estimation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.506, + 0.786, + 0.568 + ], + "angle": 0, + "content": "During the test phase, the predicted scores \(\hat{\mathbf{s}}\) are used to select the top \(N_{top}\) rays, identified as the most relevant, constrained to at most one ray per ellipsoid. Note that only a small set of rays is sufficient to estimate the camera pose.
Based on an ablation study, we set \(N_{top} = 100\); see Tab. 3a." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.567, + 0.787, + 0.626 + ], + "angle": 0, + "content": "The camera position is found at the intersection of the selected rays, solved as a weighted Least Squares problem. Since 3D lines usually do not intersect at a single point due to the discretization noise introduced by the Ellicell, we minimize the sum of squared perpendicular distances instead." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.627, + 0.787, + 0.66 + ], + "angle": 0, + "content": "For the selected ray \(\mathbf{v}_f\) with \(f = 1\ldots N_{top}\), the error is given by the square of the distance from the predicted camera position \(\hat{\mathbf{O}}\) to its projection on \(\mathbf{v}_f\):" + }, + { + "type": "equation", + "bbox": [ + 0.315, + 0.667, + 0.786, + 0.711 + ], + "angle": 0, + "content": "\[\n\sum_ {f = 1} ^ {N_{top}} \left(\left(\hat {\mathbf {O}} - \mathbf {v} _ {o, f}\right) ^ {T} \left(\hat {\mathbf {O}} - \mathbf {v} _ {o, f}\right) - \left(\left(\hat {\mathbf {O}} - \mathbf {v} _ {o, f}\right) ^ {T} \mathbf {v} _ {d, f}\right) ^ {2}\right), \tag {16}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.719, + 0.785, + 0.752 + ], + "angle": 0, + "content": "where \(\mathbf{v}_{o,f}\) indicates the origin of the \(f\)-th ray and \(\mathbf{v}_{d,f}\) the respective direction. To minimize Eq. 16 we differentiate it with respect to \(\hat{\mathbf{O}}\) and set the derivative to zero, resulting in" + }, + { + "type": "equation", + "bbox": [ + 0.393, + 0.759, + 0.786, + 0.802 + ], + "angle": 0, + "content": "\[\n\sum_ {f = 1} ^ {N_{top}} \hat {\mathbf {s}} _ {f} \left(\mathbb {I} - \mathbf {v} _ {d, f} \mathbf {v} _ {d, f} ^ {T}\right) \hat {\mathbf {O}} = \sum_ {f = 1} ^ {N_{top}} \hat {\mathbf {s}} _ {f} \left(\mathbb {I} - \mathbf {v} _ {d, f} \mathbf {v} _ {d, f} ^ {T}\right) \mathbf {v} _ {o, f}, \tag {17}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.81, + 0.786, + 0.84 + ], + "angle": 0, + "content": "where \(\mathbb{I}\) is the identity matrix and \(\hat{\mathbf{s}}_f\) are the predicted ray scores. This expression is solved as a weighted system of linear equations." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.144, + 0.788, + 0.215 + ], + "angle": 0, + "content": "Table 1: Evaluation of 6DoF pose estimation on the Mip-NeRF \(360^{\circ}\) dataset. We report results in terms of Mean Angular Error (MAE) in degrees and Mean Translation Error (MTE) in units \(u\), where \(1u\) equals the object's largest dimension. For both metrics lower is better. Best-performing results are highlighted in bold and green, while second-best values are highlighted in orange." + }, + { + "type": "table", + "bbox": [ + 0.218, + 0.224, + 0.788, + 0.324 + ], + "angle": 0, + "content": "
Each cell reports MAE (°) ↓ / MTE (u) ↓; the first three method columns use a fixed pose prior (eval. protocol by [46]), the next three a random pose prior, and 6DGS uses no pose prior.

| Scene | iNeRF [46] | NeMo + VoGE [44] | Parallel iNeRF [25] | iNeRF [46] | NeMo + VoGE [44] | Parallel iNeRF [25] | 6DGS (Ours) |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Bicycle | 39.5 / 0.116 | 43.8 / 0.015 | 35.9 / 0.116 | 76.6 / 0.217 | 111.8 / 0.038 | 44.4 / 0.150 | 12.1 / 0.010 |
| Bonsai | 51.3 / 0.228 | 52.5 / 0.036 | 41.1 / 0.223 | 96.7 / 0.385 | 98.9 / 0.073 | 58.2 / 0.298 | 10.5 / 0.038 |
| Counter | 40.7 / 0.324 | 45.6 / 0.072 | 24.7 / 0.212 | 70.3 / 0.487 | 98.1 / 0.139 | 42.1 / 0.435 | 19.6 / 0.043 |
| Garden | 31.0 / 0.121 | 31.8 / 0.026 | 18.2 / 0.090 | 72.8 / 0.210 | 89.2 / 0.038 | 60.0 / 0.144 | 37.8 / 0.015 |
| Kitchen | 38.2 / 0.113 | 41.6 / 0.042 | 37.3 / 0.109 | 100.2 / 0.266 | 122.2 / 0.082 | 65.0 / 0.193 | 23.2 / 0.018 |
| Room | 38.8 / 0.274 | 44.9 / 0.045 | 30.7 / 0.257 | 91.6 / 0.444 | 110.0 / 0.010 | 63.5 / 0.271 | 38.3 / 0.019 |
| Stump | 21.4 / 0.030 | 26.3 / 0.016 | 14.8 / 0.016 | 86.9 / 0.035 | 96.3 / 0.025 | 72.6 / 0.033 | 28.3 / 0.009 |
| Avg. | 37.3 / 0.172 | 40.9 / 0.036 | 28.9 / 0.146 | 85.0 / 0.292 | 103.8 / 0.058 | 58.0 / 0.218 | 24.3 / 0.022 |
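The closed-form pose solver of Sec. 4.5 (Eqs. 16-17) reduces to a small weighted linear system; the NumPy sketch below illustrates it, with variable names chosen for readability rather than taken from any released code.

```python
import numpy as np

def camera_center(v_o, v_d, s):
    """Weighted Least Squares ray intersection (Eqs. 16-17): v_o and v_d
    are (N_top, 3) ray origins and unit directions, s the (N_top,)
    predicted scores used as weights on each ray's residual."""
    A = np.zeros((3, 3))
    b = np.zeros(3)
    for o, d, weight in zip(v_o, v_d, s):
        M = weight * (np.eye(3) - np.outer(d, d))  # projector off the ray
        A += M
        b += M @ o
    return np.linalg.solve(A, b)                   # optical center O_hat
```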
" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.344, + 0.325, + 0.36 + ], + "angle": 0, + "content": "5 Results" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.376, + 0.424, + 0.391 + ], + "angle": 0, + "content": "5.1 Experimental setup" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.4, + 0.789, + 0.642 + ], + "angle": 0, + "content": "We evaluate 6DGS and compare with other analysis-by-synthesis methods for 6D pose estimation, including iNeRF [46], Parallel iNeRF [25], and NeMo+VoGE [43, 44]. We reproduce the results using their published code. We follow iNeRF's evaluation protocol and test on two real-world datasets: Tanks & Temples [20] and Mip-NeRF \\(360^{\\circ}\\) [4]. For each dataset, we use the predefined training-test splits and evaluate them with two pose initialization pipelines: \\(i)\\) the original iNeRF initialization, where the starting pose is sampled uniformly between \\([-40^{\\circ}, +40^{\\circ}]\\) degrees of errors and \\([-0.1, +0.1]\\) units of translation error from the ground-truth target pose; \\(ii)\\) by randomly choosing an initialization pose from the ones used to create the 3DGS mode. Although analysis-by-synthesis methods were tested with a prior, in reality it is rarely available, so we present a second scenario to assess them under more realistic conditions. We perform multiple ablation studies to assess the sensitivity of 6DGS to different hyperparameters and settings. We quantify pose estimation results in terms of mean angular (MAE) and translation (MTE) errors (see Tab. 1 and Tab. 2) and measure the inference time." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.642, + 0.788, + 0.703 + ], + "angle": 0, + "content": "Implementation Details. 6DGS is implemented in PyTorch and the attention map was trained for 1.5K iterations (\\(\\sim\\)45mins) with an NVIDIA GeForce RTX 3090. We use the Adafactor optimizer [38] with weight decay of \\(10^{-3}\\). For speedup training, we uniformly sample 2000 3DGS ellipsoids at each iteration." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.725, + 0.334, + 0.739 + ], + "angle": 0, + "content": "5.2 Datasets" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.788, + 0.843 + ], + "angle": 0, + "content": "To demonstrate the applicability of 6DGS, we test on two datasets featuring real world challenges. **Tanks&Temples** [20] was created to evaluate 3D reconstruction methods with challenging real-world objects of varying sizes, acquired from human-like viewpoints and with difficult conditions (illumination, shadows, and reflections). We use the five scenes (Barn, Caterpillar, Family, Ignatius, Truck) and the train test splits given in [7,26]. The splits are object dependent, having" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.395, + 0.127 + ], + "angle": 0, + "content": "M. Bortolon et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.144, + 0.788, + 0.176 + ], + "angle": 0, + "content": "Table 2: Evaluation of 6DoF pose estimation on the Tanks&Temples [20] dataset. We show the same metrics and analysis as in Table 1" + }, + { + "type": "table", + "bbox": [ + 0.218, + 0.182, + 0.788, + 0.266 + ], + "angle": 0, + "content": "
Each cell reports MAE (°) ↓ / MTE (u) ↓; the first three method columns use a fixed pose prior (eval. protocol by [46]), the next three a random pose prior, and 6DGS uses no pose prior.

| Objects | iNeRF [46] | NeMo + VoGE [44] | Parallel iNeRF [25] | iNeRF [46] | NeMo + VoGE [44] | Parallel iNeRF [25] | 6DGS (Ours) |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Barn | 26.5 / 0.208 | 51.2 / 0.752 | 22.9 / 0.131 | 89.2 / 0.682 | 92.5 / 0.684 | 85.2 / 0.572 | 30.3 / 0.162 |
| Caterpillar | 42.9 / 0.166 | 52.6 / 0.516 | 25.2 / 0.138 | 89.3 / 2.559 | 90.5 / 2.559 | 86.8 / 0.843 | 14.5 / 0.027 |
| Family | 42.8 / 0.794 | 58.4 / 1.130 | 22.9 / 0.507 | 93.9 / 1.505 | 97.0 / 1.506 | 99.0 / 2.028 | 20.6 / 0.468 |
| Ignatius | 31.4 / 0.723 | 51.2 / 1.193 | 23.4 / 0.604 | 84.1 / 1.489 | 85.4 / 1.491 | 86.9 / 1.326 | 15.5 / 0.441 |
| Truck | 31.6 / 0.370 | 54.6 / 1.236 | 29.4 / 0.351 | 94.4 / 1.042 | 97.7 / 1.045 | 97.6 / 0.883 | 27.5 / 0.242 |
| Avg. | 35.0 / 0.452 | 53.6 / 0.965 | 24.7 / 0.346 | 90.2 / 1.455 | 92.6 / 1.457 | 91.1 / 1.130 | 21.7 / 0.268 |
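For reference, the two metrics reported in Tabs. 1 and 2 can be computed as in the sketch below; the exact angular-error convention is an assumption, since the text only names MAE and MTE and defines the unit \(u\).

```python
import numpy as np

def pose_errors(R_est, t_est, R_gt, t_gt, obj_size):
    """MAE (degrees) between estimated and ground-truth rotations, and
    MTE in units u, with 1u equal to the object's largest dimension."""
    cos = (np.trace(R_est.T @ R_gt) - 1.0) / 2.0   # geodesic rotation angle
    mae = np.degrees(np.arccos(np.clip(cos, -1.0, 1.0)))
    mte = np.linalg.norm(t_est - t_gt) / obj_size  # size-relative units
    return mae, mte
```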
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.288, + 0.789, + 0.365 + ], + "angle": 0, + "content": "on average \\(\\approx 247\\) training images \\((87\\%)\\) and \\(\\approx 35\\) testing images \\((12\\%)\\). Mip-NeRF \\(360^{\\circ}\\) consists of seven scenes: two outdoors and four indoors, with a structured scenario and background. We use the original train-test splits at a ratio of 1:8. Following [25], we resize all the objects to fit inside a unit box. The translation error is relative to the object size, defined as a unit." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.389, + 0.334, + 0.405 + ], + "angle": 0, + "content": "5.3 Analysis" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.416, + 0.794, + 0.688 + ], + "angle": 0, + "content": "Quantitative Analysis: Tab. 1 and Tab. 2 present the results obtained across both datasets. 6DGS consistently outperforms baseline methods across all datasets and pose initialization pipelines. Notably, 6DGS achieves lower error rates than the second-best results, especially under identical comparison conditions (i.e., random pose prior). Even when initialized from a fixed pose proximal to the known camera, 6DGS still excels over baselines in most scenes. Parallel iNeRF demonstrates improvement over iNeRF across all tested scenarios, consistent with its reported enhancements, but both methods' performance drops with random initialization. Likewise, \\(\\mathrm{NeMo + VoGE}\\) performs worst, especially with random pose prior due to the utilization of a smaller number of larger ellipsoids in their approach. In contrast, 6DGS leverages approximately 300,000 ellipsoids of varying sizes obtained via 3DGS, as opposed to their mesh-to-ellipsoid method, which utilizes only about 5,000 larger ellipsoids. This fundamental disparity in ellipsoid size and quantity is a crucial factor contributing to the performance difference. Additionally, 6DGS exhibits faster processing speeds, operating nearly in real-time at 15 frames per second (fps) compared to the 0.05fps of Parallel iNeRF and 0.16fps of iNeRF. Please refer to the supplementary material for the complete table on timings." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.689, + 0.789, + 0.841 + ], + "angle": 0, + "content": "Qualitative Analysis: Figure 4 illustrates qualitative findings revealing notable observations. Particularly, we notice that the estimated poses exhibit proximity to the object relative to ground truth, attributable to the quantization effect introduced by the Ellicell. The qualitative findings verify the quantitative outcomes, albeit occasional inconsistencies in results, such as in the Counter scene, with the analysis-by-synthesis approaches showcasing a total incoherent output in regards to the overall scene (notice how the estimated poses are completely off the target). Moreover, the performance of 6DGS demonstrates consistency across varied scenarios, encompassing single-object instances and indoor settings, despite substantial variations in the models utilized." 
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "image_caption", + "bbox": [ + 0.338, + 0.145, + 0.368, + 0.155 + ], + "angle": 0, + "content": "Truck" + }, + { + "type": "image", + "bbox": [ + 0.245, + 0.159, + 0.465, + 0.246 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.277, + 0.246, + 0.326, + 0.254 + ], + "angle": 0, + "content": "Target image" + }, + { + "type": "image_caption", + "bbox": [ + 0.374, + 0.246, + 0.431, + 0.254 + ], + "angle": 0, + "content": "Estimated NVS" + }, + { + "type": "image", + "bbox": [ + 0.256, + 0.254, + 0.351, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.378, + 0.255, + 0.45, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.341, + 0.306, + 0.384, + 0.315 + ], + "angle": 0, + "content": "Counter" + }, + { + "type": "image_caption", + "bbox": [ + 0.628, + 0.144, + 0.665, + 0.155 + ], + "angle": 0, + "content": "Family" + }, + { + "type": "image", + "bbox": [ + 0.54, + 0.163, + 0.7, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.571, + 0.247, + 0.62, + 0.256 + ], + "angle": 0, + "content": "Target image" + }, + { + "type": "image", + "bbox": [ + 0.547, + 0.256, + 0.643, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.667, + 0.247, + 0.724, + 0.255 + ], + "angle": 0, + "content": "Estimated NVS" + }, + { + "type": "image", + "bbox": [ + 0.651, + 0.255, + 0.744, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.619, + 0.306, + 0.656, + 0.315 + ], + "angle": 0, + "content": "Bonsai" + }, + { + "type": "image", + "bbox": [ + 0.255, + 0.318, + 0.475, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.288, + 0.404, + 0.335, + 0.411 + ], + "angle": 0, + "content": "Target image" + }, + { + "type": "image", + "bbox": [ + 0.263, + 0.412, + 0.358, + 0.461 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.383, + 0.404, + 0.439, + 0.412 + ], + "angle": 0, + "content": "Estimated NVS" + }, + { + "type": "image", + "bbox": [ + 0.364, + 0.412, + 0.459, + 0.46 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.541, + 0.319, + 0.737, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.564, + 0.404, + 0.61, + 0.412 + ], + "angle": 0, + "content": "Target image" + }, + { + "type": "image", + "bbox": [ + 0.538, + 0.412, + 0.634, + 0.461 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.659, + 0.404, + 0.714, + 0.412 + ], + "angle": 0, + "content": "Estimated NVS" + }, + { + "type": "image", + "bbox": [ + 0.639, + 0.412, + 0.734, + 0.461 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.228, + 0.478, + 0.256, + 0.501 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.258, + 0.478, + 0.282, + 0.501 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.284, + 0.478, + 0.305, + 0.5 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 
0.307, + 0.479, + 0.333, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.228, + 0.478, + 0.767, + 0.502 + ], + "angle": 0, + "content": "Image legend: color-coded estimated poses for iNeRF, Parallel iNeRF, and NeMo + VoGE, each w/ and w/o pose prior." + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.518, + 0.788, + 0.629 + ], + "angle": 0, + "content": "Fig. 4: The illustration presents qualitative results from the Tanks & Temples (upper row) and Mip-NeRF \(360^{\circ}\) (lower row) datasets. Each scene showcases the target images (bottom left) along with their corresponding Novel View Synthesis (NVS) outputs (bottom right), derived from the camera poses estimated by 6DGS (located on the top). Furthermore, the estimated camera poses from the comparative baselines are visualized, with distinct colors as indicated in the image legend. The NVS of each scene is rendered based on the provided 3DGS model. Please check the supplementary material for more qualitative results."
+ }, + { + "type": "title", + "bbox": [ + 0.216, + 0.65, + 0.398, + 0.663 + ], + "angle": 0, + "content": "5.4 Ablation studies" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.674, + 0.788, + 0.749 + ], + "angle": 0, + "content": "Our ablation studies involve the analysis of the number of rays selected for the pose estimation (Tab. 3a), the number of rays cast from each ellipsoid (Tab. 3b), as well as the feature size of the MLP channels (Tab. 3c). The supplementary material contains additional ablations that analyze 6DGS performance with low-quality 3DGS models." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.788, + 0.843 + ], + "angle": 0, + "content": "We find that the number of selected rays mainly affects the angular error, while the translation error remains relatively stable. Increasing the number of rays decreases the angular error but slightly increases the translation error, likely due to less confident rays contributing to the pose estimation. The optimal balance between translation and angular errors is achieved between 100 and 150 rays, with 100 being the best. The slight increase in error with more \( N_{top} \) rays is due to" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.395, + 0.127 + ], + "angle": 0, + "content": "M. Bortolon et al." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.145, + 0.788, + 0.187 + ], + "angle": 0, + "content": "Table 3: Ablation study on the number of rays selected for pose estimation, on the rays cast from each ellipsoid, and on the MLP channels using Mip-NeRF 360 [4]. The default configuration uses \(N_{top} = 100\) selected rays, 50 cast rays per ellipsoid, and 512 MLP channels." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.195, + 0.374, + 0.216 + ], + "angle": 0, + "content": "(a) Number of rays used for pose estimation." + }, + { + "type": "text", + "bbox": [ + 0.377, + 0.195, + 0.578, + 0.216 + ], + "angle": 0, + "content": "(b) Number of cast rays per ellipsoid." + }, + { + "type": "table", + "bbox": [ + 0.218, + 0.218, + 0.374, + 0.276 + ], + "angle": 0, + "content": "
| \( N_{top} \) | MAE (°) ↓ | MTE (u) ↓ | Time (s) |
| --- | --- | --- | --- |
| 20 | 29.0 | 0.0235 | 0.03 |
| 50 | 26.3 | 0.0227 | 0.04 |
| 100 | 24.3 | 0.0217 | 0.06 |
| 150 | 24.4 | 0.0219 | 0.09 |
| 200 | 24.5 | 0.0222 | 0.11 |
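The \(N_{top}\) selection being ablated here follows the test-time procedure of Sec. 4.5; a minimal sketch, assuming a greedy score-ordered pass enforces the one-ray-per-ellipsoid constraint:

```python
import numpy as np

def select_top_rays(scores, ellipsoid_ids, n_top=100):
    """Keep the n_top highest-scoring rays, at most one per ellipsoid."""
    chosen, used = [], set()
    for idx in np.argsort(scores)[::-1]:    # rays by descending score
        if ellipsoid_ids[idx] not in used:  # one ray per ellipsoid
            chosen.append(idx)
            used.add(ellipsoid_ids[idx])
        if len(chosen) == n_top:
            break
    return np.array(chosen)
```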
" + }, + { + "type": "table", + "bbox": [ + 0.377, + 0.217, + 0.579, + 0.276 + ], + "angle": 0, + "content": "
| # of cast rays | MAE (°) ↓ | MTE (u) ↓ | Time (s) |
| --- | --- | --- | --- |
| 20 | 29.0 | 0.0235 | 0.04 |
| 35 | 24.7 | 0.0220 | 0.04 |
| 50 | 24.3 | 0.0217 | 0.06 |
| 65 | 25.1 | 0.0218 | 0.09 |
| 80 | 25.2 | 0.0221 | 0.15 |
" + }, + { + "type": "text", + "bbox": [ + 0.594, + 0.195, + 0.764, + 0.207 + ], + "angle": 0, + "content": "(c) MLP channel feature size." + }, + { + "type": "table", + "bbox": [ + 0.584, + 0.217, + 0.776, + 0.255 + ], + "angle": 0, + "content": "
| MLP channels | MAE (°) ↓ | MTE (u) ↓ | Time (s) |
| --- | --- | --- | --- |
| 256 | 29.4 | 0.0273 | 0.04 |
| 512 | 24.3 | 0.0217 | 0.06 |
| 1024 | 30.1 | 0.0228 | 0.27 |
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.3, + 0.786, + 0.345 + ], + "angle": 0, + "content": "introducing rays not pointing precisely to the camera's optical center. Similar to what we observed in the qualitative examples, the noisy rays make the weighted Least Squares estimating the camera closer to the object than it actually is." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.346, + 0.788, + 0.542 + ], + "angle": 0, + "content": "Regarding the impact of the varying number of rays cast from the Ellicells, the angular error tends to remain relatively constant across different configurations. In contrast, the translation error decreases when 50 cast rays are used, and then increases again. This behavior is connected to network generalization capability. Increasing the number of rays allows the network to fit the training set better, but at test time, it makes the network more prone to noise and consequently selecting the wrong rays, thus increasing the error. We observe this generalization issue when increasing the MLP channels, see Tab. 3c, particularly given the limited and uneven distribution of training images (\\(\\approx\\) 150). Moreover, the processing time increases proportionally with the number of rays and the MLP channels; upon exceeding the default values for rays and feature size, a notable surge in processing time is observed, reaching approximately 10fps and 13fps, respectively." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.56, + 0.37, + 0.575 + ], + "angle": 0, + "content": "6 Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.592, + 0.788, + 0.742 + ], + "angle": 0, + "content": "In this study, we proposed a novel ray sampling by attention method for estimating 6DoF camera poses from a single image and a 3DGS scene model. Our analytical evaluation demonstrates its robustness and efficiency without requiring initialization, up to \\(22\\%\\) in accuracy and while being faster by a big margin, approx. \\(94\\mathrm{x}\\) faster. Furthermore, the proposed method formulates and utilizes a novel ray generation methodology in order to explore diverse camera pose hypotheses in accordance to an effective attention mechanism. Our method exhibits enhanced robustness across real-world datasets and holds promise for real-time deployment in robotics and other fields. Future research endeavors will focus on improving accuracy and extending applicability to diverse scenes and objects." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.743, + 0.787, + 0.788 + ], + "angle": 0, + "content": "Limitations. The main constraint of 6DGS is the need for retraining with each new scene. This could be mitigated with meta-learning, particularly when similar objects or scenes are under consideration." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.145, + 0.393, + 0.163 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.176, + 0.788, + 0.283 + ], + "angle": 0, + "content": "This work is part of the RePAIR project that has received funding from the European Union's Horizon 2020 research and innovation programme under grant agreement No. 964854. 
This work has also received funding from the European Union's Horizon Europe research and innovation programme under grant agreement No. 101092043, project AGILEHAND (Smart Grading, Handling and Packaging Solutions for Soft and Deformable Products in Agile and Reconfigurable Lines). We thank S. Fiorini for the discussion on the optimizers." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.304, + 0.324, + 0.319 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.334, + 0.788, + 0.361 + ], + "angle": 0, + "content": "1. Google maps nerf integration. https://blog.google/products/maps/sustainable-immersive-maps-announcements/, accessed: 2024-03-07" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.362, + 0.786, + 0.388 + ], + "angle": 0, + "content": "2. Akenine-Möller, T., Haines, E., Hoffman, N., et al.: Real-time rendering. AK Peters/CRC Press (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.389, + 0.787, + 0.429 + ], + "angle": 0, + "content": "3. Almkvist, G., Berndt, B.: Gauss, Landen, Ramanujan, the arithmetic-geometric mean, ellipses, \(\pi\), and the Ladies Diary. The American Mathematical Monthly 95(7), 585-608 (1988)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.43, + 0.787, + 0.457 + ], + "angle": 0, + "content": "4. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In: CVPR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.458, + 0.786, + 0.484 + ], + "angle": 0, + "content": "5. Beckers, B., Beckers, P.: Fast and accurate view factor generation. In: FICUP (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.485, + 0.786, + 0.525 + ], + "angle": 0, + "content": "6. Bortolon, M., Tsesmelis, T., James, S., Poiesi, F., Del Bue, A.: IFFNeRF: Initialisation free and fast 6DoF pose estimation from a single image and a NeRF model. In: ICRA (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.526, + 0.786, + 0.553 + ], + "angle": 0, + "content": "7. Chen, A., Xu, Z., Geiger, A., Yu, J., Su, H.: Tensorf: Tensorial radiance fields. In: ECCV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.553, + 0.786, + 0.594 + ], + "angle": 0, + "content": "8. Chen, S., Song, S., Zhao, J., Feng, T., Ye, C., Xiong, L., Li, D.: Robust dual quadric initialization for forward-translating camera movements. RAL 6(3), 4712-4719 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.595, + 0.786, + 0.621 + ], + "angle": 0, + "content": "9. Crocco, M., Rubino, C., Del Bue, A.: Structure from motion with objects. In: CVPR (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.622, + 0.786, + 0.649 + ], + "angle": 0, + "content": "10. Ding, M., Wang, Z., Sun, J., Shi, J., Luo, P.: Camnet: Coarse-to-fine retrieval for camera re-localization. In: ICCV (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.649, + 0.786, + 0.676 + ], + "angle": 0, + "content": "11. Gaudilliere, V., Simon, G., Berger, M.O.: Camera relocalization with ellipsoidal abstraction of objects. In: ISMAR (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.677, + 0.786, + 0.703 + ], + "angle": 0, + "content": "12. Gaudilliere, V., Simon, G., Berger, M.O.: Perspective-2-ellipsoid: Bridging the gap between object detections and 6-dof camera pose. RAL 5(4), 5189-5196 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.704, + 0.786, + 0.731 + ], + "angle": 0, + "content": "13. 
Gay, P., Rubino, C., Bansal, V., Del Bue, A.: Probabilistic structure from motion with objects (PSfMO). In: ICCV (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.732, + 0.786, + 0.758 + ], + "angle": 0, + "content": "14. Gay, P., James, S., Del Bue, A.: Visual graphs from motion (vgfm): Scene understanding with object geometry reasoning. In: ACCV (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.759, + 0.786, + 0.785 + ], + "angle": 0, + "content": "15. He, X., Sun, J., Wang, Y., Huang, D., Bao, H., Zhou, X.: Onepose++: Keypoint-free one-shot object pose estimation without cad models. In: NeurIPS (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.786, + 0.786, + 0.813 + ], + "angle": 0, + "content": "16. Hosseinzadeh, M., Latif, Y., Pham, T., Suenderhauf, N., Reid, I.: Structure aware slam using quadrics and planes. In: ACCV (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.814, + 0.786, + 0.84 + ], + "angle": 0, + "content": "17. Jacques, L., Masset, L., Kerschen, G.: Direction and surface sampling in ray tracing for spacecraft radiative heat transfer. Aerospace Science and Technology 47 (2015)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.334, + 0.788, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.395, + 0.127 + ], + "angle": 0, + "content": "M. Bortolon et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.148, + 0.785, + 0.175 + ], + "angle": 0, + "content": "18. Kerbl, B., Kopanas, G., Leimkühler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. TOG 42(4) (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.177, + 0.785, + 0.203 + ], + "angle": 0, + "content": "19. Kim, S., Min, J., Cho, M.: TransforMatcher: Match-to-match attention for semantic correspondence. In: CVPR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.204, + 0.785, + 0.231 + ], + "angle": 0, + "content": "20. Knapitsch, A., Park, J., Zhou, Q.Y., Koltun, V.: Tanks and temples: Benchmarking large-scale scene reconstruction. TOG 36(4) (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.232, + 0.785, + 0.258 + ], + "angle": 0, + "content": "21. Laidlow, T., Davison, A.J.: Simultaneous localisation and mapping with quadric surfaces. In: 3DV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.26, + 0.785, + 0.285 + ], + "angle": 0, + "content": "22. Lee, J., Kim, B., Cho, M.: Self-supervised equivariant learning for oriented keypoint detection. In: CVPR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.287, + 0.785, + 0.313 + ], + "angle": 0, + "content": "23. Lee, J., Kim, B., Kim, S., Cho, M.: Learning rotation-equivariant features for visual correspondence. In: CVPR (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.315, + 0.785, + 0.355 + ], + "angle": 0, + "content": "24. Liao, Z., Hu, Y., Zhang, J., Qi, X., Zhang, X., Wang, W.: So-slam: Semantic object slam with scale proportional and symmetrical texture constraints. RAL 7(2), 4008-4015 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.357, + 0.785, + 0.396 + ], + "angle": 0, + "content": "25. Lin, Y., Müller, T., Tremblay, J., Wen, B., Tyree, S., Evans, A., Vela, P.A., Birchfield, S.: Parallel inversion of neural radiance fields for robust pose estimation. 
In: ICRA (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.398, + 0.785, + 0.424 + ], + "angle": 0, + "content": "26. Liu, L., Gu, J., Lin, K.Z., Chua, T.S., Theobalt, C.: Neural sparse voxel fields. In: NeurIPS (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.426, + 0.753, + 0.438 + ], + "angle": 0, + "content": "27. Lowe, D.G.: Object recognition from local scale-invariant features. In: ICCV (1999)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.44, + 0.785, + 0.466 + ], + "angle": 0, + "content": "28. Luiten, J., Kopanas, G., Leibe, B., Ramanan, D.: Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. In: 3DV (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.468, + 0.785, + 0.493 + ], + "angle": 0, + "content": "29. Maggio, D., Mario, C., Carlone, L.: Verf: Runtime monitoring of pose estimation with neural radiance fields. In: ICCV (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.495, + 0.785, + 0.521 + ], + "angle": 0, + "content": "30. Malley, T.: A shading method for computer generated images. Master's thesis, Dept. of Computer Science, University of Utah (1988)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.523, + 0.785, + 0.577 + ], + "angle": 0, + "content": "31. Masset, L., Brüls, O., Kerschen, G.: Partition of the circle in cells of equal area and shape. Tech. rep., Structural Dynamics Research Group, Aerospace and Mechanical Engineering Department, University of Liège, Institut de Mécanique et Génie Civil (B52/3) (2011)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.578, + 0.785, + 0.591 + ], + "angle": 0, + "content": "32. Meng, Y., Zhou, B.: Ellipsoid slam with novel object initialization. In: CASE (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.592, + 0.785, + 0.632 + ], + "angle": 0, + "content": "33. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. In: ECCV (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.634, + 0.785, + 0.673 + ], + "angle": 0, + "content": "34. Moreau, A., Piasco, N., Bennehar, M., Tsishkou, D., Stanciulescu, B., de La Fortelle, A.: Crossfire: Camera relocalization on self-supervised features from an implicit representation. In: ICCV (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.675, + 0.785, + 0.742 + ], + "angle": 0, + "content": "35. Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., Assran, M., Ballas, N., Galuba, W., Howes, R., Huang, P.Y., Li, S.W., Misra, I., Rabbat, M., Sharma, V., Synnaeve, G., Xu, H., Jegou, H., Mairal, J., Labatut, P., Joulin, A., Bojanowski, P.: Dinov2: Learning robust visual features without supervision (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.744, + 0.785, + 0.77 + ], + "angle": 0, + "content": "36. Sarlin, P.E., DeTone, D., Malisiewicz, T., Rabinovich, A.: Superglue: Learning feature matching with graph neural networks. In: CVPR (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.772, + 0.785, + 0.812 + ], + "angle": 0, + "content": "37. Shan, M., Feng, Q., Jau, Y.Y., Atanasov, N.: Ellipsdf: joint object pose and shape optimization with a bi-level ellipsoid and signed distance function description. In: ICCV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.814, + 0.785, + 0.84 + ], + "angle": 0, + "content": "38. 
Shazeer, N., Stern, M.: Adafactor: Adaptive learning rates with sublinear memory cost. In: ICML (2018)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.148, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.148, + 0.786, + 0.175 + ], + "angle": 0, + "content": "39. Sinkhorn, R.: A Relationship Between Arbitrary Positive Matrices and Doubly Stochastic Matrices. The Annals of Mathematical Statistics 35(2), 876-879 (1964)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.177, + 0.787, + 0.217 + ], + "angle": 0, + "content": "40. Tancik, M., Srinivasan, P.P., Mildenhall, B., Fridovich-Keil, S., Raghavan, N., Singhal, U., Ramamoorthi, R., Barron, J.T., Ng, R.: Fourier features let networks learn high frequency functions in low dimensional domains. In: NeurIPS (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.218, + 0.786, + 0.245 + ], + "angle": 0, + "content": "41. Tombari, F., Salti, S., di Stefano, L.: Unique signatures of histograms for local surface description. In: ECCV (2010)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.246, + 0.786, + 0.273 + ], + "angle": 0, + "content": "42. Tsesmelis, T., Hasan, I., Cristani, M., Del Bue, A., Galasso, F.: Rgbd2lux: Dense light intensity estimation with an rgbd sensor. In: WACV (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.273, + 0.786, + 0.3 + ], + "angle": 0, + "content": "43. Wang, A., Kortylewski, A., Yuille, A.: Nemo: Neural mesh models of contrastive features for robust 3d pose estimation. In: ICLR (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.3, + 0.786, + 0.328 + ], + "angle": 0, + "content": "44. Wang, A., Wang, P., Sun, J., Kortylewski, A., Yuille, A.: Voge: a differentiable volume renderer using gaussian ellipsoids for analysis-by-synthesis. In: ICLR (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.328, + 0.786, + 0.356 + ], + "angle": 0, + "content": "45. Xie, T., Zong, Z., Qiu, Y., Li, X., Feng, Y., Yang, Y., Jiang, C.: Physgaussian: Physics-integrated 3d gaussians for generative dynamics. In: CVPR (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.356, + 0.786, + 0.384 + ], + "angle": 0, + "content": "46. Yen-Chen, L., Florence, P., Barron, J.T., Rodriguez, A., Isola, P., Lin, T.Y.: iNeRF: Inverting neural radiance fields for pose estimation. In: IROS (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.384, + 0.786, + 0.411 + ], + "angle": 0, + "content": "47. Zins, M., Simon, G., Berger, M.O.: Oa-slam: Leveraging objects for camera localization in visual slam. 
In: ISMAR (2022)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.148, + 0.787, + 0.411 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/76913771-7094-44e1-8b30-8ea4e2210b42_origin.pdf b/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/76913771-7094-44e1-8b30-8ea4e2210b42_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..92a2c6c68b9f4d09bf45b5ca3ef8662b87f978e3 --- /dev/null +++ b/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/76913771-7094-44e1-8b30-8ea4e2210b42_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19d0383471fe68311c8913091ec0d33ec7af317ed3627abfd82c749d4f22ff6d +size 15015655 diff --git a/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/full.md b/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/full.md new file mode 100644 index 0000000000000000000000000000000000000000..b6532a54801f410cd15ecc73428bf32d6efdbd6a --- /dev/null +++ b/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/full.md @@ -0,0 +1,396 @@ +# 6DGS: 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model

Matteo Bortolon $^{1,2,3}$ , Theodore Tsesmelis $^{1}$ , Stuart James $^{1,4}$ , Fabio Poiesi $^{2}$ , and Alessio Del Bue $^{1}$

$^{1}$ PAVIS, Fondazione Istituto Italiano di Tecnologia (IIT), Genoa, IT $^{2}$ TeV, Fondazione Bruno Kessler (FBK), Trento, IT
$^{3}$ Università di Trento, Trento, IT
$^{4}$ Durham University, Durham, UK

Abstract. We propose 6DGS to estimate the camera pose of a target RGB image given a 3D Gaussian Splatting (3DGS) model representing the scene. 6DGS avoids the iterative process typical of analysis-by-synthesis methods (e.g. iNeRF) that also require an initialization of the camera pose in order to converge. Instead, our method estimates a 6DoF pose by inverting the 3DGS rendering process. Starting from the object surface, we define a radiant Ellicell that uniformly generates rays departing from each of the ellipsoids that parameterize the 3DGS model. Each Ellicell ray is associated with the rendering parameters of each ellipsoid, which in turn are used to obtain the best bindings between the target image pixels and the cast rays. These pixel-ray bindings are then ranked to select the best scoring bundle of rays, whose intersection provides the camera center and, in turn, the camera rotation. The proposed solution obviates the necessity of an "a priori" pose for initialization, and it solves 6DoF pose estimation in closed form, without the need for iterations. Moreover, compared to the existing Novel View Synthesis (NVS) baselines for pose estimation, 6DGS can improve the overall average rotational accuracy by $12\%$ and translation accuracy by $22\%$ on real scenes, despite not requiring any initialization pose. At the same time, our method operates near real-time, reaching 15fps on consumer hardware.

# 1 Introduction

Neural and geometrical 3D representations for Novel View Synthesis (NVS) have recently surged in popularity [18,33], and they have been quickly integrated into daily applications, e.g. mapping services [1]. The change in 3D representation creates new challenges in how to solve classical problems, such as 6D pose estimation, and in how to leverage the implicit advantages of NVS [25,29,34,44,46]. 
+ +The method of iNeRF [46] pioneered 6D pose estimation using an NVS model by proposing an iterative analysis-by-synthesis, as illustrated in the left panel of Fig. [1]. Given a nearby pose initialization (iteration $\# 1$ ), the NVS model + +![](images/ffacea6ed92893f6bd426a80d4c0a448fb631637e9ed44f41c233b4e1d2efc9f.jpg) +Fig. 1: Our 6DGS method introduces a novel approach to 6DoF pose estimation, departing from conventional analysis-by-synthesis methodologies. Standard NeRF-based methods (left) employ an iterative process, rendering candidate poses and comparing them with the target image before updating the pose, which often results in slow performance and limited precision. In contrast, 6DGS (right) estimates the camera pose by selecting a bundle of rays projected from the ellipsoid surface (a radiant Ellicell) and learning an attention map to output ray/image pixel correspondences (based on DINOv2). The optimal bundle of rays should intersect the optical center of the camera and then are used to estimate the camera rotation in closed-form. Our 6GDS method offers significantly improved accuracy and speed, enabling the recovery of the pose within a one-shot estimate. + +is used to render the image related to the initial pose. Then iteratively, the rendered image is compared with the target image using a photometric loss, and the initial pose guess is updated so that the two views achieve the best image overlap at the final step (iteration $\# N$ ). The authors in iNeRF [46] use the popular NeRF [33] NVS model where backpropagation updates every new pose guess. This procedure leverages the remarkable NeRF capabilities in synthesizing realistic novel views, however, at the computational expense of synthesizing a newly rendered image at each iteration. This limitation restricts iNeRF to offline use while requiring a close initial pose estimate for a successful convergence. + +Recent works in 3D Gaussian Splatting (3DGS) [18,28,45] are an alternative to Neural NVs models, providing fast rendering speed through the use of explicit geometric primitives that do not require the optimization of a neural network. 3DGS represents a 3D scene as a set of ellipsoids paired with photometric information, such as color and opacity. The ellipsoids are first initialized using Structure from Motion (SfM), and then they are optimized to reduce the photometric error between the rasterized ellipsoids and a set of known images. During the rasterization stage, the 3DGS model is projected onto the image plane as ellipses and for each pixel the algorithm computes its photometric contribution. + +By leveraging the 3DGS model properties, we design a novel 6DoF pose estimation method (6DGS) that surpasses the limitations of NeRF-based iterative approaches. 6DGS does not require any pose initialization, and it estimates the camera translation and rotation without an iterating analysis-by-synthesis walk + +through. This is a key factor for achieving near real-time performance (15fps), also due to the quick rendering capabilities of 3DGS. The right panel of Fig. 1 presents the gist of our approach for 6DoF pose estimation. If we knew the camera pose, the first NVS step of 3DGS would be to project the ellipsoid centers onto the image plane. Practically, this is a ray casting through the camera's optical center. Our 6DGS attempts to invert this process and, by doing so, to estimate the camera pose. 
If the target image camera pose is unknown, and thus neither where the optical center is, we are unable to cast the single ray from each ellipsoid that passes through the correct target image pixels. For this reason, instead, we radiate uniformly distributed rays from each ellipsoid through the introduction of a novel casting procedure named Ellicell. Only one radiated ray per ellipsoid would be accurate, i.e., the one that renders the pixel photometrically by projecting the correct ellipse onto the target image plane. + +Now, the 6DGS problem is to select, given all the casted rays from the Ellicells, the correct bundle of rays that can generate most of the target image pixels with high confidence. This selection stage is addressed by binding pixels and rays through the learning of an attention map. Notice that this step is also unsupervised, as it leverages the known camera poses and images used to compute the 3DGS model to obtain the pixel and ray pairs used for training. After the bundle of rays is selected, the intersection of these rays identifies the camera center, which is solved using weighted Least Squares (wLS), with the weights being the scores from the previous selection stage. After the optical center is estimated, the optical axis can be used to obtain the camera rotation degrees of freedom from the rays bundle, thus solving the 6DoF pose. By design, 6DGS eliminates the need for an initial camera pose, which is one of the limitations of analysis-by-synthesis pose estimation methods [34,44,46], as well as the tendency to converge to local minima during the iteration procedure, especially if the initial pose is initialized far from the optimal position. + +We evaluate 6DGS on datasets featuring real-world objects and scenes, comparing against the current NVS state-of-the-art approaches such as iNeRF [46], Parallel iNeRF [25] and NeMO + VoGE [44]. Our experimental results show that 6DGS is competitive, especially if the initial pose is not provided "a priori". Finally, we achieve near real-time 6DoF pose estimation on consumer hardware, which is one rather challenging limitation in the practical application of NVS-based approaches for camera pose estimation. To summarize, 6DGS contributions are threefold: + +- Our approach for 6DoF camera pose estimation eliminates the need for an initial camera pose and iterations to converge, which is typically required in analysis-by-synthesis approaches; +- 6DGS employs a novel ray casting pipeline, i.e. Ellicell, and an attention-based mechanism that efficiently matches pixel-level image information with 3DGS ellipsoids: +- The proposed method is state-of-the-art in the NVS benchmarks for camera pose estimation both for accuracy and real-time performance. + +# 2 Related works + +We review relevant works on 6DoF camera pose estimation based on Neural Radiance Fields (NeRF) models, ellipsoid-based approaches, and correspondence matching methods that are related to key components of 6DGS. + +Pose estimation from neural radiance fields. iNeRF [46] pioneered NeRF-based 6D camera pose estimation, using iterative alignment of target and rendered images based on photometric error. However, iNeRF is prone to local minima in the optimization function, leading to recent developments like Parallel iNeRF [25], which employs parallel optimization of multiple candidate poses. 
While these approaches rely on NeRF-based models, $\mathrm{NeMo + VoGe}$ [43,44] have explored 6D camera pose estimation using object models based on volumetric Gaussian reconstruction kernels as geometric primitives. The rendering strategy (VoGE) differs from 3DGS as it is based on ray marching. Therefore, $\mathrm{NeMo + VoGe}$ iteratively aligns learned features from target and rendered images. Notably, $\mathrm{NeMo + VoGe}$ 's training requires multiple objects, in contrast to our method, which leverages a single object 3DGS model. Alternatively, CROSS-FIRE [34] addresses the local minima issue by integrating learned local features, which describes not only the visual content but also the 3D location of the scene in the NeRF model. Despite these advancements, analysis-by-synthesis approaches often struggle with inefficient pose updates due to the nature of the optimization refinement and the dependence on accurate initial pose priors. These factors can limit their real-world applicability. Recently, IFFNeRF [6] utilized a method that inverts the NeRF model to re-render an image to match a target one. However, unlike our approach, it does not consider the specificities of 3DGS, which include ellipsoid elongation and rotation, and their non-uniform distribution across the scene surface. + +Pose estimation from ellipsoids. Recovery of the camera pose from ellipsoids has been explored for both SfM [8,9,12-14,37] and SLAM [11,16,21,24,32,47] scenarios, where methods frequently recover the object's ellipsoid representation as well as the camera 6DoF. Such approaches typically solve linear systems to recover the pose, most commonly minimizing a loss of the projection to and from an object detection. However, this methodological framework often presents limitations when confronted with large numbers of ellipsoids, as they are more indicated for handling few large ellipsoids that model a single object occupancy, 3D position and orientation. + +Correspondences Matching. In traditional 6DoF image matching, feature-based approaches are used, which often rely on hand-crafted features, e.g., SIFT [27] or more recent deep approaches such as SuperGlue [36] and Transformer [19]. SuperGlue utilizes a Graph Neural Network (GNN) for feature attention and Sinkhorn [39] for matching, while LightGlue replaces the GNN with a lightweight transformer. Unlike these, Transformer [19] performs global match-to-match attention, allowing for accurate match localization. In addition, there is a body of work around feature equivariance [22,23] for improving the robustness of matching. However, these methods rely on the hypothesis that both feature sets exist in a homogeneous feature space, i.e. extracted from the image, + +while in 6DGS we have the specific problem to match pixel to rays emitted from the Ellicells. Therefore, we rely on the proposed attention model to handle these ray-to-pixel bindings. OnePose++ [15] instead adopts a multi-modal approach matching a point cloud with an image. Another proposed alternative is to regress directly the pose parameters, as in CamNet [10]. Nevertheless, these approaches require a large amount of training data ( $\approx$ 500 or more images), sometimes across multiple scenes and, like with CamNet, these need to be available also at inference time. 6DGS however, requires only $\approx$ 100 or less images, which are utilized only once during training. 
+ +# 3 Preliminaries + +We first review 3D Gaussian Splatting (3DGS) [18] to understand the underlying principles and provide the mathematical formalization of the model. 3DGS objective is to synthesize novel views of a scene by optimizing the position, the orientation and the color of a set of 3D Gaussians approximated as ellipsoids $\mathcal{Q} = \{\mathbf{Q}\}_{i=1}^{K}$ from a given set of input images $\mathcal{I} = \{\mathbf{I}\}_{i=1}^{J}$ and their corresponding camera projection matrices $\mathcal{P} = \{\mathbf{P}\}_{i=1}^{J} \in \mathbb{R}^{3 \times 4}$ . A point $\mathbf{d}$ for being on the surface of an ellipsoid must satisfy the equation $(\mathbf{d} - \mathbf{x}) \boldsymbol{\Sigma} (\mathbf{d} - \mathbf{x})^T = 1$ , where $\mathbf{x} \in \mathbb{R}^3$ is the ellipsoid center and $\boldsymbol{\Sigma} \in \mathbb{R}^{3 \times 3}$ its covariance matrix. We can further decompose the covariance of the ellipsoid $\boldsymbol{\Sigma}$ as: + +$$ +\boldsymbol {\Sigma} = \mathbf {R} \mathbf {U} \mathbf {U} ^ {T} \mathbf {R} ^ {T}, \tag {1} +$$ + +where $\mathbf{R} \in \mathbb{R}^{3 \times 3}$ is the ellipsoid rotation matrix and $\mathbf{U}^{3 \times 3}$ denotes the scaling matrix. The projection matrix $\mathbf{P} \in \mathbb{R}^{3 \times 4}$ allows the projection of the ellipsoid $\mathbf{Q}$ onto the image plane generating the corresponding ellipse representation: + +$$ +\check {\mathbf {y}} = \mathbf {P} \check {\mathbf {x}} ^ {T}, \check {\mathbf {E}} = \mathbf {P} \boldsymbol {\Sigma} \mathbf {P} ^ {T}, \tag {2} +$$ + +where $\mathbf{y} \in \mathbb{R}^2$ and $\breve{\mathbf{y}} \in \mathbb{R}^3$ correspond to the Euclidean and homogeneous coordinates of the ellipse center point. The homogeneous coordinates $\breve{\mathbf{y}}$ originate from the projection of the corresponding ellipsoid center in the homogeneous coordinates $\breve{\mathbf{x}} \in \mathbb{R}^4$ . The matrix $\breve{\mathbf{E}} \in \mathbb{R}^{3 \times 3}$ is the ellipse covariance in homogeneous space. The covariance of the ellipse $\mathbf{E} \in \mathbb{R}^{2 \times 2}$ , is derived by selecting only the first two rows and columns of $\breve{\mathbf{E}}$ and dividing by the last element on $\breve{\mathbf{E}}$ diagonal. + +The splatted ellipses, denoted as $\mathcal{B} = \{\langle \mathbf{y},\mathbf{E}\rangle \}_{i = 1}^{K}$ , generate a pixel color with the rendering function $\phi$ using rasterization techniques [2,18]. The function $\phi$ acts independently on every single pixel of the image $\mathbf{p}$ . The pixel value depends on the neighboring projected ellipses, taking into account their center points' distances to the pixel coordinates, as well as their orientations and scales. $\phi$ assumes that the ellipses are ordered based on the depth, so they should be sorted. Formally, $\phi$ can be expressed as: + +$$ +\phi (\mathcal {B}, \mathbf {p}) = \sum_ {i = 1} ^ {K} \rho_ {i} \alpha_ {i} e ^ {- \tau (\mathcal {B} _ {i}, \mathbf {p})} \gamma (i, \mathbf {p}), \tag {3} +$$ + +![](images/354bcc52e0ce0e23e9c07e9fe0d38c9d9a85d1f2bf00ba1e98cb62059859d6d2.jpg) +Fig. 2: The figure illustrates the pipeline of our 6DGS methodology. The image is encoded using a visual backbone (a). Concurrently, rays are uniformly projected from the center of the 3DGS ellipsoids (b), and their corresponding color is estimated. Subsequently, an attention map mechanism is employed to compare the encoded ray and image features (c). 
Following this comparison, the $N_{top}$ matches are selected via attenuation, and the camera location is estimated (d) as the solution of a weighted Least Squares problem, resulting in a distinct 6DoF pose for the image. + +where $\rho$ and $\alpha$ represent the color and opacity attributes associated with the ellipsoid, which are inherited by the splatted ellipse. Similar to the volumetric rendering equation in NeRF, $\gamma$ denotes the inverse of the volume density accumulated up to the $i^{th}$ ellipse on pixel $\mathbf{p}$ and is defined as: + +$$ +\gamma (i, \mathbf {p}) = \prod_ {j = 1} ^ {i - 1} \left(1 - \alpha_ {j} e ^ {- \tau \left(\mathcal {B} _ {i}, \mathbf {p}\right)}\right). \tag {4} +$$ + +The purpose of $\tau$ is to determine the light absorption by the ellipse when represented as a 2D Gaussian. Light absorption depends on the orientation and distance between the ellipse center, denoted as $\mathbf{y}$ , and the pixel location, expressed as $\mathbf{d} = \mathbf{p} - \mathbf{y}$ . Consequently, we can formally define $\tau$ as: + +$$ +\tau (\mathbf {B}, \mathbf {p}) = \frac {1}{2} \left(\mathbf {1} _ {2} \mathbf {d} ^ {T} \mathbf {E} \mathbf {d} \mathbf {1} _ {2} ^ {T}\right), \tag {5} +$$ + +where $\mathbf{1}_2\in \mathbb{R}^2$ denotes a vector filled with ones. Following the processing of all pixels onto the image plane, the rendering function $\phi$ generates an image $\hat{\mathbf{l}}\in \mathbb{R}_{+}^{H\times W}$ , where $W$ and $H$ represent the width and height of the image. + +# 4 Our approach + +# 4.1 Overview + +6DGS estimates the camera pose $\hat{\mathbf{P}}\in \mathbb{R}^{3\times 4}$ , given a target image $\mathbf{I}_t$ and a set of ellipsoids $\mathcal{Q}$ from a pre-computed 3DGS model (Fig. 2). To solve for the + +![](images/1aca5089144560451b0ae3c65e1a0197a3a157fd7bffac263bc1dc30722b0845.jpg) +(a) Ellicell components + +![](images/860e5082e321dc375ebb1202e6057a515c6539940ee47e212935a4d91ba13347.jpg) +(b) 3D Ellicell grid + +![](images/084ec0cf5a3cc2c050e24018fe7040088ba4d4b8c8a6480d161b864c025bc86a.jpg) +(c) 3D radiant Ellicell +Fig. 3: The illustration depicts the three primary stages involved in the radiant Ellicell generation. Firstly, (a) delineates the formulation of components required to compute the geometric information for each cell. Secondly, (b) shows the resulting Ellicell grid positioned on the surface of the ellipsoid along with their respective center points. Finally, (c) demonstrates the generation of rays originating from the center point of the ellipsoid going through the Ellicell center. + +camera pose, we propose a casting method from the ellipsoid's surface, called Ellicell, that divides it in equal area cells (Sec. 4.2). The ellipsoids cast a set of $N$ rays, denoted as $\mathcal{V} = \{\langle \mathbf{v}_o,\mathbf{v}_d,\mathbf{v}_c\rangle \}_{i = 1}^N$ , one for each of the generated cell (Fig. 3c). Each ray is identified by $i$ the origin $\mathbf{v}_o\in \mathbb{R}^3$ , $ii$ the center point of each ellipsoid, $iii$ the direction $\mathbf{v}_d\in \mathbb{R}^3$ originating from the ellipsoid center to the cell center and through the space, and $iv$ the color information $\mathbf{v}_c\in \mathbb{R}^3$ as RGB values. We synthesize the rays' color using the 3DGS rendering function $\phi$ (Eq. 3). A subset of these rays, depending on the view perspective, may intersect the camera's optical center. For binding the rays to the image pixels we compute the target image pixels features $\psi (\mathbf{I}_t)$ (Fig. 
2a) and the rays features $\psi (\mathcal{V})$ (Fig. 2b). These features are used to identify the intersecting rays by using an attention map $\mathcal{A}$ (Fig. 2c), see Sec. 4.4. The higher the attention value for a ray-pixel pair is, the more likely the intersection on the image plane is a valid one. Lastly, we determine $\hat{\mathbf{P}}_t$ (Fig. 2d) by computing the intersection point of rays using the weighted Least Squares algorithm (Sec. 4.5). + +# 4.2 Radiant Ellicell + +We create rays spanning in every direction allowing 6DGS to recover the camera pose. We introduce the concept of radiant Ellicell for generating rays that uniformly emanate from the ellipsoid surface, as illustrated in Fig. 3. Ellicell generation is deterministic [5,31] and achieves higher precision with fewer rays [17,42] compared to other sampling methods like Monte-Carlo [30]. + +First, we compute the area of each Ellicell. This is achieved by calculating the ellipsoid surface area, using a computationally efficient approach, namely Ramanujan approximation 3: + +$$ +h = 4 \pi \left(\frac {(a b) ^ {1 . 6} + (a c) ^ {1 . 6} + (b c) ^ {1 . 6}}{3}\right) ^ {\frac {1}{1 . 6}}, \tag {6} +$$ + +where $a, b, c = \text{diag}(\mathcal{S})$ are the ellipsoid axis scales. Each Ellicell cell's target area equals $\mu = h / G$ , with $G$ being the number of cells dividing each ellipsoid. + +Approximating each cell as a square with side $z = \sqrt{\mu}$ we slice the ellipsoids along the major axis into ribbons, each as wide as $z$ (Fig. 3a). The extremity of each ribbon is called a ring. The total number of rings is $e = \lfloor \kappa(a, b) / (2z) \rfloor \in \mathbb{N}$ , where $\kappa(a, b)$ computes the ring perimeter. Ignoring ellipsoid's rotation, we compute the ring perimeter by treating them as 2D ellipses, thus defining $\kappa(a, b)$ as: + +$$ +\kappa (a, b) = \pi \left((a + b) + \frac {3 (a - b) ^ {2}}{1 0 (a + b) + \sqrt {a ^ {2} + 1 4 a b + b ^ {2}}}\right). \tag {7} +$$ + +Given the total number of rings $e$ it is possible to compute the ribbon's centerline geometric parameters. In particular, we compute the scale parameter of each ribbon as: + +$$ +\varrho (n, \Delta r, a, b) = \sqrt {1 - \frac {(0 . 5 \Delta r + n \Delta r - a) ^ {2}}{b ^ {2}}} \tag {8} +$$ + +where $\varDelta r=a/e$ is the distance between two consecutive rings. This equation derives from the manipulation of the standard ellipse equation. While ribbon size $z$ should be equal to $\varDelta r$ , these two values will likely differ due to the need for the number of rings being a natural number. Eq. 8 is also used to compute the other ribbon scaling parameter by replacing $b$ with $c$ . $\varrho$ is then used to compute the number of cells inside each ribbon as: + +$$ +\xi (n, e, a, b, c) = \left\lfloor \frac {\kappa (\varrho (n , e , a , b) , \varrho (n , e , a , c))}{z} \right\rfloor , \tag {9} +$$ + +where $\xi$ is the number of cells inside the ring. We compute the center of each cell, equally spaced along the ribbon's centerline, by sampling $\xi$ points along it. This is challenging as the perimeter distance does not linearly correlate with the $x$ and $y$ variations. However, we can solve this by using a statistical method. Knowing a distribution's Cumulative Distribution Function (CDF) allows us to sample uniformly between 0 and 1 and then use the CDF inverse to map the sample to the distribution space. 
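For intuition, a minimal numerical sketch of this inverse-CDF placement of $\xi$ equally spaced points along an ellipse centerline is given below. The semi-axis names `r`, `w`, the helper function, and the trapezoid-rule integration are illustrative assumptions, not the released implementation; the closed-form quantities actually used by 6DGS are formalized in Eqs. 10-13 that follow.

```python
import numpy as np

def equally_spaced_on_ellipse(r, w, xi, n_steps=4096):
    """Place xi points at (approximately) equal arc-length spacing along an
    ellipse with semi-axes r and w by numerically inverting the arc-length CDF.
    Illustrative sketch only; Eqs. 10-13 give the formal version used by 6DGS."""
    theta = np.linspace(0.0, 2.0 * np.pi, n_steps)
    # arc-length element ds/dtheta for the parametrization (r*cos(theta), w*sin(theta))
    ds_dtheta = np.sqrt((r * np.sin(theta)) ** 2 + (w * np.cos(theta)) ** 2)
    # cumulative arc length (trapezoid rule), normalized into a CDF on [0, 1]
    arc = np.concatenate(([0.0], np.cumsum(0.5 * (ds_dtheta[1:] + ds_dtheta[:-1]) * np.diff(theta))))
    cdf = arc / arc[-1]
    # uniform samples in arc-length space mapped back to angles via the inverse CDF
    u = (np.arange(xi) + 0.5) / xi
    theta_prime = np.interp(u, cdf, theta)
    return np.stack((r * np.cos(theta_prime), w * np.sin(theta_prime)), axis=-1)
```

The returned 2D coordinates correspond, up to notation, to the in-plane components of Eq. 13, while the third component $-a + n\Delta r$ places the ring along the ellipsoid's major axis.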
This approach applies to our case, where samples are distributed as follows: + +$$ +d s ^ {2} = d x ^ {2} + d y ^ {2}, \tag {10} +$$ + +and, by taking its inverse, we can retrieve the coordinates of each cell center. To simplify the equations, we define $r = \varrho(n, e, a, b)$ and $w = \varrho(n, e, a, c)$ to indicate the scale of the ellipse under consideration. Then we express Eq. [10] in polar coordinates to simplify the differentiation: + +$$ +\frac {d s}{d \theta} = \sqrt {r ^ {2} \sin^ {2} \theta + w ^ {2} \cos^ {2} \theta}, \tag {11} +$$ + +then, we can express the set of points on the perimeter of the ribbon centerline as an angular position in the polar coordinate system as: + +$$ +\theta^ {\prime} = \left(\frac {d s}{d \theta}\right) ^ {- 1} \left(g \cdot \frac {1}{\xi (n , e , a , b , c)}\right), \tag {12} +$$ + +with $g$ being the cell identifier. Given $\theta'$ we can use it inside the ellipse equation in polar coordinates to obtain the 3D position of each cell center: + +$$ +\mathbf {u} = \left( \begin{array}{c} w \cos \left(\theta^ {\prime}\right) \\ g \sin \left(\theta^ {\prime}\right) \\ - a + n \Delta r \end{array} \right). \tag {13} +$$ + +# 4.3 Ray generation + +Once we have divided each ellipsoid of the 3DGS model into equidistant cells, we cast the rays originating from the center point of the ellipsoid i.e. $\mathbf{v}_o = \mathbf{x}$ and oriented towards the Ellicell center $\mathbf{v}_d = \mathbf{u} - \mathbf{x}$ . We reduce the number of potential rays cast from each ellipsoid by considering only the rays oriented in the same hemisphere as the estimated surface normal of the ellipsoid. We obtain the surface normals by treating the ellipsoid centroids as a point cloud, and the surface normal is estimated using the nearby points [41]. + +Finally, each ray has also been associated with the color information $\mathbf{v}_c$ , which we compute through the same pixel-level approach of 3DGS (Eq. 5). We note that the application of the volumetric rendering function of Eq. 5 produces a single pixel for each ray. The generated rays represent a collection of potential hypotheses, meaning that a subset of them will intersect the target image $\mathbf{I}_t$ . + +# 4.4 Binding by attenuation of rays to image + +Given all the cast rays $\mathbf{v}$ , we identify a subset of $\mathbf{v}$ correlating with the target image $\mathbf{I}_t$ . A learned attention map $\mathcal{A}$ assigns scores $\hat{\mathbf{s}}$ based on the highest correlation to image pixels; higher similarity results in higher scores. Based on scores $\hat{\mathbf{s}}$ , we select the top candidate's rays $(N_{top})$ that present maximal association and use them to recover the pose $(\hat{\mathbf{P}}_t)$ . + +To select rays with similar appearance and position, we use a Multi-Layer Perceptron (MLP) defined as $\mathbf{V} = \psi (\mathbf{v})$ , where $\mathbf{V}\in \mathbb{R}^{N\times C}$ with $C$ being the feature size and $N$ the overall number of rays. The MLP input is enriched by incorporating Positional Encoding that maps the data in the Fourier domain [40] to better distinguish between similar data. + +We generate features from $\mathbf{I}_t$ using DINOv2 [35] as a pre-trained backbone feature extractor. This results in a set of features $\mathbf{F}_t \in \mathbb{R}^{M \times C}$ , where $M = W \times H$ . Both the image and ray features sets are processed by a single attention module $\mathcal{A}(\mathbf{V}_f, \mathbf{F}_t) \in \mathbb{R}^{M \times N}$ producing a set of scores. 
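A minimal single-head sketch of this scoring step is shown below; the tensor shapes and the scaled dot-product form are assumptions for illustration rather than the exact released architecture, and the query/key roles together with the row-sum reduction are spelled out in the next paragraph.

```python
import torch

def ray_correlation_scores(img_feats: torch.Tensor, ray_feats: torch.Tensor) -> torch.Tensor:
    """Single-head dot-product attention between image pixels and rays,
    reduced to one correlation score per ray (a sketch of A(V, F_t) and s_hat).
    img_feats: (M, C) per-pixel features, ray_feats: (N, C) encoded ray features."""
    C = img_feats.shape[-1]
    logits = img_feats @ ray_feats.t() / C ** 0.5   # (M, N): pixel-to-ray similarities
    attn = torch.softmax(logits, dim=-1)            # each pixel distributes attention over the rays
    s_hat = attn.sum(dim=0)                         # sum along the pixel dimension -> (N,) per-ray scores
    return s_hat
```

In this sketch, the $N_{top}$ highest-scoring rays would then be retained (e.g. `torch.topk(s_hat, 100).indices`) for the pose solve of Sec. 4.5.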
Inside the attention module the ray features, $\mathbf{V}$ , are used as queries and the image features, $\mathbf{F}_t$ , as a key. We optimize the attention map by summing along the rows and converting it into a per-ray correlation score as follows $\hat{\mathbf{s}} = \sum_{i=1}^{M} \mathcal{A}_i$ . The higher the score value given by $\hat{\mathbf{s}}$ , the better the association between the rays and image pixels. At test-time we select the $N_{top}$ rays with the highest ranking scores. + +Because a ray and an image pixel should be associated with each other based on the distance between the camera origin and its projection onto the corresponding ray, we supervise the predicted scores $\hat{\mathbf{s}}$ using the same images used to + +estimate the 3DGS model at training time. We compute the projection of the point on the line as $l = \max((\mathbf{O} - \mathbf{v}_o)\mathbf{v}_d, 0)$ , where $\mathbf{O}$ is the camera position, $\mathbf{v}_o$ the generated ray origin and $\mathbf{v}_d$ the corresponding direction. Rays are infinite only in one direction, so we restrict $l \in \mathbb{R}^+$ using the max operator. Then, we can compute the distance between the camera origin and its projection on the ray as $\mathbf{h} = \| (\mathbf{v}_o + l\mathbf{v}_d) - \mathbf{O}\|_2$ . The value $\mathbf{h}$ can span from 0 to $+\infty$ , with 0 indicating a ray that passes through the camera's optical center. We map distances to the attention map score using: + +$$ +\delta = 1 - \tanh \left(\frac {\mathbf {h}}{\lambda}\right), \mathbf {s} = \delta \frac {M}{\sum \delta}, \tag {14} +$$ + +where $\lambda$ regulates the number of rays to assign to a specific camera. Lastly, the softmax inside the attention map computation requires we normalize the ground truth scores. We use the $L2$ loss to minimize the difference between the predicted $\hat{\mathbf{s}}$ and the computed ground truth $\mathbf{s}$ scores as: + +$$ +\mathcal {L} = \frac {1}{M N} \sum_ {i = 1} ^ {M} \sum_ {j = 1} ^ {N} \| \hat {\mathbf {s}} _ {i, j} - \mathbf {s} _ {i, j} \| _ {2}, \tag {15} +$$ + +where $M, N$ are the size of the attention map $\mathcal{A}$ . During each training iteration, we predict an image and a pose utilized for estimating the 3DGS model. + +# 4.5 Test-time pose estimation + +During the test phase, the predicted scores $\hat{\mathbf{s}}$ are used to select the top $N_{top}$ rays, identified as the utmost relevant, and constrained to choose at most one ray per ellipsoid. Note that only a small set of rays is sufficient to estimate the camera pose. However, based on an ablation study we set $N_{top} = 100$ , see Tab. 3a. + +The camera position is found at the intersection of selected rays, solved as a weighted Least Squares problem. Since 3D lines usually do not intersect at a single point due to discretization noise introduced by the Ellicell, we minimize the sum of squared perpendicular distances instead. + +For the selected ray $\mathbf{v}_j$ with $f = 1\ldots N_{top}$ , the error is given by the square of the distance from the camera position to predict $\hat{\mathbf{O}}$ to its projection on $\mathbf{v}_j$ : + +$$ +\sum_ {f = 1} ^ {N _ {t o p}} \left(\left(\hat {\mathbf {O}} - \mathbf {v} _ {o, f}\right) ^ {T} \left(\hat {\mathbf {O}} - \mathbf {v} _ {o, f}\right) - \left(\left(\hat {\mathbf {O}} - \mathbf {v} _ {o, f}\right) ^ {T} \mathbf {v} _ {d, f}\right) ^ {2}\right), \tag {16} +$$ + +where $\mathbf{v}_{o,f}$ indicating the origin of the $f$ -th ray and $\mathbf{v}_{d,f}$ the respective direction. 
To minimize Eq. [16] we differentiate it with respect to $\hat{\mathbf{O}}$ , resulting in + +$$ +\hat {\mathbf {O}} = \sum_ {f = 1} ^ {N _ {t o p}} \hat {\mathbf {s}} _ {f} \left(\mathbb {I} - \mathbf {v} _ {d, f} \mathbf {v} _ {d, f} ^ {T}\right) \mathbf {v} _ {o, f}, \tag {17} +$$ + +where $\mathbb{I}$ is the identity matrix and $\hat{\mathbf{s}}_f$ are the predicted ray scores. This expression can be solved as a weighted system of linear equations. + +Table 1: Evaluation of 6DoF pose estimation on the Mip-NeRF $360^{\circ}$ dataset. We report results in terms of Mean Angular Error (MAE) and Mean Translation Error (MTE) in terms of degrees and units, $u$ , respectively. Where $1u$ is equal to the object's largest dimension. For both metrics lower is better. Best-performing results are highlighted in bold and green, while second best values are highlighted in orange. + +
Each cell reports MAE (°) ↓ / MTE (u) ↓; the fixed pose prior columns follow the evaluation protocol of [46].

| Scene | iNeRF [46] (fixed prior) | NeMo+VoGE [44] (fixed prior) | Parallel iNeRF [25] (fixed prior) | iNeRF [46] (random prior) | NeMo+VoGE [44] (random prior) | Parallel iNeRF [25] (random prior) | 6DGS (Ours, no prior) |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Bicycle | 39.5 / 0.116 | 43.8 / 0.015 | 35.9 / 0.116 | 76.6 / 0.217 | 111.8 / 0.038 | 44.4 / 0.150 | 12.1 / 0.010 |
| Bonsai | 51.3 / 0.228 | 52.5 / 0.036 | 41.1 / 0.223 | 96.7 / 0.385 | 98.9 / 0.073 | 58.2 / 0.298 | 10.5 / 0.038 |
| Counter | 40.7 / 0.324 | 45.6 / 0.072 | 24.7 / 0.212 | 70.3 / 0.487 | 98.1 / 0.139 | 42.1 / 0.435 | 19.6 / 0.043 |
| Garden | 31.0 / 0.121 | 31.8 / 0.026 | 18.2 / 0.090 | 72.8 / 0.210 | 89.2 / 0.038 | 60.0 / 0.144 | 37.8 / 0.015 |
| Kitchen | 38.2 / 0.113 | 41.6 / 0.042 | 37.3 / 0.109 | 100.2 / 0.266 | 122.2 / 0.082 | 65.0 / 0.193 | 23.2 / 0.018 |
| Room | 38.8 / 0.274 | 44.9 / 0.045 | 30.7 / 0.257 | 91.6 / 0.444 | 110.0 / 0.010 | 63.5 / 0.271 | 38.3 / 0.019 |
| Stump | 21.4 / 0.030 | 26.3 / 0.016 | 14.8 / 0.016 | 86.9 / 0.035 | 96.3 / 0.025 | 72.6 / 0.033 | 28.3 / 0.009 |
| Avg. | 37.3 / 0.172 | 40.9 / 0.036 | 28.9 / 0.146 | 85.0 / 0.292 | 103.8 / 0.058 | 58.0 / 0.218 | 24.3 / 0.022 |
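Returning to the pose solver of Sec. 4.5: one standard way to realize the weighted least-squares intersection behind Eqs. 16-17 is to accumulate and solve the weighted normal equations, as sketched below. The array names are illustrative and not taken from the authors' code.

```python
import numpy as np

def camera_center_from_rays(origins, dirs, weights):
    """Weighted least-squares point closest to a bundle of rays (cf. Eqs. 16-17).
    origins: (F, 3) ray origins v_o, dirs: (F, 3) unit ray directions v_d,
    weights: (F,) predicted scores s_hat of the selected N_top rays."""
    A = np.zeros((3, 3))
    b = np.zeros(3)
    for o, d, w in zip(origins, dirs, weights):
        P = np.eye(3) - np.outer(d, d)    # projector onto the plane orthogonal to the ray
        A += w * P                        # accumulate the weighted normal equations
        b += w * P @ o
    return np.linalg.solve(A, b)          # estimated optical center O_hat
```

Whatever the number of selected rays, the final solve is only a 3x3 linear system, so its cost is negligible next to feature extraction.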
+ +# 5 Results + +# 5.1 Experimental setup + +We evaluate 6DGS and compare with other analysis-by-synthesis methods for 6D pose estimation, including iNeRF [46], Parallel iNeRF [25], and NeMo+VoGE [43, 44]. We reproduce the results using their published code. We follow iNeRF's evaluation protocol and test on two real-world datasets: Tanks & Temples [20] and Mip-NeRF $360^{\circ}$ [4]. For each dataset, we use the predefined training-test splits and evaluate them with two pose initialization pipelines: $i)$ the original iNeRF initialization, where the starting pose is sampled uniformly between $[-40^{\circ}, +40^{\circ}]$ degrees of errors and $[-0.1, +0.1]$ units of translation error from the ground-truth target pose; $ii)$ by randomly choosing an initialization pose from the ones used to create the 3DGS mode. Although analysis-by-synthesis methods were tested with a prior, in reality it is rarely available, so we present a second scenario to assess them under more realistic conditions. We perform multiple ablation studies to assess the sensitivity of 6DGS to different hyperparameters and settings. We quantify pose estimation results in terms of mean angular (MAE) and translation (MTE) errors (see Tab. 1 and Tab. 2) and measure the inference time. + +Implementation Details. 6DGS is implemented in PyTorch and the attention map was trained for 1.5K iterations ( $\sim$ 45mins) with an NVIDIA GeForce RTX 3090. We use the Adafactor optimizer [38] with weight decay of $10^{-3}$ . For speedup training, we uniformly sample 2000 3DGS ellipsoids at each iteration. + +# 5.2 Datasets + +To demonstrate the applicability of 6DGS, we test on two datasets featuring real world challenges. **Tanks&Temples** [20] was created to evaluate 3D reconstruction methods with challenging real-world objects of varying sizes, acquired from human-like viewpoints and with difficult conditions (illumination, shadows, and reflections). We use the five scenes (Barn, Caterpillar, Family, Ignatius, Truck) and the train test splits given in [7,26]. The splits are object dependent, having + +Table 2: Evaluation of 6DoF pose estimation on the Tanks&Temples [20] dataset. We show the same metrics and analysis as in Table 1 + +
Each cell reports MAE (°) ↓ / MTE (u) ↓; the fixed pose prior columns follow the evaluation protocol of [46].

| Object | iNeRF [46] (fixed prior) | NeMo+VoGE [44] (fixed prior) | Parallel iNeRF [25] (fixed prior) | iNeRF [46] (random prior) | NeMo+VoGE [44] (random prior) | Parallel iNeRF [25] (random prior) | 6DGS (Ours, no prior) |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Barn | 26.5 / 0.208 | 51.2 / 0.752 | 22.9 / 0.131 | 89.2 / 0.682 | 92.5 / 0.684 | 85.2 / 0.572 | 30.3 / 0.162 |
| Caterpillar | 42.9 / 0.166 | 52.6 / 0.516 | 25.2 / 0.138 | 89.3 / 2.559 | 90.5 / 2.559 | 86.8 / 0.843 | 14.5 / 0.027 |
| Family | 42.8 / 0.794 | 58.4 / 1.130 | 22.9 / 0.507 | 93.9 / 1.505 | 97.0 / 1.506 | 99.0 / 2.028 | 20.6 / 0.468 |
| Ignatius | 31.4 / 0.723 | 51.2 / 1.193 | 23.4 / 0.604 | 84.1 / 1.489 | 85.4 / 1.491 | 86.9 / 1.326 | 15.5 / 0.441 |
| Truck | 31.6 / 0.370 | 54.6 / 1.236 | 29.4 / 0.351 | 94.4 / 1.042 | 97.7 / 1.045 | 97.6 / 0.883 | 27.5 / 0.242 |
| Avg. | 35.0 / 0.452 | 53.6 / 0.965 | 24.7 / 0.346 | 90.2 / 1.455 | 92.6 / 1.457 | 91.1 / 1.130 | 21.7 / 0.268 |
+ +on average $\approx 247$ training images $(87\%)$ and $\approx 35$ testing images $(12\%)$ . Mip-NeRF $360^{\circ}$ consists of seven scenes: two outdoors and four indoors, with a structured scenario and background. We use the original train-test splits at a ratio of 1:8. Following [25], we resize all the objects to fit inside a unit box. The translation error is relative to the object size, defined as a unit. + +# 5.3 Analysis + +Quantitative Analysis: Tab. 1 and Tab. 2 present the results obtained across both datasets. 6DGS consistently outperforms baseline methods across all datasets and pose initialization pipelines. Notably, 6DGS achieves lower error rates than the second-best results, especially under identical comparison conditions (i.e., random pose prior). Even when initialized from a fixed pose proximal to the known camera, 6DGS still excels over baselines in most scenes. Parallel iNeRF demonstrates improvement over iNeRF across all tested scenarios, consistent with its reported enhancements, but both methods' performance drops with random initialization. Likewise, $\mathrm{NeMo + VoGE}$ performs worst, especially with random pose prior due to the utilization of a smaller number of larger ellipsoids in their approach. In contrast, 6DGS leverages approximately 300,000 ellipsoids of varying sizes obtained via 3DGS, as opposed to their mesh-to-ellipsoid method, which utilizes only about 5,000 larger ellipsoids. This fundamental disparity in ellipsoid size and quantity is a crucial factor contributing to the performance difference. Additionally, 6DGS exhibits faster processing speeds, operating nearly in real-time at 15 frames per second (fps) compared to the 0.05fps of Parallel iNeRF and 0.16fps of iNeRF. Please refer to the supplementary material for the complete table on timings. + +Qualitative Analysis: Figure 4 illustrates qualitative findings revealing notable observations. Particularly, we notice that the estimated poses exhibit proximity to the object relative to ground truth, attributable to the quantization effect introduced by the Ellicell. The qualitative findings verify the quantitative outcomes, albeit occasional inconsistencies in results, such as in the Counter scene, with the analysis-by-synthesis approaches showcasing a total incoherent output in regards to the overall scene (notice how the estimated poses are completely off the target). Moreover, the performance of 6DGS demonstrates consistency across varied scenarios, encompassing single-object instances and indoor settings, despite substantial variations in the models utilized. 
+ +![](images/13310f7eee6dc8d4b1bad5467d85fbff55008497deac449b82571e3f8d9c51fc.jpg) +Truck +Target image +Estimated NVS + +![](images/51049ae0156eb17b5a00d841750630afb800339bf63ae6bc11b1be9008aac152.jpg) + +![](images/50c6d3e729bc8ed1f50636857cc5361d711f4de464eb51e8131541dcb970d7be.jpg) + +![](images/13ec49e8e182d08ce184f3eb64387be68a5212b64deeb6419bb5644a5f663d81.jpg) +Family + +![](images/8136735097001b97815e0b8ac4f39707f6c490ca7d8360f39d3bc9ce3830fb80.jpg) +Target image + +![](images/39a8ed92b2fc622381ef0a80afe613e0ea5c6533e3aab3c4ed7082cfc9b5b8dc.jpg) +Estimated NVS + +![](images/fa23308f2b98af0b8ced5f9ed8a2a95eff6d216ad1fa09fce59f9ae9dc0d656d.jpg) +Counter +Target image +Estimated NVS + +![](images/6aa65dd389f7df608f16281eef0d4cbd6234809eb7a7951d334bc68520db6483.jpg) + +![](images/5ad4d8add8c2d8b4ef68e8887e708974801af9e44633d130bd164eb0352c42de.jpg) + +![](images/7f6a3a23ad870c0197c6411415fd8c0ee0ad55ad8f3ff31ce2f8da7438b3feb1.jpg) +Bonsai +Target image +Estimated NVS + +![](images/1b3c82462a897deaaf06b210b8778693ea46106f3d06a8016dea5ec849bcf72e.jpg) + +![](images/3b6f0ae544df771fa6bce565993ea8a003cb284359d4d4a6f5d242b24f5b96e1.jpg) + +![](images/eb872e69fea2e95491e7b9198300b6a6d3e301447ee3395d18f5f38325cace1a.jpg) + +![](images/24fbd16aa5751e943e9bf8d21cbc5cc7c7e4da4bc1633952df7338ed9afd945b.jpg) + +![](images/bb4ff5db10655d3bca5c6394c0149a06a86f7717f4a651ba84bf97217aaa147b.jpg) + +![](images/13f2b9b34dde64712afdf43686a02f00c7b42e4b5d1505576377925d6a815d18.jpg) + +![](images/2a0ec10c3ac048e38a42f3cdcdde3c5204ba0759ecaa3a936196eb720fa70631.jpg) + +![](images/27d37ab08d225afa573cdde677b4bc371d3ddeb06b7051c8ba454cb107eb115e.jpg) +# + +![](images/990a949f1333feeb91c3d837ff02c130b4db1c053ae6931abf01bb8a60de7e1e.jpg) +prior + +![](images/a8b5579e01770265e4875d125da47fedd52bf5131356aa4e4a100ec8bfc444e3.jpg) +D + +![](images/ba9a5a8d015e2c3432c3eec472695b604bebef4c913c3b8907ae242acc6dd5a6.jpg) +Parallel +jNeRFw/ +north w + +![](images/d221bc754f3503497ac01adac637d1455865d72b47b42046e69e6fef60bf1071.jpg) +Parallel +prior + +![](images/2f3721800e1b80d25f6bc516aa30fb91044e9784d09b1702ab2dd418f2c32c8b.jpg) +NeRF w/c +NeM +VoGE w/ prior +Fig. 4: The illustration presents qualitative results from Tanks & Temple (upper row) and Mip-NeRF $360^{\circ}$ (lower row) datasets. Each scene showcases the target images (bottom left) along with their corresponding Novel View Synthesis (NVS) outputs (bottom right), derived from the camera poses estimated by 6DGS (located on the top). Furthermore, the estimated camera poses from the comparative baselines are visualized, with distinct colors as indicated in the image legend. The NVS of each scene is rendered based on the provided 3DGS model. Please check the supplementary material for more qualitative results. + +![](images/b0336dcb57af9db42df841c33466de04eba655c2efda0676255b98d3d490cc4e.jpg) +NeMo + +VoGE w/o +prior + +# 5.4 Ablation studies + +Our ablation studies involve the analysis of the number of rays selected for the pose estimation (Tab. 3a), the number of rays that we cast from a Ellicell (Tab. 3b) as well as the different feature size on the MLP channels (Tab. 3c). The supplementary material contains additional ablations that analyze 6DGS performance with low-quality 3DGS models. + +We find that the number of selected rays mainly affects the angular error, while the translation error remains relatively stable. 
Increasing the number of rays decreases the angular error but slightly increases the translation error, likely due to less confident rays contributing to the pose estimation. The optimal balance between translation and angular errors is achieved between 100 to 150 rays, with 100 being the best. The slight increase in error with more $N_{top}$ rays is due to + +Table 3: Ablation study on the number of rays selected for pose estimation, on the rays cast from each ellipsoid and on the MLP channels using Mip-NeRF 360 [4]. Underline indicates the default values used. + +(a) Number of rays used for pose estimation. + +(b) Number of cast rays per ellipsoid. + +
| $N_{top}$ | MAE (°) ↓ | MTE (u) ↓ | Time (s) |
| --- | --- | --- | --- |
| 20 | 29.0 | 0.0235 | 0.03 |
| 50 | 26.3 | 0.0227 | 0.04 |
| 100 (default) | 24.3 | 0.0217 | 0.06 |
| 150 | 24.4 | 0.0219 | 0.9 |
| 200 | 24.5 | 0.0222 | 0.11 |
+ +
| # of cast rays | MAE (°) ↓ | MTE (u) ↓ | Time (s) |
| --- | --- | --- | --- |
| 20 | 29.0 | 0.0235 | 0.04 |
| 35 | 24.7 | 0.0220 | 0.04 |
| 50 (default) | 24.3 | 0.0217 | 0.06 |
| 65 | 25.1 | 0.0218 | 0.09 |
| 80 | 25.2 | 0.0221 | 0.15 |
+ +(c) MLP channel feature size. + +
| MLP channels | MAE (°) ↓ | MTE (u) ↓ | Time (s) |
| --- | --- | --- | --- |
| 256 | 29.4 | 0.0273 | 0.04 |
| 512 (default) | 24.3 | 0.0217 | 0.06 |
| 1024 | 30.1 | 0.0228 | 0.27 |
+ +introducing rays not pointing precisely to the camera's optical center. Similar to what we observed in the qualitative examples, the noisy rays make the weighted Least Squares estimating the camera closer to the object than it actually is. + +Regarding the impact of the varying number of rays cast from the Ellicells, the angular error tends to remain relatively constant across different configurations. In contrast, the translation error decreases when 50 cast rays are used, and then increases again. This behavior is connected to network generalization capability. Increasing the number of rays allows the network to fit the training set better, but at test time, it makes the network more prone to noise and consequently selecting the wrong rays, thus increasing the error. We observe this generalization issue when increasing the MLP channels, see Tab. 3c, particularly given the limited and uneven distribution of training images ( $\approx$ 150). Moreover, the processing time increases proportionally with the number of rays and the MLP channels; upon exceeding the default values for rays and feature size, a notable surge in processing time is observed, reaching approximately 10fps and 13fps, respectively. + +# 6 Conclusions + +In this study, we proposed a novel ray sampling by attention method for estimating 6DoF camera poses from a single image and a 3DGS scene model. Our analytical evaluation demonstrates its robustness and efficiency without requiring initialization, up to $22\%$ in accuracy and while being faster by a big margin, approx. $94\mathrm{x}$ faster. Furthermore, the proposed method formulates and utilizes a novel ray generation methodology in order to explore diverse camera pose hypotheses in accordance to an effective attention mechanism. Our method exhibits enhanced robustness across real-world datasets and holds promise for real-time deployment in robotics and other fields. Future research endeavors will focus on improving accuracy and extending applicability to diverse scenes and objects. + +Limitations. The main constraint of 6DGS is the need for retraining with each new scene. This could be mitigated with meta-learning, particularly when similar objects or scenes are under consideration. + +# Acknowledgments + +This work is part of the RePAIR project that has received funding from the European Union's Horizon 2020 research and innovation programme under grant agreement No. 964854. This work has also received funding from the European Union's Horizon Europe research and innovation programme under grant agreement No. 101092043, project AGILEHAND (Smart Grading, Handling and Packaging Solutions for Soft and Deformable Products in Agile and Reconfigurable Lines). We thank S. Fiorini for the discussion on the optimizers. + +# References + +1. Google maps nerf integration. https://blog.google/products/maps/sustainable-immersive-maps-announcements/, accessed: 2024-03-07 +2. Akenine-Mo, T., Haines, E., Hoffman, N., et al.: Real-time rendering. AK Pe- ters/CRC Press (2018) +3. Almkvist, G., Berndt, B.: Gauss, landen, ramanujan, the arithmetic-geometric mean, ellipses, $\pi$ , and the ladies diary. The American Mathematical Monthly 95(7), 585-608 (1988) +4. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In: CVPR (2022) +5. Beckers, B., Beckers, P.: Fast and accurate view factor generation. In: FICUP (2016) +6. 
Bortolon, M., Tsesmelis, T., James, S., Poiesi, F., Del Bue, A.: IFFNeRF: Initialization free and fast 6 DoF pose estimation from a single image and a NeRF model. In: ICRA (2024)
7. Chen, A., Xu, Z., Geiger, A., Yu, J., Su, H.: Tensorf: Tensorial radiance fields. In: ECCV (2022)
8. Chen, S., Song, S., Zhao, J., Feng, T., Ye, C., Xiong, L., Li, D.: Robust dual quadric initialization for forward-translating camera movements. RAL 6(3), 4712-4719 (2021)
9. Crocco, M., Rubino, C., Del Bue, A.: Structure from motion with objects. In: CVPR (2016)
10. Ding, M., Wang, Z., Sun, J., Shi, J., Luo, P.: Camnet: Coarse-to-fine retrieval for camera re-localization. In: ICCV (2019)
11. Gaudilliere, V., Simon, G., Berger, M.O.: Camera relocalization with ellipsoidal abstraction of objects. In: ISMAR (2019)
12. Gaudilliere, V., Simon, G., Berger, M.O.: Perspective-2-ellipsoid: Bridging the gap between object detections and 6-dof camera pose. RAL 5(4), 5189-5196 (2020)
13. Gay, P., Rubino, C., Bansal, V., Del Bue, A.: Probabilistic structure from motion with objects (psfmo). In: ICCV
14. Gay, P., Stuart, J., Del Bue, A.: Visual graphs from motion (vgfm): Scene understanding with object geometry reasoning. In: ACCV (2019)
15. He, X., Sun, J., Wang, Y., Huang, D., Bao, H., Zhou, X.: Onepose++: Keypoint-free one-shot object pose estimation without cad models. In: NeurIPS (2022)
16. Hosseinzadeh, M., Latif, Y., Pham, T., Suenderhauf, N., Reid, I.: Structure aware slam using quadrics and planes. In: ACCV (2019)
17. Jacques, L., Masset, L., Kerschen, G.: Direction and surface sampling in ray tracing for spacecraft radiative heat transfer. Aerospace Science and Technology 47 (2015)
18. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. TCG 42(4) (2023)
19. Kim, S., Min, J., Cho, M.: Transformers: Match-to-match attention for semantic correspondence. In: CVPR (2022)
20. Knapitsch, A., Park, J., Zhou, Q.Y., Koltun, V.: Tanks and temples: Benchmarking large-scale scene reconstruction. TCG 36(4) (2017)
21. Laidlow, T., Davison, A.J.: Simultaneous localisation and mapping with quadric surfaces. In: 3DV (2022)
22. Lee, J., Kim, B., Cho, M.: Self-supervised equivariant learning for oriented keypoint detection. In: CVPR (2022)
23. Lee, J., Kim, B., Kim, S., Cho, M.: Learning rotation-equivariant features for visual correspondence. In: CVPR
24. Liao, Z., Hu, Y., Zhang, J., Qi, X., Zhang, X., Wang, W.: So-slam: Semantic object slam with scale proportional and symmetrical texture constraints. RAL 7(2), 4008-4015 (2022)
25. Lin, Y., Müller, T., Tremblay, J., Wen, B., Tyree, S., Evans, A., Vela, P.A., Birchfield, S.: Parallel inversion of neural radiance fields for robust pose estimation. In: ICRA (2023)
26. Liu, L., Gu, J., Lin, K.Z., Chua, T.S., Theobalt, C.: Neural sparse voxel fields. In: NeurIPS (2020)
27. Lowe, D.G.: Object recognition from local scale-invariant features. In: ICCV
28. Luiten, J., Kopanas, G., Leibe, B., Ramanan, D.: Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. In: 3DV (2024)
29. Maggio, D., Mario, C., Carlone, L.: Verf: Runtime monitoring of pose estimation with neural radiance fields. In: ICCV (2023)
30. Malley, T.: A shading method for computer generated images. Master's thesis, Dept. of Computer Science, University of Utah (1988)
31. Masset, L., Brüls, O., Kerschen, G.: Partition of the circle in cells of equal area and shape. Tech.
rep., Structural Dynamics Research Group, Aerospace and Mechanical Engineering Department, University of Liege, 'Institut de Mecanique et G 'enie Civil (B52/3) (2011) +32. Meng, Y., Zhou, B.: Ellipsoid slam with novel object initialization. In: CASE (2022) +33. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. In: ECCV (2020) +34. Moreau, A., Piasco, N., Bennehar, M., Tsishkou, D., Stanciulescu, B., de La Fortelle, A.: Crossfire: Camera relocalization on self-supervised features from an implicit representation. In: ICCV (2023) +35. Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., Assran, M., Ballas, N., Galuba, W., Howes, R., Huang, P.Y., Li, S.W., Misra, I., Rabbat, M., Sharma, V., Synnaeve, G., Xu, H., Jegou, H., Mairal, J., Labatut, P., Joulin, A., Bojanowski, P.: Dinov2: Learning robust visual features without supervision (2023) +36. Sarlin, P.E., DeTone, D., Malisiewicz, T., Rabinovich, A.: Superglue: Learning feature matching with graph neural networks. In: CVPR (2020) +37. Shan, M., Feng, Q., Jau, Y.Y., Atanasov, N.: Ellipsdf: joint object pose and shape optimization with a bi-level ellipsoid and signed distance function description. In: ICCV (2021) +38. Shazeer, N., Stern, M.: Adafactor: Adaptive learning rates with sublinear memory cost. In: ICML (2018) + +39. Sinkhorn, R.: A Relationship Between Arbitrary Positive Matrices and Doubly Stochastic Matrices. The Annals of Mathematical Statistics 35(2), 876-879 (1964) +40. Tancik, M., Srinivasan, P.P., Mildenhall, B., Fridovich-Keil, S., Raghavan, N., Singhal, U., Ramamoorthi, R., Barron, J.T., Ng, R.: Fourier features let networks learn high frequency functions in low dimensional domains. In: NeurIPS (2020) +41. Tombari, F., Salti, S., di Stefano, L.: Unique signatures of histograms for local surface description. In: ECCV (2010) +42. Tsesmelis, T., Hasan, I., Cristani, M., Bue, A.D., Galasso, F.: Rgbd2lux: Dense light intensity estimation with an rgbd sensor. In: WACV (2018) +43. Wang, A., Kortylewski, A., Yuille, A.: Nemo: Neural mesh models of contrastive features for robust 3d pose estimation. In: ICLR (2020) +44. Wang, A., Wang, P., Sun, J., Kortylewski, A., Yuille, A.: Voge: a differentiable volume renderer using gaussian ellipsoids for analysis-by-synthesis. In: ICLR (2022) +45. Xie, T., Zong, Z., Qiu, Y., Li, X., Feng, Y., Yang, Y., Jiang, C.: Physgaussian: Physics-integrated 3d gaussians for generative dynamics. In: CVPR (2024) +46. Yen-Chen, L., Florence, P., Barron, J.T., Rodriguez, A., Isola, P., Lin, T.Y.: iNeRF: Inverting neural radiance fields for pose estimation. In: IROS (2021) +47. Zins, M., Simon, G., Berger, M.O.: Oa-slam: Leveraging objects for camera localization in visual slam. 
In: ISMAR (2022) \ No newline at end of file diff --git a/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/images.zip b/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..99b2bdecd040c2347285e1661a0870e08f17f084 --- /dev/null +++ b/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b580c456618648b6fa446aeefe11c27b6d1283bccbce8d31536707d11d9b251 +size 477655 diff --git a/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/layout.json b/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..4d503ab20c1b0aabd6dea68a03de8ad94f081992 --- /dev/null +++ b/2024/6DGS_ 6D Pose Estimation from a Single Image and a 3D Gaussian Splatting Model/layout.json @@ -0,0 +1,12561 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 157, + 111, + 457, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 111, + 457, + 148 + ], + "spans": [ + { + "bbox": [ + 157, + 111, + 457, + 148 + ], + "type": "text", + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3D Gaussian Splitting Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 164, + 167, + 450, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 167, + 450, + 195 + ], + "spans": [ + { + "bbox": [ + 164, + 167, + 450, + 195 + ], + "type": "text", + "content": "Bortolon Matteo" + }, + { + "bbox": [ + 164, + 167, + 450, + 195 + ], + "type": "inline_equation", + "content": "^{1,2,3}" + }, + { + "bbox": [ + 164, + 167, + 450, + 195 + ], + "type": "text", + "content": ", Theodore Tsesmelis" + }, + { + "bbox": [ + 164, + 167, + 450, + 195 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 164, + 167, + 450, + 195 + ], + "type": "text", + "content": ", Stuart James" + }, + { + "bbox": [ + 164, + 167, + 450, + 195 + ], + "type": "inline_equation", + "content": "^{1,4}" + }, + { + "bbox": [ + 164, + 167, + 450, + 195 + ], + "type": "text", + "content": ", Fabio Poiesi" + }, + { + "bbox": [ + 164, + 167, + 450, + 195 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 164, + 167, + 450, + 195 + ], + "type": "text", + "content": ", and Alessio Del Bue" + }, + { + "bbox": [ + 164, + 167, + 450, + 195 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 165, + 201, + 449, + 246 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 165, + 201, + 449, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 201, + 449, + 223 + ], + "spans": [ + { + "bbox": [ + 165, + 201, + 449, + 223 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 165, + 201, + 449, + 223 + ], + "type": "text", + "content": " PAVIS, Fondazione Istituto Italiano di Tecnologia (IIT), Genoa, IT " + }, + { + "bbox": [ + 165, + 201, + 449, + 223 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 165, + 201, + 449, + 223 + ], + "type": "text", + "content": " TeV, Fondazione Bruno Kessler (FBK), Trento, IT" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 237, + 224, + 376, + 235 + ], + "type": "text", + "angle": 0, 
+ "lines": [ + { + "bbox": [ + 237, + 224, + 376, + 235 + ], + "spans": [ + { + "bbox": [ + 237, + 224, + 376, + 235 + ], + "type": "text", + "content": "3 Università di Trento, Trento, IT" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 234, + 235, + 380, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 234, + 235, + 380, + 246 + ], + "spans": [ + { + "bbox": [ + 234, + 235, + 380, + 246 + ], + "type": "text", + "content": "4 Durham University, Durham, UK" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 160, + 275, + 455, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 275, + 455, + 495 + ], + "spans": [ + { + "bbox": [ + 160, + 275, + 455, + 495 + ], + "type": "text", + "content": "Abstract. We propose 6DGS to estimate the camera pose of a target RGB image given a 3D Gaussian Splatting (3DGS) model representing the scene. 6DGS avoids the iterative process typical of analysis-by-synthesis methods (e.g. iNeRF) that also require an initialization of the camera pose in order to converge. Instead, our method estimates a 6DoF pose by inverting the 3DGS rendering process. Starting from the object surface, we define a radiant Ellicell that uniformly generates rays departing from each ellipsoid that parameterize the 3DGS model. Each Ellicell ray is associated with the rendering parameters of each ellipsoid, which in turn is used to obtain the best bindings between the target image pixels and the cast rays. These pixel-ray bindings are then ranked to select the best scoring bundle of rays, which their intersection provides the camera center and, in turn, the camera rotation. The proposed solution obviates the necessity of an \"a priori\" pose for initialization, and it solves 6DoF pose estimation in closed form, without the need for iterations. Moreover, compared to the existing Novel View Synthesis (NVS) baselines for pose estimation, 6DGS can improve the overall average rotational accuracy by " + }, + { + "bbox": [ + 160, + 275, + 455, + 495 + ], + "type": "inline_equation", + "content": "12\\%" + }, + { + "bbox": [ + 160, + 275, + 455, + 495 + ], + "type": "text", + "content": " and translation accuracy by " + }, + { + "bbox": [ + 160, + 275, + 455, + 495 + ], + "type": "inline_equation", + "content": "22\\%" + }, + { + "bbox": [ + 160, + 275, + 455, + 495 + ], + "type": "text", + "content": " on real scenes, despite not requiring any initialization pose. At the same time, our method operates near real-time, reaching 15fps on consumer hardware." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 514, + 230, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 514, + 230, + 528 + ], + "spans": [ + { + "bbox": [ + 132, + 514, + 230, + 528 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 539, + 482, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 539, + 482, + 600 + ], + "spans": [ + { + "bbox": [ + 130, + 539, + 482, + 600 + ], + "type": "text", + "content": "Neural and geometrical 3D representations for Novel View Synthesis (NVS) have recently surged in popularity [18,33], and they have been quickly integrated into daily applications, e.g. mapping services [1]. The change in 3D representation creates new challenges on how to solve classical problems, such as 6D pose estimation, and on how to leverage NVS implicit advantages [25,29,34,44,46]." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 600, + 482, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 600, + 482, + 638 + ], + "spans": [ + { + "bbox": [ + 130, + 600, + 482, + 638 + ], + "type": "text", + "content": "The method of iNeRF [46] pioneered 6D pose estimation using an NVS model by proposing an iterative analysis-by-synthesis, as illustrated in the left panel of Fig. [1]. Given a nearby pose initialization (iteration " + }, + { + "bbox": [ + 130, + 600, + 482, + 638 + ], + "type": "inline_equation", + "content": "\\# 1" + }, + { + "bbox": [ + 130, + 600, + 482, + 638 + ], + "type": "text", + "content": "), the NVS model" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 141, + 643, + 369, + 654 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 643, + 369, + 654 + ], + "spans": [ + { + "bbox": [ + 141, + 643, + 369, + 654 + ], + "type": "text", + "content": "Project page: https://mbortolon97.github.io/6dns/" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 141, + 654, + 315, + 665 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 654, + 315, + 665 + ], + "spans": [ + { + "bbox": [ + 141, + 654, + 315, + 665 + ], + "type": "text", + "content": "Corresponding author: mbortolon@fbk.eu" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 116, + 481, + 248 + ], + "blocks": [ + { + "bbox": [ + 133, + 116, + 481, + 248 + ], + "lines": [ + { + "bbox": [ + 133, + 116, + 481, + 248 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 481, + 248 + ], + "type": "image", + "image_path": "ffacea6ed92893f6bd426a80d4c0a448fb631637e9ed44f41c233b4e1d2efc9f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 258, + 482, + 378 + ], + "lines": [ + { + "bbox": [ + 130, + 258, + 482, + 378 + ], + "spans": [ + { + "bbox": [ + 130, + 258, + 482, + 378 + ], + "type": "text", + "content": "Fig. 1: Our 6DGS method introduces a novel approach to 6DoF pose estimation, departing from conventional analysis-by-synthesis methodologies. Standard NeRF-based methods (left) employ an iterative process, rendering candidate poses and comparing them with the target image before updating the pose, which often results in slow performance and limited precision. In contrast, 6DGS (right) estimates the camera pose by selecting a bundle of rays projected from the ellipsoid surface (a radiant Ellicell) and learning an attention map to output ray/image pixel correspondences (based on DINOv2). The optimal bundle of rays should intersect the optical center of the camera and then are used to estimate the camera rotation in closed-form. Our 6GDS method offers significantly improved accuracy and speed, enabling the recovery of the pose within a one-shot estimate." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "spans": [ + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "text", + "content": "is used to render the image related to the initial pose. 
Then iteratively, the rendered image is compared with the target image using a photometric loss, and the initial pose guess is updated so that the two views achieve the best image overlap at the final step (iteration " + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "inline_equation", + "content": "\\# N" + }, + { + "bbox": [ + 130, + 398, + 482, + 506 + ], + "type": "text", + "content": "). The authors in iNeRF [46] use the popular NeRF [33] NVS model where backpropagation updates every new pose guess. This procedure leverages the remarkable NeRF capabilities in synthesizing realistic novel views, however, at the computational expense of synthesizing a newly rendered image at each iteration. This limitation restricts iNeRF to offline use while requiring a close initial pose estimate for a successful convergence." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 507, + 482, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 507, + 482, + 616 + ], + "spans": [ + { + "bbox": [ + 130, + 507, + 482, + 616 + ], + "type": "text", + "content": "Recent works in 3D Gaussian Splatting (3DGS) [18,28,45] are an alternative to Neural NVs models, providing fast rendering speed through the use of explicit geometric primitives that do not require the optimization of a neural network. 3DGS represents a 3D scene as a set of ellipsoids paired with photometric information, such as color and opacity. The ellipsoids are first initialized using Structure from Motion (SfM), and then they are optimized to reduce the photometric error between the rasterized ellipsoids and a set of known images. During the rasterization stage, the 3DGS model is projected onto the image plane as ellipses and for each pixel the algorithm computes its photometric contribution." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 617, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 617, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 617, + 482, + 665 + ], + "type": "text", + "content": "By leveraging the 3DGS model properties, we design a novel 6DoF pose estimation method (6DGS) that surpasses the limitations of NeRF-based iterative approaches. 6DGS does not require any pose initialization, and it estimates the camera translation and rotation without an iterating analysis-by-synthesis walk" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "type": "text", + "content": "M. Bortolon et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 272 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 272 + ], + "type": "text", + "content": "through. This is a key factor for achieving near real-time performance (15fps), also due to the quick rendering capabilities of 3DGS. The right panel of Fig. 
1 presents the gist of our approach for 6DoF pose estimation. If we knew the camera pose, the first NVS step of 3DGS would be to project the ellipsoid centers onto the image plane. Practically, this is a ray casting through the camera's optical center. Our 6DGS attempts to invert this process and, by doing so, to estimate the camera pose. If the target image camera pose is unknown, and thus neither where the optical center is, we are unable to cast the single ray from each ellipsoid that passes through the correct target image pixels. For this reason, instead, we radiate uniformly distributed rays from each ellipsoid through the introduction of a novel casting procedure named Ellicell. Only one radiated ray per ellipsoid would be accurate, i.e., the one that renders the pixel photometrically by projecting the correct ellipse onto the target image plane." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 274, + 482, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 274, + 482, + 453 + ], + "spans": [ + { + "bbox": [ + 130, + 274, + 482, + 453 + ], + "type": "text", + "content": "Now, the 6DGS problem is to select, given all the casted rays from the Ellicells, the correct bundle of rays that can generate most of the target image pixels with high confidence. This selection stage is addressed by binding pixels and rays through the learning of an attention map. Notice that this step is also unsupervised, as it leverages the known camera poses and images used to compute the 3DGS model to obtain the pixel and ray pairs used for training. After the bundle of rays is selected, the intersection of these rays identifies the camera center, which is solved using weighted Least Squares (wLS), with the weights being the scores from the previous selection stage. After the optical center is estimated, the optical axis can be used to obtain the camera rotation degrees of freedom from the rays bundle, thus solving the 6DoF pose. By design, 6DGS eliminates the need for an initial camera pose, which is one of the limitations of analysis-by-synthesis pose estimation methods [34,44,46], as well as the tendency to converge to local minima during the iteration procedure, especially if the initial pose is initialized far from the optimal position." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 455, + 482, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 455, + 482, + 549 + ], + "spans": [ + { + "bbox": [ + 130, + 455, + 482, + 549 + ], + "type": "text", + "content": "We evaluate 6DGS on datasets featuring real-world objects and scenes, comparing against the current NVS state-of-the-art approaches such as iNeRF [46], Parallel iNeRF [25] and NeMO + VoGE [44]. Our experimental results show that 6DGS is competitive, especially if the initial pose is not provided \"a priori\". Finally, we achieve near real-time 6DoF pose estimation on consumer hardware, which is one rather challenging limitation in the practical application of NVS-based approaches for camera pose estimation. 
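The overview above describes selecting a scored bundle of rays whose common intersection gives the camera center, solved with weighted Least Squares (wLS), after which the rotation follows. Below is a minimal sketch of that wLS intersection step, assuming each ray is given by an origin, a direction and a confidence score; the array names (`ray_origins`, `ray_dirs`, `weights`) are illustrative, not the paper's API.

```python
import numpy as np

def weighted_ray_intersection(ray_origins, ray_dirs, weights):
    """Least-squares point closest to a bundle of weighted 3D rays.

    ray_origins: (N, 3) ray origins (ellipsoid centers in this setting)
    ray_dirs:    (N, 3) ray directions (need not be normalized)
    weights:     (N,)   per-ray confidence scores (e.g. attention scores)
    Returns the 3D point minimizing the weighted sum of squared
    point-to-ray distances.
    """
    d = ray_dirs / np.linalg.norm(ray_dirs, axis=1, keepdims=True)
    # Projector onto the plane orthogonal to each ray: I - d d^T.
    P = np.eye(3)[None, :, :] - d[:, :, None] * d[:, None, :]    # (N, 3, 3)
    W = weights[:, None, None]
    A = (W * P).sum(axis=0)                                      # (3, 3)
    b = (W * (P @ ray_origins[:, :, None])).sum(axis=0).ravel()  # (3,)
    return np.linalg.solve(A, b)

# Toy usage: noisy rays that all pass close to [1, 2, 3].
rng = np.random.default_rng(0)
center = np.array([1.0, 2.0, 3.0])
origins = rng.normal(size=(50, 3)) * 5.0
dirs = center - origins + rng.normal(scale=1e-3, size=(50, 3))
scores = rng.uniform(0.5, 1.0, size=50)
print(weighted_ray_intersection(origins, dirs, scores))          # ~ [1, 2, 3]
```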
To summarize, 6DGS contributions are threefold:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 137, + 566, + 479, + 663 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 137, + 566, + 479, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 566, + 479, + 600 + ], + "spans": [ + { + "bbox": [ + 137, + 566, + 479, + 600 + ], + "type": "text", + "content": "- Our approach for 6DoF camera pose estimation eliminates the need for an initial camera pose and iterations to converge, which is typically required in analysis-by-synthesis approaches;" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 137, + 603, + 479, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 603, + 479, + 636 + ], + "spans": [ + { + "bbox": [ + 137, + 603, + 479, + 636 + ], + "type": "text", + "content": "- 6DGS employs a novel ray casting pipeline, i.e. Ellicell, and an attention-based mechanism that efficiently matches pixel-level image information with 3DGS ellipsoids:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 137, + 641, + 479, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 641, + 479, + 663 + ], + "spans": [ + { + "bbox": [ + 137, + 641, + 479, + 663 + ], + "type": "text", + "content": "- The proposed method is state-of-the-art in the NVS benchmarks for camera pose estimation both for accuracy and real-time performance." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 170, + 91, + 448, + 102 + ], + "type": "text", + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 114, + 239, + 127 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 114, + 239, + 127 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 239, + 127 + ], + "type": "text", + "content": "2 Related works" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 139, + 481, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 139, + 481, + 175 + ], + "spans": [ + { + "bbox": [ + 130, + 139, + 481, + 175 + ], + "type": "text", + "content": "We review relevant works on 6DoF camera pose estimation based on Neural Radiance Fields (NeRF) models, ellipsoid-based approaches, and correspondence matching methods that are related to key components of 6DGS." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 175, + 482, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 175, + 482, + 437 + ], + "spans": [ + { + "bbox": [ + 130, + 175, + 482, + 437 + ], + "type": "text", + "content": "Pose estimation from neural radiance fields. iNeRF [46] pioneered NeRF-based 6D camera pose estimation, using iterative alignment of target and rendered images based on photometric error. 
However, iNeRF is prone to local minima in the optimization function, leading to recent developments like Parallel iNeRF [25], which employs parallel optimization of multiple candidate poses. While these approaches rely on NeRF-based models, " + }, + { + "bbox": [ + 130, + 175, + 482, + 437 + ], + "type": "inline_equation", + "content": "\\mathrm{NeMo + VoGe}" + }, + { + "bbox": [ + 130, + 175, + 482, + 437 + ], + "type": "text", + "content": " [43,44] have explored 6D camera pose estimation using object models based on volumetric Gaussian reconstruction kernels as geometric primitives. The rendering strategy (VoGE) differs from 3DGS as it is based on ray marching. Therefore, " + }, + { + "bbox": [ + 130, + 175, + 482, + 437 + ], + "type": "inline_equation", + "content": "\\mathrm{NeMo + VoGe}" + }, + { + "bbox": [ + 130, + 175, + 482, + 437 + ], + "type": "text", + "content": " iteratively aligns learned features from target and rendered images. Notably, " + }, + { + "bbox": [ + 130, + 175, + 482, + 437 + ], + "type": "inline_equation", + "content": "\\mathrm{NeMo + VoGe}" + }, + { + "bbox": [ + 130, + 175, + 482, + 437 + ], + "type": "text", + "content": "'s training requires multiple objects, in contrast to our method, which leverages a single object 3DGS model. Alternatively, CROSS-FIRE [34] addresses the local minima issue by integrating learned local features, which describes not only the visual content but also the 3D location of the scene in the NeRF model. Despite these advancements, analysis-by-synthesis approaches often struggle with inefficient pose updates due to the nature of the optimization refinement and the dependence on accurate initial pose priors. These factors can limit their real-world applicability. Recently, IFFNeRF [6] utilized a method that inverts the NeRF model to re-render an image to match a target one. However, unlike our approach, it does not consider the specificities of 3DGS, which include ellipsoid elongation and rotation, and their non-uniform distribution across the scene surface." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 438, + 482, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 438, + 482, + 544 + ], + "spans": [ + { + "bbox": [ + 130, + 438, + 482, + 544 + ], + "type": "text", + "content": "Pose estimation from ellipsoids. Recovery of the camera pose from ellipsoids has been explored for both SfM [8,9,12-14,37] and SLAM [11,16,21,24,32,47] scenarios, where methods frequently recover the object's ellipsoid representation as well as the camera 6DoF. Such approaches typically solve linear systems to recover the pose, most commonly minimizing a loss of the projection to and from an object detection. However, this methodological framework often presents limitations when confronted with large numbers of ellipsoids, as they are more indicated for handling few large ellipsoids that model a single object occupancy, 3D position and orientation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "type": "text", + "content": "Correspondences Matching. In traditional 6DoF image matching, feature-based approaches are used, which often rely on hand-crafted features, e.g., SIFT [27] or more recent deep approaches such as SuperGlue [36] and Transformer [19]. 
SuperGlue utilizes a Graph Neural Network (GNN) for feature attention and Sinkhorn [39] for matching, while LightGlue replaces the GNN with a lightweight transformer. Unlike these, Transformer [19] performs global match-to-match attention, allowing for accurate match localization. In addition, there is a body of work around feature equivariance [22,23] for improving the robustness of matching. However, these methods rely on the hypothesis that both feature sets exist in a homogeneous feature space, i.e. extracted from the image," + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "type": "text", + "content": "M. Bortolon et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "type": "text", + "content": "while in 6DGS we have the specific problem to match pixel to rays emitted from the Ellicells. Therefore, we rely on the proposed attention model to handle these ray-to-pixel bindings. OnePose++ [15] instead adopts a multi-modal approach matching a point cloud with an image. Another proposed alternative is to regress directly the pose parameters, as in CamNet [10]. Nevertheless, these approaches require a large amount of training data (" + }, + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "type": "inline_equation", + "content": "\\approx" + }, + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "type": "text", + "content": " 500 or more images), sometimes across multiple scenes and, like with CamNet, these need to be available also at inference time. 6DGS however, requires only " + }, + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "type": "inline_equation", + "content": "\\approx" + }, + { + "bbox": [ + 130, + 116, + 482, + 224 + ], + "type": "text", + "content": " 100 or less images, which are utilized only once during training." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 242, + 234, + 254 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 242, + 234, + 254 + ], + "spans": [ + { + "bbox": [ + 132, + 242, + 234, + 254 + ], + "type": "text", + "content": "3 Preliminaries" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "spans": [ + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "text", + "content": "We first review 3D Gaussian Splatting (3DGS) [18] to understand the underlying principles and provide the mathematical formalization of the model. 
3DGS objective is to synthesize novel views of a scene by optimizing the position, the orientation and the color of a set of 3D Gaussians approximated as ellipsoids " + }, + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "inline_equation", + "content": "\\mathcal{Q} = \\{\\mathbf{Q}\\}_{i=1}^{K}" + }, + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "text", + "content": " from a given set of input images " + }, + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "inline_equation", + "content": "\\mathcal{I} = \\{\\mathbf{I}\\}_{i=1}^{J}" + }, + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "text", + "content": " and their corresponding camera projection matrices " + }, + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "inline_equation", + "content": "\\mathcal{P} = \\{\\mathbf{P}\\}_{i=1}^{J} \\in \\mathbb{R}^{3 \\times 4}" + }, + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "text", + "content": ". A point " + }, + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "inline_equation", + "content": "\\mathbf{d}" + }, + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "text", + "content": " for being on the surface of an ellipsoid must satisfy the equation " + }, + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "inline_equation", + "content": "(\\mathbf{d} - \\mathbf{x}) \\boldsymbol{\\Sigma} (\\mathbf{d} - \\mathbf{x})^T = 1" + }, + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "inline_equation", + "content": "\\mathbf{x} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "text", + "content": " is the ellipsoid center and " + }, + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma} \\in \\mathbb{R}^{3 \\times 3}" + }, + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "text", + "content": " its covariance matrix. We can further decompose the covariance of the ellipsoid " + }, + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}" + }, + { + "bbox": [ + 130, + 266, + 482, + 374 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 271, + 383, + 480, + 396 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 271, + 383, + 480, + 396 + ], + "spans": [ + { + "bbox": [ + 271, + 383, + 480, + 396 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\Sigma} = \\mathbf {R} \\mathbf {U} \\mathbf {U} ^ {T} \\mathbf {R} ^ {T}, \\tag {1}", + "image_path": "026f7cdd1f3839c1c9521a08b2bd740eeec5d80cd3a146b025fd2a7c8ae3dedd.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 404, + 482, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 404, + 482, + 441 + ], + "spans": [ + { + "bbox": [ + 130, + 404, + 482, + 441 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 404, + 482, + 441 + ], + "type": "inline_equation", + "content": "\\mathbf{R} \\in \\mathbb{R}^{3 \\times 3}" + }, + { + "bbox": [ + 130, + 404, + 482, + 441 + ], + "type": "text", + "content": " is the ellipsoid rotation matrix and " + }, + { + "bbox": [ + 130, + 404, + 482, + 441 + ], + "type": "inline_equation", + "content": "\\mathbf{U}^{3 \\times 3}" + }, + { + "bbox": [ + 130, + 404, + 482, + 441 + ], + "type": "text", + "content": " denotes the scaling matrix. 
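Eq. (1) factors each ellipsoid covariance into a rotation and a scaling matrix. A small sketch of that construction, assuming (as in common 3DGS implementations) that the per-Gaussian parameters are stored as a unit quaternion and three axis scales; the function names are illustrative only.

```python
import numpy as np

def quat_to_rotmat(q):
    """Rotation matrix from a unit quaternion q = (w, x, y, z)."""
    w, x, y, z = q / np.linalg.norm(q)
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y)],
        [2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y)],
    ])

def ellipsoid_covariance(q, scales):
    """Eq. (1): Sigma = R U U^T R^T, with U = diag(scales)."""
    R = quat_to_rotmat(np.asarray(q, dtype=float))
    U = np.diag(scales)
    return R @ U @ U.T @ R.T

Sigma = ellipsoid_covariance([0.92, 0.0, 0.38, 0.0], [0.3, 0.1, 0.05])
# The eigenvalues of Sigma are the squared axis scales, up to ordering.
print(np.sort(np.linalg.eigvalsh(Sigma)))   # ~ [0.0025, 0.01, 0.09]
```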
The projection matrix " + }, + { + "bbox": [ + 130, + 404, + 482, + 441 + ], + "type": "inline_equation", + "content": "\\mathbf{P} \\in \\mathbb{R}^{3 \\times 4}" + }, + { + "bbox": [ + 130, + 404, + 482, + 441 + ], + "type": "text", + "content": " allows the projection of the ellipsoid " + }, + { + "bbox": [ + 130, + 404, + 482, + 441 + ], + "type": "inline_equation", + "content": "\\mathbf{Q}" + }, + { + "bbox": [ + 130, + 404, + 482, + 441 + ], + "type": "text", + "content": " onto the image plane generating the corresponding ellipse representation:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 256, + 449, + 480, + 462 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 256, + 449, + 480, + 462 + ], + "spans": [ + { + "bbox": [ + 256, + 449, + 480, + 462 + ], + "type": "interline_equation", + "content": "\\check {\\mathbf {y}} = \\mathbf {P} \\check {\\mathbf {x}} ^ {T}, \\check {\\mathbf {E}} = \\mathbf {P} \\boldsymbol {\\Sigma} \\mathbf {P} ^ {T}, \\tag {2}", + "image_path": "ff65d2240316610870c023b14a288208b381b04c547ae762207e1985d7c1468f.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "spans": [ + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "inline_equation", + "content": "\\mathbf{y} \\in \\mathbb{R}^2" + }, + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "inline_equation", + "content": "\\breve{\\mathbf{y}} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "text", + "content": " correspond to the Euclidean and homogeneous coordinates of the ellipse center point. The homogeneous coordinates " + }, + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "inline_equation", + "content": "\\breve{\\mathbf{y}}" + }, + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "text", + "content": " originate from the projection of the corresponding ellipsoid center in the homogeneous coordinates " + }, + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "inline_equation", + "content": "\\breve{\\mathbf{x}} \\in \\mathbb{R}^4" + }, + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "text", + "content": ". The matrix " + }, + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "inline_equation", + "content": "\\breve{\\mathbf{E}} \\in \\mathbb{R}^{3 \\times 3}" + }, + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "text", + "content": " is the ellipse covariance in homogeneous space. The covariance of the ellipse " + }, + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "inline_equation", + "content": "\\mathbf{E} \\in \\mathbb{R}^{2 \\times 2}" + }, + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "text", + "content": ", is derived by selecting only the first two rows and columns of " + }, + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "inline_equation", + "content": "\\breve{\\mathbf{E}}" + }, + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "text", + "content": " and dividing by the last element on " + }, + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "inline_equation", + "content": "\\breve{\\mathbf{E}}" + }, + { + "bbox": [ + 130, + 471, + 482, + 543 + ], + "type": "text", + "content": " diagonal." 
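Eq. (2) projects each ellipsoid onto the image plane as an ellipse. Rather than the homogeneous notation of Eq. (2), the sketch below uses the first-order (EWA-style) approximation found in common 3DGS implementations: project the center with a pinhole camera and warp the 3D covariance with the projection Jacobian. The camera parameters (fx, fy, cx, cy, world-to-camera R_wc, t_wc) are standard pinhole assumptions, not the paper's exact notation.

```python
import numpy as np

def project_gaussian(center, Sigma, R_wc, t_wc, fx, fy, cx, cy):
    """Project a 3D Gaussian (center, Sigma) to an image-plane ellipse.

    Returns the 2D ellipse center y (pixels) and its 2x2 covariance E,
    using the first-order approximation of the perspective projection.
    """
    t = R_wc @ center + t_wc                 # world -> camera coordinates
    tx, ty, tz = t
    y = np.array([fx * tx / tz + cx, fy * ty / tz + cy])
    # Jacobian of the perspective projection evaluated at t.
    J = np.array([
        [fx / tz, 0.0,     -fx * tx / tz**2],
        [0.0,     fy / tz, -fy * ty / tz**2],
    ])
    E = J @ R_wc @ Sigma @ R_wc.T @ J.T
    return y, E

# Toy usage: an axis-aligned Gaussian 4 units in front of the camera.
Sigma = np.diag([0.09, 0.01, 0.0025])
y, E = project_gaussian(np.array([0.0, 0.0, 4.0]), Sigma,
                        np.eye(3), np.zeros(3), fx=500, fy=500, cx=320, cy=240)
print(y)   # -> [320. 240.]
print(E)   # 2x2 covariance of the splatted ellipse
```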
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 543, + 482, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 543, + 482, + 628 + ], + "spans": [ + { + "bbox": [ + 130, + 543, + 482, + 628 + ], + "type": "text", + "content": "The splatted ellipses, denoted as " + }, + { + "bbox": [ + 130, + 543, + 482, + 628 + ], + "type": "inline_equation", + "content": "\\mathcal{B} = \\{\\langle \\mathbf{y},\\mathbf{E}\\rangle \\}_{i = 1}^{K}" + }, + { + "bbox": [ + 130, + 543, + 482, + 628 + ], + "type": "text", + "content": ", generate a pixel color with the rendering function " + }, + { + "bbox": [ + 130, + 543, + 482, + 628 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 130, + 543, + 482, + 628 + ], + "type": "text", + "content": " using rasterization techniques [2,18]. The function " + }, + { + "bbox": [ + 130, + 543, + 482, + 628 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 130, + 543, + 482, + 628 + ], + "type": "text", + "content": " acts independently on every single pixel of the image " + }, + { + "bbox": [ + 130, + 543, + 482, + 628 + ], + "type": "inline_equation", + "content": "\\mathbf{p}" + }, + { + "bbox": [ + 130, + 543, + 482, + 628 + ], + "type": "text", + "content": ". The pixel value depends on the neighboring projected ellipses, taking into account their center points' distances to the pixel coordinates, as well as their orientations and scales. " + }, + { + "bbox": [ + 130, + 543, + 482, + 628 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 130, + 543, + 482, + 628 + ], + "type": "text", + "content": " assumes that the ellipses are ordered based on the depth, so they should be sorted. Formally, " + }, + { + "bbox": [ + 130, + 543, + 482, + 628 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 130, + 543, + 482, + 628 + ], + "type": "text", + "content": " can be expressed as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 233, + 635, + 480, + 667 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 635, + 480, + 667 + ], + "spans": [ + { + "bbox": [ + 233, + 635, + 480, + 667 + ], + "type": "interline_equation", + "content": "\\phi (\\mathcal {B}, \\mathbf {p}) = \\sum_ {i = 1} ^ {K} \\rho_ {i} \\alpha_ {i} e ^ {- \\tau (\\mathcal {B} _ {i}, \\mathbf {p})} \\gamma (i, \\mathbf {p}), \\tag {3}", + "image_path": "84862964dad75772b4fa5a3ec632cff20beae4884ff028ac2f5b8732079edd3b.jpg" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 90, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 90, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 170, + 90, + 448, + 102 + ], + "type": "text", + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 145, + 114, + 470, + 263 + ], + "blocks": [ + { + "bbox": [ + 145, + 114, + 470, + 263 + ], + "lines": [ + { + "bbox": [ + 145, + 114, + 470, + 263 + ], + "spans": [ + { + "bbox": [ + 145, + 114, + 470, + 263 + ], + "type": 
"image", + "image_path": "354bcc52e0ce0e23e9c07e9fe0d38c9d9a85d1f2bf00ba1e98cb62059859d6d2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 266, + 482, + 344 + ], + "lines": [ + { + "bbox": [ + 130, + 266, + 482, + 344 + ], + "spans": [ + { + "bbox": [ + 130, + 266, + 482, + 344 + ], + "type": "text", + "content": "Fig. 2: The figure illustrates the pipeline of our 6DGS methodology. The image is encoded using a visual backbone (a). Concurrently, rays are uniformly projected from the center of the 3DGS ellipsoids (b), and their corresponding color is estimated. Subsequently, an attention map mechanism is employed to compare the encoded ray and image features (c). Following this comparison, the " + }, + { + "bbox": [ + 130, + 266, + 482, + 344 + ], + "type": "inline_equation", + "content": "N_{top}" + }, + { + "bbox": [ + 130, + 266, + 482, + 344 + ], + "type": "text", + "content": " matches are selected via attenuation, and the camera location is estimated (d) as the solution of a weighted Least Squares problem, resulting in a distinct 6DoF pose for the image." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 357, + 482, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 357, + 482, + 404 + ], + "spans": [ + { + "bbox": [ + 130, + 357, + 482, + 404 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 357, + 482, + 404 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 130, + 357, + 482, + 404 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 357, + 482, + 404 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 130, + 357, + 482, + 404 + ], + "type": "text", + "content": " represent the color and opacity attributes associated with the ellipsoid, which are inherited by the splatted ellipse. Similar to the volumetric rendering equation in NeRF, " + }, + { + "bbox": [ + 130, + 357, + 482, + 404 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 130, + 357, + 482, + 404 + ], + "type": "text", + "content": " denotes the inverse of the volume density accumulated up to the " + }, + { + "bbox": [ + 130, + 357, + 482, + 404 + ], + "type": "inline_equation", + "content": "i^{th}" + }, + { + "bbox": [ + 130, + 357, + 482, + 404 + ], + "type": "text", + "content": " ellipse on pixel " + }, + { + "bbox": [ + 130, + 357, + 482, + 404 + ], + "type": "inline_equation", + "content": "\\mathbf{p}" + }, + { + "bbox": [ + 130, + 357, + 482, + 404 + ], + "type": "text", + "content": " and is defined as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 238, + 413, + 481, + 447 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 413, + 481, + 447 + ], + "spans": [ + { + "bbox": [ + 238, + 413, + 481, + 447 + ], + "type": "interline_equation", + "content": "\\gamma (i, \\mathbf {p}) = \\prod_ {j = 1} ^ {i - 1} \\left(1 - \\alpha_ {j} e ^ {- \\tau \\left(\\mathcal {B} _ {i}, \\mathbf {p}\\right)}\\right). 
\\tag {4}", + "image_path": "50e6b6c90e8a301eff35fe81a613afe871b3f0dec3d0b7db0c9288c3157d8f08.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 455, + 482, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 455, + 482, + 504 + ], + "spans": [ + { + "bbox": [ + 130, + 455, + 482, + 504 + ], + "type": "text", + "content": "The purpose of " + }, + { + "bbox": [ + 130, + 455, + 482, + 504 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 130, + 455, + 482, + 504 + ], + "type": "text", + "content": " is to determine the light absorption by the ellipse when represented as a 2D Gaussian. Light absorption depends on the orientation and distance between the ellipse center, denoted as " + }, + { + "bbox": [ + 130, + 455, + 482, + 504 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 130, + 455, + 482, + 504 + ], + "type": "text", + "content": ", and the pixel location, expressed as " + }, + { + "bbox": [ + 130, + 455, + 482, + 504 + ], + "type": "inline_equation", + "content": "\\mathbf{d} = \\mathbf{p} - \\mathbf{y}" + }, + { + "bbox": [ + 130, + 455, + 482, + 504 + ], + "type": "text", + "content": ". Consequently, we can formally define " + }, + { + "bbox": [ + 130, + 455, + 482, + 504 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 130, + 455, + 482, + 504 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 246, + 510, + 481, + 534 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 510, + 481, + 534 + ], + "spans": [ + { + "bbox": [ + 246, + 510, + 481, + 534 + ], + "type": "interline_equation", + "content": "\\tau (\\mathbf {B}, \\mathbf {p}) = \\frac {1}{2} \\left(\\mathbf {1} _ {2} \\mathbf {d} ^ {T} \\mathbf {E} \\mathbf {d} \\mathbf {1} _ {2} ^ {T}\\right), \\tag {5}", + "image_path": "500776c444be5a262fb6c11300b2bb97e1d8217982732d746aa02228d595decf.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 542, + 482, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 542, + 482, + 580 + ], + "spans": [ + { + "bbox": [ + 130, + 542, + 482, + 580 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 542, + 482, + 580 + ], + "type": "inline_equation", + "content": "\\mathbf{1}_2\\in \\mathbb{R}^2" + }, + { + "bbox": [ + 130, + 542, + 482, + 580 + ], + "type": "text", + "content": " denotes a vector filled with ones. Following the processing of all pixels onto the image plane, the rendering function " + }, + { + "bbox": [ + 130, + 542, + 482, + 580 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 130, + 542, + 482, + 580 + ], + "type": "text", + "content": " generates an image " + }, + { + "bbox": [ + 130, + 542, + 482, + 580 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{l}}\\in \\mathbb{R}_{+}^{H\\times W}" + }, + { + "bbox": [ + 130, + 542, + 482, + 580 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 542, + 482, + 580 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 130, + 542, + 482, + 580 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 542, + 482, + 580 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 130, + 542, + 482, + 580 + ], + "type": "text", + "content": " represent the width and height of the image." 
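Eqs. (3)-(5) composite the depth-sorted splatted ellipses into a pixel color. A minimal single-pixel sketch, assuming the ellipses are already sorted front-to-back and described by center y, 2x2 covariance E, color rho and opacity alpha; the Gaussian falloff is written with the inverse covariance, which is how Eq. (5) is usually implemented in practice.

```python
import numpy as np

def render_pixel(p, centers, covs, colors, alphas):
    """Eq. (3): front-to-back alpha compositing of splatted 2D Gaussians.

    p:        (2,)      pixel coordinates
    centers:  (K, 2)    ellipse centers y, depth-sorted (nearest first)
    covs:     (K, 2, 2) ellipse covariances E
    colors:   (K, 3)    RGB color rho of each ellipse
    alphas:   (K,)      opacity alpha of each ellipse
    """
    color = np.zeros(3)
    transmittance = 1.0                          # gamma of Eq. (4)
    for y, E, rho, alpha in zip(centers, covs, colors, alphas):
        d = p - y
        tau = 0.5 * d @ np.linalg.inv(E) @ d     # Gaussian falloff, Eq. (5)
        a = alpha * np.exp(-tau)
        color += rho * a * transmittance
        transmittance *= (1.0 - a)
    return color

# Toy usage: two overlapping splats evaluated at a single pixel.
p = np.array([10.0, 10.0])
centers = np.array([[10.0, 10.0], [12.0, 10.0]])
covs = np.stack([np.eye(2) * 4.0, np.eye(2) * 9.0])
colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
alphas = np.array([0.8, 0.9])
print(render_pixel(p, centers, covs, colors, alphas))
```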
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 131, + 597, + 237, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 597, + 237, + 611 + ], + "spans": [ + { + "bbox": [ + 131, + 597, + 237, + 611 + ], + "type": "text", + "content": "4 Our approach" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 621, + 209, + 632 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 621, + 209, + 632 + ], + "spans": [ + { + "bbox": [ + 131, + 621, + 209, + 632 + ], + "type": "text", + "content": "4.1 Overview" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 640, + 481, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 640, + 481, + 667 + ], + "spans": [ + { + "bbox": [ + 130, + 640, + 481, + 667 + ], + "type": "text", + "content": "6DGS estimates the camera pose " + }, + { + "bbox": [ + 130, + 640, + 481, + 667 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{P}}\\in \\mathbb{R}^{3\\times 4}" + }, + { + "bbox": [ + 130, + 640, + 481, + 667 + ], + "type": "text", + "content": ", given a target image " + }, + { + "bbox": [ + 130, + 640, + 481, + 667 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_t" + }, + { + "bbox": [ + 130, + 640, + 481, + 667 + ], + "type": "text", + "content": " and a set of ellipsoids " + }, + { + "bbox": [ + 130, + 640, + 481, + 667 + ], + "type": "inline_equation", + "content": "\\mathcal{Q}" + }, + { + "bbox": [ + 130, + 640, + 481, + 667 + ], + "type": "text", + "content": " from a pre-computed 3DGS model (Fig. 2). To solve for the" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 92, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 92, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 92, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "type": "text", + "content": "M. Bortolon et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 160, + 116, + 264, + 182 + ], + "blocks": [ + { + "bbox": [ + 160, + 116, + 264, + 182 + ], + "lines": [ + { + "bbox": [ + 160, + 116, + 264, + 182 + ], + "spans": [ + { + "bbox": [ + 160, + 116, + 264, + 182 + ], + "type": "image", + "image_path": "1aca5089144560451b0ae3c65e1a0197a3a157fd7bffac263bc1dc30722b0845.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 169, + 185, + 252, + 195 + ], + "lines": [ + { + "bbox": [ + 169, + 185, + 252, + 195 + ], + "spans": [ + { + "bbox": [ + 169, + 185, + 252, + 195 + ], + "type": "text", + "content": "(a) Ellicell components" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 276, + 129, + 353, + 183 + ], + "blocks": [ + { + "bbox": [ + 276, + 129, + 353, + 183 + ], + "lines": [ + { + "bbox": [ + 276, + 129, + 353, + 183 + ], + "spans": [ + { + "bbox": [ + 276, + 129, + 353, + 183 + ], + "type": "image", + "image_path": "860e5082e321dc375ebb1202e6057a515c6539940ee47e212935a4d91ba13347.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 280, + 186, + 350, + 195 + ], + "lines": [ + { + "bbox": [ + 280, + 186, + 350, + 195 + ], + "spans": [ + { + "bbox": [ + 280, + 186, + 350, + 195 + ], + "type": "text", + "content": "(b) 3D Ellicell grid" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 372, + 126, + 454, + 185 + ], + "blocks": [ + { + "bbox": [ + 372, + 126, + 454, + 185 + ], + "lines": [ + { + "bbox": [ + 372, + 126, + 454, + 185 + ], + "spans": [ + { + "bbox": [ + 372, + 126, + 454, + 185 + ], + "type": "image", + "image_path": "084ec0cf5a3cc2c050e24018fe7040088ba4d4b8c8a6480d161b864c025bc86a.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 373, + 186, + 452, + 195 + ], + "lines": [ + { + "bbox": [ + 373, + 186, + 452, + 195 + ], + "spans": [ + { + "bbox": [ + 373, + 186, + 452, + 195 + ], + "type": "text", + "content": "(c) 3D radiant Ellicell" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 131, + 204, + 482, + 270 + ], + "lines": [ + { + "bbox": [ + 131, + 204, + 482, + 270 + ], + "spans": [ + { + "bbox": [ + 131, + 204, + 482, + 270 + ], + "type": "text", + "content": "Fig. 3: The illustration depicts the three primary stages involved in the radiant Ellicell generation. Firstly, (a) delineates the formulation of components required to compute the geometric information for each cell. Secondly, (b) shows the resulting Ellicell grid positioned on the surface of the ellipsoid along with their respective center points. Finally, (c) demonstrates the generation of rays originating from the center point of the ellipsoid going through the Ellicell center." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "spans": [ + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "text", + "content": "camera pose, we propose a casting method from the ellipsoid's surface, called Ellicell, that divides it in equal area cells (Sec. 4.2). 
The ellipsoids cast a set of " + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "text", + "content": " rays, denoted as " + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "inline_equation", + "content": "\\mathcal{V} = \\{\\langle \\mathbf{v}_o,\\mathbf{v}_d,\\mathbf{v}_c\\rangle \\}_{i = 1}^N" + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "text", + "content": ", one for each of the generated cell (Fig. 3c). Each ray is identified by " + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "text", + "content": " the origin " + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_o\\in \\mathbb{R}^3" + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "inline_equation", + "content": "ii" + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "text", + "content": " the center point of each ellipsoid, " + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "inline_equation", + "content": "iii" + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "text", + "content": " the direction " + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_d\\in \\mathbb{R}^3" + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "text", + "content": " originating from the ellipsoid center to the cell center and through the space, and " + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "inline_equation", + "content": "iv" + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "text", + "content": " the color information " + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_c\\in \\mathbb{R}^3" + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "text", + "content": " as RGB values. We synthesize the rays' color using the 3DGS rendering function " + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "text", + "content": " (Eq. 3). A subset of these rays, depending on the view perspective, may intersect the camera's optical center. For binding the rays to the image pixels we compute the target image pixels features " + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "inline_equation", + "content": "\\psi (\\mathbf{I}_t)" + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "text", + "content": " (Fig. 2a) and the rays features " + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "inline_equation", + "content": "\\psi (\\mathcal{V})" + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "text", + "content": " (Fig. 2b). These features are used to identify the intersecting rays by using an attention map " + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "text", + "content": " (Fig. 2c), see Sec. 4.4. The higher the attention value for a ray-pixel pair is, the more likely the intersection on the image plane is a valid one. 
Lastly, we determine " + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{P}}_t" + }, + { + "bbox": [ + 130, + 288, + 482, + 469 + ], + "type": "text", + "content": " (Fig. 2d) by computing the intersection point of rays using the weighted Least Squares algorithm (Sec. 4.5)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 483, + 239, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 483, + 239, + 495 + ], + "spans": [ + { + "bbox": [ + 132, + 483, + 239, + 495 + ], + "type": "text", + "content": "4.2 Radiant Ellicell" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 502, + 482, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 502, + 482, + 563 + ], + "spans": [ + { + "bbox": [ + 130, + 502, + 482, + 563 + ], + "type": "text", + "content": "We create rays spanning in every direction allowing 6DGS to recover the camera pose. We introduce the concept of radiant Ellicell for generating rays that uniformly emanate from the ellipsoid surface, as illustrated in Fig. 3. Ellicell generation is deterministic [5,31] and achieves higher precision with fewer rays [17,42] compared to other sampling methods like Monte-Carlo [30]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 562, + 482, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 562, + 482, + 601 + ], + "spans": [ + { + "bbox": [ + 130, + 562, + 482, + 601 + ], + "type": "text", + "content": "First, we compute the area of each Ellicell. This is achieved by calculating the ellipsoid surface area, using a computationally efficient approach, namely Ramanujan approximation 3:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 217, + 605, + 481, + 637 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 605, + 481, + 637 + ], + "spans": [ + { + "bbox": [ + 217, + 605, + 481, + 637 + ], + "type": "interline_equation", + "content": "h = 4 \\pi \\left(\\frac {(a b) ^ {1 . 6} + (a c) ^ {1 . 6} + (b c) ^ {1 . 6}}{3}\\right) ^ {\\frac {1}{1 . 6}}, \\tag {6}", + "image_path": "b6dc734f7282cb171491dc6ee544e2ce3955a40ad554742ff2a95efc7f764673.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 131, + 642, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 642, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 131, + 642, + 481, + 666 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 642, + 481, + 666 + ], + "type": "inline_equation", + "content": "a, b, c = \\text{diag}(\\mathcal{S})" + }, + { + "bbox": [ + 131, + 642, + 481, + 666 + ], + "type": "text", + "content": " are the ellipsoid axis scales. Each Ellicell cell's target area equals " + }, + { + "bbox": [ + 131, + 642, + 481, + 666 + ], + "type": "inline_equation", + "content": "\\mu = h / G" + }, + { + "bbox": [ + 131, + 642, + 481, + 666 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 131, + 642, + 481, + 666 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 131, + 642, + 481, + 666 + ], + "type": "text", + "content": " being the number of cells dividing each ellipsoid." 
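Eq. (6) approximates the ellipsoid surface area from its three axis scales, which then fixes the target per-cell area mu = h / G and the square-equivalent cell side z = sqrt(mu). A direct transcription of that step; the variable names are chosen for readability and the exponent p = 1.6 follows the formula as written.

```python
import numpy as np

def ellipsoid_surface_area(a, b, c, p=1.6):
    """Eq. (6): approximate surface area of an ellipsoid with semi-axes a, b, c."""
    return 4.0 * np.pi * (((a * b) ** p + (a * c) ** p + (b * c) ** p) / 3.0) ** (1.0 / p)

def cell_geometry(a, b, c, G):
    """Target cell area mu = h / G and cell side z = sqrt(mu) for G cells."""
    h = ellipsoid_surface_area(a, b, c)
    mu = h / G
    return h, mu, np.sqrt(mu)

h, mu, z = cell_geometry(0.3, 0.1, 0.05, G=64)
print(f"surface area h = {h:.4f}, cell area mu = {mu:.5f}, cell side z = {z:.4f}")
```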
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 170, + 91, + 448, + 102 + ], + "type": "text", + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 482, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 482, + 173 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 482, + 173 + ], + "type": "text", + "content": "Approximating each cell as a square with side " + }, + { + "bbox": [ + 132, + 116, + 482, + 173 + ], + "type": "inline_equation", + "content": "z = \\sqrt{\\mu}" + }, + { + "bbox": [ + 132, + 116, + 482, + 173 + ], + "type": "text", + "content": " we slice the ellipsoids along the major axis into ribbons, each as wide as " + }, + { + "bbox": [ + 132, + 116, + 482, + 173 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 132, + 116, + 482, + 173 + ], + "type": "text", + "content": " (Fig. 3a). The extremity of each ribbon is called a ring. The total number of rings is " + }, + { + "bbox": [ + 132, + 116, + 482, + 173 + ], + "type": "inline_equation", + "content": "e = \\lfloor \\kappa(a, b) / (2z) \\rfloor \\in \\mathbb{N}" + }, + { + "bbox": [ + 132, + 116, + 482, + 173 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 132, + 116, + 482, + 173 + ], + "type": "inline_equation", + "content": "\\kappa(a, b)" + }, + { + "bbox": [ + 132, + 116, + 482, + 173 + ], + "type": "text", + "content": " computes the ring perimeter. Ignoring ellipsoid's rotation, we compute the ring perimeter by treating them as 2D ellipses, thus defining " + }, + { + "bbox": [ + 132, + 116, + 482, + 173 + ], + "type": "inline_equation", + "content": "\\kappa(a, b)" + }, + { + "bbox": [ + 132, + 116, + 482, + 173 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 198, + 181, + 481, + 206 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 181, + 481, + 206 + ], + "spans": [ + { + "bbox": [ + 198, + 181, + 481, + 206 + ], + "type": "interline_equation", + "content": "\\kappa (a, b) = \\pi \\left((a + b) + \\frac {3 (a - b) ^ {2}}{1 0 (a + b) + \\sqrt {a ^ {2} + 1 4 a b + b ^ {2}}}\\right). \\tag {7}", + "image_path": "78b0c0be2849b3d623e66194d45962d7bfaf1ed954469c8e77e924f0f72934e4.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 131, + 213, + 482, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 213, + 482, + 248 + ], + "spans": [ + { + "bbox": [ + 131, + 213, + 482, + 248 + ], + "type": "text", + "content": "Given the total number of rings " + }, + { + "bbox": [ + 131, + 213, + 482, + 248 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 131, + 213, + 482, + 248 + ], + "type": "text", + "content": " it is possible to compute the ribbon's centerline geometric parameters. 
In particular, we compute the scale parameter of each ribbon as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 214, + 257, + 481, + 283 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 257, + 481, + 283 + ], + "spans": [ + { + "bbox": [ + 214, + 257, + 481, + 283 + ], + "type": "interline_equation", + "content": "\\varrho (n, \\Delta r, a, b) = \\sqrt {1 - \\frac {(0 . 5 \\Delta r + n \\Delta r - a) ^ {2}}{b ^ {2}}} \\tag {8}", + "image_path": "9577bac08372f874e2dcc64988a89e0b08140ce68decec45ff3a6476468f338f.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 131, + 291, + 482, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 291, + 482, + 362 + ], + "spans": [ + { + "bbox": [ + 131, + 291, + 482, + 362 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 291, + 482, + 362 + ], + "type": "inline_equation", + "content": "\\varDelta r=a/e" + }, + { + "bbox": [ + 131, + 291, + 482, + 362 + ], + "type": "text", + "content": " is the distance between two consecutive rings. This equation derives from the manipulation of the standard ellipse equation. While ribbon size " + }, + { + "bbox": [ + 131, + 291, + 482, + 362 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 131, + 291, + 482, + 362 + ], + "type": "text", + "content": " should be equal to " + }, + { + "bbox": [ + 131, + 291, + 482, + 362 + ], + "type": "inline_equation", + "content": "\\varDelta r" + }, + { + "bbox": [ + 131, + 291, + 482, + 362 + ], + "type": "text", + "content": ", these two values will likely differ due to the need for the number of rings being a natural number. Eq. 8 is also used to compute the other ribbon scaling parameter by replacing " + }, + { + "bbox": [ + 131, + 291, + 482, + 362 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 131, + 291, + 482, + 362 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 131, + 291, + 482, + 362 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 131, + 291, + 482, + 362 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 131, + 291, + 482, + 362 + ], + "type": "inline_equation", + "content": "\\varrho" + }, + { + "bbox": [ + 131, + 291, + 482, + 362 + ], + "type": "text", + "content": " is then used to compute the number of cells inside each ribbon as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 207, + 370, + 481, + 397 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 370, + 481, + 397 + ], + "spans": [ + { + "bbox": [ + 207, + 370, + 481, + 397 + ], + "type": "interline_equation", + "content": "\\xi (n, e, a, b, c) = \\left\\lfloor \\frac {\\kappa (\\varrho (n , e , a , b) , \\varrho (n , e , a , c))}{z} \\right\\rfloor , \\tag {9}", + "image_path": "ed177d99d365b04b920df8617892d6b5237e82f46bb453d8f4ec60dc560c8221.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 131, + 404, + 482, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 404, + 482, + 498 + ], + "spans": [ + { + "bbox": [ + 131, + 404, + 482, + 498 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 404, + 482, + 498 + ], + "type": "inline_equation", + "content": "\\xi" + }, + { + "bbox": [ + 131, + 404, + 482, + 498 + ], + "type": "text", + "content": " is the number of cells inside the ring. 
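Eqs. (7)-(9) slice the ellipsoid into ribbons along the major axis and decide how many cells each ribbon carries: Ramanujan's perimeter formula gives kappa, the ring count e follows from kappa(a, b) / (2z), and each ring's cross-section perimeter divided by z gives the cell count xi. The sketch below strings these together. Note the extracted Eq. (8) appears garbled in this dump, so the cross-section scaling and the pole-to-pole spacing used here are assumptions standing in for it, flagged in the comments.

```python
import numpy as np

def ellipse_perimeter(a, b):
    """Eq. (7): Ramanujan's approximation of an ellipse perimeter."""
    return np.pi * ((a + b) + 3.0 * (a - b) ** 2 /
                    (10.0 * (a + b) + np.sqrt(a * a + 14.0 * a * b + b * b)))

def ellicell_rings(a, b, c, z):
    """Ring count e = floor(kappa(a, b) / (2 z)) and, per ring, the
    cross-section semi-axes and cell count xi (Eq. (9)).

    Assumption: the ribbon centerline at ring n crosses the major axis at
    x_n (spaced pole to pole), and its cross-section is the ellipse
    (b*s, c*s) with s = sqrt(1 - x_n^2 / a^2); this standard relation
    stands in for the garbled Eq. (8).
    """
    e = int(np.floor(ellipse_perimeter(a, b) / (2.0 * z)))
    rings = []
    for n in range(e):
        x_n = -a + (n + 0.5) * (2.0 * a / e)          # centerline position along the major axis
        s = np.sqrt(max(0.0, 1.0 - (x_n / a) ** 2))
        r, w = b * s, c * s                            # cross-section semi-axes
        xi = int(np.floor(ellipse_perimeter(r, w) / z))
        rings.append((x_n, r, w, max(xi, 1)))
    return e, rings

e, rings = ellicell_rings(0.3, 0.1, 0.05, z=0.06)
print(f"{e} rings, cells per ring: {[xi for *_, xi in rings]}")
```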
We compute the center of each cell, equally spaced along the ribbon's centerline, by sampling " + }, + { + "bbox": [ + 131, + 404, + 482, + 498 + ], + "type": "inline_equation", + "content": "\\xi" + }, + { + "bbox": [ + 131, + 404, + 482, + 498 + ], + "type": "text", + "content": " points along it. This is challenging as the perimeter distance does not linearly correlate with the " + }, + { + "bbox": [ + 131, + 404, + 482, + 498 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 131, + 404, + 482, + 498 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 404, + 482, + 498 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 131, + 404, + 482, + 498 + ], + "type": "text", + "content": " variations. However, we can solve this by using a statistical method. Knowing a distribution's Cumulative Distribution Function (CDF) allows us to sample uniformly between 0 and 1 and then use the CDF inverse to map the sample to the distribution space. This approach applies to our case, where samples are distributed as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 271, + 500, + 480, + 514 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 271, + 500, + 480, + 514 + ], + "spans": [ + { + "bbox": [ + 271, + 500, + 480, + 514 + ], + "type": "interline_equation", + "content": "d s ^ {2} = d x ^ {2} + d y ^ {2}, \\tag {10}", + "image_path": "f2473493a9f13e6e5f69b6f391a594e3681cb3cf08f3befa94c31185a47d064c.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 519, + 482, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 519, + 482, + 567 + ], + "spans": [ + { + "bbox": [ + 131, + 519, + 482, + 567 + ], + "type": "text", + "content": "and, by taking its inverse, we can retrieve the coordinates of each cell center. To simplify the equations, we define " + }, + { + "bbox": [ + 131, + 519, + 482, + 567 + ], + "type": "inline_equation", + "content": "r = \\varrho(n, e, a, b)" + }, + { + "bbox": [ + 131, + 519, + 482, + 567 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 519, + 482, + 567 + ], + "type": "inline_equation", + "content": "w = \\varrho(n, e, a, c)" + }, + { + "bbox": [ + 131, + 519, + 482, + 567 + ], + "type": "text", + "content": " to indicate the scale of the ellipse under consideration. Then we express Eq. 
[10] in polar coordinates to simplify the differentiation:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 246, + 577, + 481, + 599 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 577, + 481, + 599 + ], + "spans": [ + { + "bbox": [ + 246, + 577, + 481, + 599 + ], + "type": "interline_equation", + "content": "\\frac {d s}{d \\theta} = \\sqrt {r ^ {2} \\sin^ {2} \\theta + w ^ {2} \\cos^ {2} \\theta}, \\tag {11}", + "image_path": "77aae8b19a0634cfd98a401fd8d40df8388b8d357bf2bc96611d615636fda6b1.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 131, + 607, + 482, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 607, + 482, + 631 + ], + "spans": [ + { + "bbox": [ + 131, + 607, + 482, + 631 + ], + "type": "text", + "content": "then, we can express the set of points on the perimeter of the ribbon centerline as an angular position in the polar coordinate system as:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 232, + 640, + 481, + 669 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 232, + 640, + 481, + 669 + ], + "spans": [ + { + "bbox": [ + 232, + 640, + 481, + 669 + ], + "type": "interline_equation", + "content": "\\theta^ {\\prime} = \\left(\\frac {d s}{d \\theta}\\right) ^ {- 1} \\left(g \\cdot \\frac {1}{\\xi (n , e , a , b , c)}\\right), \\tag {12}", + "image_path": "e5ecf1c4681c01e241573e954b26067f8b3b1e45c46b964dd7d5dd84eadfcc73.jpg" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 241, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 241, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 241, + 101 + ], + "type": "text", + "content": "M. Bortolon et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": " being the cell identifier. Given " + }, + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "inline_equation", + "content": "\\theta'" + }, + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": " we can use it inside the ellipse equation in polar coordinates to obtain the 3D position of each cell center:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 264, + 149, + 481, + 186 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 149, + 481, + 186 + ], + "spans": [ + { + "bbox": [ + 264, + 149, + 481, + 186 + ], + "type": "interline_equation", + "content": "\\mathbf {u} = \\left( \\begin{array}{c} w \\cos \\left(\\theta^ {\\prime}\\right) \\\\ g \\sin \\left(\\theta^ {\\prime}\\right) \\\\ - a + n \\Delta r \\end{array} \\right). 
\\tag {13}", + "image_path": "75a10db82f9c71d407c19e4eb118214240d99454ab966db013b2bf51a34492dd.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 201, + 238, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 201, + 238, + 213 + ], + "spans": [ + { + "bbox": [ + 132, + 201, + 238, + 213 + ], + "type": "text", + "content": "4.3 Ray generation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 220, + 482, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 220, + 482, + 304 + ], + "spans": [ + { + "bbox": [ + 130, + 220, + 482, + 304 + ], + "type": "text", + "content": "Once we have divided each ellipsoid of the 3DGS model into equidistant cells, we cast the rays originating from the center point of the ellipsoid i.e. " + }, + { + "bbox": [ + 130, + 220, + 482, + 304 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_o = \\mathbf{x}" + }, + { + "bbox": [ + 130, + 220, + 482, + 304 + ], + "type": "text", + "content": " and oriented towards the Ellicell center " + }, + { + "bbox": [ + 130, + 220, + 482, + 304 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_d = \\mathbf{u} - \\mathbf{x}" + }, + { + "bbox": [ + 130, + 220, + 482, + 304 + ], + "type": "text", + "content": ". We reduce the number of potential rays cast from each ellipsoid by considering only the rays oriented in the same hemisphere as the estimated surface normal of the ellipsoid. We obtain the surface normals by treating the ellipsoid centroids as a point cloud, and the surface normal is estimated using the nearby points [41]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 304, + 482, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 304, + 482, + 365 + ], + "spans": [ + { + "bbox": [ + 130, + 304, + 482, + 365 + ], + "type": "text", + "content": "Finally, each ray has also been associated with the color information " + }, + { + "bbox": [ + 130, + 304, + 482, + 365 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_c" + }, + { + "bbox": [ + 130, + 304, + 482, + 365 + ], + "type": "text", + "content": ", which we compute through the same pixel-level approach of 3DGS (Eq. 5). We note that the application of the volumetric rendering function of Eq. 5 produces a single pixel for each ray. The generated rays represent a collection of potential hypotheses, meaning that a subset of them will intersect the target image " + }, + { + "bbox": [ + 130, + 304, + 482, + 365 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_t" + }, + { + "bbox": [ + 130, + 304, + 482, + 365 + ], + "type": "text", + "content": "." 
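The construction in Eqs. 10-13 and the ray casting of Sec. 4.3 can be made concrete with a short numerical sketch. The PyTorch fragment below is a minimal illustration under stated assumptions, not the authors' implementation: it tabulates the arc-length integrand of Eq. 11 on a dense grid, treats the normalized cumulative sum as the CDF implied by Eq. 10, inverts it at the equally spaced targets of Eq. 12, and evaluates Eq. 13. The second component of Eq. 13 reads "g sin θ′" in the extracted text, but given the definitions r = ϱ(n, e, a, b) and w = ϱ(n, e, a, c) it is assumed here to be r sin θ′. All names (ellicell_centers, cast_rays) are hypothetical.

```python
import torch

def ellicell_centers(r, w, z, xi, n_dense=4096):
    """Place xi cell centers uniformly by arc length on the ellipse with
    semi-axes (w, r) at height z (sketch of Eqs. 10-13; names assumed)."""
    theta = torch.linspace(0.0, 2.0 * torch.pi, n_dense)
    # Integrand of Eq. 11; its normalized cumulative sum acts as the CDF.
    ds_dtheta = torch.sqrt(r**2 * torch.sin(theta)**2 + w**2 * torch.cos(theta)**2)
    cdf = torch.cumsum(ds_dtheta, dim=0)
    cdf = cdf / cdf[-1]
    # Invert the CDF at xi uniformly spaced targets (Eq. 12).
    g = (torch.arange(xi, dtype=torch.float32) + 0.5) / xi
    theta_p = theta[torch.searchsorted(cdf, g).clamp(max=n_dense - 1)]
    # Ellipse equation in polar coordinates (Eq. 13; "r sin" assumed).
    return torch.stack([w * torch.cos(theta_p),
                        r * torch.sin(theta_p),
                        torch.full((xi,), float(z))], dim=-1)

def cast_rays(x, centers, normal):
    """Rays from the ellipsoid center x towards each Ellicell center (Sec. 4.3),
    keeping only directions in the hemisphere of the estimated surface normal."""
    v_d = torch.nn.functional.normalize(centers - x, dim=-1)
    v_d = v_d[(v_d @ normal) > 0]   # hemisphere filter
    v_o = x.expand_as(v_d)          # every kept ray originates at x
    return v_o, v_d
```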
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 381, + 362, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 381, + 362, + 395 + ], + "spans": [ + { + "bbox": [ + 132, + 381, + 362, + 395 + ], + "type": "text", + "content": "4.4 Binding by attenuation of rays to image" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "spans": [ + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "text", + "content": "Given all the cast rays " + }, + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "text", + "content": ", we identify a subset of " + }, + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "text", + "content": " correlating with the target image " + }, + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_t" + }, + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "text", + "content": ". A learned attention map " + }, + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "text", + "content": " assigns scores " + }, + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{s}}" + }, + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "text", + "content": " based on the highest correlation to image pixels; higher similarity results in higher scores. Based on scores " + }, + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{s}}" + }, + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "text", + "content": ", we select the top candidate's rays " + }, + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "inline_equation", + "content": "(N_{top})" + }, + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "text", + "content": " that present maximal association and use them to recover the pose " + }, + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "inline_equation", + "content": "(\\hat{\\mathbf{P}}_t)" + }, + { + "bbox": [ + 130, + 401, + 482, + 462 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 462, + 482, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 462, + 482, + 521 + ], + "spans": [ + { + "bbox": [ + 130, + 462, + 482, + 521 + ], + "type": "text", + "content": "To select rays with similar appearance and position, we use a Multi-Layer Perceptron (MLP) defined as " + }, + { + "bbox": [ + 130, + 462, + 482, + 521 + ], + "type": "inline_equation", + "content": "\\mathbf{V} = \\psi (\\mathbf{v})" + }, + { + "bbox": [ + 130, + 462, + 482, + 521 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 462, + 482, + 521 + ], + "type": "inline_equation", + "content": "\\mathbf{V}\\in \\mathbb{R}^{N\\times C}" + }, + { + "bbox": [ + 130, + 462, + 482, + 521 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 130, + 462, + 482, + 521 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 130, + 462, + 482, + 521 + ], + "type": "text", + "content": " being the feature size and " + }, + { + "bbox": [ + 130, + 462, + 482, + 521 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 130, + 462, + 482, + 521 + ], + "type": "text", + "content": " the overall number of rays. The MLP input is enriched by incorporating Positional Encoding that maps the data in the Fourier domain [40] to better distinguish between similar data." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "spans": [ + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "text", + "content": "We generate features from " + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "inline_equation", + "content": "\\mathbf{I}_t" + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "text", + "content": " using DINOv2 [35] as a pre-trained backbone feature extractor. This results in a set of features " + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_t \\in \\mathbb{R}^{M \\times C}" + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "inline_equation", + "content": "M = W \\times H" + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "text", + "content": ". Both the image and ray features sets are processed by a single attention module " + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "inline_equation", + "content": "\\mathcal{A}(\\mathbf{V}_f, \\mathbf{F}_t) \\in \\mathbb{R}^{M \\times N}" + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "text", + "content": " producing a set of scores. Inside the attention module the ray features, " + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "inline_equation", + "content": "\\mathbf{V}" + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "text", + "content": ", are used as queries and the image features, " + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "inline_equation", + "content": "\\mathbf{F}_t" + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "text", + "content": ", as a key. 
We optimize the attention map by summing along the rows and converting it into a per-ray correlation score as follows " + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{s}} = \\sum_{i=1}^{M} \\mathcal{A}_i" + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "text", + "content": ". The higher the score value given by " + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{s}}" + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "text", + "content": ", the better the association between the rays and image pixels. At test-time we select the " + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "inline_equation", + "content": "N_{top}" + }, + { + "bbox": [ + 130, + 521, + 482, + 629 + ], + "type": "text", + "content": " rays with the highest ranking scores." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 630, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 630, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 630, + 482, + 666 + ], + "type": "text", + "content": "Because a ray and an image pixel should be associated with each other based on the distance between the camera origin and its projection onto the corresponding ray, we supervise the predicted scores " + }, + { + "bbox": [ + 130, + 630, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{s}}" + }, + { + "bbox": [ + 130, + 630, + 482, + 666 + ], + "type": "text", + "content": " using the same images used to" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 90, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 90, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 170, + 90, + 448, + 102 + ], + "type": "text", + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "text", + "content": "estimate the 3DGS model at training time. 
We compute the projection of the point on the line as " + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "inline_equation", + "content": "l = \\max((\\mathbf{O} - \\mathbf{v}_o)\\mathbf{v}_d, 0)" + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "inline_equation", + "content": "\\mathbf{O}" + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "text", + "content": " is the camera position, " + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_o" + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "text", + "content": " the generated ray origin and " + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_d" + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "text", + "content": " the corresponding direction. Rays are infinite only in one direction, so we restrict " + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "inline_equation", + "content": "l \\in \\mathbb{R}^+" + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "text", + "content": " using the max operator. Then, we can compute the distance between the camera origin and its projection on the ray as " + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "inline_equation", + "content": "\\mathbf{h} = \\| (\\mathbf{v}_o + l\\mathbf{v}_d) - \\mathbf{O}\\|_2" + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "text", + "content": ". The value " + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "inline_equation", + "content": "\\mathbf{h}" + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "text", + "content": " can span from 0 to " + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "inline_equation", + "content": "+\\infty" + }, + { + "bbox": [ + 130, + 116, + 482, + 213 + ], + "type": "text", + "content": ", with 0 indicating a ray that passes through the camera's optical center. We map distances to the attention map score using:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 238, + 217, + 481, + 246 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 217, + 481, + 246 + ], + "spans": [ + { + "bbox": [ + 238, + 217, + 481, + 246 + ], + "type": "interline_equation", + "content": "\\delta = 1 - \\tanh \\left(\\frac {\\mathbf {h}}{\\lambda}\\right), \\mathbf {s} = \\delta \\frac {M}{\\sum \\delta}, \\tag {14}", + "image_path": "70851b0f746aacd039cbdd9a2a41109773b6a42a9aa70e2fa130cbe41c55411a.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 249, + 482, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 249, + 482, + 298 + ], + "spans": [ + { + "bbox": [ + 130, + 249, + 482, + 298 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 249, + 482, + 298 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 130, + 249, + 482, + 298 + ], + "type": "text", + "content": " regulates the number of rays to assign to a specific camera. Lastly, the softmax inside the attention map computation requires we normalize the ground truth scores. 
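The ground-truth scores of Eq. 14 are straightforward to transcribe: project the camera origin onto each ray, clamp the offset to the forward half-line, and squash the point-to-ray distance. A minimal sketch follows, assuming unit-norm directions and per-ray scores; argument names are illustrative.

```python
import torch

def gt_scores(O, v_o, v_d, lam, M):
    """Ground-truth ray scores (Eq. 14). O: (3,) camera position; v_o, v_d:
    (N, 3) ray origins and unit directions; M: number of image features."""
    l = ((O - v_o) * v_d).sum(dim=-1).clamp(min=0.0)    # projection, l in R+
    h = torch.linalg.norm(v_o + l.unsqueeze(-1) * v_d - O, dim=-1)
    delta = 1.0 - torch.tanh(h / lam)   # 1 for rays through the optical center
    return delta * M / delta.sum()      # normalization of Eq. 14
```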
We use the " + }, + { + "bbox": [ + 130, + 249, + 482, + 298 + ], + "type": "inline_equation", + "content": "L2" + }, + { + "bbox": [ + 130, + 249, + 482, + 298 + ], + "type": "text", + "content": " loss to minimize the difference between the predicted " + }, + { + "bbox": [ + 130, + 249, + 482, + 298 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{s}}" + }, + { + "bbox": [ + 130, + 249, + 482, + 298 + ], + "type": "text", + "content": " and the computed ground truth " + }, + { + "bbox": [ + 130, + 249, + 482, + 298 + ], + "type": "inline_equation", + "content": "\\mathbf{s}" + }, + { + "bbox": [ + 130, + 249, + 482, + 298 + ], + "type": "text", + "content": " scores as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 238, + 302, + 481, + 337 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 302, + 481, + 337 + ], + "spans": [ + { + "bbox": [ + 238, + 302, + 481, + 337 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\frac {1}{M N} \\sum_ {i = 1} ^ {M} \\sum_ {j = 1} ^ {N} \\| \\hat {\\mathbf {s}} _ {i, j} - \\mathbf {s} _ {i, j} \\| _ {2}, \\tag {15}", + "image_path": "6c6ca0ecd4cfe0e3446fc2f934cd5e7fd550a85015b794d0fd89eb0a1bda9b01.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 131, + 342, + 481, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 342, + 481, + 367 + ], + "spans": [ + { + "bbox": [ + 131, + 342, + 481, + 367 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 342, + 481, + 367 + ], + "type": "inline_equation", + "content": "M, N" + }, + { + "bbox": [ + 131, + 342, + 481, + 367 + ], + "type": "text", + "content": " are the size of the attention map " + }, + { + "bbox": [ + 131, + 342, + 481, + 367 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 131, + 342, + 481, + 367 + ], + "type": "text", + "content": ". During each training iteration, we predict an image and a pose utilized for estimating the 3DGS model." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 383, + 291, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 383, + 291, + 395 + ], + "spans": [ + { + "bbox": [ + 132, + 383, + 291, + 395 + ], + "type": "text", + "content": "4.5 Test-time pose estimation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 400, + 481, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 400, + 481, + 449 + ], + "spans": [ + { + "bbox": [ + 130, + 400, + 481, + 449 + ], + "type": "text", + "content": "During the test phase, the predicted scores " + }, + { + "bbox": [ + 130, + 400, + 481, + 449 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{s}}" + }, + { + "bbox": [ + 130, + 400, + 481, + 449 + ], + "type": "text", + "content": " are used to select the top " + }, + { + "bbox": [ + 130, + 400, + 481, + 449 + ], + "type": "inline_equation", + "content": "N_{top}" + }, + { + "bbox": [ + 130, + 400, + 481, + 449 + ], + "type": "text", + "content": " rays, identified as the utmost relevant, and constrained to choose at most one ray per ellipsoid. Note that only a small set of rays is sufficient to estimate the camera pose. However, based on an ablation study we set " + }, + { + "bbox": [ + 130, + 400, + 481, + 449 + ], + "type": "inline_equation", + "content": "N_{top} = 100" + }, + { + "bbox": [ + 130, + 400, + 481, + 449 + ], + "type": "text", + "content": ", see Tab. 3a." 
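For completeness, the objective of Eq. 15 and the test-time selection with at most one ray per ellipsoid (N_top = 100 by default) can be sketched as below; the mean reduction of the loss and the greedy per-ellipsoid filtering are assumptions consistent with the text.

```python
import torch

def score_loss(s_hat, s):
    """Eq. 15 as a mean squared difference between score maps."""
    return ((s_hat - s) ** 2).mean()

def select_top(s_hat, ellipsoid_id, n_top=100):
    """Greedy top-N_top selection, at most one ray per ellipsoid (Sec. 4.5)."""
    keep, seen = [], set()
    for i in torch.argsort(s_hat, descending=True).tolist():
        e = int(ellipsoid_id[i])
        if e not in seen:
            seen.add(e)
            keep.append(i)
            if len(keep) == n_top:
                break
    return torch.tensor(keep)
```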
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 449, + 481, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 449, + 481, + 495 + ], + "spans": [ + { + "bbox": [ + 130, + 449, + 481, + 495 + ], + "type": "text", + "content": "The camera position is found at the intersection of selected rays, solved as a weighted Least Squares problem. Since 3D lines usually do not intersect at a single point due to discretization noise introduced by the Ellicell, we minimize the sum of squared perpendicular distances instead." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 496, + 481, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 496, + 481, + 522 + ], + "spans": [ + { + "bbox": [ + 130, + 496, + 481, + 522 + ], + "type": "text", + "content": "For the selected ray " + }, + { + "bbox": [ + 130, + 496, + 481, + 522 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_j" + }, + { + "bbox": [ + 130, + 496, + 481, + 522 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 130, + 496, + 481, + 522 + ], + "type": "inline_equation", + "content": "f = 1\\ldots N_{top}" + }, + { + "bbox": [ + 130, + 496, + 481, + 522 + ], + "type": "text", + "content": ", the error is given by the square of the distance from the camera position to predict " + }, + { + "bbox": [ + 130, + 496, + 481, + 522 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{O}}" + }, + { + "bbox": [ + 130, + 496, + 481, + 522 + ], + "type": "text", + "content": " to its projection on " + }, + { + "bbox": [ + 130, + 496, + 481, + 522 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_j" + }, + { + "bbox": [ + 130, + 496, + 481, + 522 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 192, + 528, + 481, + 563 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 528, + 481, + 563 + ], + "spans": [ + { + "bbox": [ + 192, + 528, + 481, + 563 + ], + "type": "interline_equation", + "content": "\\sum_ {f = 1} ^ {N _ {t o p}} \\left(\\left(\\hat {\\mathbf {O}} - \\mathbf {v} _ {o, f}\\right) ^ {T} \\left(\\hat {\\mathbf {O}} - \\mathbf {v} _ {o, f}\\right) - \\left(\\left(\\hat {\\mathbf {O}} - \\mathbf {v} _ {o, f}\\right) ^ {T} \\mathbf {v} _ {d, f}\\right) ^ {2}\\right), \\tag {16}", + "image_path": "26e28d1be8500769004f20c8c4f597ecdbdb020ca325739d8b10291963864a08.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 131, + 569, + 480, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 569, + 480, + 595 + ], + "spans": [ + { + "bbox": [ + 131, + 569, + 480, + 595 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 569, + 480, + 595 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_{o,f}" + }, + { + "bbox": [ + 131, + 569, + 480, + 595 + ], + "type": "text", + "content": " indicating the origin of the " + }, + { + "bbox": [ + 131, + 569, + 480, + 595 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 131, + 569, + 480, + 595 + ], + "type": "text", + "content": "-th ray and " + }, + { + "bbox": [ + 131, + 569, + 480, + 595 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_{d,f}" + }, + { + "bbox": [ + 131, + 569, + 480, + 595 + ], + "type": "text", + "content": " the respective direction. To minimize Eq. 
[16] we differentiate it with respect to " + }, + { + "bbox": [ + 131, + 569, + 480, + 595 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{O}}" + }, + { + "bbox": [ + 131, + 569, + 480, + 595 + ], + "type": "text", + "content": ", resulting in" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 240, + 601, + 481, + 635 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 240, + 601, + 481, + 635 + ], + "spans": [ + { + "bbox": [ + 240, + 601, + 481, + 635 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {O}} = \\sum_ {f = 1} ^ {N _ {t o p}} \\hat {\\mathbf {s}} _ {f} \\left(\\mathbb {I} - \\mathbf {v} _ {d, f} \\mathbf {v} _ {d, f} ^ {T}\\right) \\mathbf {v} _ {o, f}, \\tag {17}", + "image_path": "b6cfa68cc45676c05885fbe4cda9212efff033058d26218764b35bf9e9e69897.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "type": "inline_equation", + "content": "\\mathbb{I}" + }, + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "type": "text", + "content": " is the identity matrix and " + }, + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{s}}_f" + }, + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "type": "text", + "content": " are the predicted ray scores. This expression can be solved as a weighted system of linear equations." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 241, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 241, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 241, + 101 + ], + "type": "text", + "content": "M. Bortolon et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 133, + 177, + 482, + 256 + ], + "blocks": [ + { + "bbox": [ + 130, + 114, + 482, + 170 + ], + "lines": [ + { + "bbox": [ + 130, + 114, + 482, + 170 + ], + "spans": [ + { + "bbox": [ + 130, + 114, + 482, + 170 + ], + "type": "text", + "content": "Table 1: Evaluation of 6DoF pose estimation on the Mip-NeRF " + }, + { + "bbox": [ + 130, + 114, + 482, + 170 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 130, + 114, + 482, + 170 + ], + "type": "text", + "content": " dataset. We report results in terms of Mean Angular Error (MAE) and Mean Translation Error (MTE) in terms of degrees and units, " + }, + { + "bbox": [ + 130, + 114, + 482, + 170 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 130, + 114, + 482, + 170 + ], + "type": "text", + "content": ", respectively. Where " + }, + { + "bbox": [ + 130, + 114, + 482, + 170 + ], + "type": "inline_equation", + "content": "1u" + }, + { + "bbox": [ + 130, + 114, + 482, + 170 + ], + "type": "text", + "content": " is equal to the object's largest dimension. For both metrics lower is better. 
Best-performing results are highlighted in bold and green, while second best values are highlighted in orange." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 133, + 177, + 482, + 256 + ], + "lines": [ + { + "bbox": [ + 133, + 177, + 482, + 256 + ], + "spans": [ + { + "bbox": [ + 133, + 177, + 482, + 256 + ], + "type": "table", + "html": "
<table>
<tr><td></td><td colspan=6>Fixed pose prior (eval. protocol by [46])</td><td colspan=6>Random pose prior</td><td colspan=2>No pose prior</td></tr>
<tr><td></td><td colspan=2>iNeRF [46]</td><td colspan=2>NeMo + VoGE [44]</td><td colspan=2>Parallel iNeRF [25]</td><td colspan=2>iNeRF [46]</td><td colspan=2>NeMo + VoGE [44]</td><td colspan=2>Parallel iNeRF [25]</td><td colspan=2>6DGS (Ours)</td></tr>
<tr><td>Scene</td><td>MAE ↓</td><td>MTE ↓</td><td>MAE ↓</td><td>MTE ↓</td><td>MAE ↓</td><td>MTE ↓</td><td>MAE ↓</td><td>MTE ↓</td><td>MAE ↓</td><td>MTE ↓</td><td>MAE ↓</td><td>MTE ↓</td><td>MAE ↓</td><td>MTE ↓</td></tr>
<tr><td>Bicycle</td><td>39.5</td><td>0.116</td><td>43.8</td><td>0.015</td><td>35.9</td><td>0.116</td><td>76.6</td><td>0.217</td><td>111.8</td><td>0.038</td><td>44.4</td><td>0.150</td><td>12.1</td><td>0.010</td></tr>
<tr><td>Bonsai</td><td>51.3</td><td>0.228</td><td>52.5</td><td>0.036</td><td>41.1</td><td>0.223</td><td>96.7</td><td>0.385</td><td>98.9</td><td>0.073</td><td>58.2</td><td>0.298</td><td>10.5</td><td>0.038</td></tr>
<tr><td>Counter</td><td>40.7</td><td>0.324</td><td>45.6</td><td>0.072</td><td>24.7</td><td>0.212</td><td>70.3</td><td>0.487</td><td>98.1</td><td>0.139</td><td>42.1</td><td>0.435</td><td>19.6</td><td>0.043</td></tr>
<tr><td>Garden</td><td>31.0</td><td>0.121</td><td>31.8</td><td>0.026</td><td>18.2</td><td>0.090</td><td>72.8</td><td>0.210</td><td>89.2</td><td>0.038</td><td>60.0</td><td>0.144</td><td>37.8</td><td>0.015</td></tr>
<tr><td>Kitchen</td><td>38.2</td><td>0.113</td><td>41.6</td><td>0.042</td><td>37.3</td><td>0.109</td><td>100.2</td><td>0.266</td><td>122.2</td><td>0.082</td><td>65.0</td><td>0.193</td><td>23.2</td><td>0.018</td></tr>
<tr><td>Room</td><td>38.8</td><td>0.274</td><td>44.9</td><td>0.045</td><td>30.7</td><td>0.257</td><td>91.6</td><td>0.444</td><td>110.0</td><td>0.010</td><td>63.5</td><td>0.271</td><td>38.3</td><td>0.019</td></tr>
<tr><td>Stump</td><td>21.4</td><td>0.030</td><td>26.3</td><td>0.016</td><td>14.8</td><td>0.016</td><td>86.9</td><td>0.035</td><td>96.3</td><td>0.025</td><td>72.6</td><td>0.033</td><td>28.3</td><td>0.009</td></tr>
<tr><td>Avg.</td><td>37.3</td><td>0.172</td><td>40.9</td><td>0.036</td><td>28.9</td><td>0.146</td><td>85.0</td><td>0.292</td><td>103.8</td><td>0.058</td><td>58.0</td><td>0.218</td><td>24.3</td><td>0.022</td></tr>
</table>
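(Placed after Table 1 for layout; this refers to Eqs. 16-17 of Sec. 4.5 above.) Differentiating Eq. 16 yields the weighted normal equations Σ_f ŝ_f (I − v_{d,f} v_{d,f}^T)(Ô − v_{o,f}) = 0; Eq. 17 as printed shows only the right-hand side of that system, so the sketch below solves the full 3×3 problem. A minimal sketch, assuming unit-norm directions and illustrative names:

```python
import torch

def camera_center(v_o, v_d, s_hat):
    """Weighted least-squares intersection of the selected rays (Eqs. 16-17)."""
    d = torch.nn.functional.normalize(v_d, dim=-1)
    # Per-ray projectors onto the plane orthogonal to each direction, weighted.
    P = s_hat.view(-1, 1, 1) * (torch.eye(3) - d.unsqueeze(-1) * d.unsqueeze(-2))
    A = P.sum(dim=0)                                    # 3x3 normal matrix
    b = (P @ v_o.unsqueeze(-1)).sum(dim=0).squeeze(-1)
    return torch.linalg.solve(A, b)                     # estimated camera origin
```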
", + "image_path": "a2e9d3a579b184a61ff9612da0b6735f1eaa85d3cf3fb1991033710b046a579e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 272, + 198, + 285 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 272, + 198, + 285 + ], + "spans": [ + { + "bbox": [ + 132, + 272, + 198, + 285 + ], + "type": "text", + "content": "5 Results" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 297, + 259, + 309 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 297, + 259, + 309 + ], + "spans": [ + { + "bbox": [ + 132, + 297, + 259, + 309 + ], + "type": "text", + "content": "5.1 Experimental setup" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 316, + 482, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 316, + 482, + 508 + ], + "spans": [ + { + "bbox": [ + 130, + 316, + 482, + 508 + ], + "type": "text", + "content": "We evaluate 6DGS and compare with other analysis-by-synthesis methods for 6D pose estimation, including iNeRF [46], Parallel iNeRF [25], and NeMo+VoGE [43, 44]. We reproduce the results using their published code. We follow iNeRF's evaluation protocol and test on two real-world datasets: Tanks & Temples [20] and Mip-NeRF " + }, + { + "bbox": [ + 130, + 316, + 482, + 508 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 130, + 316, + 482, + 508 + ], + "type": "text", + "content": " [4]. For each dataset, we use the predefined training-test splits and evaluate them with two pose initialization pipelines: " + }, + { + "bbox": [ + 130, + 316, + 482, + 508 + ], + "type": "inline_equation", + "content": "i)" + }, + { + "bbox": [ + 130, + 316, + 482, + 508 + ], + "type": "text", + "content": " the original iNeRF initialization, where the starting pose is sampled uniformly between " + }, + { + "bbox": [ + 130, + 316, + 482, + 508 + ], + "type": "inline_equation", + "content": "[-40^{\\circ}, +40^{\\circ}]" + }, + { + "bbox": [ + 130, + 316, + 482, + 508 + ], + "type": "text", + "content": " degrees of errors and " + }, + { + "bbox": [ + 130, + 316, + 482, + 508 + ], + "type": "inline_equation", + "content": "[-0.1, +0.1]" + }, + { + "bbox": [ + 130, + 316, + 482, + 508 + ], + "type": "text", + "content": " units of translation error from the ground-truth target pose; " + }, + { + "bbox": [ + 130, + 316, + 482, + 508 + ], + "type": "inline_equation", + "content": "ii)" + }, + { + "bbox": [ + 130, + 316, + 482, + 508 + ], + "type": "text", + "content": " by randomly choosing an initialization pose from the ones used to create the 3DGS mode. Although analysis-by-synthesis methods were tested with a prior, in reality it is rarely available, so we present a second scenario to assess them under more realistic conditions. We perform multiple ablation studies to assess the sensitivity of 6DGS to different hyperparameters and settings. We quantify pose estimation results in terms of mean angular (MAE) and translation (MTE) errors (see Tab. 1 and Tab. 2) and measure the inference time." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 508, + 482, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 508, + 482, + 556 + ], + "spans": [ + { + "bbox": [ + 130, + 508, + 482, + 556 + ], + "type": "text", + "content": "Implementation Details. 
6DGS is implemented in PyTorch and the attention map was trained for 1.5K iterations (" + }, + { + "bbox": [ + 130, + 508, + 482, + 556 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 130, + 508, + 482, + 556 + ], + "type": "text", + "content": "45mins) with an NVIDIA GeForce RTX 3090. We use the Adafactor optimizer [38] with weight decay of " + }, + { + "bbox": [ + 130, + 508, + 482, + 556 + ], + "type": "inline_equation", + "content": "10^{-3}" + }, + { + "bbox": [ + 130, + 508, + 482, + 556 + ], + "type": "text", + "content": ". For speedup training, we uniformly sample 2000 3DGS ellipsoids at each iteration." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 574, + 204, + 585 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 574, + 204, + 585 + ], + "spans": [ + { + "bbox": [ + 132, + 574, + 204, + 585 + ], + "type": "text", + "content": "5.2 Datasets" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "text", + "content": "To demonstrate the applicability of 6DGS, we test on two datasets featuring real world challenges. **Tanks&Temples** [20] was created to evaluate 3D reconstruction methods with challenging real-world objects of varying sizes, acquired from human-like viewpoints and with difficult conditions (illumination, shadows, and reflections). We use the five scenes (Barn, Caterpillar, Family, Ignatius, Truck) and the train test splits given in [7,26]. The splits are object dependent, having" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 170, + 91, + 448, + 102 + ], + "type": "text", + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 133, + 144, + 482, + 210 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 482, + 139 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 482, + 139 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 482, + 139 + ], + "type": "text", + "content": "Table 2: Evaluation of 6DoF pose estimation on the Tanks&Temples [20] dataset. We show the same metrics and analysis as in Table 1" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 133, + 144, + 482, + 210 + ], + "lines": [ + { + "bbox": [ + 133, + 144, + 482, + 210 + ], + "spans": [ + { + "bbox": [ + 133, + 144, + 482, + 210 + ], + "type": "table", + "html": "
<table>
<tr><td>Objects</td><td colspan=6>Fixed pose prior (eval. protocol by [46])</td><td colspan=6>Random pose prior</td><td colspan=2>No pose prior</td></tr>
<tr><td></td><td colspan=2>iNeRF [46]</td><td colspan=2>NeMo + VoGE [44]</td><td colspan=2>Parallel iNeRF [25]</td><td colspan=2>iNeRF [46]</td><td colspan=2>NeMo + VoGE [44]</td><td colspan=2>Parallel iNeRF [25]</td><td colspan=2>6DGS (Ours)</td></tr>
<tr><td></td><td>MAE ↓</td><td>MTE ↓</td><td>MAE ↓</td><td>MTE ↓</td><td>MAE ↓</td><td>MTE ↓</td><td>MAE ↓</td><td>MTE ↓</td><td>MAE ↓</td><td>MTE ↓</td><td>MAE ↓</td><td>MTE ↓</td><td>MAE ↓</td><td>MTE ↓</td></tr>
<tr><td>Barn</td><td>26.5</td><td>0.208</td><td>51.2</td><td>0.752</td><td>22.9</td><td>0.131</td><td>89.2</td><td>0.682</td><td>92.5</td><td>0.684</td><td>85.2</td><td>0.572</td><td>30.3</td><td>0.162</td></tr>
<tr><td>Caterpillar</td><td>42.9</td><td>0.166</td><td>52.6</td><td>0.516</td><td>25.2</td><td>0.138</td><td>89.3</td><td>2.559</td><td>90.5</td><td>2.559</td><td>86.8</td><td>0.843</td><td>14.5</td><td>0.027</td></tr>
<tr><td>Family</td><td>42.8</td><td>0.794</td><td>58.4</td><td>1.130</td><td>22.9</td><td>0.507</td><td>93.9</td><td>1.505</td><td>97.0</td><td>1.506</td><td>99.0</td><td>2.028</td><td>20.6</td><td>0.468</td></tr>
<tr><td>Ignatius</td><td>31.4</td><td>0.723</td><td>51.2</td><td>1.193</td><td>23.4</td><td>0.604</td><td>84.1</td><td>1.489</td><td>85.4</td><td>1.491</td><td>86.9</td><td>1.326</td><td>15.5</td><td>0.441</td></tr>
<tr><td>Truck</td><td>31.6</td><td>0.370</td><td>54.6</td><td>1.236</td><td>29.4</td><td>0.351</td><td>94.4</td><td>1.042</td><td>97.7</td><td>1.045</td><td>97.6</td><td>0.883</td><td>27.5</td><td>0.242</td></tr>
<tr><td>Avg.</td><td>35.0</td><td>0.452</td><td>53.6</td><td>0.965</td><td>24.7</td><td>0.346</td><td>90.2</td><td>1.455</td><td>92.6</td><td>1.457</td><td>91.1</td><td>1.130</td><td>21.7</td><td>0.268</td></tr>
</table>
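The MAE/MTE columns above follow the conventions stated in the captions (degrees; units with 1u equal to the object's largest dimension). The exact formulas are not spelled out in the text, so the definitions below are assumptions consistent with those conventions:

```python
import torch

def mae_deg(R_est, R_gt):
    """Assumed MAE: geodesic distance between rotation matrices, in degrees."""
    cos = ((R_est.transpose(-1, -2) @ R_gt).diagonal(dim1=-2, dim2=-1).sum(-1) - 1) / 2
    return torch.rad2deg(torch.arccos(cos.clamp(-1.0, 1.0))).mean()

def mte_units(t_est, t_gt, largest_dim):
    """Assumed MTE: translation error in object units (1u = largest dimension)."""
    return (torch.linalg.norm(t_est - t_gt, dim=-1) / largest_dim).mean()
```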
", + "image_path": "3012aa449ba9918baf363c515df23ce67a411d53a3ad2ea18f243f23bbed3fa9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 228, + 482, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 228, + 482, + 289 + ], + "spans": [ + { + "bbox": [ + 130, + 228, + 482, + 289 + ], + "type": "text", + "content": "on average " + }, + { + "bbox": [ + 130, + 228, + 482, + 289 + ], + "type": "inline_equation", + "content": "\\approx 247" + }, + { + "bbox": [ + 130, + 228, + 482, + 289 + ], + "type": "text", + "content": " training images " + }, + { + "bbox": [ + 130, + 228, + 482, + 289 + ], + "type": "inline_equation", + "content": "(87\\%)" + }, + { + "bbox": [ + 130, + 228, + 482, + 289 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 228, + 482, + 289 + ], + "type": "inline_equation", + "content": "\\approx 35" + }, + { + "bbox": [ + 130, + 228, + 482, + 289 + ], + "type": "text", + "content": " testing images " + }, + { + "bbox": [ + 130, + 228, + 482, + 289 + ], + "type": "inline_equation", + "content": "(12\\%)" + }, + { + "bbox": [ + 130, + 228, + 482, + 289 + ], + "type": "text", + "content": ". Mip-NeRF " + }, + { + "bbox": [ + 130, + 228, + 482, + 289 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 130, + 228, + 482, + 289 + ], + "type": "text", + "content": " consists of seven scenes: two outdoors and four indoors, with a structured scenario and background. We use the original train-test splits at a ratio of 1:8. Following [25], we resize all the objects to fit inside a unit box. The translation error is relative to the object size, defined as a unit." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 308, + 204, + 320 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 308, + 204, + 320 + ], + "spans": [ + { + "bbox": [ + 132, + 308, + 204, + 320 + ], + "type": "text", + "content": "5.3 Analysis" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 329, + 485, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 329, + 485, + 544 + ], + "spans": [ + { + "bbox": [ + 130, + 329, + 485, + 544 + ], + "type": "text", + "content": "Quantitative Analysis: Tab. 1 and Tab. 2 present the results obtained across both datasets. 6DGS consistently outperforms baseline methods across all datasets and pose initialization pipelines. Notably, 6DGS achieves lower error rates than the second-best results, especially under identical comparison conditions (i.e., random pose prior). Even when initialized from a fixed pose proximal to the known camera, 6DGS still excels over baselines in most scenes. Parallel iNeRF demonstrates improvement over iNeRF across all tested scenarios, consistent with its reported enhancements, but both methods' performance drops with random initialization. Likewise, " + }, + { + "bbox": [ + 130, + 329, + 485, + 544 + ], + "type": "inline_equation", + "content": "\\mathrm{NeMo + VoGE}" + }, + { + "bbox": [ + 130, + 329, + 485, + 544 + ], + "type": "text", + "content": " performs worst, especially with random pose prior due to the utilization of a smaller number of larger ellipsoids in their approach. In contrast, 6DGS leverages approximately 300,000 ellipsoids of varying sizes obtained via 3DGS, as opposed to their mesh-to-ellipsoid method, which utilizes only about 5,000 larger ellipsoids. 
This fundamental disparity in ellipsoid size and quantity is a crucial factor contributing to the performance difference. Additionally, 6DGS exhibits faster processing speeds, operating nearly in real-time at 15 frames per second (fps) compared to the 0.05fps of Parallel iNeRF and 0.16fps of iNeRF. Please refer to the supplementary material for the complete table on timings." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "type": "text", + "content": "Qualitative Analysis: Figure 4 illustrates qualitative findings revealing notable observations. Particularly, we notice that the estimated poses exhibit proximity to the object relative to ground truth, attributable to the quantization effect introduced by the Ellicell. The qualitative findings verify the quantitative outcomes, albeit occasional inconsistencies in results, such as in the Counter scene, with the analysis-by-synthesis approaches showcasing a total incoherent output in regards to the overall scene (notice how the estimated poses are completely off the target). Moreover, the performance of 6DGS demonstrates consistency across varied scenarios, encompassing single-object instances and indoor settings, despite substantial variations in the models utilized." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "type": "text", + "content": "M. Bortolon et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 149, + 125, + 284, + 194 + ], + "blocks": [ + { + "bbox": [ + 206, + 114, + 225, + 122 + ], + "lines": [ + { + "bbox": [ + 206, + 114, + 225, + 122 + ], + "spans": [ + { + "bbox": [ + 206, + 114, + 225, + 122 + ], + "type": "text", + "content": "Truck" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 149, + 125, + 284, + 194 + ], + "lines": [ + { + "bbox": [ + 149, + 125, + 284, + 194 + ], + "spans": [ + { + "bbox": [ + 149, + 125, + 284, + 194 + ], + "type": "image", + "image_path": "13310f7eee6dc8d4b1bad5467d85fbff55008497deac449b82571e3f8d9c51fc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 169, + 194, + 199, + 201 + ], + "lines": [ + { + "bbox": [ + 169, + 194, + 199, + 201 + ], + "spans": [ + { + "bbox": [ + 169, + 194, + 199, + 201 + ], + "type": "text", + "content": "Target image" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 228, + 194, + 263, + 201 + ], + "lines": [ + { + "bbox": [ + 228, + 194, + 263, + 201 + ], + "spans": [ + { + "bbox": [ + 228, + 194, + 263, + 201 + ], + "type": "text", + "content": "Estimated NVS" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 156, + 201, + 214, + 235 + ], + "blocks": [ + { + "bbox": [ + 156, + 201, + 214, + 235 + ], + "lines": [ + { + "bbox": [ + 156, + 201, + 214, + 235 + ], + "spans": [ + { + "bbox": [ + 156, + 201, + 214, + 235 + ], + "type": "image", + "image_path": "51049ae0156eb17b5a00d841750630afb800339bf63ae6bc11b1be9008aac152.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 231, + 201, + 275, + 235 + ], + "blocks": [ + { + "bbox": [ + 231, + 201, + 275, + 235 + ], + "lines": [ + { + "bbox": [ + 231, + 201, + 275, + 235 + ], + "spans": [ + { + "bbox": [ + 231, + 201, + 275, + 235 + ], + "type": "image", + "image_path": "50c6d3e729bc8ed1f50636857cc5361d711f4de464eb51e8131541dcb970d7be.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 330, + 129, + 428, + 194 + ], + "blocks": [ + { + "bbox": [ + 384, + 114, + 406, + 122 + ], + "lines": [ + { + "bbox": [ + 384, + 114, + 406, + 122 + ], + "spans": [ + { + "bbox": [ + 384, + 114, + 406, + 122 + ], + "type": "text", + "content": "Family" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 330, + 129, + 428, + 194 + ], + "lines": [ + { + "bbox": [ + 330, + 129, + 428, + 194 + ], + "spans": [ + { + "bbox": [ + 330, + 129, + 428, + 194 + ], + "type": "image", + "image_path": "13ec49e8e182d08ce184f3eb64387be68a5212b64deeb6419bb5644a5f663d81.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 334, + 202, + 393, + 235 + ], + "blocks": [ + { + "bbox": [ + 349, + 195, + 379, + 202 + ], + "lines": [ + { + "bbox": [ + 349, + 195, + 379, + 202 + ], + "spans": [ + { + "bbox": [ + 349, + 195, + 379, + 202 + ], + "type": "text", + "content": "Target image" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 334, + 202, + 393, + 235 + ], + "lines": [ + { + "bbox": [ + 334, + 202, + 
393, + 235 + ], + "spans": [ + { + "bbox": [ + 334, + 202, + 393, + 235 + ], + "type": "image", + "image_path": "8136735097001b97815e0b8ac4f39707f6c490ca7d8360f39d3bc9ce3830fb80.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 398, + 201, + 455, + 235 + ], + "blocks": [ + { + "bbox": [ + 408, + 195, + 443, + 201 + ], + "lines": [ + { + "bbox": [ + 408, + 195, + 443, + 201 + ], + "spans": [ + { + "bbox": [ + 408, + 195, + 443, + 201 + ], + "type": "text", + "content": "Estimated NVS" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 398, + 201, + 455, + 235 + ], + "lines": [ + { + "bbox": [ + 398, + 201, + 455, + 235 + ], + "spans": [ + { + "bbox": [ + 398, + 201, + 455, + 235 + ], + "type": "image", + "image_path": "39a8ed92b2fc622381ef0a80afe613e0ea5c6533e3aab3c4ed7082cfc9b5b8dc.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 156, + 251, + 290, + 320 + ], + "blocks": [ + { + "bbox": [ + 208, + 242, + 235, + 249 + ], + "lines": [ + { + "bbox": [ + 208, + 242, + 235, + 249 + ], + "spans": [ + { + "bbox": [ + 208, + 242, + 235, + 249 + ], + "type": "text", + "content": "Counter" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 156, + 251, + 290, + 320 + ], + "lines": [ + { + "bbox": [ + 156, + 251, + 290, + 320 + ], + "spans": [ + { + "bbox": [ + 156, + 251, + 290, + 320 + ], + "type": "image", + "image_path": "fa23308f2b98af0b8ced5f9ed8a2a95eff6d216ad1fa09fce59f9ae9dc0d656d.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 176, + 319, + 205, + 325 + ], + "lines": [ + { + "bbox": [ + 176, + 319, + 205, + 325 + ], + "spans": [ + { + "bbox": [ + 176, + 319, + 205, + 325 + ], + "type": "text", + "content": "Target image" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 234, + 319, + 268, + 326 + ], + "lines": [ + { + "bbox": [ + 234, + 319, + 268, + 326 + ], + "spans": [ + { + "bbox": [ + 234, + 319, + 268, + 326 + ], + "type": "text", + "content": "Estimated NVS" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 160, + 326, + 219, + 365 + ], + "blocks": [ + { + "bbox": [ + 160, + 326, + 219, + 365 + ], + "lines": [ + { + "bbox": [ + 160, + 326, + 219, + 365 + ], + "spans": [ + { + "bbox": [ + 160, + 326, + 219, + 365 + ], + "type": "image", + "image_path": "6aa65dd389f7df608f16281eef0d4cbd6234809eb7a7951d334bc68520db6483.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 222, + 326, + 280, + 364 + ], + "blocks": [ + { + "bbox": [ + 222, + 326, + 280, + 364 + ], + "lines": [ + { + "bbox": [ + 222, + 326, + 280, + 364 + ], + "spans": [ + { + "bbox": [ + 222, + 326, + 280, + 364 + ], + "type": "image", + "image_path": "5ad4d8add8c2d8b4ef68e8887e708974801af9e44633d130bd164eb0352c42de.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 331, + 252, + 451, + 320 + ], + "blocks": [ + { + "bbox": [ + 378, + 242, + 401, + 249 + ], + "lines": [ + { + "bbox": [ + 378, + 242, + 401, + 249 + ], + "spans": [ + { + "bbox": [ + 378, + 242, + 401, + 249 + ], + "type": "text", + "content": "Bonsai" 
+ } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 331, + 252, + 451, + 320 + ], + "lines": [ + { + "bbox": [ + 331, + 252, + 451, + 320 + ], + "spans": [ + { + "bbox": [ + 331, + 252, + 451, + 320 + ], + "type": "image", + "image_path": "7f6a3a23ad870c0197c6411415fd8c0ee0ad55ad8f3ff31ce2f8da7438b3feb1.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 345, + 319, + 373, + 326 + ], + "lines": [ + { + "bbox": [ + 345, + 319, + 373, + 326 + ], + "spans": [ + { + "bbox": [ + 345, + 319, + 373, + 326 + ], + "type": "text", + "content": "Target image" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 403, + 319, + 436, + 326 + ], + "lines": [ + { + "bbox": [ + 403, + 319, + 436, + 326 + ], + "spans": [ + { + "bbox": [ + 403, + 319, + 436, + 326 + ], + "type": "text", + "content": "Estimated NVS" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 329, + 326, + 388, + 365 + ], + "blocks": [ + { + "bbox": [ + 329, + 326, + 388, + 365 + ], + "lines": [ + { + "bbox": [ + 329, + 326, + 388, + 365 + ], + "spans": [ + { + "bbox": [ + 329, + 326, + 388, + 365 + ], + "type": "image", + "image_path": "1b3c82462a897deaaf06b210b8778693ea46106f3d06a8016dea5ec849bcf72e.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 391, + 326, + 449, + 365 + ], + "blocks": [ + { + "bbox": [ + 391, + 326, + 449, + 365 + ], + "lines": [ + { + "bbox": [ + 391, + 326, + 449, + 365 + ], + "spans": [ + { + "bbox": [ + 391, + 326, + 449, + 365 + ], + "type": "image", + "image_path": "3b6f0ae544df771fa6bce565993ea8a003cb284359d4d4a6f5d242b24f5b96e1.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 139, + 378, + 156, + 396 + ], + "blocks": [ + { + "bbox": [ + 139, + 378, + 156, + 396 + ], + "lines": [ + { + "bbox": [ + 139, + 378, + 156, + 396 + ], + "spans": [ + { + "bbox": [ + 139, + 378, + 156, + 396 + ], + "type": "image", + "image_path": "eb872e69fea2e95491e7b9198300b6a6d3e301447ee3395d18f5f38325cace1a.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 157, + 378, + 172, + 396 + ], + "blocks": [ + { + "bbox": [ + 157, + 378, + 172, + 396 + ], + "lines": [ + { + "bbox": [ + 157, + 378, + 172, + 396 + ], + "spans": [ + { + "bbox": [ + 157, + 378, + 172, + 396 + ], + "type": "image", + "image_path": "24fbd16aa5751e943e9bf8d21cbc5cc7c7e4da4bc1633952df7338ed9afd945b.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 173, + 378, + 186, + 396 + ], + "blocks": [ + { + "bbox": [ + 173, + 378, + 186, + 396 + ], + "lines": [ + { + "bbox": [ + 173, + 378, + 186, + 396 + ], + "spans": [ + { + "bbox": [ + 173, + 378, + 186, + 396 + ], + "type": "image", + "image_path": "bb4ff5db10655d3bca5c6394c0149a06a86f7717f4a651ba84bf97217aaa147b.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 187, + 379, + 203, + 388 + ], + "blocks": [ + { + "bbox": [ + 187, + 379, + 203, + 388 + ], + "lines": [ + { + "bbox": [ + 187, + 379, + 203, + 388 + ], + "spans": [ + { + "bbox": [ + 187, + 379, + 203, + 388 + 
], + "type": "image", + "image_path": "13f2b9b34dde64712afdf43686a02f00c7b42e4b5d1505576377925d6a815d18.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 188, + 388, + 203, + 395 + ], + "blocks": [ + { + "bbox": [ + 188, + 388, + 203, + 395 + ], + "lines": [ + { + "bbox": [ + 188, + 388, + 203, + 395 + ], + "spans": [ + { + "bbox": [ + 188, + 388, + 203, + 395 + ], + "type": "image", + "image_path": "2a0ec10c3ac048e38a42f3cdcdde3c5204ba0759ecaa3a936196eb720fa70631.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 207, + 379, + 222, + 388 + ], + "blocks": [ + { + "bbox": [ + 207, + 379, + 222, + 388 + ], + "lines": [ + { + "bbox": [ + 207, + 379, + 222, + 388 + ], + "spans": [ + { + "bbox": [ + 207, + 379, + 222, + 388 + ], + "type": "image", + "image_path": "27d37ab08d225afa573cdde677b4bc371d3ddeb06b7051c8ba454cb107eb115e.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 208, + 388, + 222, + 395 + ], + "lines": [ + { + "bbox": [ + 208, + 388, + 222, + 395 + ], + "spans": [ + { + "bbox": [ + 208, + 388, + 222, + 395 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 225, + 379, + 241, + 388 + ], + "blocks": [ + { + "bbox": [ + 225, + 379, + 241, + 388 + ], + "lines": [ + { + "bbox": [ + 225, + 379, + 241, + 388 + ], + "spans": [ + { + "bbox": [ + 225, + 379, + 241, + 388 + ], + "type": "image", + "image_path": "990a949f1333feeb91c3d837ff02c130b4db1c053ae6931abf01bb8a60de7e1e.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 226, + 388, + 241, + 395 + ], + "lines": [ + { + "bbox": [ + 226, + 388, + 241, + 395 + ], + "spans": [ + { + "bbox": [ + 226, + 388, + 241, + 395 + ], + "type": "text", + "content": "prior" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 246, + 378, + 261, + 388 + ], + "blocks": [ + { + "bbox": [ + 246, + 378, + 261, + 388 + ], + "lines": [ + { + "bbox": [ + 246, + 378, + 261, + 388 + ], + "spans": [ + { + "bbox": [ + 246, + 378, + 261, + 388 + ], + "type": "image", + "image_path": "a8b5579e01770265e4875d125da47fedd52bf5131356aa4e4a100ec8bfc444e3.jpg" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 247, + 388, + 268, + 395 + ], + "lines": [ + { + "bbox": [ + 247, + 388, + 268, + 395 + ], + "spans": [ + { + "bbox": [ + 247, + 388, + 268, + 395 + ], + "type": "text", + "content": "D" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_caption" + } + ], + "index": 35 + }, + { + "type": "image", + "bbox": [ + 291, + 379, + 307, + 396 + ], + "blocks": [ + { + "bbox": [ + 291, + 379, + 307, + 396 + ], + "lines": [ + { + "bbox": [ + 291, + 379, + 307, + 396 + ], + "spans": [ + { + "bbox": [ + 291, + 379, + 307, + 396 + ], + "type": "image", + "image_path": "ba9a5a8d015e2c3432c3eec472695b604bebef4c913c3b8907ae242acc6dd5a6.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 308, + 379, + 326, + 385 + ], + "lines": [ + { + "bbox": [ + 308, + 379, + 326, + 385 + ], + "spans": [ + { + "bbox": [ + 308, + 379, + 326, + 385 + ], + "type": "text", + "content": "Parallel" + } + ] + } + ], + "index": 38, + 
"angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 307, + 385, + 328, + 390 + ], + "lines": [ + { + "bbox": [ + 307, + 385, + 328, + 390 + ], + "spans": [ + { + "bbox": [ + 307, + 385, + 328, + 390 + ], + "type": "text", + "content": "jNeRFw/" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 307, + 390, + 328, + 395 + ], + "lines": [ + { + "bbox": [ + 307, + 390, + 328, + 395 + ], + "spans": [ + { + "bbox": [ + 307, + 390, + 328, + 395 + ], + "type": "text", + "content": "north w" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 330, + 379, + 346, + 396 + ], + "blocks": [ + { + "bbox": [ + 330, + 379, + 346, + 396 + ], + "lines": [ + { + "bbox": [ + 330, + 379, + 346, + 396 + ], + "spans": [ + { + "bbox": [ + 330, + 379, + 346, + 396 + ], + "type": "image", + "image_path": "d221bc754f3503497ac01adac637d1455865d72b47b42046e69e6fef60bf1071.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 350, + 380, + 369, + 386 + ], + "lines": [ + { + "bbox": [ + 350, + 380, + 369, + 386 + ], + "spans": [ + { + "bbox": [ + 350, + 380, + 369, + 386 + ], + "type": "text", + "content": "Parallel" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 354, + 392, + 365, + 397 + ], + "lines": [ + { + "bbox": [ + 354, + 392, + 365, + 397 + ], + "spans": [ + { + "bbox": [ + 354, + 392, + 365, + 397 + ], + "type": "text", + "content": "prior" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 374, + 379, + 390, + 397 + ], + "blocks": [ + { + "bbox": [ + 350, + 386, + 371, + 392 + ], + "lines": [ + { + "bbox": [ + 350, + 386, + 371, + 392 + ], + "spans": [ + { + "bbox": [ + 350, + 386, + 371, + 392 + ], + "type": "text", + "content": "NeRF w/c" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 374, + 379, + 390, + 397 + ], + "lines": [ + { + "bbox": [ + 374, + 379, + 390, + 397 + ], + "spans": [ + { + "bbox": [ + 374, + 379, + 390, + 397 + ], + "type": "image", + "image_path": "2f3721800e1b80d25f6bc516aa30fb91044e9784d09b1702ab2dd418f2c32c8b.jpg" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 391, + 382, + 406, + 388 + ], + "lines": [ + { + "bbox": [ + 391, + 382, + 406, + 388 + ], + "spans": [ + { + "bbox": [ + 391, + 382, + 406, + 388 + ], + "type": "text", + "content": "NeM" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 391, + 388, + 422, + 394 + ], + "lines": [ + { + "bbox": [ + 391, + 388, + 422, + 394 + ], + "spans": [ + { + "bbox": [ + 391, + 388, + 422, + 394 + ], + "type": "text", + "content": "VoGE w/ prior" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 131, + 410, + 482, + 498 + ], + "lines": [ + { + "bbox": [ + 131, + 410, + 482, + 498 + ], + "spans": [ + { + "bbox": [ + 131, + 410, + 482, + 498 + ], + "type": "text", + "content": "Fig. 4: The illustration presents qualitative results from Tanks & Temple (upper row) and Mip-NeRF " + }, + { + "bbox": [ + 131, + 410, + 482, + 498 + ], + "type": "inline_equation", + "content": "360^{\\circ}" + }, + { + "bbox": [ + 131, + 410, + 482, + 498 + ], + "type": "text", + "content": " (lower row) datasets. 
Each scene showcases the target images (bottom left) along with their corresponding Novel View Synthesis (NVS) outputs (bottom right), derived from the camera poses estimated by 6DGS (located on the top). Furthermore, the estimated camera poses from the comparative baselines are visualized, with distinct colors as indicated in the image legend. The NVS of each scene is rendered based on the provided 3DGS model. Please check the supplementary material for more qualitative results." + } + ] + } + ], + "index": 52, + "angle": 0, + "type": "image_caption" + } + ], + "index": 45 + }, + { + "type": "image", + "bbox": [ + 425, + 378, + 441, + 396 + ], + "blocks": [ + { + "bbox": [ + 425, + 378, + 441, + 396 + ], + "lines": [ + { + "bbox": [ + 425, + 378, + 441, + 396 + ], + "spans": [ + { + "bbox": [ + 425, + 378, + 441, + 396 + ], + "type": "image", + "image_path": "b0336dcb57af9db42df841c33466de04eba655c2efda0676255b98d3d490cc4e.jpg" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 446, + 379, + 468, + 385 + ], + "lines": [ + { + "bbox": [ + 446, + 379, + 468, + 385 + ], + "spans": [ + { + "bbox": [ + 446, + 379, + 468, + 385 + ], + "type": "text", + "content": "NeMo +" + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 447, + 385, + 469, + 390 + ], + "lines": [ + { + "bbox": [ + 447, + 385, + 469, + 390 + ], + "spans": [ + { + "bbox": [ + 447, + 385, + 469, + 390 + ], + "type": "text", + "content": "VoGE w/o" + } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 451, + 390, + 463, + 396 + ], + "lines": [ + { + "bbox": [ + 451, + 390, + 463, + 396 + ], + "spans": [ + { + "bbox": [ + 451, + 390, + 463, + 396 + ], + "type": "text", + "content": "prior" + } + ] + } + ], + "index": 51, + "angle": 0, + "type": "image_caption" + } + ], + "index": 48 + }, + { + "bbox": [ + 132, + 514, + 243, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 514, + 243, + 525 + ], + "spans": [ + { + "bbox": [ + 132, + 514, + 243, + 525 + ], + "type": "text", + "content": "5.4 Ablation studies" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 130, + 533, + 482, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 593 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 593 + ], + "type": "text", + "content": "Our ablation studies involve the analysis of the number of rays selected for the pose estimation (Tab. 3a), the number of rays that we cast from a Ellicell (Tab. 3b) as well as the different feature size on the MLP channels (Tab. 3c). The supplementary material contains additional ablations that analyze 6DGS performance with low-quality 3DGS models." + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "text", + "content": "We find that the number of selected rays mainly affects the angular error, while the translation error remains relatively stable. Increasing the number of rays decreases the angular error but slightly increases the translation error, likely due to less confident rays contributing to the pose estimation. The optimal balance between translation and angular errors is achieved between 100 to 150 rays, with 100 being the best. 
The slight increase in error with more " + }, + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "inline_equation", + "content": "N_{top}" + }, + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "text", + "content": " rays is due to" + } + ] + } + ], + "index": 55 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 170, + 91, + 448, + 102 + ], + "type": "text", + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 114, + 482, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 114, + 482, + 148 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 482, + 148 + ], + "type": "text", + "content": "Table 3: Ablation study on the number of rays selected for pose estimation, on the rays cast from each ellipsoid and on the MLP channels using Mip-NeRF 360 [4]. Underline indicates the default values used." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 154, + 228, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 154, + 228, + 171 + ], + "spans": [ + { + "bbox": [ + 132, + 154, + 228, + 171 + ], + "type": "text", + "content": "(a) Number of rays used for pose estimation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 230, + 154, + 353, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 154, + 353, + 171 + ], + "spans": [ + { + "bbox": [ + 230, + 154, + 353, + 171 + ], + "type": "text", + "content": "(b) Number of cast rays per ellipsoid." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 133, + 172, + 228, + 218 + ], + "blocks": [ + { + "bbox": [ + 133, + 172, + 228, + 218 + ], + "lines": [ + { + "bbox": [ + 133, + 172, + 228, + 218 + ], + "spans": [ + { + "bbox": [ + 133, + 172, + 228, + 218 + ], + "type": "table", + "html": "
\\( {N}_{\\text{top }} \\)MAE (°) \\( \\downarrow \\)MTE (u) \\( \\downarrow \\)Time (s)
2029.00.02350.03
5026.30.02270.04
10024.30.02170.06
15024.40.02190.9
20024.50.02220.11
", + "image_path": "f30f633f20ae79fba7749ed3982a17038a11267a53e9af76f2f3258761784557.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 230, + 171, + 354, + 218 + ], + "blocks": [ + { + "bbox": [ + 230, + 171, + 354, + 218 + ], + "lines": [ + { + "bbox": [ + 230, + 171, + 354, + 218 + ], + "spans": [ + { + "bbox": [ + 230, + 171, + 354, + 218 + ], + "type": "table", + "html": "
<table>
<tr><td># of cast rays</td><td>MAE (°)↓</td><td>MTE (u)↓</td><td>Time (s)</td></tr>
<tr><td>20</td><td>29.0</td><td>0.0235</td><td>0.04</td></tr>
<tr><td>35</td><td>24.7</td><td>0.0220</td><td>0.04</td></tr>
<tr><td>50</td><td>24.3</td><td>0.0217</td><td>0.06</td></tr>
<tr><td>65</td><td>25.1</td><td>0.0218</td><td>0.09</td></tr>
<tr><td>80</td><td>25.2</td><td>0.0221</td><td>0.15</td></tr>
</table>
", + "image_path": "e0925e54eb1e7eb5a1051d421eefac774d30439fe1a8e077ef71ed1434bdd9d7.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 363, + 154, + 467, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 363, + 154, + 467, + 163 + ], + "spans": [ + { + "bbox": [ + 363, + 154, + 467, + 163 + ], + "type": "text", + "content": "(c) MLP channel feature size." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 357, + 171, + 474, + 201 + ], + "blocks": [ + { + "bbox": [ + 357, + 171, + 474, + 201 + ], + "lines": [ + { + "bbox": [ + 357, + 171, + 474, + 201 + ], + "spans": [ + { + "bbox": [ + 357, + 171, + 474, + 201 + ], + "type": "table", + "html": "
<table>
<tr><td>MLP channels</td><td>MAE (°)↓</td><td>MTE (u)↓</td><td>Time (s)</td></tr>
<tr><td>256</td><td>29.4</td><td>0.0273</td><td>0.04</td></tr>
<tr><td>512</td><td>24.3</td><td>0.0217</td><td>0.06</td></tr>
<tr><td>1024</td><td>30.1</td><td>0.0228</td><td>0.27</td></tr>
</table>
", + "image_path": "a1c10670ff200d5ebc3a7bd1b9adc5668557af9d04517a2a2b10743b9e045689.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 237, + 481, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 237, + 481, + 273 + ], + "spans": [ + { + "bbox": [ + 130, + 237, + 481, + 273 + ], + "type": "text", + "content": "introducing rays not pointing precisely to the camera's optical center. Similar to what we observed in the qualitative examples, the noisy rays make the weighted Least Squares estimating the camera closer to the object than it actually is." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 274, + 482, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 274, + 482, + 429 + ], + "spans": [ + { + "bbox": [ + 130, + 274, + 482, + 429 + ], + "type": "text", + "content": "Regarding the impact of the varying number of rays cast from the Ellicells, the angular error tends to remain relatively constant across different configurations. In contrast, the translation error decreases when 50 cast rays are used, and then increases again. This behavior is connected to network generalization capability. Increasing the number of rays allows the network to fit the training set better, but at test time, it makes the network more prone to noise and consequently selecting the wrong rays, thus increasing the error. We observe this generalization issue when increasing the MLP channels, see Tab. 3c, particularly given the limited and uneven distribution of training images (" + }, + { + "bbox": [ + 130, + 274, + 482, + 429 + ], + "type": "inline_equation", + "content": "\\approx" + }, + { + "bbox": [ + 130, + 274, + 482, + 429 + ], + "type": "text", + "content": " 150). Moreover, the processing time increases proportionally with the number of rays and the MLP channels; upon exceeding the default values for rays and feature size, a notable surge in processing time is observed, reaching approximately 10fps and 13fps, respectively." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 443, + 226, + 455 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 443, + 226, + 455 + ], + "spans": [ + { + "bbox": [ + 132, + 443, + 226, + 455 + ], + "type": "text", + "content": "6 Conclusions" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 468, + 482, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 468, + 482, + 587 + ], + "spans": [ + { + "bbox": [ + 130, + 468, + 482, + 587 + ], + "type": "text", + "content": "In this study, we proposed a novel ray sampling by attention method for estimating 6DoF camera poses from a single image and a 3DGS scene model. Our analytical evaluation demonstrates its robustness and efficiency without requiring initialization, up to " + }, + { + "bbox": [ + 130, + 468, + 482, + 587 + ], + "type": "inline_equation", + "content": "22\\%" + }, + { + "bbox": [ + 130, + 468, + 482, + 587 + ], + "type": "text", + "content": " in accuracy and while being faster by a big margin, approx. " + }, + { + "bbox": [ + 130, + 468, + 482, + 587 + ], + "type": "inline_equation", + "content": "94\\mathrm{x}" + }, + { + "bbox": [ + 130, + 468, + 482, + 587 + ], + "type": "text", + "content": " faster. Furthermore, the proposed method formulates and utilizes a novel ray generation methodology in order to explore diverse camera pose hypotheses in accordance to an effective attention mechanism. 
Our method exhibits enhanced robustness across real-world datasets and holds promise for real-time deployment in robotics and other fields. Future research endeavors will focus on improving accuracy and extending applicability to diverse scenes and objects." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 588, + 481, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 588, + 481, + 624 + ], + "spans": [ + { + "bbox": [ + 130, + 588, + 481, + 624 + ], + "type": "text", + "content": "Limitations. The main constraint of 6DGS is the need for retraining with each new scene. This could be mitigated with meta-learning, particularly when similar objects or scenes are under consideration." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "type": "text", + "content": "M. Bortolon et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 114, + 240, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 114, + 240, + 129 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 240, + 129 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 139, + 482, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 139, + 482, + 224 + ], + "spans": [ + { + "bbox": [ + 130, + 139, + 482, + 224 + ], + "type": "text", + "content": "This work is part of the RePAIR project that has received funding from the European Union's Horizon 2020 research and innovation programme under grant agreement No. 964854. This work has also received funding from the European Union's Horizon Europe research and innovation programme under grant agreement No. 101092043, project AGILEHAND (Smart Grading, Handling and Packaging Solutions for Soft and Deformable Products in Agile and Reconfigurable Lines). We thank S. Fiorini for the discussion on the optimizers." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 240, + 198, + 252 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 240, + 198, + 252 + ], + "spans": [ + { + "bbox": [ + 133, + 240, + 198, + 252 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 264, + 482, + 665 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 138, + 264, + 482, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 264, + 482, + 285 + ], + "spans": [ + { + "bbox": [ + 138, + 264, + 482, + 285 + ], + "type": "text", + "content": "1. Google maps nerf integration. https://blog.google/products/maps/sustainable-immersive-maps-announcements/, accessed: 2024-03-07" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 286, + 481, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 286, + 481, + 307 + ], + "spans": [ + { + "bbox": [ + 138, + 286, + 481, + 307 + ], + "type": "text", + "content": "2. 
Akenine-Mo, T., Haines, E., Hoffman, N., et al.: Real-time rendering. AK Pe- ters/CRC Press (2018)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 308, + 481, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 308, + 481, + 339 + ], + "spans": [ + { + "bbox": [ + 138, + 308, + 481, + 339 + ], + "type": "text", + "content": "3. Almkvist, G., Berndt, B.: Gauss, landen, ramanujan, the arithmetic-geometric mean, ellipses, " + }, + { + "bbox": [ + 138, + 308, + 481, + 339 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 138, + 308, + 481, + 339 + ], + "type": "text", + "content": ", and the ladies diary. The American Mathematical Monthly 95(7), 585-608 (1988)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 340, + 481, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 340, + 481, + 361 + ], + "spans": [ + { + "bbox": [ + 138, + 340, + 481, + 361 + ], + "type": "text", + "content": "4. Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In: CVPR (2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 362, + 481, + 383 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 362, + 481, + 383 + ], + "spans": [ + { + "bbox": [ + 138, + 362, + 481, + 383 + ], + "type": "text", + "content": "5. Beckers, B., Beckers, P.: Fast and accurate view factor generation. In: FICUP (2016)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 384, + 481, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 384, + 481, + 415 + ], + "spans": [ + { + "bbox": [ + 138, + 384, + 481, + 415 + ], + "type": "text", + "content": "6. Bortolon, M., Tsesmelis, T., James, S., Poiesi, F., Del Bue, A.: Ifnrf: Initialization free and fast 6 dof pose estimation from a single image and a nef model. In: ICRA (2024)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 416, + 481, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 416, + 481, + 437 + ], + "spans": [ + { + "bbox": [ + 138, + 416, + 481, + 437 + ], + "type": "text", + "content": "7. Chen, A., Xu, Z., Geiger, A., Yu, J., Su, H.: Tensorf: Tensorial radiance fields. In: ECCV (2022)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 437, + 481, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 437, + 481, + 470 + ], + "spans": [ + { + "bbox": [ + 138, + 437, + 481, + 470 + ], + "type": "text", + "content": "8. Chen, S., Song, S., Zhao, J., Feng, T., Ye, C., Xiong, L., Li, D.: Robust dual quadric initialization for forward-translating camera movements. RAL 6(3), 4712-4719 (2021)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 471, + 481, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 471, + 481, + 491 + ], + "spans": [ + { + "bbox": [ + 138, + 471, + 481, + 491 + ], + "type": "text", + "content": "9. Crocco, M., Rubino, C., Del Bue, A.: Structure from motion with objects. In: CVPR (2016)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 492, + 481, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 492, + 481, + 514 + ], + "spans": [ + { + "bbox": [ + 138, + 492, + 481, + 514 + ], + "type": "text", + "content": "0. 
Ding, M., Wang, Z., Sun, J., Shi, J., Luo, P.: Camnet: Coarse-to-fine retrieval for camera re-localization. In: ICCV (2019)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 514, + 481, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 514, + 481, + 535 + ], + "spans": [ + { + "bbox": [ + 138, + 514, + 481, + 535 + ], + "type": "text", + "content": "1. Gaudilliere, V., Simon, G., Berger, M.O.: Camera relocalization with ellipsoidal abstraction of objects. In: ISMAR (2019)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 536, + 481, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 536, + 481, + 556 + ], + "spans": [ + { + "bbox": [ + 138, + 536, + 481, + 556 + ], + "type": "text", + "content": "2. Gaudilliere, V., Simon, G., Berger, M.O.: Perspective-2-ellipsoid: Bridging the gap between object detections and 6-dof camera pose. RAL 5(4), 5189-5196 (2020)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 557, + 481, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 557, + 481, + 578 + ], + "spans": [ + { + "bbox": [ + 138, + 557, + 481, + 578 + ], + "type": "text", + "content": "3. Gay, P., Rubino, C., Bansal, V., Del Bue, A.: Probabilistic structure from motion with objects (psfmo). In: ICCV" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 138, + 579, + 481, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 579, + 481, + 600 + ], + "spans": [ + { + "bbox": [ + 138, + 579, + 481, + 600 + ], + "type": "text", + "content": "4. Gay, P., Stuart, J., Del Bue, A.: Visual graphs from motion (vgfm): Scene understanding with object geometry reasoning. In: ACCV (2019)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 138, + 601, + 481, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 601, + 481, + 621 + ], + "spans": [ + { + "bbox": [ + 138, + 601, + 481, + 621 + ], + "type": "text", + "content": "5. He, X., Sun, J., Wang, Y., Huang, D., Bao, H., Zhou, X.: Onepose++: Keypoint-free one-shot object pose estimation without cad models. In: NeurIPS (2022)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 138, + 622, + 481, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 622, + 481, + 643 + ], + "spans": [ + { + "bbox": [ + 138, + 622, + 481, + 643 + ], + "type": "text", + "content": "6. Hosseinzadeh, M., Latif, Y., Pham, T., Suenderhauf, N., Reid, I.: Structure aware slam using quadrics and planes. In: ACCV (2019)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 138, + 644, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 644, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 644, + 481, + 665 + ], + "type": "text", + "content": "7. Jacques, L., Masset, L., Kerschen, G.: Direction and surface sampling in ray tracing for spacecraft radiative heat transfer. 
Aerospace Science and Technology 47 (2015)" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 170, + 91, + 448, + 102 + ], + "type": "text", + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 117, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 133, + 117, + 480, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 117, + 480, + 138 + ], + "spans": [ + { + "bbox": [ + 133, + 117, + 480, + 138 + ], + "type": "text", + "content": "18. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. TCG 42(4) (2023)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 140, + 480, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 140, + 480, + 160 + ], + "spans": [ + { + "bbox": [ + 133, + 140, + 480, + 160 + ], + "type": "text", + "content": "19. Kim, S., Min, J., Cho, M.: Transformers: Match-to-match attention for semantic correspondence. In: CVPR (2022)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 161, + 480, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 161, + 480, + 182 + ], + "spans": [ + { + "bbox": [ + 133, + 161, + 480, + 182 + ], + "type": "text", + "content": "20. Knapitsch, A., Park, J., Zhou, Q.Y., Koltun, V.: Tanks and temples: Benchmarking large-scale scene reconstruction. TCG 36(4) (2017)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 183, + 480, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 183, + 480, + 204 + ], + "spans": [ + { + "bbox": [ + 132, + 183, + 480, + 204 + ], + "type": "text", + "content": "21. Laidlow, T., Davison, A.J.: Simultaneous localisation and mapping with quadric surfaces. In: 3DV (2022)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 205, + 480, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 205, + 480, + 225 + ], + "spans": [ + { + "bbox": [ + 132, + 205, + 480, + 225 + ], + "type": "text", + "content": "22. Lee, J., Kim, B., Cho, M.: Self-supervised equivariant learning for oriented keypoint detection. In: CVPR (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 227, + 480, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 227, + 480, + 247 + ], + "spans": [ + { + "bbox": [ + 132, + 227, + 480, + 247 + ], + "type": "text", + "content": "23. Lee, J., Kim, B., Kim, S., Cho, M.: Learning rotation-equivariant features for visual correspondence. 
In: CVPR" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 249, + 480, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 249, + 480, + 281 + ], + "spans": [ + { + "bbox": [ + 132, + 249, + 480, + 281 + ], + "type": "text", + "content": "24. Liao, Z., Hu, Y., Zhang, J., Qi, X., Zhang, X., Wang, W.: So-slam: Semantic object slam with scale proportional and symmetrical texture constraints. RAL 7(2), 4008-4015 (2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 282, + 480, + 313 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 282, + 480, + 313 + ], + "spans": [ + { + "bbox": [ + 132, + 282, + 480, + 313 + ], + "type": "text", + "content": "25. Lin, Y., Müller, T., Tremblay, J., Wen, B., Tyree, S., Evans, A., Vela, P.A., Birchfield, S.: Parallel inversion of neural radiance fields for robust pose estimation. In: ICRA (2023)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 315, + 480, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 315, + 480, + 335 + ], + "spans": [ + { + "bbox": [ + 132, + 315, + 480, + 335 + ], + "type": "text", + "content": "26. Liu, L., Gu, J., Lin, K.Z., Chua, T.S., Theobalt, C.: Neural sparse voxel fields. In: NeurIPS (2020)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 337, + 460, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 337, + 460, + 346 + ], + "spans": [ + { + "bbox": [ + 132, + 337, + 460, + 346 + ], + "type": "text", + "content": "27. Lowe, D.G.: Object recognition from local scale-invariant features. In: ICCV" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 348, + 480, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 348, + 480, + 369 + ], + "spans": [ + { + "bbox": [ + 132, + 348, + 480, + 369 + ], + "type": "text", + "content": "28. Luiten, J., Kopanas, G., Leibe, B., Ramanan, D.: Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. In: 3DV (2024)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 370, + 480, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 370, + 480, + 390 + ], + "spans": [ + { + "bbox": [ + 132, + 370, + 480, + 390 + ], + "type": "text", + "content": "29. Maggio, D., Mario, C., Carlone, L.: Verf: Runtime monitoring of pose estimation with neural radiance fields. In: ICCV (2023)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 392, + 480, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 392, + 480, + 412 + ], + "spans": [ + { + "bbox": [ + 132, + 392, + 480, + 412 + ], + "type": "text", + "content": "30. Malley, T.: A shading method for computer generated images. Master's thesis, Dept. of Computer Science, University of Utah (1988)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 414, + 480, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 414, + 480, + 456 + ], + "spans": [ + { + "bbox": [ + 132, + 414, + 480, + 456 + ], + "type": "text", + "content": "31. Masset, L., Brüls, O., Kerschen, G.: Partition of the circle in cells of equal area and shape. Tech. 
rep., Structural Dynamics Research Group, Aerospace and Mechanical Engineering Department, University of Liege, 'Institut de Mecanique et G 'enie Civil (B52/3) (2011)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 457, + 480, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 457, + 480, + 468 + ], + "spans": [ + { + "bbox": [ + 132, + 457, + 480, + 468 + ], + "type": "text", + "content": "32. Meng, Y., Zhou, B.: Ellipsoid slam with novel object initialization. In: CASE (2022)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 468, + 480, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 468, + 480, + 500 + ], + "spans": [ + { + "bbox": [ + 132, + 468, + 480, + 500 + ], + "type": "text", + "content": "33. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis. In: ECCV (2020)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 502, + 480, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 502, + 480, + 533 + ], + "spans": [ + { + "bbox": [ + 132, + 502, + 480, + 533 + ], + "type": "text", + "content": "34. Moreau, A., Piasco, N., Bennehar, M., Tsishkou, D., Stanciulescu, B., de La Fortelle, A.: Crossfire: Camera relocalization on self-supervised features from an implicit representation. In: ICCV (2023)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 132, + 534, + 480, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 534, + 480, + 587 + ], + "spans": [ + { + "bbox": [ + 132, + 534, + 480, + 587 + ], + "type": "text", + "content": "35. Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., Assran, M., Ballas, N., Galuba, W., Howes, R., Huang, P.Y., Li, S.W., Misra, I., Rabbat, M., Sharma, V., Synnaeve, G., Xu, H., Jegou, H., Mairal, J., Labatut, P., Joulin, A., Bojanowski, P.: Dinov2: Learning robust visual features without supervision (2023)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 132, + 589, + 480, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 589, + 480, + 609 + ], + "spans": [ + { + "bbox": [ + 132, + 589, + 480, + 609 + ], + "type": "text", + "content": "36. Sarlin, P.E., DeTone, D., Malisiewicz, T., Rabinovich, A.: Superglue: Learning feature matching with graph neural networks. In: CVPR (2020)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 132, + 611, + 480, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 611, + 480, + 643 + ], + "spans": [ + { + "bbox": [ + 132, + 611, + 480, + 643 + ], + "type": "text", + "content": "37. Shan, M., Feng, Q., Jau, Y.Y., Atanasov, N.: Ellipsdf: joint object pose and shape optimization with a bi-level ellipsoid and signed distance function description. In: ICCV (2021)" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 132, + 644, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 644, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 644, + 480, + 665 + ], + "type": "text", + "content": "38. Shazeer, N., Stern, M.: Adafactor: Adaptive learning rates with sublinear memory cost. 
In: ICML (2018)" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 241, + 100 + ], + "type": "text", + "content": "M. Bortolon et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 117, + 481, + 325 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 132, + 117, + 481, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 117, + 481, + 138 + ], + "spans": [ + { + "bbox": [ + 132, + 117, + 481, + 138 + ], + "type": "text", + "content": "39. Sinkhorn, R.: A Relationship Between Arbitrary Positive Matrices and Doubly Stochastic Matrices. The Annals of Mathematical Statistics 35(2), 876-879 (1964)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 140, + 481, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 140, + 481, + 171 + ], + "spans": [ + { + "bbox": [ + 132, + 140, + 481, + 171 + ], + "type": "text", + "content": "40. Tancik, M., Srinivasan, P.P., Mildenhall, B., Fridovich-Keil, S., Raghavan, N., Singhal, U., Ramamoorthi, R., Barron, J.T., Ng, R.: Fourier features let networks learn high frequency functions in low dimensional domains. In: NeurIPS (2020)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 172, + 481, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 172, + 481, + 194 + ], + "spans": [ + { + "bbox": [ + 133, + 172, + 481, + 194 + ], + "type": "text", + "content": "41. Tombari, F., Salti, S., di Stefano, L.: Unique signatures of histograms for local surface description. In: ECCV (2010)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 133, + 194, + 481, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 194, + 481, + 216 + ], + "spans": [ + { + "bbox": [ + 133, + 194, + 481, + 216 + ], + "type": "text", + "content": "42. Tsesmelis, T., Hasan, I., Cristani, M., Bue, A.D., Galasso, F.: Rgbd2lux: Dense light intensity estimation with an rgbd sensor. In: WACV (2018)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 216, + 481, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 216, + 481, + 237 + ], + "spans": [ + { + "bbox": [ + 132, + 216, + 481, + 237 + ], + "type": "text", + "content": "43. Wang, A., Kortylewski, A., Yuille, A.: Nemo: Neural mesh models of contrastive features for robust 3d pose estimation. In: ICLR (2020)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 237, + 481, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 237, + 481, + 259 + ], + "spans": [ + { + "bbox": [ + 132, + 237, + 481, + 259 + ], + "type": "text", + "content": "44. Wang, A., Wang, P., Sun, J., Kortylewski, A., Yuille, A.: Voge: a differentiable volume renderer using gaussian ellipsoids for analysis-by-synthesis. 
In: ICLR (2022)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 259, + 481, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 259, + 481, + 281 + ], + "spans": [ + { + "bbox": [ + 132, + 259, + 481, + 281 + ], + "type": "text", + "content": "45. Xie, T., Zong, Z., Qiu, Y., Li, X., Feng, Y., Yang, Y., Jiang, C.: Physgaussian: Physics-integrated 3d gaussians for generative dynamics. In: CVPR (2024)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 281, + 481, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 281, + 481, + 304 + ], + "spans": [ + { + "bbox": [ + 132, + 281, + 481, + 304 + ], + "type": "text", + "content": "46. Yen-Chen, L., Florence, P., Barron, J.T., Rodriguez, A., Isola, P., Lin, T.Y.: iNeRF: Inverting neural radiance fields for pose estimation. In: IROS (2021)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 304, + 481, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 304, + 481, + 325 + ], + "spans": [ + { + "bbox": [ + 132, + 304, + 481, + 325 + ], + "type": "text", + "content": "47. Zins, M., Simon, G., Berger, M.O.: Oa-slam: Leveraging objects for camera localization in visual slam. In: ISMAR (2022)" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 170, + 91, + 447, + 102 + ], + "type": "text", + "content": "6DGS: 6D Pose Estimation from a Single Image and a 3DGS Model" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 481, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/2a1e442c-8a76-4afd-b0e2-7a3c115bb3f2_content_list.json b/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/2a1e442c-8a76-4afd-b0e2-7a3c115bb3f2_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..c13f3f4ea5e292c31acea073d39b2010867bccf3 --- /dev/null +++ b/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/2a1e442c-8a76-4afd-b0e2-7a3c115bb3f2_content_list.json @@ -0,0 +1,1842 @@ +[ + { + "type": "text", + "text": "6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry", + "text_level": 1, + "bbox": [ + 241, + 140, + 759, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Sungho Chun and Ju Yong Chang", + "bbox": [ + 364, + 212, + 635, + 227 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Department of ECE, Kwangwoon University, Korea {asw9161, jychang}@kw.ac.kr", + "bbox": [ + 328, + 239, + 673, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. This study addresses the nuanced challenge of estimating head translations within the context of six-degrees-of-freedom (6DoF) head pose estimation, placing emphasis on this aspect over the more commonly studied head rotations. 
Identifying a gap in existing methodologies, we recognized the underutilized potential synergy between facial geometry and head translation. To bridge this gap, we propose a novel approach called the head Translation, Rotation, and face Geometry network (TRG), which stands out for its explicit bidirectional interaction structure. This structure has been carefully designed to leverage the complementary relationship between face geometry and head translation, marking a significant advancement in the field of head pose estimation. Our contributions also include the development of a strategy for estimating bounding box correction parameters and a technique for aligning landmarks to image. Both of these innovations demonstrate superior performance in 6DoF head pose estimation tasks. Extensive experiments conducted on ARKitFace and BIWI datasets confirm that the proposed method outperforms current state-of-the-art techniques. Codes are released at https://github.com/asn91666/TRG-Release.", + "bbox": [ + 261, + 303, + 738, + 551 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: 6DoF head pose estimation $\\cdot$ bidirectional interaction $\\cdot$ landmark-based approach", + "bbox": [ + 261, + 566, + 772, + 594 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 217, + 642, + 375, + 657 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Six-degrees-of-freedom (6DoF) head pose estimation is a crucial concern in both computer vision and graphics communities owing to its broad applications in augmented/virtual reality, vehicular monitoring systems, and sports analytics. Despite its prominence, existing studies [3,4,21,27,38,41,45-47] have primarily focused on estimating head orientation, whereas research on head translation estimation has not received as much attention. Some studies [1,44] have estimated pseudo-depth calculated from fitted data [55] without exploring methods to estimate the actual distance between the camera and head.", + "bbox": [ + 212, + 672, + 787, + 792 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Estimating head translation from a single image using learning-based methods poses significant challenges, which can be attributed to roughly two reasons. First, head translation estimation depends on real-scale face geometry. However,", + "bbox": [ + 212, + 795, + 787, + 840 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d458188cd0318d3f363853cfc5116e0e91c3bbd1894de8b9a41299a2f65f93e9.jpg", + "image_caption": [ + "Fig. 1: Methods of inferring 6DoF head pose. The landmark-free approach [1] directly calculates the head pose from the image. Optimization-based methods [18,25,56] first predict face geometry, and then calculate the head pose. In contrast, TRG simultaneously estimates both face geometry and head pose to leverage the synergy between them." + ], + "image_footnote": [], + "bbox": [ + 218, + 145, + 782, + 296 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "the estimation of real-scale face geometry suffers from head translation ambiguities. In other words, the estimation of head translation and the estimation of actual size face geometry are strongly correlated, and there exists ambiguity due to their mutual absence. Second, learning-based head translation estimation encounters severe generalization issues with out-of-distribution data. 
Unlike head rotation, the range of head translation is infinite, necessitating a generalization strategy to address it.", + "bbox": [ + 212, + 417, + 787, + 523 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, existing works [1, 18, 25, 56] do not address the aforementioned issues. Fig. 1 provides an overview of the 6DoF head pose estimation methods used by existing models. In [18, 25, 56], face geometry is first inferred from an image, followed by the calculation of the 6DoF head pose using an optimization-based method. In other words, these methods [18,25,56] do not model the transfer of information from head pose to face geometry. This unidirectional information transfer method may face difficulties in predicting the actual size face geometry due to the absence of depth information. Consequently, the resulting face prior could create a vicious cycle, further reducing the accuracy of head translation prediction.", + "bbox": [ + 212, + 527, + 787, + 679 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Landmark-free approach [1] estimates head translation directly from an image using a learning-based method; however, it does not utilize face geometry information during the inference process. Directly estimating head depth from an image is highly non-linear, making the landmark-free approach challenging for estimating head translation.", + "bbox": [ + 212, + 684, + 787, + 760 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To overcome the limitations of existing models [1,18,25,56], we propose a head Translation, Rotation, and face Geometry network (TRG), which is a landmark-based method for estimating a 6DoF head pose. The TRG is designed with an explicit bidirectional interaction structure that leverages the complementary characteristics between the 6DoF head pose and face geometry. Specifically, we", + "bbox": [ + 212, + 763, + 787, + 840 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "S. Chun and J. Y. Chang", + "bbox": [ + 269, + 114, + 442, + 128 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "propose a method that simultaneously estimates the head pose and dense 3D landmarks, using each other's information to iteratively improve one another.", + "bbox": [ + 212, + 146, + 782, + 176 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To achieve generalizable head translation estimation, TRG does not directly estimate depth, but utilizes the position and size information of the bounding box. The center coordinates of the bounding box are typically well-aligned with the coordinates of the head center, and the size of the bounding box inversely reflects the head's depth. These relationships make the bounding box a useful tool for estimating head translation in 3D space. However, reliance on the bounding box alone is insufficient. This is due to potential misalignments between the bounding box center and the head center, and the bounding box size being influenced by factors beyond depth, such as face size and head rotation. To address these discrepancies, we propose to estimate bounding box correction parameters and calculate head translation using these parameters and bounding box information. 
The proposed method has been found to achieve high accuracy and to be robust even for out-of-distribution data.", + "bbox": [ + 212, + 176, + 787, + 372 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Additionally, TRG aligns the estimated 3D landmarks with the image through perspective projection. By iterating this process, TRG not only enhances the performance of head translation estimation but also improves head rotation accuracy. This landmark-to-image alignment framework is inspired by the architecture of PyMAF [49, 50], which is a model used to reconstruct a human mesh. However, PyMAF is not designed to estimate the camera-to-human distance and fundamentally differs from TRG as it does not leverage the synergy between real-scale human geometry and depth.", + "bbox": [ + 212, + 373, + 787, + 492 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Furthermore, we discovered that TRG can accurately predict 3D face landmarks from a single image, even when strongly affected by perspective distortions, such as in selfies. This accuracy is attributed to the TRG's depth-aware landmark prediction architecture, which actively utilizes head translation information during the landmark prediction process. This finding further supports our main idea that head translation estimation should be conducted simultaneously with facial geometry estimation.", + "bbox": [ + 212, + 493, + 787, + 599 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The main contributions of this study can be summarized as follows:", + "bbox": [ + 238, + 599, + 728, + 614 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose TRG for 6DoF head pose estimation. To the best of our knowledge, this is the first study to introduce an explicit bidirectional interaction structure between head translation and face geometry. Through this innovative structure, TRG simultaneously mitigates ambiguity concerning head depth and face size.", + "- The proposed strategy for estimating correction parameters for the bounding box demonstrates stable generalization performance on out-of-distribution data in terms of head translation.", + "- The landmark-to-image alignment strategy demonstrates high accuracy not only in terms of head translation but also regarding head rotation.", + "- TRG's depth-aware landmark prediction architecture exhibits high landmark prediction accuracy, even in images heavily influenced by perspective transformation, such as selfies.", + "- Extensive experimental results on the benchmark datasets ARKitFace [25] and BIWI [15] show that TRG outperforms current SotA methods." + ], + "bbox": [ + 223, + 618, + 784, + 839 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "TRG", + "bbox": [ + 694, + 114, + 730, + 126 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 114, + 785, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Works", + "text_level": 1, + "bbox": [ + 215, + 143, + 397, + 160 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.1 Landmark-free Approach", + "text_level": 1, + "bbox": [ + 215, + 176, + 470, + 193 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The landmark-free approach [1,3,4,14,21,31,47] aims to estimate head pose directly from input image without relying on landmarks. 
However, most landmark-free approaches [3,4,14,21,31,47] only estimate head rotation and do not consider head translation.", + "bbox": [ + 212, + 202, + 782, + 261 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Among them, img2pose [1] not only estimates head rotation but also head translation. It calculates head translation from a proposal and employs a local-to-global transformation strategy to convert the estimated local pose into a global image space. Infrinsics are utilized during the conversion of the local head pose into the global head pose. However, img2pose does not use intrinsics when calculating head translation from a proposal, leading to inaccurate local head poses. This is because utilizing intrinsics is essential when calculating depth from an image, even when dealing with a cropped image. Furthermore, [1] does not utilize face geometry information during inference, which can exacerbate depth ambiguity.", + "bbox": [ + 212, + 262, + 784, + 412 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In contrast to landmark-free approaches, our proposed method explicitly utilizes facial geometry information. Specifically, TRG simultaneously mitigates ambiguity regarding face size and head translation through a bidirectional interaction structure. Additionally, TRG does not directly calculate head translation from cropped images but infers bounding box correction parameters instead. It then computes head translation using the inferred correction parameters and intrinsics. The proposed bounding box correction parameter strategy enables stable and accurate inference of head translation.", + "bbox": [ + 212, + 414, + 785, + 534 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 Landmark-based Approach", + "text_level": 1, + "bbox": [ + 215, + 556, + 485, + 573 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Numerous landmark-based approaches have been proposed [18,25,27,37,43-45, 56] for estimating a 6DoF head pose or 3D head rotation. [27,37,43] proposed methods that simultaneously estimate 2D face landmarks and 3D head rotation by leveraging the synergy between them using learning-based approaches. However, these studies have not explored the synergy between 3D face geometry and head translation.", + "bbox": [ + 212, + 583, + 784, + 672 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "SynergyNet [44] demonstrated that the parameters for shape and expression [35] can improve 3D sparse landmarks, and these enhanced landmarks can, in turn, improve the 3DMM parameters and head rotation during training. However, during the test time, it utilized a unidirectional information transfer architecture, which does not refine the 3DMM parameters and head rotation from the improved landmarks. Furthermore, SynergyNet is a model based on weak-perspective projections, similar to those in [5, 10, 16, 19, 28, 55]. Such models fundamentally do not compute the actual distance between the camera and the face.", + "bbox": [ + 212, + 672, + 785, + 808 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "MICA [56], JMLR [18], and PerspNet [25] employ unidirectional information transfer methods that first estimate face geometry and then calculate head pose.", + "bbox": [ + 215, + 809, + 785, + 839 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "S. Chun and J. Y. 
Chang", + "bbox": [ + 271, + 114, + 442, + 128 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/ac978229c07763e29da1561556884b0c36888ac27e0c7790a31a51a8f6d729e5.jpg", + "image_caption": [ + "Fig. 2: Overall pipeline of the proposed method." + ], + "image_footnote": [], + "bbox": [ + 222, + 143, + 781, + 323 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "However, these methods are limited in their ability to reconstruct real-scale face geometry due to depth ambiguity. Furthermore, calculating the 6DoF head pose based on these inaccurate geometry priors makes it difficult to achieve high accuracy.", + "bbox": [ + 212, + 380, + 782, + 441 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To address the aforementioned issues, we propose, for the first time, an explicit bidirectional interaction structure between the 6DoF head pose and face geometry. Additionally, unlike other landmark-based approaches, the proposed structure actively utilizes head depth information during the landmark estimation process. This approach demonstrates accurate geometry estimation even for images with strong perspective distortions, such as selfies.", + "bbox": [ + 212, + 441, + 784, + 532 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 Proposed Method", + "text_level": 1, + "bbox": [ + 215, + 563, + 426, + 580 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 Overview of the Proposed Method", + "text_level": 1, + "bbox": [ + 214, + 602, + 547, + 618 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "TRG is designed to iteratively regress head translation $\\{T_t \\in \\mathbb{R}^3\\}_{t=1}^3$ and rotation $\\{R_t \\in \\mathbb{R}^6\\}_{t=1}^3$ from a single image $I \\in \\mathbb{R}^{3 \\times 192 \\times 192}$ , while also providing the auxiliary output of dense 3D landmarks $\\{V_t \\in \\mathbb{R}^{3 \\times N^V}\\}_{t=1}^3$ .", + "bbox": [ + 214, + 633, + 784, + 681 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Fig. 2 illustrates the comprehensive structure of TRG, which comprises a feature extractor that generates multi-scale feature maps $\\{\\phi_t\\in \\mathbb{R}^{256\\times H_t\\times W_t}\\}_{t = 1}^3$ from $I$ , a feature sampler that extracts a landmark-aligned feature vector $\\phi_t^p\\in \\mathbb{R}^{5N_{t - 1}^P}$ from the feature map $\\phi_t$ , and a face regressor that regresses head translation $T_{t}$ , rotation $R_{t}$ , and dense landmarks $V_{t}$ from $\\phi_t^p$ . $N_{t - 1}^P$ and $N^V$ denote the number of sampling points $P_{t - 1}\\in \\mathbb{R}^{2\\times N_{t - 1}^P}$ and the number of 3D dense landmarks $V_{t}$ , respectively. Each of these components—feature extractor, feature sampler, and face regressor—is described in detail in Sections 3.2, 3.3, and 3.4, respectively. Additionally, the loss functions employed in the training are discussed in Section 3.5.", + "bbox": [ + 212, + 683, + 787, + 839 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "TRG", + "bbox": [ + 694, + 114, + 730, + 126 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 114, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 Feature Extractor", + "text_level": 1, + "bbox": [ + 215, + 146, + 410, + 160 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The feature extractor computes multi-scale feature maps $\\{\\phi_t\\}_{t=1}^3$ and 2D sparse landmarks $L \\in \\mathbb{R}^{2 \\times N^L}$ from a single image $I$ . 
$N^L$ denote the number of sparse landmarks. The feature extractor comprises ResNet18 [20], three deconvolution layers, a $1 \\times 1$ convolution layer, and a soft-argmax operation [42]. ResNet18 is initialized with pre-trained weights on ImageNet [11] and is used after removing the final classification layer and the pooling layer. The $\\phi_t$ is computed from the $t$ -th deconvolution layer and fed into the feature sampler. Additionally, the last feature map, $\\phi_3$ undergoes a transformation into 2D heatmaps through the $1 \\times 1$ convolution layer. The soft-argmax operation computes $L$ from the resultant heatmaps. These computed landmarks, along with the ground-truth landmarks $L^* \\in \\mathbb{R}^{2 \\times N^L}$ , are incorporated into the loss function.", + "bbox": [ + 212, + 171, + 787, + 342 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.3 Feature Sampler", + "text_level": 1, + "bbox": [ + 215, + 364, + 398, + 380 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The feature sampler computes the landmark-aligned feature vector $\\phi_t^p \\in \\mathbb{R}^{5N_{t-1}^P}$ from the feature map $\\phi_t$ and the corresponding sampling points $P_{t-1} \\in \\mathbb{R}^{2 \\times N_{t-1}^P}$ . Sampling points $P_{t-1}$ are used to extract point-wise features from the feature map $\\phi_t$ . Here, $P_0$ is set to 2D grid coordinates. For $t > 0$ , $P_t$ is computed using the $t$ -th face regressor. The methodology for deriving these sampling points from the face regressor is described in Section 3.4.", + "bbox": [ + 212, + 388, + 785, + 484 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The point-wise feature vector $\\phi_t(p_{t-1,n}) \\in \\mathbb{R}^{256}$ is obtained using bilinear sampling at the location specified by the point $p_{t-1,n} \\in \\mathbb{R}^2$ on $\\phi_t$ . Here, $p_{t-1,n}$ denotes the $n$ -th column vector of the sampling points $P_{t-1}$ . The $N_{t-1}^P$ point-wise features, denoted as $\\{\\phi_t(p_{t-1,n})\\}_{n=1}^{N_{t-1}^P}$ , are then transformed into 5D vectors using a dimension reduction layer $\\mathcal{F}(\\cdot)$ . These vectors are subsequently concatenated to form the landmark-aligned feature vector $\\phi_t^p$ :", + "bbox": [ + 212, + 484, + 787, + 583 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\phi_ {t} ^ {p} = \\bigoplus \\left(\\left\\{\\mathcal {F} \\left(\\phi_ {t} \\left(p _ {t - 1, n}\\right)\\right) \\right\\} _ {n = 1} ^ {N _ {t - 1} ^ {P}}\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 593, + 785, + 619 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\bigoplus (\\cdot)$ denotes concatenation. The dimension reduction layer, $\\mathcal{F}(\\cdot)$ , is structured as a multilayer perceptron (MLP), which comprises three fully connected layers and two Leaky ReLU activations [32,50]. 
The obtained landmark-aligned feature vector $\\phi_t^p$ is then fed into the face regressor.", + "bbox": [ + 212, + 630, + 785, + 691 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4 Face Regressor", + "text_level": 1, + "bbox": [ + 215, + 714, + 385, + 729 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The face regressor comprises an MLP $\\mathcal{R}_t(\\cdot)$ to calculate the head rotation, bounding box correction parameters, and dense landmarks $\\Theta_t = \\{R_t \\in \\mathbb{R}^6, c_t \\in \\mathbb{R}^3, V_t \\in \\mathbb{R}^{3 \\times N^V}\\}$ , a function that computes the head translation $T_t = \\{T_t^x, T_t^y, T_t^z\\} \\in \\mathbb{R}^3$ based on the bounding box information $I_{\\mathrm{bbox}} = \\{\\frac{\\tau^{x,\\mathrm{bbox}}}{f}, \\frac{\\tau^{y,\\mathrm{bbox}}}{f}, \\frac{b}{f}\\} \\in \\mathbb{R}^3$ and the correction parameter $c_t = \\{s_t, \\tilde{\\tau}_t^{x,\\mathrm{face}}, \\tilde{\\tau}_t^{y,\\mathrm{face}}\\}$ , and a perspective projection function that calculates the image coordinates of the dense landmarks", + "bbox": [ + 212, + 739, + 813, + 840 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "S. Chun and J. Y. Chang", + "bbox": [ + 271, + 114, + 442, + 128 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "$V_{t}^{img} \\in \\mathbb{R}^{2 \\times N^{V}}$ and the sampling points $P_{t}$ . $V_{t}$ and $R_{t}$ denote the 3D coordinates of the dense landmarks defined in the head space and the head rotation expressed in a 6D representation [52], respectively. $T_{t}^{x}, T_{t}^{y}$ , and $T_{t}^{z}$ represent the head translations along the $x$ -, $y$ -, and $z$ -axes in the camera space, respectively. $\\tau^{x,\\mathrm{bbox}}$ , $\\tau^{y,\\mathrm{bbox}}$ , $b$ , and $f$ denote the $x$ - and $y$ -coordinates of the bounding box center relative to the center of the uncropped image, the size of the bounding box, and the focal length, respectively. $s_{t}$ , $\\tilde{\\tau}_{t}^{x,\\mathrm{face}}$ , and $\\tilde{\\tau}_{t}^{y,\\mathrm{face}}$ respectively denote the bounding box scale factor and the normalized offset of the head center relative to the bounding box center in the $x$ - and $y$ -directions.", + "bbox": [ + 212, + 143, + 782, + 281 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The MLP $\\mathcal{R}_t(\\cdot)$ estimates the residual for calculating $\\Theta_t$ from the landmark-aligned feature $\\phi_t^p$ , the previously iterated output $\\Theta_{t - 1}^{sub} = \\{R_{t - 1},c_{t - 1},V_{t - 1}^{sub}\\in \\mathbb{R}^{3\\times 305}\\}$ , and the bounding box information $I_{\\mathrm{bbox}}$ [29,49,50]. $\\Theta_t$ is computed by adding the residual estimated by $\\mathcal{R}_t(\\cdot)$ to $\\Theta_{t - 1}$ . $V_{t - 1}^{sub}$ represents the landmarks obtained by subsampling $V_{t - 1}$ [36]. The use of $V_{t - 1}^{sub}$ for $\\mathcal{R}_t(\\cdot)$ instead of $V_{t - 1}$ reduces the redundancy of the dense landmarks, which improves the performance of the proposed model [6-8,30,50].", + "bbox": [ + 212, + 282, + 784, + 388 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We model a real human face as being enclosed within a box $B$ of size $0.2m \\times 0.2m$ , with $m$ denoting meters. The size of this box, when projected into the image space, is represented by $b$ . 
However, since the assumption about the face size is typically imprecise, $\\mathcal{R}_t(\\cdot)$ estimates a scale factor $s_t$ to adjust the size of $B$. Furthermore, $\\mathcal{R}_t(\\cdot)$ is responsible for determining the normalized offsets of the head center $\\tilde{\\tau}_t^{x,\\mathrm{face}}$, $\\tilde{\\tau}_t^{y,\\mathrm{face}}$. These offsets represent the values obtained by normalizing the image space translation from the bounding box center to the head center with $b$. The calculation of $T_t$ from $c_t$ and $I_{\\mathrm{bbox}}$ is expressed as:", + "bbox": [ + 212, + 388, + 784, + 512 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} T_t^x = \\frac{0.2 s_t}{b} \\tau^{x,\\mathrm{bbox}} + 0.2 s_t \\tilde{\\tau}_t^{x,\\mathrm{face}}, \\\\ T_t^y = \\frac{0.2 s_t}{b} \\tau^{y,\\mathrm{bbox}} + 0.2 s_t \\tilde{\\tau}_t^{y,\\mathrm{face}}, \\quad T_t^z = \\frac{0.2 s_t}{b} f. \\tag{2} \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 331, + 523, + 784, + 580 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The derivation of Eq. 2 can be found in the supplementary material. The image coordinates of the dense landmarks, $V_{t}^{img}$, are computed by projecting $V_{t}$, as follows:", + "bbox": [ + 212, + 593, + 782, + 637 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nV_t^{img} = \\Pi\\left(V_t, R_t, T_t, K\\right), \\tag{3}\n$$\n", + "text_format": "latex", + "bbox": [ + 415, + 638, + 784, + 656 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\Pi(\\cdot)$ and $K\\in \\mathbb{R}^{3\\times 3}$ denote the perspective projection and the intrinsic camera parameters, respectively. The sampling points $P_{t}$ are obtained by subsampling $V_{t}^{img}$.", + "bbox": [ + 212, + 664, + 782, + 710 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.5 Loss Functions", + "text_level": 1, + "bbox": [ + 214, + 734, + 382, + 750 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We detail the loss functions employed to train TRG, ensuring accurate predictions of face geometry and head pose. The training process utilizes several loss functions for dense landmarks: head space coordinate loss $\\mathcal{L}_{\\mathrm{head}}$, camera space coordinate loss $\\mathcal{L}_{\\mathrm{cam}}$, and image space coordinate loss $\\mathcal{L}_{\\mathrm{img}}$. For a precise estimation of head rotation, a head rotation loss $\\mathcal{L}_{\\mathrm{rot}}$ is also adopted. 
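Before turning to these terms, the translation-and-projection step of Eqs. 2 and 3 can be collected into a short sketch (again our own illustration under the definitions above; the function names are assumptions):

import torch

def translation_from_correction(c, bbox_info):
    # c: tensor (s, tx_face, ty_face) of correction parameters from the regressor.
    # bbox_info: tensor (tau_x_bbox / f, tau_y_bbox / f, b / f), i.e., I_bbox.
    s, tx_face, ty_face = c
    u_over_f, v_over_f, b_over_f = bbox_info
    tz = 0.2 * s / b_over_f                  # T^z = 0.2 s f / b        (Eq. 2)
    tx = tz * u_over_f + 0.2 * s * tx_face   # equals 0.2 s tau^x / b + 0.2 s offset
    ty = tz * v_over_f + 0.2 * s * ty_face
    return torch.stack([tx, ty, tz])

def project_landmarks(V, R_mat, T, K):
    # Perspective projection of Eq. 3; V is (3, N) in head space.
    cam = R_mat @ V + T[:, None]             # camera-space coordinates
    uv = K @ cam
    return uv[:2] / uv[2:3]                  # image coordinates: (2, N)

Because only the scale and offsets are learned while $f$ and $b$ enter the computation analytically, depth is never regressed directly, which is what Section 4.4 credits for the cross-dataset robustness of the translation estimate. With the model fully specified, we return to the training objective. 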
As iteration", + "bbox": [ + 212, + 763, + 782, + 839 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "TRG", + "bbox": [ + 694, + 114, + 730, + 126 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 114, + 784, + 125 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "progresses, the weight on each loss term is doubled, as follows:", + "bbox": [ + 215, + 146, + 593, + 161 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{\\mathrm{head}} = \\sum_{t=1}^{3} 2^{t-3} \\left(\\frac{1}{N^V} \\sum_{n=1}^{N^V} \\| V_{t,n} - V_n^* \\|_1\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 172, + 627, + 213 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{\\mathrm{cam}} = \\sum_{t=1}^{3} 2^{t-3} \\left(\\frac{1}{N^V} \\sum_{n=1}^{N^V} \\| V_{t,n}^{cam} - V_n^{*,cam} \\|_1\\right), \\tag{4}\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 218, + 785, + 267 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{\\mathrm{img}} = \\sum_{t=1}^{3} 2^{t-3} \\left(\\frac{1}{N^V} \\sum_{n=1}^{N^V} \\| V_{t,n}^{img} - V_n^{*,img} \\|_1\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 263, + 658, + 305 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{\\mathrm{rot}} = \\sum_{t=1}^{3} 2^{t-3} \\left(\\| R_t^{mat} - R^{*,mat} \\|_F\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 311, + 596, + 348 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $*$ and $V_{t,n}$ represent the ground truth and the $n$-th column vector of $V_{t}$, respectively. $V_{t}^{cam} = R_{t}^{mat}V_{t} + T_{t} \\in \\mathbb{R}^{3 \\times N^{V}}$ and $V_{t}^{img}$ represent the camera space coordinates and the image space coordinates of the $t$-th dense landmarks, respectively. $R_{t}^{mat} \\in \\mathbb{R}^{3 \\times 3}$ represents the 3D head rotation in matrix form, and $\\| \\cdot \\|_{F}$ denotes the Frobenius norm.", + "bbox": [ + 215, + 357, + 785, + 434 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "If connectivity between dense landmarks is defined in the dataset, we utilize this information to apply an edge length loss. We empirically found that applying the edge length loss $\\mathcal{L}_{\\mathrm{ed}}$ [18, 33] to $V_{3}$, estimated by the final face regressor, improves the model's performance in estimating face geometry. The edge length loss $\\mathcal{L}_{\\mathrm{ed}}$ can be written as:", + "bbox": [ + 215, + 435, + 785, + 510 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{\\mathrm{ed}} = \\sum_{M} \\sum_{\\{n,m\\} \\subset M} \\left| \\| V_{3,n} - V_{3,m} \\|_2 - \\| V_n^* - V_m^* \\|_2 \\right|, \\tag{5}\n$$\n", + "text_format": "latex", + "bbox": [ + 321, + 521, + 784, + 551 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $M$ denotes a triangle defined by this connectivity. 
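As a concrete reading of the $2^{t-3}$ schedule, here is a minimal sketch of one iteration-weighted L1 term (our own illustration, not the released training code; preds and target are torch tensors):

def iteration_weighted_l1(preds, target):
    # preds: list [V_1, V_2, V_3] of (3, N) landmark estimates; target: (3, N).
    loss = 0.0
    for t, v in enumerate(preds, start=1):
        weight = 2.0 ** (t - 3)   # 1/4, 1/2, 1: later iterations weigh more
        # L1 norm over each landmark's 3 coordinates, averaged over the N landmarks.
        loss = loss + weight * (v - target).abs().sum(dim=0).mean()
    return loss

The same schedule is shared by $\\mathcal{L}_{\\mathrm{cam}}$, $\\mathcal{L}_{\\mathrm{img}}$, and $\\mathcal{L}_{\\mathrm{rot}}$, so the final regressor receives the strongest supervision while earlier iterations still receive gradient. 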
Additionally, to improve the quality of the feature map, we apply the sparse 2D landmark loss $\\mathcal{L}_L$ to the landmarks $L$ obtained from $\\phi_3$ as follows:", + "bbox": [ + 215, + 561, + 785, + 606 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_L = \\frac{1}{N^L} \\sum_{n=1}^{N^L} \\| L_n - L_n^* \\|_1. \\tag{6}\n$$\n", + "text_format": "latex", + "bbox": [ + 406, + 608, + 784, + 648 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The final loss function to train TRG can be written as:", + "bbox": [ + 238, + 652, + 640, + 667 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L} = \\lambda_{\\mathrm{head}} \\mathcal{L}_{\\mathrm{head}} + \\lambda_{\\mathrm{cam}} \\mathcal{L}_{\\mathrm{cam}} + \\lambda_{\\mathrm{img}} \\mathcal{L}_{\\mathrm{img}} + \\lambda_{\\mathrm{rot}} \\mathcal{L}_{\\mathrm{rot}} + \\lambda_{\\mathrm{ed}} \\mathcal{L}_{\\mathrm{ed}} + \\lambda_L \\mathcal{L}_L, \\tag{7}\n$$\n", + "text_format": "latex", + "bbox": [ + 246, + 679, + 784, + 694 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where the $\\lambda$'s represent the weights of the loss functions. $\\lambda_{\\mathrm{head}}$, $\\lambda_{\\mathrm{cam}}$, $\\lambda_{\\mathrm{img}}$, $\\lambda_{\\mathrm{rot}}$, $\\lambda_{\\mathrm{ed}}$, and $\\lambda_L$ are set to 20, 2, 0.01, 10, 2, and 1.25, respectively.", + "bbox": [ + 215, + 704, + 785, + 733 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 Experimental Results", + "text_level": 1, + "bbox": [ + 215, + 756, + 460, + 772 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.1 Implementation Details", + "text_level": 1, + "bbox": [ + 215, + 786, + 457, + 801 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The spatial dimensions $H_{t}, W_{t}$ of the feature map $\\phi_t$ were set to $\\frac{192}{2^{5 - t}}$. The number of sampling points $N_{t}^{P}$ was set to $18 \\times 18 = 324$ when $t = 0$, and to 305", + "bbox": [ + 215, + 808, + 785, + 840 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "S. Chun and J. Y. Chang", + "bbox": [ + 271, + 114, + 442, + 128 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "when $t > 0$. $N^V$ and $N^L$ were set to 1220 and 68, respectively. For the ARKitFace training dataset [25], we selected a random sample and used its corresponding ground-truth dense 3D landmarks and head rotation as the initial landmarks $V_0$ and head rotation $R_0$ for TRG. The initial correction parameter $c_0$ was set to $\\{s_0 = 1, \\tilde{\\tau}_0^{x,\\mathrm{face}} = 0, \\tilde{\\tau}_0^{y,\\mathrm{face}} = 0\\}$. For the TRG training, both the ARKitFace training data [25] and 300W-LP [55] were utilized. Unless otherwise stated, the performances of models trained using both datasets are presented. When a fair comparison with the state-of-the-art methods is required, results from models trained solely on the ARKitFace training dataset are also provided. 
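Collected as a sketch, these settings amount to the following (our own summary of the stated hyperparameters, not the released configuration file):

TRG_INIT = {
    'feature_map_sizes': [12, 24, 48],   # H_t = W_t = 192 / 2**(5 - t), t = 1..3
    'num_points_t0': 324,                # N_t^P at t = 0 (18 x 18 grid)
    'num_points_t_gt0': 305,             # N_t^P for t > 0
    'num_dense_landmarks': 1220,         # N^V
    'num_sparse_landmarks': 68,          # N^L
    'c0': (1.0, 0.0, 0.0),               # (s_0, tau^x_face, tau^y_face)
}

with $V_0$ and $R_0$ then taken from the randomly selected training sample as described above. 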
Please refer to the supplementary material for more implementation details.", + "bbox": [ + 212, + 145, + 782, + 297 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.2 Datasets", + "text_level": 1, + "bbox": [ + 215, + 315, + 334, + 329 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ARKitFace [25] is a dataset that provides the 6DoF head poses, the dense 3D landmarks, and intrinsic camera parameters. It is collected from selfie scenarios, with data gathered at a camera-to-face distance ranging from 0.3 to 0.9 meters, resulting in images significantly influenced by strong perspective transformations. Following previous work [25], we used 717,840 frames from 400 subjects for training, and 184,884 frames from 100 subjects for testing.", + "bbox": [ + 212, + 335, + 782, + 425 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "300W-LP [55] is an extended synthetic dataset derived from the 300W [39], which itself is composed of several standardized datasets, including AFW [54], HELEN [51], IBUG [40], and LFPW [2]. Through face profiling, the 300W-LP dataset provides 122,450 synthesized images from approximately 4,000 original pictures.", + "bbox": [ + 212, + 426, + 782, + 500 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "BIWI [15] provides 6DoF head poses, a 3D neutral face mesh for each subject, and intrinsic camera parameters. Since BIWI does not provide ground-truth face meshes for each frame, our evaluation focuses solely on the head poses. BIWI serves exclusively as test data to assess the effectiveness of our method. We evaluated the performance of our proposed model by following the protocol used in previous studies [25,46].", + "bbox": [ + 212, + 501, + 782, + 592 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.3 Evaluation Metrics", + "text_level": 1, + "bbox": [ + 215, + 609, + 419, + 625 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "For head rotation accuracy assessment, we follow the approach used in previous studies [1,23,25,46], measuring rotation errors separately for roll, pitch, and yaw. Additionally, to provide a comprehensive understanding of the head rotation estimation performance, we also present the mean absolute error $(\\mathrm{MAE}_r)$ and geodesic error (GE) [9]. For evaluating the accuracy of head translation, we calculate the errors for translation along the $x$-, $y$-, and $z$-axes, represented as $t_x$, $t_y$, and $t_z$ errors, respectively. Similar to head rotation, we present the mean absolute error performance for head translation, denoted as $\\mathrm{MAE}_t$. Following previous research [25], we utilize the average 3D distance (ADD) metric [22] to present a holistic evaluation of the method's performance in estimating both rotation and translation:", + "bbox": [ + 212, + 631, + 782, + 795 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm{ADD} = \\frac{1}{N^V} \\sum_{n=1}^{N^V} \\left\\| \\left(R_3^{mat} V_n^* + T_3\\right) - \\left(R^{*,mat} V_n^* + T^*\\right) \\right\\|_2. 
\\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 303, + 801, + 782, + 842 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "TRG", + "bbox": [ + 694, + 114, + 730, + 126 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 114, + 785, + 126 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/4f7ef870c6fd99d10d92019b206a766d4bb689132b53304337eaae115f239c90.jpg", + "table_caption": [ + "Table 1: Ablation study of TRG on ARKitFace and BIWI. We explored the effects of the bidirectional interaction structure and utilizing the correction parameter. We also investigated the importance of utilizing face geometry in the 6DoF head pose estimation process and the effectiveness of the landmark-to-image alignment method. \"MS\" means multi-scale features." + ], + "table_footnote": [], + "table_body": "
MethodARKitFaceBIWI
Mean ↓MAEr ↓MAEt ↓ADD ↓MAEr ↓MAEt ↓ADD ↓
1-iter (w/o MS)1.691.003.708.933.2813.7432.28
2-iter (w/o MS)1.660.893.618.722.9513.7731.28
3-iter (w/o MS)1.570.883.638.712.5913.6731.52
Tt-prediction1.660.924.6411.668.811.7K5.1K
Landmark-free baseline-1.033.869.343.8718.4242.22
Grid sampled baseline1.590.953.748.992.9814.5835.04
TRG (Ours)1.580.913.628.682.7512.9729.46
", + "bbox": [ + 215, + 227, + 789, + 337 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "To assess the 3D landmark prediction accuracy of our proposed method, we evaluate the median and average distances between the estimated and ground-truth dense landmarks [25]. The effectiveness of our method is evaluated based on the estimated values of $V_{3}$ , $R_{3}$ , and $T_{3}$ from the final face regressor at $t = 3$ . The unit for median, mean, translation error, and ADD is in millimeters, and the unit for rotation error is in degrees.", + "bbox": [ + 212, + 367, + 787, + 458 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.4 Ablation Experiments", + "text_level": 1, + "bbox": [ + 214, + 481, + 444, + 496 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Effectiveness of bidirectional interaction structure. In this experiment, we delve into the significance of explicit bidirectional interaction between the 6DoF head pose and face geometry. To investigate this, we observe the model's performance variations based on the number of interactions between these two types of information. For our experiments, we designed 1-iteration, 2-iteration, and 3-iteration baselines and then compared their performance. The 1-iteration baseline model simultaneously regresses the face geometry and head pose using $\\mathcal{R}_1(\\cdot)$ but without an iterative inference process. The 2- and 3-iteration baseline models enhance this process by incorporating the iterative inference approach. They project the predicted dense landmarks onto the image feature, with all other aspects remaining consistent with the 1-iteration baseline. Similar to the 1-iteration baseline, they utilize only $\\phi_1$ and do not employ multi-scale features. The key distinction between the 3-iteration models and TRG lies in the utilization of multi-scale features.", + "bbox": [ + 212, + 507, + 787, + 717 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The evaluation on the ARKitFace test data, as presented in Table 1, indicates that the performance in estimating the face geometry and head pose improves with the increasing number of iterations. This improvement is attributed to the reduction in ambiguity between the face geometry and 6DoF head pose as the number of bidirectional interactions increases. The BIWI evaluation results further corroborate the effectiveness of the bidirectional interaction method.", + "bbox": [ + 212, + 719, + 787, + 808 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Use of correction parameter. In this experiment, we investigate the rationale behind estimating the correction parameter $c_{t}$ instead of directly estimating head", + "bbox": [ + 212, + 809, + 787, + 839 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "S. Chun and J. Y. Chang", + "bbox": [ + 271, + 114, + 441, + 128 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/66254c28f96a6499a9c6266840f9b6e98a0ffebd9d53ed0c634c01aac60a66cc.jpg", + "image_caption": [ + "Fig. 3: The distribution of ground-truth translation and correction parameters in ARKitFace and BIWI. The colors blue, green, and brown represent the distributions of the ARKitFace training data, ARKitFace test data, and BIWI dataset, respectively. The symbol * denotes ground-truth." 
+ ], + "image_footnote": [], + "bbox": [ + 218, + 147, + 359, + 243 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/e6323f87d6abbb622c81d4b20ac09fd9d86aee026003b5c9d1dbf099c8aad482.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 359, + 148, + 496, + 244 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/1f64105f660563317f21db533cf64207e32da044e977fd7f4140d6b07108d03b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 148, + 643, + 244 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/453f97d3caa55cc24482cc928b1a496b441d257a96f90c31ff438dcaf90468eb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 645, + 148, + 782, + 244 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "translation $T_{t}$ . To elucidate this, we compare the performance of two models: the $T_{t}$ -prediction baseline, which directly estimates head translation $T_{t}$ and TRG. According to Table 1, while the $T_{t}$ -prediction baseline demonstrates accurate estimation of head translation on the ARKitFace test data, its performance significantly declines on the BIWI dataset. We attribute this discrepancy to the differing translation distributions between the ARKitFace and BIWI datasets.", + "bbox": [ + 212, + 349, + 784, + 439 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The first and second columns of Fig. 3 illustrate the ground-truth head translation distributions for ARKitFace and BIWI. While the translation distribution in the ARKitFace training data closely matches its test data, it significantly differs from that of BIWI. This discrepancy is particularly noticeable in the $z$ -axis translations, indicating substantial divergence between the ARKitFace training data and BIWI. To achieve generalization from the ARKitFace training data to BIWI, a model must effectively extrapolate the $z$ -axis translation. However, as evidenced by Table 1, this extrapolation poses a significant challenge for the direct translation estimation model.", + "bbox": [ + 212, + 441, + 784, + 575 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The third and fourth columns of Fig. 3 visualize the distribution of the ground-truth correction parameters for both ARKitFace and BIWI datasets. A key observation here is that the variation in the correction parameter distribution is significantly smaller compared to the translation distribution. Based on these observations, we can conclude that shifting the estimation target from $T_{t}$ to $c_{t}$ effectively reduces distribution discrepancies. This strategic redefinition enhances the model's generalizability, particularly for data that fall outside the training distribution, as evidenced in Table 1.", + "bbox": [ + 212, + 580, + 784, + 700 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The importance of utilizing facial geometry and the effectiveness of landmark-to-image alignment technique. For the purpose of our experiment, we designed a landmark-free baseline that does not estimate facial geometry $\\{V_t\\}_{t=1}^3$ . Given the absence of facial geometry information, the landmark-free baseline is unable to utilize landmark-to-image alignment techniques. Consequently, it extracts grid sampled features from $\\{\\phi_t\\}_{t=1}^3$ and inputs them into a face regressor. However, due to significant structural differences from TRG, we mitigate these differences by also designing a grid sampled baseline for incremental comparison. 
This grid sampled baseline is similar to the TRG, except", + "bbox": [ + 212, + 704, + 785, + 839 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "TRG", + "bbox": [ + 694, + 114, + 730, + 126 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 114, + 784, + 126 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/e79d402db21b487599533f739557577ec0f8519bee357cce556ce22f0d9355c7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 217, + 143, + 357, + 215 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/78ce7ea9e3785344dc870c8edb47fe95dc9bc0c852fba119ca612f211cf900a6.jpg", + "image_caption": [ + "Fig. 4: Qualitative comparison on the ARKitFace and BIWI datasets. The first and second rows show visualized results for ARKitFace and BIWI, respectively. The colors cyan, pink, gold, and gray represent JMLR, PerspNet, TRG, and ground truth, respectively. The red, green, and blue axes respectively represent the X, Y, and Z axes of the camera coordinate system." + ], + "image_footnote": [], + "bbox": [ + 217, + 217, + 357, + 281 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/6ce119f5711db7a66b8b31dbbb8bc31bd39cb6202aaa6115cd73dcb0ad484f29.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 357, + 143, + 500, + 215 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/6a05fca9497ca74bfd4b8f6a85b2ac2d4c0ab130b2adc41152d986eb5459a871.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 359, + 217, + 500, + 281 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/c2ef4dcc973780196eac307ca6b0499787ab0edc17b95715d15ad2bd02309ac3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 143, + 640, + 215 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/e6fcdc5b7e0156603dde8ba5e1339e4f8821166c928365ebb8161bd87f47b58b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 217, + 640, + 281 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/03c7031213aff0cb3e4db5f04098e945fcb6f13c77bafe0fd3df20fc4817bf5d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 640, + 143, + 784, + 215 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/457fe3e7ca3c6af4b5a678cd486d61ca8a69f028ad5647c7912cc64af05ff18e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 640, + 217, + 784, + 281 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "it does not employ the landmark-to-image alignment method, indicating that the primary distinction from the landmark-free baseline lies in whether facial geometry is estimated.", + "bbox": [ + 212, + 393, + 784, + 438 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "According to our findings, as presented in the Table 1, the landmark-free baseline underperforms compared to the grid sampled baseline. This supports our hypothesis that landmark information should be integrated during the 6DoF head pose estimation process. 
Furthermore, our results demonstrate that TRG outperforms the grid sampled baseline, affirming the superiority of our landmark-to-image alignment strategy.", + "bbox": [ + 212, + 439, + 784, + 530 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/b8e87098f183a5ea68b757514e6365ead151676aaab98ee8f689f6ce2f1f7f9b.jpg", + "table_caption": [ + "Table 2: Comparison with previous methods for 6DoF head pose estimation on ARKit-Face test dataset. Models trained with multiple datasets are marked with the symbol $\\star$ , and retrained models are indicated by the symbol $\\dagger$ ." + ], + "table_footnote": [], + "table_body": "
MethodMAErGEMAEtADD
img2pose [1,25]5.55-7.0220.54
Direct 6DoF Regress [25]1.87-9.0621.39
Refined Pix2Pose [25,34]2.35-14.0036.44
JMLR [18] †1.162.394.8611.87
PerspNet [25]0.991.814.1810.01
TRG (Ours)0.921.803.648.74
TRG (Ours) †0.911.843.628.68
", + "bbox": [ + 217, + 640, + 537, + 729 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/9e6b1e6e6005fc0f46708575be99a2186ebb2bd614039bd7042ddb7bbc139ca1.jpg", + "table_caption": [ + "Table 3: Comparison with previous methods for dense 3D landmark estimation on ARKitFace test dataset." + ], + "table_footnote": [], + "table_body": "
MethodMedianMean
PRNet [17]1.972.05
3DDFA-v2 [19]2.352.31
Deng et al. [13]2.462.55
JMLR [18] † *1.861.94
PerspNet [25]1.721.76
TRG (Ours)1.551.61
TRG (Ours) *1.551.58
", + "bbox": [ + 558, + 633, + 782, + 722 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.5 Comparison with State-of-the-Art Methods", + "text_level": 1, + "bbox": [ + 215, + 751, + 620, + 767 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In this experiment, we conducted a benchmark of our proposed method against existing approaches for 6DoF head pose estimation. The evaluation results on the ARKitFace and BIWI datasets are presented in Tables 2, 3 and 4. Model retrained for this comparison is marked with the symbol $\\dagger$ . Multiple datasets", + "bbox": [ + 212, + 779, + 787, + 840 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "S. Chun and J. Y. Chang", + "bbox": [ + 271, + 114, + 442, + 128 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "were used for the model, which could be trained on multiple datasets. However, PerspNet was trained exclusively using the ARKitFace train dataset due to the difficulty of using two datasets [25,55] with differing 3D face mesh topologies. To ensure a fair comparison, we also present the results of TRG trained solely on the ARKitFace train dataset. Models trained on multiple datasets are denoted with the symbol $\\star$ .", + "bbox": [ + 212, + 146, + 782, + 234 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Evaluation on ARKitFace [25]. Img2pose directly infers the 6DoF head pose from images without utilizing face geometry information. However, the absence of face geometry information can lead to increased face size ambiguity, potentially worsening the performance of head pose inference, as can be seen in Table 2.", + "bbox": [ + 212, + 237, + 782, + 296 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "JMLR and PerspNet do not incorporate head pose information during the face geometry inference process. The predicted face geometry, derived without considering head pose information, is relatively inaccurate (Table 3). Consequently, methods that predict the 6DoF head pose based on this relatively imprecise geometry yield inaccurate results (Table 2). In contrast, TRG actively integrates face geometry information into the head pose estimation process. According to Table 2, TRG achieves state-of-the-art in head pose estimation, attributed to its explicit bidirectional interaction structure. Furthermore, owing to its depth-aware landmark prediction architecture, TRG maintains stable face landmark prediction accuracy even in selfie scenarios, as shown in Table 3. Fig. 4 visually illustrates the performance of TRG and existing models [18,25] for head pose estimation and face landmark prediction. When the geometries predicted by each model are aligned with the image, they appear to be well-aligned. However, a stark contrast in model performance becomes evident when comparing the ground-truth geometry with the predicted geometries in the 3D camera space. JMLR and PerspNet struggle to accurately predict the actual size of a human face, resulting in high translation errors.", + "bbox": [ + 212, + 297, + 785, + 551 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Evaluation on BIWI [15]. According to Table 4, TRG significantly outperforms existing optimization-based methods [18,25,56] in head translation estimation. This superior performance is attributed to TRG's design, which effectively leverages the synergy between face geometry and head translation. 
Furthermore, TRG's landmark-to-image alignment method enables it to achieve high head rotation estimation accuracy, surpassing even methods that solely estimate 3D head rotation. Fig. 4 qualitatively demonstrates TRG's exceptional head pose estimation performance. To visualize how closely the predicted head pose matches the ground-truth pose, we utilized the ground-truth neutral mesh and the predicted head pose.", + "bbox": [ + 212, + 554, + 785, + 705 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.6 Limitations", + "text_level": 1, + "bbox": [ + 215, + 726, + 357, + 739 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In the process of deriving depth from images using the proposed method, the requirement for camera intrinsics emerges as a necessary component. This necessity indicates that, in the absence of camera intrinsics, while it is still possible to estimate relative depth among faces in an image, achieving precise depth measurement poses a challenge. To address this challenge and ensure accurate depth determination between the face and the camera, incorporating algorithms that", + "bbox": [ + 212, + 750, + 782, + 839 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "TRG", + "bbox": [ + 694, + 114, + 730, + 126 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/4a44840d4fe5a0235369a2d89747c55d678e5901adaa6d00e7c69fc03a04abc2.jpg", + "table_caption": [ + "Table 4: Comparison with previous methods for 6DoF head pose estimation on BIWI dataset. The models were evaluated using BIWI solely for testing purposes, without utilizing it as training data. We used the camera intrinsics provided by BIWI for the evaluation of the head pose estimation performance of MICA [56]." + ], + "table_footnote": [], + "table_body": "
MethodYawPitchRollMAErGEtxtytzMAEtADD
Dlib [26]11.8613.0019.5614.81------
3DDFA [55]5.5041.9013.2219.07------
EVA-GCN [45]4.014.782.983.92------
HopeNet [38]4.816.613.274.899.53-----
QuatNet [23]4.015.492.944.15------
Liu et al. [31]4.125.613.154.29------
FSA-Net [46]4.274.962.764.007.64-----
HPE [24]4.575.183.124.29------
WHENet-V [53]3.604.102.733.48------
RetinaFace [12] ★4.076.422.974.49------
FDN [48]4.524.702.563.93------
MNN [43]3.984.612.393.66------
TriNet [3]3.054.764.113.97------
6DRepNet [21]3.244.482.683.47------
Cao et al. [4]4.213.523.103.61------
TokenHPE [47]3.954.512.713.72------
Cobo et al. [9]4.584.652.713.987.30-----
img2pose [1] ★4.573.553.243.797.10-----
Direct 6DoF Regress [25]16.4914.035.8112.11-62.3685.01366.52171.30562.38
Refined Pix2Pose [25,34]5.755.0611.237.35-16.8221.30255.3697.83356.32
MICA [56] ★5.407.173.805.46-9.3213.6660.1327.7068.03
JMLR [18] † ★6.316.173.725.408.618.667.2732.6316.1939.71
PerspNet [25]3.103.372.382.955.614.156.4346.6919.09100.09
TRG (Ours)3.283.521.872.895.688.417.3827.1314.3132.10
TRG (Ours) ★3.043.441.782.755.357.836.9924.0712.9729.46
", + "bbox": [ + 222, + 212, + 779, + 507 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "estimate intrinsics becomes essential. This aspect of requiring camera intrinsics for depth calculations highlights an area for further exploration and adaptation in our method, especially when intrinsic parameters are not readily available.", + "bbox": [ + 215, + 534, + 784, + 579 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 217, + 599, + 357, + 614 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This study proposed a novel approach by introducing the TRG to predict a 6DoF head pose from a single image. Through extensive experimentation, we demonstrated the effectiveness of the explicit bidirectional interaction between the 6DoF head pose and the dense 3D face landmarks, a core feature of the TRG architecture. We further established that our method of estimating the correction parameters significantly enhances the generalizability of the model in cross-dataset evaluations. Evaluation on the ARKitFace and BIWI datasets showed TRG's superior performance in head pose estimation compared to existing state-of-the-art methods. Our extensive experiments have also highlighted the strength of TRG's depth-aware landmark prediction structure, particularly in images heavily influenced by perspective transformation, facilitating accurate estimation of face geometry. Based on these findings, our future work will focus on accurately reconstructing detailed facial geometries from close-up facial photos, such as selfies, further pushing the boundaries of facial analysis technology.", + "bbox": [ + 215, + 628, + 785, + 839 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "S. Chun and J. Y. Chang", + "bbox": [ + 271, + 114, + 441, + 128 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgement", + "text_level": 1, + "bbox": [ + 217, + 143, + 392, + 162 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This work was partly supported by Institute of Information & Communications Technology Planning & Evaluation (IITP) grant funded by the Korea government (MSIT) (No. RS-2023-00219700, Development of FACS-compatible Facial Expression Style Transfer Technology for Digital Human, $90\\%$ ) and National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIT) (No. NRF-2022R1F1A1066170, Physically valid 3D human motion reconstruction from multi-view videos, $10\\%$ ).", + "bbox": [ + 212, + 175, + 787, + 282 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 217, + 303, + 321, + 318 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Albiero, V., Chen, X., Yin, X., Pang, G., Hassner, T.: img2pose: Face alignment and detection via 6dof, face pose estimation. In: CVPR (2021)", + "2. Belhumeur, P.N., Jacobs, D.W., Kriegman, D.J., Kumar, N.: Localizing parts of faces using a consensus of exemplars. IEEE TPAMI 35(12), 2930-2940 (2013)", + "3. Cao, Z., Chu, Z., Liu, D., Chen, Y.: A vector-based representation to enhance head pose estimation. In: WACV (2021)", + "4. Cao, Z., Liu, D., Wang, Q., Chen, Y.: Towards unbiased label distribution learning for facial pose estimation using anisotropic spherical gaussian. In: ECCV (2022)", + "5. 
Chai, Z., Zhang, T., He, T., Tan, X., Baltrusaitis, T., Wu, H., Li, R., Zhao, S., Yuan, C., Bian, J.: Hiface: High-fidelity 3d face reconstruction by learning static and dynamic details. In: ICCV (2023)", + "6. Cho, J., Youwang, K., Oh, T.H.: Cross-attention of disentangled modalities for 3d human mesh recovery with transformers. In: ECCV (2022)", + "7. Chun, S., Park, S., Chang, J.Y.: Learnable human mesh triangulation for 3d human pose and shape estimation. In: WACV (2023)", + "8. Chun, S., Park, S., Chang, J.Y.: Representation learning of vertex heatmaps for 3d human mesh reconstruction from multi-view images. In: ICIP (2023)", + "9. Cobo, A., Valle, R., Buenaposada, J.M., Baumela, L.: On the representation and methodology for wide and short range head pose estimation. PR 149, 110263 (2024)", + "10. Danecek, R., Black, M.J., Bolkart, T.: EMOCA: Emotion driven monocular face capture and animation. In: CVPR (2022)", + "11. Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: A large-scale hierarchical image database. In: CVPR (2009)", + "12. Deng, J., Guo, J., Ververas, E., Kotsia, I., Zafeiriou, S.: Retinaface: Single-shot multi-level face localisation in the wild. In: CVPR (2020)", + "13. Deng, Y., Yang, J., Xu, S., Chen, D., Jia, Y., Tong, X.: Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In: CVPRW (2019)", + "14. Dhingra, N.: Lwposr: Lightweight efficient fine grained head pose estimation. In: WACV (2022)", + "15. Fanelli, G., Dantone, M., Gall, J., Fossati, A., Van Gool, L.: Random forests for real time 3d face analysis. IJCV 101, 437-458 (2013)", + "16. Feng, Y., Feng, H., Black, M.J., Bolkart, T.: Learning an animatable detailed 3d face model from in-the-wild images. ACM TOG 40(4), 1-13 (2021)", + "17. Feng, Y., Wu, F., Shao, X., Wang, Y., Zhou, X.: Joint 3d face reconstruction and dense alignment with position map regression network. In: ECCV (2018)" + ], + "bbox": [ + 225, + 332, + 784, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "TRG", + "bbox": [ + 694, + 114, + 730, + 126 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "18. Guo, J., Yu, J., Lattas, A., Deng, J.: Perspective reconstruction of human faces by joint mesh and landmark regression. In: ECCVW (2022)", + "19. Guo, J., Zhu, X., Yang, Y., Yang, F., Lei, Z., Li, S.Z.: Towards fast, accurate and stable 3d dense face alignment. In: ECCV (2020)", + "20. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)", + "21. Hempel, T., Abdelrahman, A.A., Al-Hamadi, A.: 6d rotation representation for unconstrained head pose estimation. In: ICIP (2022)", + "22. Hinterstoisser, S., Lepetit, V., Ilic, S., Holzer, S., Bradski, G., Konolige, K., Navab, N.: Model based training, detection and pose estimation of texture-less 3d objects in heavily cluttered scenes. In: ACCV (2013)", + "23. Hsu, H.W., Wu, T.Y., Wan, S., Wong, W.H., Lee, C.Y.: Quatnet: Quaternion-based head pose estimation with multiregression loss. IEEE TMM 21(4), 1035-1046 (2018)", + "24. Huang, B., Chen, R., Xu, W., Zhou, Q.: Improving head pose estimation using two-stage ensembles with top-k regression. IVC 93, 103827 (2020)", + "25. 
Kao, Y., Pan, B., Xu, M., Lyu, J., Zhu, X., Chang, Y., Li, X., Lei, Z.: Toward 3d face reconstruction in perspective projection: Estimating 6 dof face pose from monocular image. IEEE TIP 32, 3080-3091 (2023)", + "26. Kazemi, V., Sullivan, J.: One millisecond face alignment with an ensemble of regression trees. In: CVPR (2014)", + "27. Kumar, A., Alavi, A., Chellappa, R.: Kepler: Keypoint and pose estimation of unconstrained faces by learning efficient h-cnn regressors. In: FG (2017)", + "28. Li, H., Wang, B., Cheng, Y., Kankanhalli, M., Tan, R.T.: Dsfnet: Dual space fusion network for occlusion-robust 3d dense face alignment. In: CVPR (2023)", + "29. Li, Z., Liu, J., Zhang, Z., Xu, S., Yan, Y.: Cliff: Carrying location information in full frames into human pose and shape estimation. In: ECCV (2022)", + "30. Lin, K., Wang, L., Liu, Z.: End-to-end human pose and mesh reconstruction with transformers. In: CVPR (2021)", + "31. Liu, Z., Chen, Z., Bai, J., Li, S., Lian, S.: Facial pose estimation by deep learning from label distributions. In: ICCVW (2019)", + "32. Maas, A.L., Hannun, A.Y., Ng, A.Y., et al.: Rectifier nonlinearities improve neural network acoustic models. In: ICML (2013)", + "33. Moon, G., Lee, K.M.: I2l-meshnet: Image-to-lixel prediction network for accurate 3d human pose and mesh estimation from a single rgb image. In: ECCV (2020)", + "34. Park, K., Patten, T., Vincze, M.: Pix2pose: Pixel-wise coordinate regression of objects for 6d pose estimation. In: ICCV (2019)", + "35. Paysan, P., Knothe, R., Amberg, B., Romdhani, S., Vetter, T.: A 3d face model for pose and illumination invariant face recognition. In: AVSS (2009)", + "36. Ranjan, A., Bolkart, T., Sanyal, S., Black, M.J.: Generating 3d faces using convolutional mesh autoencoders. In: ECCV (2018)", + "37. Ranjan, R., Patel, V.M., Chellappa, R.: Hyperface: A deep multi-task learning framework for face detection, landmark localization, pose estimation, and gender recognition. IEEE TPAMI 41(1), 121-135 (2019)", + "38. Ruiz, N., Chong, E., Rehg, J.M.: Fine-grained head pose estimation without keypoints. In: CVPRW (2018)", + "39. Sagonas, C., Tzimiropoulos, G., Zafeiriou, S., Pantic, M.: 300 faces in-the-wild challenge: The first facial landmark localization challenge. In: ICCVW (2013)", + "40. Sagonas, C., Tzimiropoulos, G., Zafeiriou, S., Pantic, M.: 300 faces in-the-wild challenge: The first facial landmark localization challenge. In: ICCVW (2013)" + ], + "bbox": [ + 215, + 147, + 784, + 839 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "S. Chun and J. Y. Chang", + "bbox": [ + 271, + 114, + 441, + 128 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "41. Shao, M., Sun, Z., Ozay, M., Okatani, T.: Improving head pose estimation with a combined loss and bounding box margin adjustment. In: FG (2019)", + "42. Sun, X., Xiao, B., Wei, F., Liang, S., Wei, Y.: Integral human pose regression. In: ECCV (2018)", + "43. Valle, R., Buenaposada, J.M., Baumela, L.: Multi-task head pose estimation inthe-wild. IEEE TPAMI 43(8), 2874-2881 (2020)", + "44. Wu, C.Y., Xu, Q., Neumann, U.: Synergy between 3dmm and 3d landmarks for accurate 3d facial geometry. In: 3DV (2021)", + "45. Xin, M., Mo, S., Lin, Y.: Eva-gcn: Head pose estimation based on graph convolutional networks. In: CVPR (2021)", + "46. 
Yang, T.Y., Chen, Y.T., Lin, Y.Y., Chuang, Y.Y.: Fsa-net: Learning fine-grained structure aggregation for head pose estimation from a single image. In: CVPR (2019)", + "47. Zhang, C., Liu, H., Deng, Y., Xie, B., Li, Y.: Tokenhpe: Learning orientation tokens for efficient head pose estimation via transformers. In: CVPR (2023)", + "48. Zhang, H., Wang, M., Liu, Y., Yuan, Y.: Fdn: Feature decoupling network for head pose estimation. In: AAAI (2020)", + "49. Zhang, H., Tian, Y., Zhang, Y., Li, M., An, L., Sun, Z., Liu, Y.: Pymaf-x: Towards well-aligned full-body model regression from monocular images. IEEE TPAMI (2023)", + "50. Zhang, H., Tian, Y., Zhou, X., Ouyang, W., Liu, Y., Wang, L., Sun, Z.: Pymaf: 3d human pose and shape regression with pyramidal mesh alignment feedback loop. In: ICCV (2021)", + "51. Zhou, E., Fan, H., Cao, Z., Jiang, Y., Yin, Q.: Extensive facial landmark localization with coarse-to-fine convolutional network cascade. In: ICCVW (2013)", + "52. Zhou, Y., Barnes, C., Lu, J., Yang, J., Li, H.: On the continuity of rotation representations in neural networks. In: CVPR (2019)", + "53. Zhou, Y., Gregson, J.: Whenet: Real-time fine-grained estimation for wide range head pose. arXiv preprint arXiv:2005.10353 (2020)", + "54. Zhu, X., Ramanan, D.: Face detection, pose estimation, and landmark localization in the wild. In: CVPR (2012)", + "55. Zhu, X., Lei, Z., Liu, X., Shi, H., Li, S.Z.: Face alignment across large poses: A 3d solution. In: CVPR (2016)", + "56. Zielonka, W., Bolkart, T., Thies, J.: Towards metrical reconstruction of human faces. In: ECCV (2022)" + ], + "bbox": [ + 212, + 146, + 787, + 631 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "TRG", + "bbox": [ + 694, + 114, + 730, + 126 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 16 + } +] \ No newline at end of file diff --git a/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/2a1e442c-8a76-4afd-b0e2-7a3c115bb3f2_model.json b/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/2a1e442c-8a76-4afd-b0e2-7a3c115bb3f2_model.json new file mode 100644 index 0000000000000000000000000000000000000000..e04f9620a6de9d3a796f17ca03f759e43c5604aa --- /dev/null +++ b/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/2a1e442c-8a76-4afd-b0e2-7a3c115bb3f2_model.json @@ -0,0 +1,2478 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.243, + 0.141, + 0.761, + 0.187 + ], + "angle": 0, + "content": "6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry" + }, + { + "type": "text", + "bbox": [ + 0.365, + 0.213, + 0.637, + 0.228 + ], + "angle": 0, + "content": "Sungho Chun and Ju Yong Chang" + }, + { + "type": "text", + "bbox": [ + 0.33, + 0.24, + 0.674, + 0.269 + ], + "angle": 0, + "content": "Department of ECE, Kwangwoon University, Korea {asw9161, jychang}@kw.ac.kr" + }, + { + "type": "text", + "bbox": [ + 0.263, + 0.304, + 0.74, + 0.553 + ], + "angle": 0, + "content": "Abstract. This study addresses the nuanced challenge of estimating head translations within the context of six-degrees-of-freedom (6DoF) head pose estimation, placing emphasis on this aspect over the more commonly studied head rotations. Identifying a gap in existing methodologies, we recognized the underutilized potential synergy between facial geometry and head translation. 
To bridge this gap, we propose a novel approach called the head Translation, Rotation, and face Geometry network (TRG), which stands out for its explicit bidirectional interaction structure. This structure has been carefully designed to leverage the complementary relationship between face geometry and head translation, marking a significant advancement in the field of head pose estimation. Our contributions also include the development of a strategy for estimating bounding box correction parameters and a technique for aligning landmarks to image. Both of these innovations demonstrate superior performance in 6DoF head pose estimation tasks. Extensive experiments conducted on ARKitFace and BIWI datasets confirm that the proposed method outperforms current state-of-the-art techniques. Codes are released at https://github.com/asn91666/TRG-Release." + }, + { + "type": "text", + "bbox": [ + 0.263, + 0.567, + 0.774, + 0.595 + ], + "angle": 0, + "content": "Keywords: 6DoF head pose estimation \\(\\cdot\\) bidirectional interaction \\(\\cdot\\) landmark-based approach" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.643, + 0.377, + 0.659 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.674, + 0.788, + 0.794 + ], + "angle": 0, + "content": "Six-degrees-of-freedom (6DoF) head pose estimation is a crucial concern in both computer vision and graphics communities owing to its broad applications in augmented/virtual reality, vehicular monitoring systems, and sports analytics. Despite its prominence, existing studies [3,4,21,27,38,41,45-47] have primarily focused on estimating head orientation, whereas research on head translation estimation has not received as much attention. Some studies [1,44] have estimated pseudo-depth calculated from fitted data [55] without exploring methods to estimate the actual distance between the camera and head." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.796, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Estimating head translation from a single image using learning-based methods poses significant challenges, which can be attributed to roughly two reasons. First, head translation estimation depends on real-scale face geometry. However," + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.271, + 0.115, + 0.444, + 0.129 + ], + "angle": 0, + "content": "S. Chun and J. Y. Chang" + }, + { + "type": "image", + "bbox": [ + 0.219, + 0.146, + 0.784, + 0.297 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.308, + 0.788, + 0.378 + ], + "angle": 0, + "content": "Fig. 1: Methods of inferring 6DoF head pose. The landmark-free approach [1] directly calculates the head pose from the image. Optimization-based methods [18,25,56] first predict face geometry, and then calculate the head pose. In contrast, TRG simultaneously estimates both face geometry and head pose to leverage the synergy between them." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.418, + 0.788, + 0.524 + ], + "angle": 0, + "content": "the estimation of real-scale face geometry suffers from head translation ambiguities. In other words, the estimation of head translation and the estimation of actual size face geometry are strongly correlated, and there exists ambiguity due to their mutual absence. 
Second, learning-based head translation estimation encounters severe generalization issues with out-of-distribution data. Unlike head rotation, the range of head translation is infinite, necessitating a generalization strategy to address it." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.529, + 0.788, + 0.68 + ], + "angle": 0, + "content": "However, existing works [1, 18, 25, 56] do not address the aforementioned issues. Fig. 1 provides an overview of the 6DoF head pose estimation methods used by existing models. In [18, 25, 56], face geometry is first inferred from an image, followed by the calculation of the 6DoF head pose using an optimization-based method. In other words, these methods [18,25,56] do not model the transfer of information from head pose to face geometry. This unidirectional information transfer method may face difficulties in predicting the actual size face geometry due to the absence of depth information. Consequently, the resulting face prior could create a vicious cycle, further reducing the accuracy of head translation prediction." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.685, + 0.788, + 0.761 + ], + "angle": 0, + "content": "Landmark-free approach [1] estimates head translation directly from an image using a learning-based method; however, it does not utilize face geometry information during the inference process. Directly estimating head depth from an image is highly non-linear, making the landmark-free approach challenging for estimating head translation." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.788, + 0.842 + ], + "angle": 0, + "content": "To overcome the limitations of existing models [1,18,25,56], we propose a head Translation, Rotation, and face Geometry network (TRG), which is a landmark-based method for estimating a 6DoF head pose. The TRG is designed with an explicit bidirectional interaction structure that leverages the complementary characteristics between the 6DoF head pose and face geometry. Specifically, we" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.695, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "TRG" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.787, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.177 + ], + "angle": 0, + "content": "propose a method that simultaneously estimates the head pose and dense 3D landmarks, using each other's information to iteratively improve one another." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.178, + 0.788, + 0.373 + ], + "angle": 0, + "content": "To achieve generalizable head translation estimation, TRG does not directly estimate depth, but utilizes the position and size information of the bounding box. The center coordinates of the bounding box are typically well-aligned with the coordinates of the head center, and the size of the bounding box inversely reflects the head's depth. These relationships make the bounding box a useful tool for estimating head translation in 3D space. However, reliance on the bounding box alone is insufficient. This is due to potential misalignments between the bounding box center and the head center, and the bounding box size being influenced by factors beyond depth, such as face size and head rotation. To address these discrepancies, we propose to estimate bounding box correction parameters and calculate head translation using these parameters and bounding box information. 
The proposed method has been found to achieve high accuracy and to be robust even for out-of-distribution data." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.374, + 0.788, + 0.493 + ], + "angle": 0, + "content": "Additionally, TRG aligns the estimated 3D landmarks with the image through perspective projection. By iterating this process, TRG not only enhances the performance of head translation estimation but also improves head rotation accuracy. This landmark-to-image alignment framework is inspired by the architecture of PyMAF [49, 50], which is a model used to reconstruct a human mesh. However, PyMAF is not designed to estimate the camera-to-human distance and fundamentally differs from TRG as it does not leverage the synergy between real-scale human geometry and depth." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.494, + 0.788, + 0.6 + ], + "angle": 0, + "content": "Furthermore, we discovered that TRG can accurately predict 3D face landmarks from a single image, even when strongly affected by perspective distortions, such as in selfies. This accuracy is attributed to the TRG's depth-aware landmark prediction architecture, which actively utilizes head translation information during the landmark prediction process. This finding further supports our main idea that head translation estimation should be conducted simultaneously with facial geometry estimation." + }, + { + "type": "text", + "bbox": [ + 0.239, + 0.6, + 0.729, + 0.615 + ], + "angle": 0, + "content": "The main contributions of this study can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.619, + 0.785, + 0.693 + ], + "angle": 0, + "content": "- We propose TRG for 6DoF head pose estimation. To the best of our knowledge, this is the first study to introduce an explicit bidirectional interaction structure between head translation and face geometry. Through this innovative structure, TRG simultaneously mitigates ambiguity concerning head depth and face size." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.694, + 0.785, + 0.737 + ], + "angle": 0, + "content": "- The proposed strategy for estimating correction parameters for the bounding box demonstrates stable generalization performance on out-of-distribution data in terms of head translation." + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.738, + 0.784, + 0.767 + ], + "angle": 0, + "content": "- The landmark-to-image alignment strategy demonstrates high accuracy not only in terms of head translation but also regarding head rotation." + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.767, + 0.785, + 0.809 + ], + "angle": 0, + "content": "- TRG's depth-aware landmark prediction architecture exhibits high landmark prediction accuracy, even in images heavily influenced by perspective transformation, such as selfies." + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.81, + 0.785, + 0.84 + ], + "angle": 0, + "content": "- Extensive experimental results on the benchmark datasets ARKitFace [25] and BIWI [15] show that TRG outperforms current SotA methods." + }, + { + "type": "list", + "bbox": [ + 0.225, + 0.619, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.444, + 0.129 + ], + "angle": 0, + "content": "S. Chun and J. Y. 
Chang" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.145, + 0.398, + 0.161 + ], + "angle": 0, + "content": "2 Related Works" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.178, + 0.471, + 0.194 + ], + "angle": 0, + "content": "2.1 Landmark-free Approach" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.203, + 0.784, + 0.262 + ], + "angle": 0, + "content": "The landmark-free approach [1,3,4,14,21,31,47] aims to estimate head pose directly from input image without relying on landmarks. However, most landmark-free approaches [3,4,14,21,31,47] only estimate head rotation and do not consider head translation." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.263, + 0.785, + 0.413 + ], + "angle": 0, + "content": "Among them, img2pose [1] not only estimates head rotation but also head translation. It calculates head translation from a proposal and employs a local-to-global transformation strategy to convert the estimated local pose into a global image space. Infrinsics are utilized during the conversion of the local head pose into the global head pose. However, img2pose does not use intrinsics when calculating head translation from a proposal, leading to inaccurate local head poses. This is because utilizing intrinsics is essential when calculating depth from an image, even when dealing with a cropped image. Furthermore, [1] does not utilize face geometry information during inference, which can exacerbate depth ambiguity." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.415, + 0.787, + 0.535 + ], + "angle": 0, + "content": "In contrast to landmark-free approaches, our proposed method explicitly utilizes facial geometry information. Specifically, TRG simultaneously mitigates ambiguity regarding face size and head translation through a bidirectional interaction structure. Additionally, TRG does not directly calculate head translation from cropped images but infers bounding box correction parameters instead. It then computes head translation using the inferred correction parameters and intrinsics. The proposed bounding box correction parameter strategy enables stable and accurate inference of head translation." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.558, + 0.486, + 0.574 + ], + "angle": 0, + "content": "2.2 Landmark-based Approach" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.584, + 0.785, + 0.673 + ], + "angle": 0, + "content": "Numerous landmark-based approaches have been proposed [18,25,27,37,43-45, 56] for estimating a 6DoF head pose or 3D head rotation. [27,37,43] proposed methods that simultaneously estimate 2D face landmarks and 3D head rotation by leveraging the synergy between them using learning-based approaches. However, these studies have not explored the synergy between 3D face geometry and head translation." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.674, + 0.786, + 0.809 + ], + "angle": 0, + "content": "SynergyNet [44] demonstrated that the parameters for shape and expression [35] can improve 3D sparse landmarks, and these enhanced landmarks can, in turn, improve the 3DMM parameters and head rotation during training. However, during the test time, it utilized a unidirectional information transfer architecture, which does not refine the 3DMM parameters and head rotation from the improved landmarks. Furthermore, SynergyNet is a model based on weak-perspective projections, similar to those in [5, 10, 16, 19, 28, 55]. Such models fundamentally do not compute the actual distance between the camera and the face." 
+ }, + { + "type": "text", + "bbox": [ + 0.216, + 0.81, + 0.786, + 0.84 + ], + "angle": 0, + "content": "MICA [56], JMLR [18], and PerspNet [25] employ unidirectional information transfer methods that first estimate face geometry and then calculate head pose." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.695, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "TRG" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.145, + 0.782, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.337, + 0.334, + 0.665, + 0.35 + ], + "angle": 0, + "content": "Fig. 2: Overall pipeline of the proposed method." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.381, + 0.784, + 0.442 + ], + "angle": 0, + "content": "However, these methods are limited in their ability to reconstruct real-scale face geometry due to depth ambiguity. Furthermore, calculating the 6DoF head pose based on these inaccurate geometry priors makes it difficult to achieve high accuracy." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.443, + 0.785, + 0.534 + ], + "angle": 0, + "content": "To address the aforementioned issues, we propose, for the first time, an explicit bidirectional interaction structure between the 6DoF head pose and face geometry. Additionally, unlike other landmark-based approaches, the proposed structure actively utilizes head depth information during the landmark estimation process. This approach demonstrates accurate geometry estimation even for images with strong perspective distortions, such as selfies." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.564, + 0.427, + 0.581 + ], + "angle": 0, + "content": "3 Proposed Method" + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.603, + 0.548, + 0.619 + ], + "angle": 0, + "content": "3.1 Overview of the Proposed Method" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.634, + 0.785, + 0.683 + ], + "angle": 0, + "content": "TRG is designed to iteratively regress head translation \\(\\{T_t \\in \\mathbb{R}^3\\}_{t=1}^3\\) and rotation \\(\\{R_t \\in \\mathbb{R}^6\\}_{t=1}^3\\) from a single image \\(I \\in \\mathbb{R}^{3 \\times 192 \\times 192}\\), while also providing the auxiliary output of dense 3D landmarks \\(\\{V_t \\in \\mathbb{R}^{3 \\times N^V}\\}_{t=1}^3\\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.684, + 0.788, + 0.84 + ], + "angle": 0, + "content": "Fig. 2 illustrates the comprehensive structure of TRG, which comprises a feature extractor that generates multi-scale feature maps \\(\\{\\phi_t\\in \\mathbb{R}^{256\\times H_t\\times W_t}\\}_{t = 1}^3\\) from \\(I\\), a feature sampler that extracts a landmark-aligned feature vector \\(\\phi_t^p\\in \\mathbb{R}^{5N_{t - 1}^P}\\) from the feature map \\(\\phi_t\\), and a face regressor that regresses head translation \\(T_{t}\\), rotation \\(R_{t}\\), and dense landmarks \\(V_{t}\\) from \\(\\phi_t^p\\). \\(N_{t - 1}^P\\) and \\(N^V\\) denote the number of sampling points \\(P_{t - 1}\\in \\mathbb{R}^{2\\times N_{t - 1}^P}\\) and the number of 3D dense landmarks \\(V_{t}\\), respectively. Each of these components—feature extractor, feature sampler, and face regressor—is described in detail in Sections 3.2, 3.3, and 3.4, respectively. Additionally, the loss functions employed in the training are discussed in Section 3.5." 
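To make the data flow above concrete, the following is a schematic, runnable sketch of the three-iteration loop, assuming NumPy. Every module here (extract_features, sample_features, regress_residual) is a random stand-in invented for illustration, not the released TRG code, and the rotation is kept as a plain 3x3 matrix instead of the paper's 6D representation; the shapes (192x192 input, 256-channel maps, an 18x18 grid for P_0, 1220 landmarks, 305 sampling points) follow Sections 3.1-3.4 and 4.1.

```python
import numpy as np

rng = np.random.default_rng(0)
N_V = 1220                                         # dense landmarks (Sec. 4.1)

def extract_features(image):
    # Stand-in for ResNet18 + deconvolutions: phi_t with H_t = W_t = 192 / 2^(5 - t).
    return [rng.standard_normal((256, 192 // 2**(5 - t), 192 // 2**(5 - t))) for t in (1, 2, 3)]

def sample_features(phi_t, points):
    # Stand-in for Eq. (1): bilinear sampling + 5-D reduction + concatenation.
    return rng.standard_normal(5 * points.shape[1])

def regress_residual(phi_p, theta, bbox_info):
    # Stand-in for the MLP R_t(.), which predicts residuals for R, c, and V.
    return {k: 0.01 * rng.standard_normal(np.shape(v)) for k, v in theta.items()}

def translation_from_correction(c, bbox_info):
    # Eq. (2): metric T_t from the correction parameters and the bounding-box info.
    s, tx_face, ty_face = c
    tau_x, tau_y, b, f = bbox_info
    return np.array([0.2 * s / b * tau_x + 0.2 * s * tx_face,
                     0.2 * s / b * tau_y + 0.2 * s * ty_face,
                     0.2 * s / b * f])

def project(V, R, T, K):
    # Eq. (3): perspective projection of the dense landmarks.
    cam = R @ V + T[:, None]
    img = K @ cam
    return img[:2] / img[2]

image = rng.standard_normal((3, 192, 192))
K = np.array([[1000.0, 0.0, 96.0], [0.0, 1000.0, 96.0], [0.0, 0.0, 1.0]])
bbox_info = (40.0, -20.0, 150.0, 1000.0)           # tau_x, tau_y, b, f (pixels), assumed

theta = {"R": np.eye(3), "c": np.array([1.0, 0.0, 0.0]),
         "V": 0.05 * rng.standard_normal((3, N_V))}
P = np.stack(np.meshgrid(np.linspace(0, 191, 18), np.linspace(0, 191, 18))).reshape(2, -1)

for phi_t in extract_features(image):
    phi_p = sample_features(phi_t, P)                       # landmark-aligned feature vector
    residual = regress_residual(phi_p, theta, bbox_info)
    theta = {k: theta[k] + residual[k] for k in theta}      # Theta_t = Theta_{t-1} + residual
    T = translation_from_correction(theta["c"], bbox_info)
    P = project(theta["V"], theta["R"], T, K)[:, ::4]       # subsampled V_t^img -> P_t (305 pts)
```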
+ } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.444, + 0.129 + ], + "angle": 0, + "content": "S. Chun and J. Y. Chang" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.147, + 0.411, + 0.161 + ], + "angle": 0, + "content": "3.2 Feature Extractor" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.172, + 0.788, + 0.343 + ], + "angle": 0, + "content": "The feature extractor computes multi-scale feature maps \\(\\{\\phi_t\\}_{t=1}^3\\) and 2D sparse landmarks \\(L \\in \\mathbb{R}^{2 \\times N^L}\\) from a single image \\(I\\). \\(N^L\\) denote the number of sparse landmarks. The feature extractor comprises ResNet18 [20], three deconvolution layers, a \\(1 \\times 1\\) convolution layer, and a soft-argmax operation [42]. ResNet18 is initialized with pre-trained weights on ImageNet [11] and is used after removing the final classification layer and the pooling layer. The \\(\\phi_t\\) is computed from the \\(t\\)-th deconvolution layer and fed into the feature sampler. Additionally, the last feature map, \\(\\phi_3\\) undergoes a transformation into 2D heatmaps through the \\(1 \\times 1\\) convolution layer. The soft-argmax operation computes \\(L\\) from the resultant heatmaps. These computed landmarks, along with the ground-truth landmarks \\(L^* \\in \\mathbb{R}^{2 \\times N^L}\\), are incorporated into the loss function." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.366, + 0.4, + 0.381 + ], + "angle": 0, + "content": "3.3 Feature Sampler" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.39, + 0.787, + 0.485 + ], + "angle": 0, + "content": "The feature sampler computes the landmark-aligned feature vector \\(\\phi_t^p \\in \\mathbb{R}^{5N_{t-1}^P}\\) from the feature map \\(\\phi_t\\) and the corresponding sampling points \\(P_{t-1} \\in \\mathbb{R}^{2 \\times N_{t-1}^P}\\). Sampling points \\(P_{t-1}\\) are used to extract point-wise features from the feature map \\(\\phi_t\\). Here, \\(P_0\\) is set to 2D grid coordinates. For \\(t > 0\\), \\(P_t\\) is computed using the \\(t\\)-th face regressor. The methodology for deriving these sampling points from the face regressor is described in Section 3.4." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.486, + 0.788, + 0.584 + ], + "angle": 0, + "content": "The point-wise feature vector \\(\\phi_t(p_{t-1,n}) \\in \\mathbb{R}^{256}\\) is obtained using bilinear sampling at the location specified by the point \\(p_{t-1,n} \\in \\mathbb{R}^2\\) on \\(\\phi_t\\). Here, \\(p_{t-1,n}\\) denotes the \\(n\\)-th column vector of the sampling points \\(P_{t-1}\\). The \\(N_{t-1}^P\\) point-wise features, denoted as \\(\\{\\phi_t(p_{t-1,n})\\}_{n=1}^{N_{t-1}^P}\\), are then transformed into 5D vectors using a dimension reduction layer \\(\\mathcal{F}(\\cdot)\\). These vectors are subsequently concatenated to form the landmark-aligned feature vector \\(\\phi_t^p\\):" + }, + { + "type": "equation", + "bbox": [ + 0.389, + 0.594, + 0.787, + 0.621 + ], + "angle": 0, + "content": "\\[\n\\phi_ {t} ^ {p} = \\bigoplus \\left(\\left\\{\\mathcal {F} \\left(\\phi_ {t} \\left(p _ {t - 1, n}\\right)\\right) \\right\\} _ {n = 1} ^ {N _ {t - 1} ^ {P}}\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.631, + 0.787, + 0.693 + ], + "angle": 0, + "content": "where \\(\\bigoplus (\\cdot)\\) denotes concatenation. 
The dimension reduction layer, \\(\\mathcal{F}(\\cdot)\\), is structured as a multilayer perceptron (MLP), which comprises three fully connected layers and two Leaky ReLU activations [32,50]. The obtained landmark-aligned feature vector \\(\\phi_t^p\\) is then fed into the face regressor." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.715, + 0.387, + 0.73 + ], + "angle": 0, + "content": "3.4 Face Regressor" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.741, + 0.814, + 0.842 + ], + "angle": 0, + "content": "The face regressor comprises an MLP \\(\\mathcal{R}_t(\\cdot)\\) to calculate the head rotation, bounding box correction parameters, and dense landmarks \\(\\Theta_t = \\{R_t \\in \\mathbb{R}^6, c_t \\in \\mathbb{R}^3, V_t \\in \\mathbb{R}^{3 \\times N^V}\\}\\), a function that computes the head translation \\(T_t = \\{T_t^x, T_t^y, T_t^z\\} \\in \\mathbb{R}^3\\) based on the bounding box information \\(I_{\\mathrm{bbox}} = \\{\\frac{\\tau^{x,\\mathrm{bbox}}}{f}, \\frac{\\tau^{y,\\mathrm{bbox}}}{f}, \\frac{b}{f}\\} \\in \\mathbb{R}^3\\) and the correction parameter \\(c_t = \\{s_t, \\tilde{\\tau}_t^{x,\\mathrm{face}}, \\tilde{\\tau}_t^{y,\\mathrm{face}}\\}\\), and a perspective projection function that calculates the image coordinates of the dense landmarks" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.695, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "TRG" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.785, + 0.126 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.145, + 0.784, + 0.282 + ], + "angle": 0, + "content": "\\(V_{t}^{img} \\in \\mathbb{R}^{2 \\times N^{V}}\\) and the sampling points \\(P_{t}\\). \\(V_{t}\\) and \\(R_{t}\\) denote the 3D coordinates of the dense landmarks defined in the head space and the head rotation expressed in a 6D representation [52], respectively. \\(T_{t}^{x}, T_{t}^{y}\\), and \\(T_{t}^{z}\\) represent the head translations along the \\(x\\)-, \\(y\\)-, and \\(z\\)-axes in the camera space, respectively. \\(\\tau^{x,\\mathrm{bbox}}\\), \\(\\tau^{y,\\mathrm{bbox}}\\), \\(b\\), and \\(f\\) denote the \\(x\\)- and \\(y\\)-coordinates of the bounding box center relative to the center of the uncropped image, the size of the bounding box, and the focal length, respectively. \\(s_{t}\\), \\(\\tilde{\\tau}_{t}^{x,\\mathrm{face}}\\), and \\(\\tilde{\\tau}_{t}^{y,\\mathrm{face}}\\) respectively denote the bounding box scale factor and the normalized offset of the head center relative to the bounding box center in the \\(x\\)- and \\(y\\)-directions." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.284, + 0.785, + 0.389 + ], + "angle": 0, + "content": "The MLP \\(\\mathcal{R}_t(\\cdot)\\) estimates the residual for calculating \\(\\Theta_t\\) from the landmark-aligned feature \\(\\phi_t^p\\), the previously iterated output \\(\\Theta_{t - 1}^{sub} = \\{R_{t - 1},c_{t - 1},V_{t - 1}^{sub}\\in \\mathbb{R}^{3\\times 305}\\}\\), and the bounding box information \\(I_{\\mathrm{bbox}}\\) [29,49,50]. \\(\\Theta_t\\) is computed by adding the residual estimated by \\(\\mathcal{R}_t(\\cdot)\\) to \\(\\Theta_{t - 1}\\). \\(V_{t - 1}^{sub}\\) represents the landmarks obtained by subsampling \\(V_{t - 1}\\) [36]. The use of \\(V_{t - 1}^{sub}\\) for \\(\\mathcal{R}_t(\\cdot)\\) instead of \\(V_{t - 1}\\) reduces the redundancy of the dense landmarks, which improves the performance of the proposed model [6-8,30,50]." 
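A compact sketch of the landmark-aligned feature sampling of Eq. (1), assuming PyTorch; the three-layer dimension-reduction MLP below and the helper name landmark_aligned_features are illustrative stand-ins, not the authors' implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def landmark_aligned_features(phi_t, points, reduce_mlp, image_size=192):
    """phi_t: (1, 256, H_t, W_t) feature map; points: (2, N_P) pixel coords P_{t-1}."""
    # Normalise pixel coordinates to [-1, 1] as expected by grid_sample ((x, y) order).
    grid = (points.t() / (image_size - 1)) * 2 - 1            # (N_P, 2)
    grid = grid.view(1, 1, -1, 2)                              # (1, 1, N_P, 2)
    sampled = F.grid_sample(phi_t, grid, mode="bilinear", align_corners=True)
    sampled = sampled.view(256, -1).t()                        # (N_P, 256) point-wise features
    reduced = reduce_mlp(sampled)                              # (N_P, 5): F(.) in Eq. (1)
    return reduced.reshape(-1)                                 # concatenation: (5 * N_P,)

# Stand-in dimension-reduction MLP (three linear layers, two LeakyReLU activations).
reduce_mlp = nn.Sequential(nn.Linear(256, 64), nn.LeakyReLU(),
                           nn.Linear(64, 16), nn.LeakyReLU(),
                           nn.Linear(16, 5))

phi_1 = torch.randn(1, 256, 12, 12)                            # H_1 = W_1 = 192 / 2^4
P_0 = torch.stack(torch.meshgrid(torch.linspace(0, 191, 18),
                                 torch.linspace(0, 191, 18), indexing="xy")).reshape(2, -1)
phi_p = landmark_aligned_features(phi_1, P_0, reduce_mlp)
print(phi_p.shape)                                             # torch.Size([1620]) = 5 * 324
```

The 18 x 18 grid for P_0 (324 points) follows Sec. 4.1; for t > 0 the points would instead come from subsampling the projected landmarks V_t^img.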
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.39, + 0.785, + 0.513 + ], + "angle": 0, + "content": "We model a real human face as being enclosed within a box \\( B \\) of size \\( 0.2m \\times 0.2m \\), with \\( m \\) denoting meters. The size of this box, when projected into the image space, is represented by \\( b \\). However, since the assumption about the face size is typically imprecise, \\( \\mathcal{R}_t(\\cdot) \\) estimates a scale factor \\( s_t \\) to adjust the size of \\( B \\). Furthermore, \\( \\mathcal{R}_t(\\cdot) \\) is responsible for determining the normalized offsets of the head center \\( \\tilde{\\tau}_t^{x,\\mathrm{face}} \\), \\( \\tilde{\\tau}_t^{y,\\mathrm{face}} \\). These offsets represent the values obtained by normalizing the image space translation from the bounding box center to the head center with \\( b \\). The calculation of \\( T_t \\) from \\( c_t \\) and \\( I_{\\mathrm{bbox}} \\) is expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.333, + 0.525, + 0.785, + 0.582 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} T _ {t} ^ {x} = \\frac {0 . 2 s _ {t}}{b} \\tau^ {x, \\mathrm {b b o x}} + 0. 2 s _ {t} \\tilde {\\tau} _ {t} ^ {x, \\mathrm {f a c e}}, \\\\ T _ {t} ^ {y} = \\frac {0 . 2 s _ {t}}{b} \\tau^ {y, \\mathrm {b b o x}} + 0. 2 s _ {t} \\tilde {\\tau} _ {t} ^ {y, \\text {f a c e}}, \\quad T _ {t} ^ {z} = \\frac {0 . 2 s _ {t}}{b} f. \\tag {2} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.594, + 0.784, + 0.638 + ], + "angle": 0, + "content": "The derivation of Eq. 2 can be found in the supplementary material. The image coordinates of the dense landmarks, \\( V_{t}^{img} \\), are computed by projecting \\( V_{t} \\), as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.416, + 0.64, + 0.785, + 0.657 + ], + "angle": 0, + "content": "\\[\nV _ {t} ^ {i m g} = \\Pi \\left(V _ {t}, R _ {t}, T _ {t}, K\\right), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.665, + 0.784, + 0.711 + ], + "angle": 0, + "content": "where \\(\\varPi(\\cdot)\\) and \\(K\\in \\mathbb{R}^{3\\times 3}\\) denote the perspective projection and the intrinsic camera parameters, respectively. The sampling points \\(P_{t}\\) are obtained by subsampling \\(V_{t}^{img}\\)." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.736, + 0.383, + 0.751 + ], + "angle": 0, + "content": "3.5 Loss Functions" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.784, + 0.84 + ], + "angle": 0, + "content": "We detail the loss functions employed to train TRG, ensuring accurate predictions of face geometry and head pose. The training process utilizes several loss functions for dense landmarks: head space coordinate loss \\(\\mathcal{L}_{\\mathrm{head}}\\), camera space coordinate loss \\(\\mathcal{L}_{\\mathrm{cam}}\\), and image space coordinate loss \\(\\mathcal{L}_{\\mathrm{img}}\\). For a precise estimation of head rotation, a head rotation loss \\(\\mathcal{L}_{\\mathrm{rot}}\\) is also adopted. As iteration" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.444, + 0.129 + ], + "angle": 0, + "content": "S. Chun and J. Y. 
Chang" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.147, + 0.594, + 0.162 + ], + "angle": 0, + "content": "progresses, the loss functions are doubled as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.342, + 0.173, + 0.629, + 0.214 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {h e a d}} = \\sum_ {t = 1} ^ {3} 2 ^ {t - 3} \\left(\\frac {1}{N ^ {V}} \\sum_ {n = 1} ^ {N ^ {V}} \\| V _ {t, n} - V _ {n} ^ {*} \\| _ {1}\\right),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.342, + 0.219, + 0.786, + 0.268 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {c a m}} = \\sum_ {t = 1} ^ {3} 2 ^ {t - 3} \\left(\\frac {1}{N ^ {V}} \\sum_ {n = 1} ^ {N ^ {V}} \\| V _ {t, n} ^ {\\text {c a m}} - V _ {n} ^ {* , \\text {c a m}} \\| _ {1}\\right), \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.342, + 0.265, + 0.659, + 0.306 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {i m g}} = \\sum_ {t = 1} ^ {3} 2 ^ {t - 3} \\left(\\frac {1}{N ^ {V}} \\sum_ {n = 1} ^ {N ^ {V}} \\| V _ {t, n} ^ {i m g} - V _ {n} ^ {*, i m g} \\| _ {1}\\right),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.342, + 0.312, + 0.598, + 0.349 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {r o t}} = \\sum_ {t = 1} ^ {3} 2 ^ {t - 3} (\\| R _ {t} ^ {m a t} - R ^ {*, m a t} \\| _ {F}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.358, + 0.787, + 0.435 + ], + "angle": 0, + "content": "where \\( * \\) and \\( V_{t,n} \\) represent the ground truth and the \\( n \\)-th column vector of \\( V_{t} \\), respectively. \\( V_{t}^{cam} = R_{t}^{mat}V_{t} + T_{t} \\in \\mathbb{R}^{3 \\times N^{V}} \\) and \\( V_{t}^{img} \\) represent the camera space coordinates and the image space coordinates of the \\( t \\)-th dense landmarks, respectively. \\( R_{t}^{mat} \\in \\mathbb{R}^{3 \\times 3} \\) represents the 3D head rotation in matrix form, and \\( \\| \\cdot \\|_{F} \\) denotes the Frobenius norm." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.436, + 0.787, + 0.511 + ], + "angle": 0, + "content": "If connectivity between dense landmarks is defined in the dataset, we utilize this information to apply an edge length loss. We empirically found that applying the edge length loss \\(\\mathcal{L}_{\\mathrm{ed}}\\) [18, 33] to \\(V_{3}\\), estimated by the final face regressor, improves the model's performance in estimating face geometry. The edge length loss \\(\\mathcal{L}_{\\mathrm{ed}}\\) can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.322, + 0.522, + 0.785, + 0.553 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {e d}} = \\sum_ {M} \\sum_ {\\{n, m \\} \\subset M} | \\| V _ {3, n} - V _ {3, m} \\| _ {2} - \\| V _ {n} ^ {*} - V _ {m} ^ {*} \\| _ {2} |, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.563, + 0.787, + 0.607 + ], + "angle": 0, + "content": "where \\(M\\) denotes a triangle. Additionally, to improve the quality of the feature map, we apply the sparse 2D landmark loss \\(\\mathcal{L}_L\\) to the landmarks \\(L\\) obtained from \\(\\phi_3\\) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.408, + 0.609, + 0.785, + 0.649 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {L} = \\frac {1}{N ^ {L}} \\sum_ {n = 1} ^ {N ^ {L}} \\| L _ {n} - L _ {n} ^ {*} \\| _ {1}. 
\\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.24, + 0.654, + 0.641, + 0.668 + ], + "angle": 0, + "content": "The final loss function to train TRG can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.248, + 0.68, + 0.785, + 0.695 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\lambda_ {\\text {h e a d}} \\mathcal {L} _ {\\text {h e a d}} + \\lambda_ {\\text {c a m}} \\mathcal {L} _ {\\text {c a m}} + \\lambda_ {\\text {i m g}} \\mathcal {L} _ {\\text {i m g}} + \\lambda_ {\\text {r o t}} \\mathcal {L} _ {\\text {r o t}} + \\lambda_ {\\text {e d}} \\mathcal {L} _ {\\text {e d}} + \\lambda_ {L} \\mathcal {L} _ {L}, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.705, + 0.787, + 0.734 + ], + "angle": 0, + "content": "where \\(\\lambda\\)s represent the weights of the loss functions. \\(\\lambda_{\\mathrm{head}}\\), \\(\\lambda_{\\mathrm{cam}}\\), \\(\\lambda_{\\mathrm{img}}\\), \\(\\lambda_{\\mathrm{rot}}\\), \\(\\lambda_{\\mathrm{ed}}\\), and \\(\\lambda_L\\) are set to 20, 2, 0.01, 10, 2, and 1.25, respectively." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.757, + 0.462, + 0.773 + ], + "angle": 0, + "content": "4 Experimental Results" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.787, + 0.458, + 0.802 + ], + "angle": 0, + "content": "4.1 Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.809, + 0.787, + 0.841 + ], + "angle": 0, + "content": "The spatial dimensions \\( H_{t}, W_{t} \\) of the feature map \\( \\phi_t \\) were set to \\( \\frac{192}{2^{5 - t}} \\). The number of sampling points \\( N_{t}^{P} \\) was set to \\( 18 \\times 18 = 324 \\) when \\( t = 0 \\), and to 305" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.695, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "TRG" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.146, + 0.784, + 0.298 + ], + "angle": 0, + "content": "when \\( t > 0 \\). \\( N^V \\) and \\( N^L \\) were set to 1220 and 68, respectively. For the ARKitFace training dataset [25], we selected a random sample and used its corresponding ground-truth dense 3D landmarks and head rotation as the initial landmarks \\( V_0 \\) and head rotation \\( R_0 \\) for the TRG. The initial correction parameter \\( c_0 \\) was set to \\( \\{s_0 = 1, \\tilde{\\tau}_0^{x,\\mathrm{face}} = 0, \\tilde{\\tau}_0^{y,\\mathrm{face}} = 0\\} \\). For the TRG training, both the ARKitFace training data [25] and 300W-LP [55] were utilized. Unless otherwise stated, the performances of models trained using both datasets are presented. When a fair comparison with the state-of-the-art methods is required, results from models trained solely on the ARKitFace training dataset are also provided. Please refer to the supplementary material for more implementation details." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.316, + 0.335, + 0.33 + ], + "angle": 0, + "content": "4.2 Datasets" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.336, + 0.784, + 0.426 + ], + "angle": 0, + "content": "ARKitFace [25] is a dataset that provides the 6DoF head poses, the dense 3D landmarks, and intrinsic camera parameters. It is collected from selfie scenarios, with data gathered at a camera-to-face distance ranging from 0.3 to 0.9 meters, resulting in images significantly influenced by strong perspective transformations. 
Following previous work [25], we used 717,840 frames from 400 subjects for training, and 184,884 frames from 100 subjects for testing." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.427, + 0.784, + 0.5 + ], + "angle": 0, + "content": "300W-LP [55] is an extended synthetic dataset derived from the 300W [39], which itself is composed of several standardized datasets, including AFW [54], HELEN [51], IBUG [40], and LFPW [2]. Through face profiling, the 300W-LP dataset provides 122,450 synthesized images from approximately 4,000 original pictures." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.502, + 0.784, + 0.593 + ], + "angle": 0, + "content": "BIWI [15] provides 6DoF head poses, a 3D neutral face mesh for each subject, and intrinsic camera parameters. Since BIWI does not provide ground-truth face meshes for each frame, our evaluation focuses solely on the head poses. BIWI serves exclusively as test data to assess the effectiveness of our method. We evaluated the performance of our proposed model by following the protocol used in previous studies [25,46]." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.611, + 0.42, + 0.625 + ], + "angle": 0, + "content": "4.3 Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.632, + 0.784, + 0.796 + ], + "angle": 0, + "content": "For head rotation accuracy assessment, we follow the approach used in previous studies [1,23,25,46], measuring rotation errors separately for roll, pitch, and yaw. Additionally, to provide a comprehensive understanding of the head rotation estimation performance, we also present the mean absolute error \\((\\mathrm{MAE}_r)\\) and geodesic error (GE) [9]. For evaluating the accuracy of head translation, we calculate the errors for translation along the \\(x-\\), \\(y-\\), and \\(z\\)-axes, represented as \\(t_x\\), \\(t_y\\), and \\(t_z\\) errors, respectively. Similar to head rotation, we present the mean absolute error performance for head translation, denoted as \\(\\mathrm{MAE}_t\\). Following previous research [25], we utilize the average 3D distance (ADD) metric [22] to present a holistic evaluation of the method's performance in estimating both rotation and translation:" + }, + { + "type": "equation", + "bbox": [ + 0.305, + 0.802, + 0.784, + 0.843 + ], + "angle": 0, + "content": "\\[\n\\mathrm {A D D} = \\frac {1}{N ^ {V}} \\sum_ {n = 1} ^ {N ^ {V}} \\| \\left(R _ {3} ^ {\\text {m a t}} V _ {n} ^ {*} + T _ {3}\\right) - \\left(R ^ {*}, \\text {m a t} V _ {n} ^ {*} + T ^ {*}\\right) \\| _ {2}. \\tag {8}\n\\]" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.442, + 0.129 + ], + "angle": 0, + "content": "S. Chun and J. Y. Chang" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.145, + 0.788, + 0.214 + ], + "angle": 0, + "content": "Table 1: Ablation study of TRG on ARKitFace and BIWI. We explored the effects of the bidirectional interaction structure and utilizing the correction parameter. We also investigated the importance of utilizing face geometry in the 6DoF head pose estimation process and the effectiveness of the landmark-to-image alignment method. \"MS\" means multi-scale features." + }, + { + "type": "table", + "bbox": [ + 0.216, + 0.228, + 0.79, + 0.338 + ], + "angle": 0, + "content": "
| Method | ARKitFace Mean ↓ | ARKitFace MAEr ↓ | ARKitFace MAEt ↓ | ARKitFace ADD ↓ | BIWI MAEr ↓ | BIWI MAEt ↓ | BIWI ADD ↓ |
| --- | --- | --- | --- | --- | --- | --- | --- |
| 1-iter (w/o MS) | 1.69 | 1.00 | 3.70 | 8.93 | 3.28 | 13.74 | 32.28 |
| 2-iter (w/o MS) | 1.66 | 0.89 | 3.61 | 8.72 | 2.95 | 13.77 | 31.28 |
| 3-iter (w/o MS) | 1.57 | 0.88 | 3.63 | 8.71 | 2.59 | 13.67 | 31.52 |
| Tt-prediction | 1.66 | 0.92 | 4.64 | 11.66 | 8.8 | 1.7K | 5.1K |
| Landmark-free baseline | - | 1.03 | 3.86 | 9.34 | 3.87 | 18.42 | 42.22 |
| Grid sampled baseline | 1.59 | 0.95 | 3.74 | 8.99 | 2.98 | 14.58 | 35.04 |
| TRG (Ours) | 1.58 | 0.91 | 3.62 | 8.68 | 2.75 | 12.97 | 29.46 |
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.368, + 0.788, + 0.459 + ], + "angle": 0, + "content": "To assess the 3D landmark prediction accuracy of our proposed method, we evaluate the median and average distances between the estimated and ground-truth dense landmarks [25]. The effectiveness of our method is evaluated based on the estimated values of \\( V_{3} \\), \\( R_{3} \\), and \\( T_{3} \\) from the final face regressor at \\( t = 3 \\). The unit for median, mean, translation error, and ADD is in millimeters, and the unit for rotation error is in degrees." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.482, + 0.445, + 0.497 + ], + "angle": 0, + "content": "4.4 Ablation Experiments" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.508, + 0.788, + 0.718 + ], + "angle": 0, + "content": "Effectiveness of bidirectional interaction structure. In this experiment, we delve into the significance of explicit bidirectional interaction between the 6DoF head pose and face geometry. To investigate this, we observe the model's performance variations based on the number of interactions between these two types of information. For our experiments, we designed 1-iteration, 2-iteration, and 3-iteration baselines and then compared their performance. The 1-iteration baseline model simultaneously regresses the face geometry and head pose using \\(\\mathcal{R}_1(\\cdot)\\) but without an iterative inference process. The 2- and 3-iteration baseline models enhance this process by incorporating the iterative inference approach. They project the predicted dense landmarks onto the image feature, with all other aspects remaining consistent with the 1-iteration baseline. Similar to the 1-iteration baseline, they utilize only \\(\\phi_1\\) and do not employ multi-scale features. The key distinction between the 3-iteration models and TRG lies in the utilization of multi-scale features." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.72, + 0.788, + 0.809 + ], + "angle": 0, + "content": "The evaluation on the ARKitFace test data, as presented in Table 1, indicates that the performance in estimating the face geometry and head pose improves with the increasing number of iterations. This improvement is attributed to the reduction in ambiguity between the face geometry and 6DoF head pose as the number of bidirectional interactions increases. The BIWI evaluation results further corroborate the effectiveness of the bidirectional interaction method." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.788, + 0.84 + ], + "angle": 0, + "content": "Use of correction parameter. 
In this experiment, we investigate the rationale behind estimating the correction parameter \\( c_{t} \\) instead of directly estimating head" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.695, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "TRG" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "image", + "bbox": [ + 0.22, + 0.148, + 0.36, + 0.244 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.36, + 0.149, + 0.497, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.149, + 0.644, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.646, + 0.149, + 0.783, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.259, + 0.785, + 0.314 + ], + "angle": 0, + "content": "Fig. 3: The distribution of ground-truth translation and correction parameters in ARKitFace and BIWI. The colors blue, green, and brown represent the distributions of the ARKitFace training data, ARKitFace test data, and BIWI dataset, respectively. The symbol * denotes ground-truth." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.35, + 0.785, + 0.44 + ], + "angle": 0, + "content": "translation \\( T_{t} \\). To elucidate this, we compare the performance of two models: the \\( T_{t} \\)-prediction baseline, which directly estimates head translation \\( T_{t} \\) and TRG. According to Table 1, while the \\( T_{t} \\)-prediction baseline demonstrates accurate estimation of head translation on the ARKitFace test data, its performance significantly declines on the BIWI dataset. We attribute this discrepancy to the differing translation distributions between the ARKitFace and BIWI datasets." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.442, + 0.785, + 0.577 + ], + "angle": 0, + "content": "The first and second columns of Fig. 3 illustrate the ground-truth head translation distributions for ARKitFace and BIWI. While the translation distribution in the ARKitFace training data closely matches its test data, it significantly differs from that of BIWI. This discrepancy is particularly noticeable in the \\(z\\)-axis translations, indicating substantial divergence between the ARKitFace training data and BIWI. To achieve generalization from the ARKitFace training data to BIWI, a model must effectively extrapolate the \\(z\\)-axis translation. However, as evidenced by Table 1, this extrapolation poses a significant challenge for the direct translation estimation model." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.581, + 0.785, + 0.701 + ], + "angle": 0, + "content": "The third and fourth columns of Fig. 3 visualize the distribution of the ground-truth correction parameters for both ARKitFace and BIWI datasets. A key observation here is that the variation in the correction parameter distribution is significantly smaller compared to the translation distribution. Based on these observations, we can conclude that shifting the estimation target from \\( T_{t} \\) to \\( c_{t} \\) effectively reduces distribution discrepancies. This strategic redefinition enhances the model's generalizability, particularly for data that fall outside the training distribution, as evidenced in Table 1." 
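To make Eq. (2) and the argument above concrete, here is a small numeric illustration (assumed numbers; the helper name is hypothetical) of how the correction parameters map to metric translation, and why their ground-truth distribution stays narrow across very different depths.

```python
import numpy as np

def translation_from_correction(s, tx_face, ty_face, tau_x, tau_y, b, f):
    """Eq. (2): tau_x, tau_y = bbox-centre offset from the image centre (px),
    b = bbox size (px), f = focal length (px); returns T_t in metres."""
    return np.array([0.2 * s / b * tau_x + 0.2 * s * tx_face,
                     0.2 * s / b * tau_y + 0.2 * s * ty_face,
                     0.2 * s / b * f])

f = 1000.0
# Ground-truth scale factor s* for heads of realistic widths at various depths,
# assuming the detector box roughly tracks the true head width:
for Tz, width in [(0.3, 0.18), (0.6, 0.20), (0.9, 0.22), (1.4, 0.20)]:
    b = f * width / Tz                      # projected box size shrinks with depth
    s_star = Tz * b / (0.2 * f)             # value Eq. (2) needs to reproduce Tz
    print(f"Tz={Tz:.1f} m  b={b:.0f} px  s*={s_star:.2f}")
# Tz spans 0.3-1.4 m (hard to extrapolate), while s* stays within roughly 0.9-1.1;
# the wide-ranging depth itself is recovered from b and f:
print(translation_from_correction(1.0, 0.0, 0.0, tau_x=40.0, tau_y=-20.0, b=143.0, f=f))
```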
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.705, + 0.787, + 0.84 + ], + "angle": 0, + "content": "The importance of utilizing facial geometry and the effectiveness of landmark-to-image alignment technique. For the purpose of our experiment, we designed a landmark-free baseline that does not estimate facial geometry \\(\\{V_t\\}_{t=1}^3\\). Given the absence of facial geometry information, the landmark-free baseline is unable to utilize landmark-to-image alignment techniques. Consequently, it extracts grid sampled features from \\(\\{\\phi_t\\}_{t=1}^3\\) and inputs them into a face regressor. However, due to significant structural differences from TRG, we mitigate these differences by also designing a grid sampled baseline for incremental comparison. This grid sampled baseline is similar to the TRG, except" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.444, + 0.129 + ], + "angle": 0, + "content": "S. Chun and J. Y. Chang" + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.145, + 0.358, + 0.217 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.218, + 0.358, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.359, + 0.145, + 0.501, + 0.217 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.36, + 0.218, + 0.501, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.145, + 0.642, + 0.217 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.218, + 0.642, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.642, + 0.145, + 0.785, + 0.217 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.642, + 0.218, + 0.785, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.293, + 0.788, + 0.364 + ], + "angle": 0, + "content": "Fig. 4: Qualitative comparison on the ARKitFace and BIWI datasets. The first and second rows show visualized results for ARKitFace and BIWI, respectively. The colors cyan, pink, gold, and gray represent JMLR, PerspNet, TRG, and ground truth, respectively. The red, green, and blue axes respectively represent the X, Y, and Z axes of the camera coordinate system." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.394, + 0.785, + 0.439 + ], + "angle": 0, + "content": "it does not employ the landmark-to-image alignment method, indicating that the primary distinction from the landmark-free baseline lies in whether facial geometry is estimated." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.44, + 0.785, + 0.531 + ], + "angle": 0, + "content": "According to our findings, as presented in the Table 1, the landmark-free baseline underperforms compared to the grid sampled baseline. This supports our hypothesis that landmark information should be integrated during the 6DoF head pose estimation process. Furthermore, our results demonstrate that TRG outperforms the grid sampled baseline, affirming the superiority of our landmark-to-image alignment strategy." + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.558, + 0.534, + 0.63 + ], + "angle": 0, + "content": "Table 2: Comparison with previous methods for 6DoF head pose estimation on ARKit-Face test dataset. 
Models trained with multiple datasets are marked with the symbol \\(\\star\\), and retrained models are indicated by the symbol \\(\\dagger\\)." + }, + { + "type": "table", + "bbox": [ + 0.218, + 0.641, + 0.538, + 0.73 + ], + "angle": 0, + "content": "
| Method | MAEr | GE | MAEt | ADD |
| --- | --- | --- | --- | --- |
| img2pose [1,25] | 5.55 | - | 7.02 | 20.54 |
| Direct 6DoF Regress [25] | 1.87 | - | 9.06 | 21.39 |
| Refined Pix2Pose [25,34] | 2.35 | - | 14.00 | 36.44 |
| JMLR [18] † | 1.16 | 2.39 | 4.86 | 11.87 |
| PerspNet [25] | 0.99 | 1.81 | 4.18 | 10.01 |
| TRG (Ours) | 0.92 | 1.80 | 3.64 | 8.74 |
| TRG (Ours) † | 0.91 | 1.84 | 3.62 | 8.68 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.554, + 0.566, + 0.788, + 0.621 + ], + "angle": 0, + "content": "Table 3: Comparison with previous methods for dense 3D landmark estimation on ARKitFace test dataset." + }, + { + "type": "table", + "bbox": [ + 0.56, + 0.635, + 0.784, + 0.723 + ], + "angle": 0, + "content": "
| Method | Median | Mean |
| --- | --- | --- |
| PRNet [17] | 1.97 | 2.05 |
| 3DDFA-v2 [19] | 2.35 | 2.31 |
| Deng et al. [13] | 2.46 | 2.55 |
| JMLR [18] † ★ | 1.86 | 1.94 |
| PerspNet [25] | 1.72 | 1.76 |
| TRG (Ours) | 1.55 | 1.61 |
| TRG (Ours) ★ | 1.55 | 1.58 |
" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.752, + 0.622, + 0.768 + ], + "angle": 0, + "content": "4.5 Comparison with State-of-the-Art Methods" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.78, + 0.788, + 0.842 + ], + "angle": 0, + "content": "In this experiment, we conducted a benchmark of our proposed method against existing approaches for 6DoF head pose estimation. The evaluation results on the ARKitFace and BIWI datasets are presented in Tables 2, 3 and 4. Model retrained for this comparison is marked with the symbol \\(\\dagger\\). Multiple datasets" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.695, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "TRG" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.236 + ], + "angle": 0, + "content": "were used for the model, which could be trained on multiple datasets. However, PerspNet was trained exclusively using the ARKitFace train dataset due to the difficulty of using two datasets [25,55] with differing 3D face mesh topologies. To ensure a fair comparison, we also present the results of TRG trained solely on the ARKitFace train dataset. Models trained on multiple datasets are denoted with the symbol \\(\\star\\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.238, + 0.784, + 0.297 + ], + "angle": 0, + "content": "Evaluation on ARKitFace [25]. Img2pose directly infers the 6DoF head pose from images without utilizing face geometry information. However, the absence of face geometry information can lead to increased face size ambiguity, potentially worsening the performance of head pose inference, as can be seen in Table 2." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.298, + 0.786, + 0.553 + ], + "angle": 0, + "content": "JMLR and PerspNet do not incorporate head pose information during the face geometry inference process. The predicted face geometry, derived without considering head pose information, is relatively inaccurate (Table 3). Consequently, methods that predict the 6DoF head pose based on this relatively imprecise geometry yield inaccurate results (Table 2). In contrast, TRG actively integrates face geometry information into the head pose estimation process. According to Table 2, TRG achieves state-of-the-art in head pose estimation, attributed to its explicit bidirectional interaction structure. Furthermore, owing to its depth-aware landmark prediction architecture, TRG maintains stable face landmark prediction accuracy even in selfie scenarios, as shown in Table 3. Fig. 4 visually illustrates the performance of TRG and existing models [18,25] for head pose estimation and face landmark prediction. When the geometries predicted by each model are aligned with the image, they appear to be well-aligned. However, a stark contrast in model performance becomes evident when comparing the ground-truth geometry with the predicted geometries in the 3D camera space. JMLR and PerspNet struggle to accurately predict the actual size of a human face, resulting in high translation errors." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.555, + 0.786, + 0.706 + ], + "angle": 0, + "content": "Evaluation on BIWI [15]. According to Table 4, TRG significantly outperforms existing optimization-based methods [18,25,56] in head translation estimation. 
This superior performance is attributed to TRG's design, which effectively leverages the synergy between face geometry and head translation. Furthermore, TRG's landmark-to-image alignment method enables it to achieve high head rotation estimation accuracy, surpassing even methods that solely estimate 3D head rotation. Fig. 4 qualitatively demonstrates TRG's exceptional head pose estimation performance. To visualize how closely the predicted head pose matches the ground-truth pose, we utilized the ground-truth neutral mesh and the predicted head pose." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.727, + 0.358, + 0.74 + ], + "angle": 0, + "content": "4.6 Limitations" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.784, + 0.84 + ], + "angle": 0, + "content": "In the process of deriving depth from images using the proposed method, the requirement for camera intrinsics emerges as a necessary component. This necessity indicates that, in the absence of camera intrinsics, while it is still possible to estimate relative depth among faces in an image, achieving precise depth measurement poses a challenge. To address this challenge and ensure accurate depth determination between the face and the camera, incorporating algorithms that" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.442, + 0.129 + ], + "angle": 0, + "content": "S. Chun and J. Y. Chang" + }, + { + "type": "table_caption", + "bbox": [ + 0.217, + 0.145, + 0.785, + 0.201 + ], + "angle": 0, + "content": "Table 4: Comparison with previous methods for 6DoF head pose estimation on BIWI dataset. The models were evaluated using BIWI solely for testing purposes, without utilizing it as training data. We used the camera intrinsics provided by BIWI for the evaluation of the head pose estimation performance of MICA [56]." + }, + { + "type": "table", + "bbox": [ + 0.223, + 0.213, + 0.78, + 0.508 + ], + "angle": 0, + "content": "
| Method | Yaw | Pitch | Roll | MAEr | GE | tx | ty | tz | MAEt | ADD |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Dlib [26] | 11.86 | 13.00 | 19.56 | 14.81 | - | - | - | - | - | - |
| 3DDFA [55] | 5.50 | 41.90 | 13.22 | 19.07 | - | - | - | - | - | - |
| EVA-GCN [45] | 4.01 | 4.78 | 2.98 | 3.92 | - | - | - | - | - | - |
| HopeNet [38] | 4.81 | 6.61 | 3.27 | 4.89 | 9.53 | - | - | - | - | - |
| QuatNet [23] | 4.01 | 5.49 | 2.94 | 4.15 | - | - | - | - | - | - |
| Liu et al. [31] | 4.12 | 5.61 | 3.15 | 4.29 | - | - | - | - | - | - |
| FSA-Net [46] | 4.27 | 4.96 | 2.76 | 4.00 | 7.64 | - | - | - | - | - |
| HPE [24] | 4.57 | 5.18 | 3.12 | 4.29 | - | - | - | - | - | - |
| WHENet-V [53] | 3.60 | 4.10 | 2.73 | 3.48 | - | - | - | - | - | - |
| RetinaFace [12] ★ | 4.07 | 6.42 | 2.97 | 4.49 | - | - | - | - | - | - |
| FDN [48] | 4.52 | 4.70 | 2.56 | 3.93 | - | - | - | - | - | - |
| MNN [43] | 3.98 | 4.61 | 2.39 | 3.66 | - | - | - | - | - | - |
| TriNet [3] | 3.05 | 4.76 | 4.11 | 3.97 | - | - | - | - | - | - |
| 6DRepNet [21] | 3.24 | 4.48 | 2.68 | 3.47 | - | - | - | - | - | - |
| Cao et al. [4] | 4.21 | 3.52 | 3.10 | 3.61 | - | - | - | - | - | - |
| TokenHPE [47] | 3.95 | 4.51 | 2.71 | 3.72 | - | - | - | - | - | - |
| Cobo et al. [9] | 4.58 | 4.65 | 2.71 | 3.98 | 7.30 | - | - | - | - | - |
| img2pose [1] ★ | 4.57 | 3.55 | 3.24 | 3.79 | 7.10 | - | - | - | - | - |
| Direct 6DoF Regress [25] | 16.49 | 14.03 | 5.81 | 12.11 | - | 62.36 | 85.01 | 366.52 | 171.30 | 562.38 |
| Refined Pix2Pose [25,34] | 5.75 | 5.06 | 11.23 | 7.35 | - | 16.82 | 21.30 | 255.36 | 97.83 | 356.32 |
| MICA [56] ★ | 5.40 | 7.17 | 3.80 | 5.46 | - | 9.32 | 13.66 | 60.13 | 27.70 | 68.03 |
| JMLR [18] † ★ | 6.31 | 6.17 | 3.72 | 5.40 | 8.61 | 8.66 | 7.27 | 32.63 | 16.19 | 39.71 |
| PerspNet [25] | 3.10 | 3.37 | 2.38 | 2.95 | 5.61 | 4.15 | 6.43 | 46.69 | 19.09 | 100.09 |
| TRG (Ours) | 3.28 | 3.52 | 1.87 | 2.89 | 5.68 | 8.41 | 7.38 | 27.13 | 14.31 | 32.10 |
| TRG (Ours) ★ | 3.04 | 3.44 | 1.78 | 2.75 | 5.35 | 7.83 | 6.99 | 24.07 | 12.97 | 29.46 |
" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.535, + 0.785, + 0.58 + ], + "angle": 0, + "content": "estimate intrinsics becomes essential. This aspect of requiring camera intrinsics for depth calculations highlights an area for further exploration and adaptation in our method, especially when intrinsic parameters are not readily available." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.6, + 0.358, + 0.616 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.629, + 0.786, + 0.84 + ], + "angle": 0, + "content": "This study proposed a novel approach by introducing the TRG to predict a 6DoF head pose from a single image. Through extensive experimentation, we demonstrated the effectiveness of the explicit bidirectional interaction between the 6DoF head pose and the dense 3D face landmarks, a core feature of the TRG architecture. We further established that our method of estimating the correction parameters significantly enhances the generalizability of the model in cross-dataset evaluations. Evaluation on the ARKitFace and BIWI datasets showed TRG's superior performance in head pose estimation compared to existing state-of-the-art methods. Our extensive experiments have also highlighted the strength of TRG's depth-aware landmark prediction structure, particularly in images heavily influenced by perspective transformation, facilitating accurate estimation of face geometry. Based on these findings, our future work will focus on accurately reconstructing detailed facial geometries from close-up facial photos, such as selfies, further pushing the boundaries of facial analysis technology." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.695, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "TRG" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.145, + 0.393, + 0.163 + ], + "angle": 0, + "content": "Acknowledgement" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.176, + 0.788, + 0.283 + ], + "angle": 0, + "content": "This work was partly supported by Institute of Information & Communications Technology Planning & Evaluation (IITP) grant funded by the Korea government (MSIT) (No. RS-2023-00219700, Development of FACS-compatible Facial Expression Style Transfer Technology for Digital Human, \\(90\\%\\)) and National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIT) (No. NRF-2022R1F1A1066170, Physically valid 3D human motion reconstruction from multi-view videos, \\(10\\%\\))." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.304, + 0.323, + 0.319 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.333, + 0.785, + 0.361 + ], + "angle": 0, + "content": "1. Albiero, V., Chen, X., Yin, X., Pang, G., Hassner, T.: img2pose: Face alignment and detection via 6dof, face pose estimation. In: CVPR (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.362, + 0.785, + 0.388 + ], + "angle": 0, + "content": "2. Belhumeur, P.N., Jacobs, D.W., Kriegman, D.J., Kumar, N.: Localizing parts of faces using a consensus of exemplars. IEEE TPAMI 35(12), 2930-2940 (2013)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.389, + 0.785, + 0.416 + ], + "angle": 0, + "content": "3. Cao, Z., Chu, Z., Liu, D., Chen, Y.: A vector-based representation to enhance head pose estimation. 
In: WACV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.417, + 0.785, + 0.443 + ], + "angle": 0, + "content": "4. Cao, Z., Liu, D., Wang, Q., Chen, Y.: Towards unbiased label distribution learning for facial pose estimation using anisotropic spherical gaussian. In: ECCV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.444, + 0.785, + 0.484 + ], + "angle": 0, + "content": "5. Chai, Z., Zhang, T., He, T., Tan, X., Baltrusaitis, T., Wu, H., Li, R., Zhao, S., Yuan, C., Bian, J.: Hiface: High-fidelity 3d face reconstruction by learning static and dynamic details. In: ICCV (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.485, + 0.785, + 0.511 + ], + "angle": 0, + "content": "6. Cho, J., Youwang, K., Oh, T.H.: Cross-attention of disentangled modalities for 3d human mesh recovery with transformers. In: ECCV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.512, + 0.785, + 0.539 + ], + "angle": 0, + "content": "7. Chun, S., Park, S., Chang, J.Y.: Learnable human mesh triangulation for 3d human pose and shape estimation. In: WACV (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.54, + 0.785, + 0.566 + ], + "angle": 0, + "content": "8. Chun, S., Park, S., Chang, J.Y.: Representation learning of vertex heatmaps for 3d human mesh reconstruction from multi-view images. In: ICIP (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.567, + 0.785, + 0.607 + ], + "angle": 0, + "content": "9. Cobo, A., Valle, R., Buenaposada, J.M., Baumela, L.: On the representation and methodology for wide and short range head pose estimation. PR 149, 110263 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.608, + 0.785, + 0.636 + ], + "angle": 0, + "content": "10. Danecek, R., Black, M.J., Bolkart, T.: EMOCA: Emotion driven monocular face capture and animation. In: CVPR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.636, + 0.785, + 0.662 + ], + "angle": 0, + "content": "1. Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: A large-scale hierarchical image database. In: CVPR (2009)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.663, + 0.785, + 0.689 + ], + "angle": 0, + "content": "2. Deng, J., Guo, J., Ververas, E., Kotsia, I., Zafeiriou, S.: Retinaface: Single-shot multi-level face localisation in the wild. In: CVPR (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.69, + 0.785, + 0.731 + ], + "angle": 0, + "content": "3. Deng, Y., Yang, J., Xu, S., Chen, D., Jia, Y., Tong, X.: Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In: CVPRW (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.732, + 0.785, + 0.758 + ], + "angle": 0, + "content": "4. Dhingra, N.: Lwposr: Lightweight efficient fine grained head pose estimation. In: WACV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.759, + 0.785, + 0.785 + ], + "angle": 0, + "content": "5. Fanelli, G., Dantone, M., Gall, J., Fossati, A., Van Gool, L.: Random forests for real time 3d face analysis. IJCV 101, 437-458 (2013)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.786, + 0.785, + 0.813 + ], + "angle": 0, + "content": "6. Feng, Y., Feng, H., Black, M.J., Bolkart, T.: Learning an animatable detailed 3d face model from in-the-wild images. ACM TOG 40(4), 1-13 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.814, + 0.785, + 0.84 + ], + "angle": 0, + "content": "7. 
Feng, Y., Wu, F., Shao, X., Wang, Y., Zhou, X.: Joint 3d face reconstruction and dense alignment with position map regression network. In: ECCV (2018)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.333, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.442, + 0.129 + ], + "angle": 0, + "content": "S. Chun and J. Y. Chang" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.148, + 0.785, + 0.175 + ], + "angle": 0, + "content": "18. Guo, J., Yu, J., Lattas, A., Deng, J.: Perspective reconstruction of human faces by joint mesh and landmark regression. In: ECCVW (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.177, + 0.785, + 0.203 + ], + "angle": 0, + "content": "19. Guo, J., Zhu, X., Yang, Y., Yang, F., Lei, Z., Li, S.Z.: Towards fast, accurate and stable 3d dense face alignment. In: ECCV (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.204, + 0.785, + 0.231 + ], + "angle": 0, + "content": "20. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.232, + 0.785, + 0.259 + ], + "angle": 0, + "content": "21. Hempel, T., Abdelrahman, A.A., Al-Hamadi, A.: 6d rotation representation for unconstrained head pose estimation. In: ICIP (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.26, + 0.785, + 0.3 + ], + "angle": 0, + "content": "22. Hinterstoisser, S., Lepetit, V., Ilic, S., Holzer, S., Bradski, G., Konolige, K., Navab, N.: Model based training, detection and pose estimation of texture-less 3d objects in heavily cluttered scenes. In: ACCV (2013)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.301, + 0.785, + 0.342 + ], + "angle": 0, + "content": "23. Hsu, H.W., Wu, T.Y., Wan, S., Wong, W.H., Lee, C.Y.: Quantnet: Quaternion-based head pose estimation with multiregression loss. IEEE TMM 21(4), 1035-1046 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.343, + 0.785, + 0.37 + ], + "angle": 0, + "content": "24. Huang, B., Chen, R., Xu, W., Zhou, Q.: Improving head pose estimation using two-stage ensembles with top-k regression. IVC 93, 103827 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.371, + 0.785, + 0.411 + ], + "angle": 0, + "content": "25. Kao, Y., Pan, B., Xu, M., Lyu, J., Zhu, X., Chang, Y., Li, X., Lei, Z.: Toward 3d face reconstruction in perspective projection: Estimating 6 dof face pose from monocular image. IEEE TIP 32, 3080-3091 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.412, + 0.785, + 0.439 + ], + "angle": 0, + "content": "26. Kazemi, V., Sullivan, J.: One millisecond face alignment with an ensemble of regression trees. In: CVPR (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.44, + 0.785, + 0.466 + ], + "angle": 0, + "content": "27. Kumar, A., Alavi, A., Chellappa, R.: Kepler: Keypoint and pose estimation of unconstrained faces by learning efficient h-cnn regressors. In: FG (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.467, + 0.785, + 0.494 + ], + "angle": 0, + "content": "28. Li, H., Wang, B., Cheng, Y., Kankanhalli, M., Tan, R.T.: Dsfnet: Dual space fusion network for occlusion-robust 3d dense face alignment. In: CVPR (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.495, + 0.785, + 0.522 + ], + "angle": 0, + "content": "29. 
Li, Z., Liu, J., Zhang, Z., Xu, S., Yan, Y.: Cliff: Carrying location information in full frames into human pose and shape estimation. In: ECCV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.523, + 0.785, + 0.55 + ], + "angle": 0, + "content": "30. Lin, K., Wang, L., Liu, Z.: End-to-end human pose and mesh reconstruction with transformers. In: CVPR (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.551, + 0.785, + 0.577 + ], + "angle": 0, + "content": "31. Liu, Z., Chen, Z., Bai, J., Li, S., Lian, S.: Facial pose estimation by deep learning from label distributions. In: ICCVW (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.578, + 0.785, + 0.605 + ], + "angle": 0, + "content": "32. Maas, A.L., Hannun, A.Y., Ng, A.Y., et al.: Rectifier nonlinearities improve neural network acoustic models. In: ICML (2013)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.606, + 0.785, + 0.633 + ], + "angle": 0, + "content": "33. Moon, G., Lee, K.M.: I2l-meshnet: Image-to-lixel prediction network for accurate 3d human pose and mesh estimation from a single rgb image. In: ECCV (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.634, + 0.785, + 0.66 + ], + "angle": 0, + "content": "34. Park, K., Patten, T., Vincze, M.: Pix2pose: Pixel-wise coordinate regression of objects for 6d pose estimation. In: ICCV (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.661, + 0.785, + 0.688 + ], + "angle": 0, + "content": "35. Paysan, P., Knothe, R., Amberg, B., Romdhani, S., Vetter, T.: A 3d face model for pose and illumination invariant face recognition. In: AVSS (2009)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.689, + 0.785, + 0.716 + ], + "angle": 0, + "content": "36. Ranjan, A., Bolkart, T., Sanyal, S., Black, M.J.: Generating 3d faces using convolutional mesh autoencoders. In: ECCV (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.717, + 0.785, + 0.757 + ], + "angle": 0, + "content": "37. Ranjan, R., Patel, V.M., Chellappa, R.: Hyperface: A deep multi-task learning framework for face detection, landmark localization, pose estimation, and gender recognition. IEEE TPAMI 41(1), 121-135 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.758, + 0.785, + 0.785 + ], + "angle": 0, + "content": "38. Ruiz, N., Chong, E., Rehg, J.M.: Fine-grained head pose estimation without keypoints. In: CVPRW (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.786, + 0.785, + 0.813 + ], + "angle": 0, + "content": "39. Sagonas, C., Tzimiropoulos, G., Zafeiriou, S., Pantic, M.: 300 faces in-the-wild challenge: The first facial landmark localization challenge. In: ICCVW (2013)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.814, + 0.785, + 0.84 + ], + "angle": 0, + "content": "40. Sagonas, C., Tzimiropoulos, G., Zafeiriou, S., Pantic, M.: 300 faces in-the-wild challenge: The first facial landmark localization challenge. In: ICCVW (2013)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.148, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.695, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "TRG" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.175 + ], + "angle": 0, + "content": "41. 
Shao, M., Sun, Z., Ozay, M., Okatani, T.: Improving head pose estimation with a combined loss and bounding box margin adjustment. In: FG (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.175, + 0.788, + 0.202 + ], + "angle": 0, + "content": "42. Sun, X., Xiao, B., Wei, F., Liang, S., Wei, Y.: Integral human pose regression. In: ECCV (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.203, + 0.788, + 0.231 + ], + "angle": 0, + "content": "43. Valle, R., Buenaposada, J.M., Baumela, L.: Multi-task head pose estimation inthe-wild. IEEE TPAMI 43(8), 2874-2881 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.231, + 0.788, + 0.259 + ], + "angle": 0, + "content": "44. Wu, C.Y., Xu, Q., Neumann, U.: Synergy between 3dmm and 3d landmarks for accurate 3d facial geometry. In: 3DV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.259, + 0.788, + 0.286 + ], + "angle": 0, + "content": "45. Xin, M., Mo, S., Lin, Y.: Eva-gcn: Head pose estimation based on graph convolutional networks. In: CVPR (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.286, + 0.788, + 0.327 + ], + "angle": 0, + "content": "46. Yang, T.Y., Chen, Y.T., Lin, Y.Y., Chuang, Y.Y.: Fsa-net: Learning fine-grained structure aggregation for head pose estimation from a single image. In: CVPR (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.328, + 0.788, + 0.355 + ], + "angle": 0, + "content": "47. Zhang, C., Liu, H., Deng, Y., Xie, B., Li, Y.: Tokenhpe: Learning orientation tokens for efficient head pose estimation via transformers. In: CVPR (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.356, + 0.788, + 0.383 + ], + "angle": 0, + "content": "48. Zhang, H., Wang, M., Liu, Y., Yuan, Y.: Fdn: Feature decoupling network for head pose estimation. In: AAAI (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.383, + 0.788, + 0.424 + ], + "angle": 0, + "content": "49. Zhang, H., Tian, Y., Zhang, Y., Li, M., An, L., Sun, Z., Liu, Y.: Pymaf-x: Towards well-aligned full-body model regression from monocular images. IEEE TPAMI (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.424, + 0.788, + 0.466 + ], + "angle": 0, + "content": "50. Zhang, H., Tian, Y., Zhou, X., Ouyang, W., Liu, Y., Wang, L., Sun, Z.: Pymaf: 3d human pose and shape regression with pyramidal mesh alignment feedback loop. In: ICCV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.466, + 0.788, + 0.494 + ], + "angle": 0, + "content": "51. Zhou, E., Fan, H., Cao, Z., Jiang, Y., Yin, Q.: Extensive facial landmark localization with coarse-to-fine convolutional network cascade. In: ICCVW (2013)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.494, + 0.788, + 0.522 + ], + "angle": 0, + "content": "52. Zhou, Y., Barnes, C., Lu, J., Yang, J., Li, H.: On the continuity of rotation representations in neural networks. In: CVPR (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.522, + 0.788, + 0.549 + ], + "angle": 0, + "content": "53. Zhou, Y., Gregson, J.: Whenet: Real-time fine-grained estimation for wide range head pose. arXiv preprint arXiv:2005.10353 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.549, + 0.788, + 0.577 + ], + "angle": 0, + "content": "54. Zhu, X., Ramanan, D.: Face detection, pose estimation, and landmark localization in the wild. In: CVPR (2012)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.577, + 0.788, + 0.604 + ], + "angle": 0, + "content": "55. 
Zhu, X., Lei, Z., Liu, X., Shi, H., Li, S.Z.: Face alignment across large poses: A 3d solution. In: CVPR (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.605, + 0.788, + 0.632 + ], + "angle": 0, + "content": "56. Zielonka, W., Bolkart, T., Thies, J.: Towards metrical reconstruction of human faces. In: ECCV (2022)" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.632 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/2a1e442c-8a76-4afd-b0e2-7a3c115bb3f2_origin.pdf b/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/2a1e442c-8a76-4afd-b0e2-7a3c115bb3f2_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f68e30d7a3a7fa2c4e48c31ed1183ff7b874f17a --- /dev/null +++ b/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/2a1e442c-8a76-4afd-b0e2-7a3c115bb3f2_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbb0b36edeba96b146061cb6b6cb051871b3c6a84ae9feb848d7a4f778f50da9 +size 1080828 diff --git a/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/full.md b/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/full.md new file mode 100644 index 0000000000000000000000000000000000000000..c14ab16464b9b77a24849a1dcd19cd7e74379f20 --- /dev/null +++ b/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/full.md @@ -0,0 +1,332 @@ +# 6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry + +Sungho Chun and Ju Yong Chang + +Department of ECE, Kwangwoon University, Korea {asw9161, jychang}@kw.ac.kr + +Abstract. This study addresses the nuanced challenge of estimating head translations within the context of six-degrees-of-freedom (6DoF) head pose estimation, placing emphasis on this aspect over the more commonly studied head rotations. Identifying a gap in existing methodologies, we recognized the underutilized potential synergy between facial geometry and head translation. To bridge this gap, we propose a novel approach called the head Translation, Rotation, and face Geometry network (TRG), which stands out for its explicit bidirectional interaction structure. This structure has been carefully designed to leverage the complementary relationship between face geometry and head translation, marking a significant advancement in the field of head pose estimation. Our contributions also include the development of a strategy for estimating bounding box correction parameters and a technique for aligning landmarks to image. Both of these innovations demonstrate superior performance in 6DoF head pose estimation tasks. Extensive experiments conducted on ARKitFace and BIWI datasets confirm that the proposed method outperforms current state-of-the-art techniques. Codes are released at https://github.com/asn91666/TRG-Release. + +Keywords: 6DoF head pose estimation $\cdot$ bidirectional interaction $\cdot$ landmark-based approach + +# 1 Introduction + +Six-degrees-of-freedom (6DoF) head pose estimation is a crucial concern in both computer vision and graphics communities owing to its broad applications in augmented/virtual reality, vehicular monitoring systems, and sports analytics. 
Despite its prominence, existing studies [3,4,21,27,38,41,45-47] have primarily focused on estimating head orientation, whereas research on head translation estimation has not received as much attention. Some studies [1,44] have estimated pseudo-depth calculated from fitted data [55] without exploring methods to estimate the actual distance between the camera and head. + +Estimating head translation from a single image using learning-based methods poses significant challenges, which can be attributed to roughly two reasons. First, head translation estimation depends on real-scale face geometry. However, + +![](images/d458188cd0318d3f363853cfc5116e0e91c3bbd1894de8b9a41299a2f65f93e9.jpg) +Fig. 1: Methods of inferring 6DoF head pose. The landmark-free approach [1] directly calculates the head pose from the image. Optimization-based methods [18,25,56] first predict face geometry, and then calculate the head pose. In contrast, TRG simultaneously estimates both face geometry and head pose to leverage the synergy between them. + +the estimation of real-scale face geometry suffers from head translation ambiguities. In other words, the estimation of head translation and the estimation of actual size face geometry are strongly correlated, and there exists ambiguity due to their mutual absence. Second, learning-based head translation estimation encounters severe generalization issues with out-of-distribution data. Unlike head rotation, the range of head translation is infinite, necessitating a generalization strategy to address it. + +However, existing works [1, 18, 25, 56] do not address the aforementioned issues. Fig. 1 provides an overview of the 6DoF head pose estimation methods used by existing models. In [18, 25, 56], face geometry is first inferred from an image, followed by the calculation of the 6DoF head pose using an optimization-based method. In other words, these methods [18,25,56] do not model the transfer of information from head pose to face geometry. This unidirectional information transfer method may face difficulties in predicting the actual size face geometry due to the absence of depth information. Consequently, the resulting face prior could create a vicious cycle, further reducing the accuracy of head translation prediction. + +Landmark-free approach [1] estimates head translation directly from an image using a learning-based method; however, it does not utilize face geometry information during the inference process. Directly estimating head depth from an image is highly non-linear, making the landmark-free approach challenging for estimating head translation. + +To overcome the limitations of existing models [1,18,25,56], we propose a head Translation, Rotation, and face Geometry network (TRG), which is a landmark-based method for estimating a 6DoF head pose. The TRG is designed with an explicit bidirectional interaction structure that leverages the complementary characteristics between the 6DoF head pose and face geometry. Specifically, we + +propose a method that simultaneously estimates the head pose and dense 3D landmarks, using each other's information to iteratively improve one another. + +To achieve generalizable head translation estimation, TRG does not directly estimate depth, but utilizes the position and size information of the bounding box. The center coordinates of the bounding box are typically well-aligned with the coordinates of the head center, and the size of the bounding box inversely reflects the head's depth. 
These relationships make the bounding box a useful tool for estimating head translation in 3D space. However, reliance on the bounding box alone is insufficient. This is due to potential misalignments between the bounding box center and the head center, and the bounding box size being influenced by factors beyond depth, such as face size and head rotation. To address these discrepancies, we propose to estimate bounding box correction parameters and calculate head translation using these parameters and bounding box information. The proposed method has been found to achieve high accuracy and to be robust even for out-of-distribution data. + +Additionally, TRG aligns the estimated 3D landmarks with the image through perspective projection. By iterating this process, TRG not only enhances the performance of head translation estimation but also improves head rotation accuracy. This landmark-to-image alignment framework is inspired by the architecture of PyMAF [49, 50], which is a model used to reconstruct a human mesh. However, PyMAF is not designed to estimate the camera-to-human distance and fundamentally differs from TRG as it does not leverage the synergy between real-scale human geometry and depth. + +Furthermore, we discovered that TRG can accurately predict 3D face landmarks from a single image, even when strongly affected by perspective distortions, such as in selfies. This accuracy is attributed to the TRG's depth-aware landmark prediction architecture, which actively utilizes head translation information during the landmark prediction process. This finding further supports our main idea that head translation estimation should be conducted simultaneously with facial geometry estimation. + +The main contributions of this study can be summarized as follows: + +- We propose TRG for 6DoF head pose estimation. To the best of our knowledge, this is the first study to introduce an explicit bidirectional interaction structure between head translation and face geometry. Through this innovative structure, TRG simultaneously mitigates ambiguity concerning head depth and face size. +- The proposed strategy for estimating correction parameters for the bounding box demonstrates stable generalization performance on out-of-distribution data in terms of head translation. +- The landmark-to-image alignment strategy demonstrates high accuracy not only in terms of head translation but also regarding head rotation. +- TRG's depth-aware landmark prediction architecture exhibits high landmark prediction accuracy, even in images heavily influenced by perspective transformation, such as selfies. +- Extensive experimental results on the benchmark datasets ARKitFace [25] and BIWI [15] show that TRG outperforms current SotA methods. + +# 2 Related Works + +# 2.1 Landmark-free Approach + +The landmark-free approach [1,3,4,14,21,31,47] aims to estimate head pose directly from input image without relying on landmarks. However, most landmark-free approaches [3,4,14,21,31,47] only estimate head rotation and do not consider head translation. + +Among them, img2pose [1] not only estimates head rotation but also head translation. It calculates head translation from a proposal and employs a local-to-global transformation strategy to convert the estimated local pose into a global image space. Infrinsics are utilized during the conversion of the local head pose into the global head pose. However, img2pose does not use intrinsics when calculating head translation from a proposal, leading to inaccurate local head poses. 
This is because utilizing intrinsics is essential when calculating depth from an image, even when dealing with a cropped image. Furthermore, [1] does not utilize face geometry information during inference, which can exacerbate depth ambiguity. + +In contrast to landmark-free approaches, our proposed method explicitly utilizes facial geometry information. Specifically, TRG simultaneously mitigates ambiguity regarding face size and head translation through a bidirectional interaction structure. Additionally, TRG does not directly calculate head translation from cropped images but infers bounding box correction parameters instead. It then computes head translation using the inferred correction parameters and intrinsics. The proposed bounding box correction parameter strategy enables stable and accurate inference of head translation. + +# 2.2 Landmark-based Approach + +Numerous landmark-based approaches have been proposed [18,25,27,37,43-45, 56] for estimating a 6DoF head pose or 3D head rotation. [27,37,43] proposed methods that simultaneously estimate 2D face landmarks and 3D head rotation by leveraging the synergy between them using learning-based approaches. However, these studies have not explored the synergy between 3D face geometry and head translation. + +SynergyNet [44] demonstrated that the parameters for shape and expression [35] can improve 3D sparse landmarks, and these enhanced landmarks can, in turn, improve the 3DMM parameters and head rotation during training. However, during the test time, it utilized a unidirectional information transfer architecture, which does not refine the 3DMM parameters and head rotation from the improved landmarks. Furthermore, SynergyNet is a model based on weak-perspective projections, similar to those in [5, 10, 16, 19, 28, 55]. Such models fundamentally do not compute the actual distance between the camera and the face. + +MICA [56], JMLR [18], and PerspNet [25] employ unidirectional information transfer methods that first estimate face geometry and then calculate head pose. + +![](images/ac978229c07763e29da1561556884b0c36888ac27e0c7790a31a51a8f6d729e5.jpg) +Fig. 2: Overall pipeline of the proposed method. + +However, these methods are limited in their ability to reconstruct real-scale face geometry due to depth ambiguity. Furthermore, calculating the 6DoF head pose based on these inaccurate geometry priors makes it difficult to achieve high accuracy. + +To address the aforementioned issues, we propose, for the first time, an explicit bidirectional interaction structure between the 6DoF head pose and face geometry. Additionally, unlike other landmark-based approaches, the proposed structure actively utilizes head depth information during the landmark estimation process. This approach demonstrates accurate geometry estimation even for images with strong perspective distortions, such as selfies. + +# 3 Proposed Method + +# 3.1 Overview of the Proposed Method + +TRG is designed to iteratively regress head translation $\{T_t \in \mathbb{R}^3\}_{t=1}^3$ and rotation $\{R_t \in \mathbb{R}^6\}_{t=1}^3$ from a single image $I \in \mathbb{R}^{3 \times 192 \times 192}$ , while also providing the auxiliary output of dense 3D landmarks $\{V_t \in \mathbb{R}^{3 \times N^V}\}_{t=1}^3$ . + +Fig. 
2 illustrates the comprehensive structure of TRG, which comprises a feature extractor that generates multi-scale feature maps $\{\phi_t\in \mathbb{R}^{256\times H_t\times W_t}\}_{t = 1}^3$ from $I$ , a feature sampler that extracts a landmark-aligned feature vector $\phi_t^p\in \mathbb{R}^{5N_{t - 1}^P}$ from the feature map $\phi_t$ , and a face regressor that regresses head translation $T_{t}$ , rotation $R_{t}$ , and dense landmarks $V_{t}$ from $\phi_t^p$ . $N_{t - 1}^P$ and $N^V$ denote the number of sampling points $P_{t - 1}\in \mathbb{R}^{2\times N_{t - 1}^P}$ and the number of 3D dense landmarks $V_{t}$ , respectively. Each of these components—feature extractor, feature sampler, and face regressor—is described in detail in Sections 3.2, 3.3, and 3.4, respectively. Additionally, the loss functions employed in the training are discussed in Section 3.5. + +# 3.2 Feature Extractor + +The feature extractor computes multi-scale feature maps $\{\phi_t\}_{t=1}^3$ and 2D sparse landmarks $L \in \mathbb{R}^{2 \times N^L}$ from a single image $I$ . $N^L$ denote the number of sparse landmarks. The feature extractor comprises ResNet18 [20], three deconvolution layers, a $1 \times 1$ convolution layer, and a soft-argmax operation [42]. ResNet18 is initialized with pre-trained weights on ImageNet [11] and is used after removing the final classification layer and the pooling layer. The $\phi_t$ is computed from the $t$ -th deconvolution layer and fed into the feature sampler. Additionally, the last feature map, $\phi_3$ undergoes a transformation into 2D heatmaps through the $1 \times 1$ convolution layer. The soft-argmax operation computes $L$ from the resultant heatmaps. These computed landmarks, along with the ground-truth landmarks $L^* \in \mathbb{R}^{2 \times N^L}$ , are incorporated into the loss function. + +# 3.3 Feature Sampler + +The feature sampler computes the landmark-aligned feature vector $\phi_t^p \in \mathbb{R}^{5N_{t-1}^P}$ from the feature map $\phi_t$ and the corresponding sampling points $P_{t-1} \in \mathbb{R}^{2 \times N_{t-1}^P}$ . Sampling points $P_{t-1}$ are used to extract point-wise features from the feature map $\phi_t$ . Here, $P_0$ is set to 2D grid coordinates. For $t > 0$ , $P_t$ is computed using the $t$ -th face regressor. The methodology for deriving these sampling points from the face regressor is described in Section 3.4. + +The point-wise feature vector $\phi_t(p_{t-1,n}) \in \mathbb{R}^{256}$ is obtained using bilinear sampling at the location specified by the point $p_{t-1,n} \in \mathbb{R}^2$ on $\phi_t$ . Here, $p_{t-1,n}$ denotes the $n$ -th column vector of the sampling points $P_{t-1}$ . The $N_{t-1}^P$ point-wise features, denoted as $\{\phi_t(p_{t-1,n})\}_{n=1}^{N_{t-1}^P}$ , are then transformed into 5D vectors using a dimension reduction layer $\mathcal{F}(\cdot)$ . These vectors are subsequently concatenated to form the landmark-aligned feature vector $\phi_t^p$ : + +$$ +\phi_ {t} ^ {p} = \bigoplus \left(\left\{\mathcal {F} \left(\phi_ {t} \left(p _ {t - 1, n}\right)\right) \right\} _ {n = 1} ^ {N _ {t - 1} ^ {P}}\right), \tag {1} +$$ + +where $\bigoplus (\cdot)$ denotes concatenation. The dimension reduction layer, $\mathcal{F}(\cdot)$ , is structured as a multilayer perceptron (MLP), which comprises three fully connected layers and two Leaky ReLU activations [32,50]. The obtained landmark-aligned feature vector $\phi_t^p$ is then fed into the face regressor. 
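To make the sampling step of Eq. (1) concrete, the following PyTorch-style sketch bilinearly samples point-wise features at the sampling points, reduces each to a 5-D vector with an MLP, and concatenates the result. It is a minimal illustration, not the released implementation; the class name, hidden width, and LeakyReLU slope are our own assumptions.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class FeatureSampler(nn.Module):
    """Sketch of the landmark-aligned feature sampling in Eq. (1)."""

    def __init__(self, in_dim: int = 256, out_dim: int = 5, hidden: int = 64):
        super().__init__()
        # Dimension reduction MLP F(.): three FC layers with two LeakyReLU activations.
        self.reduce = nn.Sequential(
            nn.Linear(in_dim, hidden), nn.LeakyReLU(0.1),
            nn.Linear(hidden, hidden), nn.LeakyReLU(0.1),
            nn.Linear(hidden, out_dim),
        )

    def forward(self, feat_map: torch.Tensor, points: torch.Tensor) -> torch.Tensor:
        # feat_map: (B, 256, H, W) feature map phi_t
        # points:   (B, N, 2) sampling points P_{t-1}, normalized to [-1, 1]
        grid = points.unsqueeze(2)                        # (B, N, 1, 2)
        sampled = F.grid_sample(feat_map, grid, mode="bilinear",
                                align_corners=False)      # (B, 256, N, 1)
        sampled = sampled.squeeze(-1).transpose(1, 2)     # (B, N, 256) point-wise features
        return self.reduce(sampled).flatten(1)            # (B, 5N), i.e. phi_t^p
```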
+ +# 3.4 Face Regressor + +The face regressor comprises an MLP $\mathcal{R}_t(\cdot)$ to calculate the head rotation, bounding box correction parameters, and dense landmarks $\Theta_t = \{R_t \in \mathbb{R}^6, c_t \in \mathbb{R}^3, V_t \in \mathbb{R}^{3 \times N^V}\}$ , a function that computes the head translation $T_t = \{T_t^x, T_t^y, T_t^z\} \in \mathbb{R}^3$ based on the bounding box information $I_{\mathrm{bbox}} = \{\frac{\tau^{x,\mathrm{bbox}}}{f}, \frac{\tau^{y,\mathrm{bbox}}}{f}, \frac{b}{f}\} \in \mathbb{R}^3$ and the correction parameter $c_t = \{s_t, \tilde{\tau}_t^{x,\mathrm{face}}, \tilde{\tau}_t^{y,\mathrm{face}}\}$ , and a perspective projection function that calculates the image coordinates of the dense landmarks + +$V_{t}^{img} \in \mathbb{R}^{2 \times N^{V}}$ and the sampling points $P_{t}$ . $V_{t}$ and $R_{t}$ denote the 3D coordinates of the dense landmarks defined in the head space and the head rotation expressed in a 6D representation [52], respectively. $T_{t}^{x}, T_{t}^{y}$ , and $T_{t}^{z}$ represent the head translations along the $x$ -, $y$ -, and $z$ -axes in the camera space, respectively. $\tau^{x,\mathrm{bbox}}$ , $\tau^{y,\mathrm{bbox}}$ , $b$ , and $f$ denote the $x$ - and $y$ -coordinates of the bounding box center relative to the center of the uncropped image, the size of the bounding box, and the focal length, respectively. $s_{t}$ , $\tilde{\tau}_{t}^{x,\mathrm{face}}$ , and $\tilde{\tau}_{t}^{y,\mathrm{face}}$ respectively denote the bounding box scale factor and the normalized offset of the head center relative to the bounding box center in the $x$ - and $y$ -directions. + +The MLP $\mathcal{R}_t(\cdot)$ estimates the residual for calculating $\Theta_t$ from the landmark-aligned feature $\phi_t^p$ , the previously iterated output $\Theta_{t - 1}^{sub} = \{R_{t - 1},c_{t - 1},V_{t - 1}^{sub}\in \mathbb{R}^{3\times 305}\}$ , and the bounding box information $I_{\mathrm{bbox}}$ [29,49,50]. $\Theta_t$ is computed by adding the residual estimated by $\mathcal{R}_t(\cdot)$ to $\Theta_{t - 1}$ . $V_{t - 1}^{sub}$ represents the landmarks obtained by subsampling $V_{t - 1}$ [36]. The use of $V_{t - 1}^{sub}$ for $\mathcal{R}_t(\cdot)$ instead of $V_{t - 1}$ reduces the redundancy of the dense landmarks, which improves the performance of the proposed model [6-8,30,50]. + +We model a real human face as being enclosed within a box $B$ of size $0.2m \times 0.2m$ , with $m$ denoting meters. The size of this box, when projected into the image space, is represented by $b$ . However, since the assumption about the face size is typically imprecise, $\mathcal{R}_t(\cdot)$ estimates a scale factor $s_t$ to adjust the size of $B$ . Furthermore, $\mathcal{R}_t(\cdot)$ is responsible for determining the normalized offsets of the head center $\tilde{\tau}_t^{x,\mathrm{face}}$ , $\tilde{\tau}_t^{y,\mathrm{face}}$ . These offsets represent the values obtained by normalizing the image space translation from the bounding box center to the head center with $b$ . The calculation of $T_t$ from $c_t$ and $I_{\mathrm{bbox}}$ is expressed as: + +$$ +\begin{array}{l} T _ {t} ^ {x} = \frac {0 . 2 s _ {t}}{b} \tau^ {x, \mathrm {b b o x}} + 0. 2 s _ {t} \tilde {\tau} _ {t} ^ {x, \mathrm {f a c e}}, \\ T _ {t} ^ {y} = \frac {0 . 2 s _ {t}}{b} \tau^ {y, \mathrm {b b o x}} + 0. 2 s _ {t} \tilde {\tau} _ {t} ^ {y, \text {f a c e}}, \quad T _ {t} ^ {z} = \frac {0 . 2 s _ {t}}{b} f. \tag {2} \\ \end{array} +$$ + +The derivation of Eq. 2 can be found in the supplementary material. 
The image coordinates of the dense landmarks, $V_{t}^{img}$ , are computed by projecting $V_{t}$ , as follows: + +$$ +V _ {t} ^ {i m g} = \Pi \left(V _ {t}, R _ {t}, T _ {t}, K\right), \tag {3} +$$ + +where $\varPi(\cdot)$ and $K\in \mathbb{R}^{3\times 3}$ denote the perspective projection and the intrinsic camera parameters, respectively. The sampling points $P_{t}$ are obtained by subsampling $V_{t}^{img}$ . + +# 3.5 Loss Functions + +We detail the loss functions employed to train TRG, ensuring accurate predictions of face geometry and head pose. The training process utilizes several loss functions for dense landmarks: head space coordinate loss $\mathcal{L}_{\mathrm{head}}$ , camera space coordinate loss $\mathcal{L}_{\mathrm{cam}}$ , and image space coordinate loss $\mathcal{L}_{\mathrm{img}}$ . For a precise estimation of head rotation, a head rotation loss $\mathcal{L}_{\mathrm{rot}}$ is also adopted. As iteration + +progresses, the loss functions are doubled as follows: + +$$ +\mathcal {L} _ {\mathrm {h e a d}} = \sum_ {t = 1} ^ {3} 2 ^ {t - 3} \left(\frac {1}{N ^ {V}} \sum_ {n = 1} ^ {N ^ {V}} \| V _ {t, n} - V _ {n} ^ {*} \| _ {1}\right), +$$ + +$$ +\mathcal {L} _ {\text {c a m}} = \sum_ {t = 1} ^ {3} 2 ^ {t - 3} \left(\frac {1}{N ^ {V}} \sum_ {n = 1} ^ {N ^ {V}} \| V _ {t, n} ^ {\text {c a m}} - V _ {n} ^ {* , \text {c a m}} \| _ {1}\right), \tag {4} +$$ + +$$ +\mathcal {L} _ {\mathrm {i m g}} = \sum_ {t = 1} ^ {3} 2 ^ {t - 3} \left(\frac {1}{N ^ {V}} \sum_ {n = 1} ^ {N ^ {V}} \| V _ {t, n} ^ {i m g} - V _ {n} ^ {*, i m g} \| _ {1}\right), +$$ + +$$ +\mathcal {L} _ {\mathrm {r o t}} = \sum_ {t = 1} ^ {3} 2 ^ {t - 3} (\| R _ {t} ^ {m a t} - R ^ {*, m a t} \| _ {F}), +$$ + +where $*$ and $V_{t,n}$ represent the ground truth and the $n$ -th column vector of $V_{t}$ , respectively. $V_{t}^{cam} = R_{t}^{mat}V_{t} + T_{t} \in \mathbb{R}^{3 \times N^{V}}$ and $V_{t}^{img}$ represent the camera space coordinates and the image space coordinates of the $t$ -th dense landmarks, respectively. $R_{t}^{mat} \in \mathbb{R}^{3 \times 3}$ represents the 3D head rotation in matrix form, and $\| \cdot \|_{F}$ denotes the Frobenius norm. + +If connectivity between dense landmarks is defined in the dataset, we utilize this information to apply an edge length loss. We empirically found that applying the edge length loss $\mathcal{L}_{\mathrm{ed}}$ [18, 33] to $V_{3}$ , estimated by the final face regressor, improves the model's performance in estimating face geometry. The edge length loss $\mathcal{L}_{\mathrm{ed}}$ can be written as: + +$$ +\mathcal {L} _ {\mathrm {e d}} = \sum_ {M} \sum_ {\{n, m \} \subset M} | \| V _ {3, n} - V _ {3, m} \| _ {2} - \| V _ {n} ^ {*} - V _ {m} ^ {*} \| _ {2} |, \tag {5} +$$ + +where $M$ denotes a triangle. Additionally, to improve the quality of the feature map, we apply the sparse 2D landmark loss $\mathcal{L}_L$ to the landmarks $L$ obtained from $\phi_3$ as follows: + +$$ +\mathcal {L} _ {L} = \frac {1}{N ^ {L}} \sum_ {n = 1} ^ {N ^ {L}} \| L _ {n} - L _ {n} ^ {*} \| _ {1}. \tag {6} +$$ + +The final loss function to train TRG can be written as: + +$$ +\mathcal {L} = \lambda_ {\text {h e a d}} \mathcal {L} _ {\text {h e a d}} + \lambda_ {\text {c a m}} \mathcal {L} _ {\text {c a m}} + \lambda_ {\text {i m g}} \mathcal {L} _ {\text {i m g}} + \lambda_ {\text {r o t}} \mathcal {L} _ {\text {r o t}} + \lambda_ {\text {e d}} \mathcal {L} _ {\text {e d}} + \lambda_ {L} \mathcal {L} _ {L}, \tag {7} +$$ + +where $\lambda$ s represent the weights of the loss functions. 
$\lambda_{\mathrm{head}}$ , $\lambda_{\mathrm{cam}}$ , $\lambda_{\mathrm{img}}$ , $\lambda_{\mathrm{rot}}$ , $\lambda_{\mathrm{ed}}$ , and $\lambda_L$ are set to 20, 2, 0.01, 10, 2, and 1.25, respectively. + +# 4 Experimental Results + +# 4.1 Implementation Details + +The spatial dimensions $H_{t}, W_{t}$ of the feature map $\phi_t$ were set to $\frac{192}{2^{5 - t}}$ . The number of sampling points $N_{t}^{P}$ was set to $18 \times 18 = 324$ when $t = 0$ , and to 305 + +when $t > 0$ . $N^V$ and $N^L$ were set to 1220 and 68, respectively. For the ARKitFace training dataset [25], we selected a random sample and used its corresponding ground-truth dense 3D landmarks and head rotation as the initial landmarks $V_0$ and head rotation $R_0$ for the TRG. The initial correction parameter $c_0$ was set to $\{s_0 = 1, \tilde{\tau}_0^{x,\mathrm{face}} = 0, \tilde{\tau}_0^{y,\mathrm{face}} = 0\}$ . For the TRG training, both the ARKitFace training data [25] and 300W-LP [55] were utilized. Unless otherwise stated, the performances of models trained using both datasets are presented. When a fair comparison with the state-of-the-art methods is required, results from models trained solely on the ARKitFace training dataset are also provided. Please refer to the supplementary material for more implementation details. + +# 4.2 Datasets + +ARKitFace [25] is a dataset that provides the 6DoF head poses, the dense 3D landmarks, and intrinsic camera parameters. It is collected from selfie scenarios, with data gathered at a camera-to-face distance ranging from 0.3 to 0.9 meters, resulting in images significantly influenced by strong perspective transformations. Following previous work [25], we used 717,840 frames from 400 subjects for training, and 184,884 frames from 100 subjects for testing. + +300W-LP [55] is an extended synthetic dataset derived from the 300W [39], which itself is composed of several standardized datasets, including AFW [54], HELEN [51], IBUG [40], and LFPW [2]. Through face profiling, the 300W-LP dataset provides 122,450 synthesized images from approximately 4,000 original pictures. + +BIWI [15] provides 6DoF head poses, a 3D neutral face mesh for each subject, and intrinsic camera parameters. Since BIWI does not provide ground-truth face meshes for each frame, our evaluation focuses solely on the head poses. BIWI serves exclusively as test data to assess the effectiveness of our method. We evaluated the performance of our proposed model by following the protocol used in previous studies [25,46]. + +# 4.3 Evaluation Metrics + +For head rotation accuracy assessment, we follow the approach used in previous studies [1,23,25,46], measuring rotation errors separately for roll, pitch, and yaw. Additionally, to provide a comprehensive understanding of the head rotation estimation performance, we also present the mean absolute error $(\mathrm{MAE}_r)$ and geodesic error (GE) [9]. For evaluating the accuracy of head translation, we calculate the errors for translation along the $x-$ , $y-$ , and $z$ -axes, represented as $t_x$ , $t_y$ , and $t_z$ errors, respectively. Similar to head rotation, we present the mean absolute error performance for head translation, denoted as $\mathrm{MAE}_t$ . 
Following previous research [25], we utilize the average 3D distance (ADD) metric [22] to present a holistic evaluation of the method's performance in estimating both rotation and translation: + +$$ +\mathrm {A D D} = \frac {1}{N ^ {V}} \sum_ {n = 1} ^ {N ^ {V}} \| \left(R _ {3} ^ {\text {m a t}} V _ {n} ^ {*} + T _ {3}\right) - \left(R ^ {*}, \text {m a t} V _ {n} ^ {*} + T ^ {*}\right) \| _ {2}. \tag {8} +$$ + +Table 1: Ablation study of TRG on ARKitFace and BIWI. We explored the effects of the bidirectional interaction structure and utilizing the correction parameter. We also investigated the importance of utilizing face geometry in the 6DoF head pose estimation process and the effectiveness of the landmark-to-image alignment method. "MS" means multi-scale features. + +
| Method | ARKitFace Mean ↓ | ARKitFace MAEr ↓ | ARKitFace MAEt ↓ | ARKitFace ADD ↓ | BIWI MAEr ↓ | BIWI MAEt ↓ | BIWI ADD ↓ |
| --- | --- | --- | --- | --- | --- | --- | --- |
| 1-iter (w/o MS) | 1.69 | 1.00 | 3.70 | 8.93 | 3.28 | 13.74 | 32.28 |
| 2-iter (w/o MS) | 1.66 | 0.89 | 3.61 | 8.72 | 2.95 | 13.77 | 31.28 |
| 3-iter (w/o MS) | 1.57 | 0.88 | 3.63 | 8.71 | 2.59 | 13.67 | 31.52 |
| $T_t$-prediction | 1.66 | 0.92 | 4.64 | 11.66 | 8.81 | 1.7K | 5.1K |
| Landmark-free baseline | - | 1.03 | 3.86 | 9.34 | 3.87 | 18.42 | 42.22 |
| Grid sampled baseline | 1.59 | 0.95 | 3.74 | 8.99 | 2.98 | 14.58 | 35.04 |
| TRG (Ours) | 1.58 | 0.91 | 3.62 | 8.68 | 2.75 | 12.97 | 29.46 |
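As a concrete reference for how the ADD metric of Eq. (8) aggregates rotation and translation errors in the tables, a minimal NumPy sketch is given below; the array shapes and names are our own assumptions.

```python
import numpy as np

def add_metric(R_pred, T_pred, R_gt, T_gt, V_gt):
    """Sketch of the ADD metric in Eq. (8).

    R_pred, R_gt: (3, 3) rotation matrices
    T_pred, T_gt: (3,) head translations
    V_gt:         (3, N) ground-truth dense landmarks in head space
    Returns the mean Euclidean distance between the two rigid transforms
    applied to the ground-truth landmarks.
    """
    pred = R_pred @ V_gt + T_pred[:, None]   # predicted camera-space points
    gt = R_gt @ V_gt + T_gt[:, None]         # ground-truth camera-space points
    return np.linalg.norm(pred - gt, axis=0).mean()
```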
+ +To assess the 3D landmark prediction accuracy of our proposed method, we evaluate the median and average distances between the estimated and ground-truth dense landmarks [25]. The effectiveness of our method is evaluated based on the estimated values of $V_{3}$ , $R_{3}$ , and $T_{3}$ from the final face regressor at $t = 3$ . The unit for median, mean, translation error, and ADD is in millimeters, and the unit for rotation error is in degrees. + +# 4.4 Ablation Experiments + +Effectiveness of bidirectional interaction structure. In this experiment, we delve into the significance of explicit bidirectional interaction between the 6DoF head pose and face geometry. To investigate this, we observe the model's performance variations based on the number of interactions between these two types of information. For our experiments, we designed 1-iteration, 2-iteration, and 3-iteration baselines and then compared their performance. The 1-iteration baseline model simultaneously regresses the face geometry and head pose using $\mathcal{R}_1(\cdot)$ but without an iterative inference process. The 2- and 3-iteration baseline models enhance this process by incorporating the iterative inference approach. They project the predicted dense landmarks onto the image feature, with all other aspects remaining consistent with the 1-iteration baseline. Similar to the 1-iteration baseline, they utilize only $\phi_1$ and do not employ multi-scale features. The key distinction between the 3-iteration models and TRG lies in the utilization of multi-scale features. + +The evaluation on the ARKitFace test data, as presented in Table 1, indicates that the performance in estimating the face geometry and head pose improves with the increasing number of iterations. This improvement is attributed to the reduction in ambiguity between the face geometry and 6DoF head pose as the number of bidirectional interactions increases. The BIWI evaluation results further corroborate the effectiveness of the bidirectional interaction method. + +Use of correction parameter. In this experiment, we investigate the rationale behind estimating the correction parameter $c_{t}$ instead of directly estimating head + +![](images/66254c28f96a6499a9c6266840f9b6e98a0ffebd9d53ed0c634c01aac60a66cc.jpg) +Fig. 3: The distribution of ground-truth translation and correction parameters in ARKitFace and BIWI. The colors blue, green, and brown represent the distributions of the ARKitFace training data, ARKitFace test data, and BIWI dataset, respectively. The symbol * denotes ground-truth. + +![](images/e6323f87d6abbb622c81d4b20ac09fd9d86aee026003b5c9d1dbf099c8aad482.jpg) + +![](images/1f64105f660563317f21db533cf64207e32da044e977fd7f4140d6b07108d03b.jpg) + +![](images/453f97d3caa55cc24482cc928b1a496b441d257a96f90c31ff438dcaf90468eb.jpg) + +translation $T_{t}$ . To elucidate this, we compare the performance of two models: the $T_{t}$ -prediction baseline, which directly estimates head translation $T_{t}$ and TRG. According to Table 1, while the $T_{t}$ -prediction baseline demonstrates accurate estimation of head translation on the ARKitFace test data, its performance significantly declines on the BIWI dataset. We attribute this discrepancy to the differing translation distributions between the ARKitFace and BIWI datasets. + +The first and second columns of Fig. 3 illustrate the ground-truth head translation distributions for ARKitFace and BIWI. 
While the translation distribution in the ARKitFace training data closely matches its test data, it significantly differs from that of BIWI. This discrepancy is particularly noticeable in the $z$ -axis translations, indicating substantial divergence between the ARKitFace training data and BIWI. To achieve generalization from the ARKitFace training data to BIWI, a model must effectively extrapolate the $z$ -axis translation. However, as evidenced by Table 1, this extrapolation poses a significant challenge for the direct translation estimation model. + +The third and fourth columns of Fig. 3 visualize the distribution of the ground-truth correction parameters for both ARKitFace and BIWI datasets. A key observation here is that the variation in the correction parameter distribution is significantly smaller compared to the translation distribution. Based on these observations, we can conclude that shifting the estimation target from $T_{t}$ to $c_{t}$ effectively reduces distribution discrepancies. This strategic redefinition enhances the model's generalizability, particularly for data that fall outside the training distribution, as evidenced in Table 1. + +The importance of utilizing facial geometry and the effectiveness of landmark-to-image alignment technique. For the purpose of our experiment, we designed a landmark-free baseline that does not estimate facial geometry $\{V_t\}_{t=1}^3$ . Given the absence of facial geometry information, the landmark-free baseline is unable to utilize landmark-to-image alignment techniques. Consequently, it extracts grid sampled features from $\{\phi_t\}_{t=1}^3$ and inputs them into a face regressor. However, due to significant structural differences from TRG, we mitigate these differences by also designing a grid sampled baseline for incremental comparison. This grid sampled baseline is similar to the TRG, except + +![](images/e79d402db21b487599533f739557577ec0f8519bee357cce556ce22f0d9355c7.jpg) + +![](images/78ce7ea9e3785344dc870c8edb47fe95dc9bc0c852fba119ca612f211cf900a6.jpg) +Fig. 4: Qualitative comparison on the ARKitFace and BIWI datasets. The first and second rows show visualized results for ARKitFace and BIWI, respectively. The colors cyan, pink, gold, and gray represent JMLR, PerspNet, TRG, and ground truth, respectively. The red, green, and blue axes respectively represent the X, Y, and Z axes of the camera coordinate system. + +![](images/6ce119f5711db7a66b8b31dbbb8bc31bd39cb6202aaa6115cd73dcb0ad484f29.jpg) + +![](images/6a05fca9497ca74bfd4b8f6a85b2ac2d4c0ab130b2adc41152d986eb5459a871.jpg) + +![](images/c2ef4dcc973780196eac307ca6b0499787ab0edc17b95715d15ad2bd02309ac3.jpg) + +![](images/e6fcdc5b7e0156603dde8ba5e1339e4f8821166c928365ebb8161bd87f47b58b.jpg) + +![](images/03c7031213aff0cb3e4db5f04098e945fcb6f13c77bafe0fd3df20fc4817bf5d.jpg) + +![](images/457fe3e7ca3c6af4b5a678cd486d61ca8a69f028ad5647c7912cc64af05ff18e.jpg) + +it does not employ the landmark-to-image alignment method, indicating that the primary distinction from the landmark-free baseline lies in whether facial geometry is estimated. + +According to our findings, as presented in the Table 1, the landmark-free baseline underperforms compared to the grid sampled baseline. This supports our hypothesis that landmark information should be integrated during the 6DoF head pose estimation process. Furthermore, our results demonstrate that TRG outperforms the grid sampled baseline, affirming the superiority of our landmark-to-image alignment strategy. 
+ +Table 2: Comparison with previous methods for 6DoF head pose estimation on ARKit-Face test dataset. Models trained with multiple datasets are marked with the symbol $\star$ , and retrained models are indicated by the symbol $\dagger$ . + +
| Method | MAEr | GE | MAEt | ADD |
| --- | --- | --- | --- | --- |
| img2pose [1,25] | 5.55 | - | 7.02 | 20.54 |
| Direct 6DoF Regress [25] | 1.87 | - | 9.06 | 21.39 |
| Refined Pix2Pose [25,34] | 2.35 | - | 14.00 | 36.44 |
| JMLR [18] † | 1.16 | 2.39 | 4.86 | 11.87 |
| PerspNet [25] | 0.99 | 1.81 | 4.18 | 10.01 |
| TRG (Ours) | 0.92 | 1.80 | 3.64 | 8.74 |
| TRG (Ours) ★ | 0.91 | 1.84 | 3.62 | 8.68 |
+ +Table 3: Comparison with previous methods for dense 3D landmark estimation on ARKitFace test dataset. + +
| Method | Median | Mean |
| --- | --- | --- |
| PRNet [17] | 1.97 | 2.05 |
| 3DDFA-v2 [19] | 2.35 | 2.31 |
| Deng et al. [13] | 2.46 | 2.55 |
| JMLR [18] † ★ | 1.86 | 1.94 |
| PerspNet [25] | 1.72 | 1.76 |
| TRG (Ours) | 1.55 | 1.61 |
| TRG (Ours) ★ | 1.55 | 1.58 |
+ +# 4.5 Comparison with State-of-the-Art Methods + +In this experiment, we conducted a benchmark of our proposed method against existing approaches for 6DoF head pose estimation. The evaluation results on the ARKitFace and BIWI datasets are presented in Tables 2, 3 and 4. Model retrained for this comparison is marked with the symbol $\dagger$ . Multiple datasets + +were used for the model, which could be trained on multiple datasets. However, PerspNet was trained exclusively using the ARKitFace train dataset due to the difficulty of using two datasets [25,55] with differing 3D face mesh topologies. To ensure a fair comparison, we also present the results of TRG trained solely on the ARKitFace train dataset. Models trained on multiple datasets are denoted with the symbol $\star$ . + +Evaluation on ARKitFace [25]. Img2pose directly infers the 6DoF head pose from images without utilizing face geometry information. However, the absence of face geometry information can lead to increased face size ambiguity, potentially worsening the performance of head pose inference, as can be seen in Table 2. + +JMLR and PerspNet do not incorporate head pose information during the face geometry inference process. The predicted face geometry, derived without considering head pose information, is relatively inaccurate (Table 3). Consequently, methods that predict the 6DoF head pose based on this relatively imprecise geometry yield inaccurate results (Table 2). In contrast, TRG actively integrates face geometry information into the head pose estimation process. According to Table 2, TRG achieves state-of-the-art in head pose estimation, attributed to its explicit bidirectional interaction structure. Furthermore, owing to its depth-aware landmark prediction architecture, TRG maintains stable face landmark prediction accuracy even in selfie scenarios, as shown in Table 3. Fig. 4 visually illustrates the performance of TRG and existing models [18,25] for head pose estimation and face landmark prediction. When the geometries predicted by each model are aligned with the image, they appear to be well-aligned. However, a stark contrast in model performance becomes evident when comparing the ground-truth geometry with the predicted geometries in the 3D camera space. JMLR and PerspNet struggle to accurately predict the actual size of a human face, resulting in high translation errors. + +Evaluation on BIWI [15]. According to Table 4, TRG significantly outperforms existing optimization-based methods [18,25,56] in head translation estimation. This superior performance is attributed to TRG's design, which effectively leverages the synergy between face geometry and head translation. Furthermore, TRG's landmark-to-image alignment method enables it to achieve high head rotation estimation accuracy, surpassing even methods that solely estimate 3D head rotation. Fig. 4 qualitatively demonstrates TRG's exceptional head pose estimation performance. To visualize how closely the predicted head pose matches the ground-truth pose, we utilized the ground-truth neutral mesh and the predicted head pose. + +# 4.6 Limitations + +In the process of deriving depth from images using the proposed method, the requirement for camera intrinsics emerges as a necessary component. This necessity indicates that, in the absence of camera intrinsics, while it is still possible to estimate relative depth among faces in an image, achieving precise depth measurement poses a challenge. 
To address this challenge and ensure accurate depth determination between the face and the camera, incorporating algorithms that + +Table 4: Comparison with previous methods for 6DoF head pose estimation on BIWI dataset. The models were evaluated using BIWI solely for testing purposes, without utilizing it as training data. We used the camera intrinsics provided by BIWI for the evaluation of the head pose estimation performance of MICA [56]. + +
| Method | Yaw | Pitch | Roll | MAEr | GE | tx | ty | tz | MAEt | ADD |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Dlib [26] | 11.86 | 13.00 | 19.56 | 14.81 | - | - | - | - | - | - |
| 3DDFA [55] | 5.50 | 41.90 | 13.22 | 19.07 | - | - | - | - | - | - |
| EVA-GCN [45] | 4.01 | 4.78 | 2.98 | 3.92 | - | - | - | - | - | - |
| HopeNet [38] | 4.81 | 6.61 | 3.27 | 4.89 | 9.53 | - | - | - | - | - |
| QuatNet [23] | 4.01 | 5.49 | 2.94 | 4.15 | - | - | - | - | - | - |
| Liu et al. [31] | 4.12 | 5.61 | 3.15 | 4.29 | - | - | - | - | - | - |
| FSA-Net [46] | 4.27 | 4.96 | 2.76 | 4.00 | 7.64 | - | - | - | - | - |
| HPE [24] | 4.57 | 5.18 | 3.12 | 4.29 | - | - | - | - | - | - |
| WHENet-V [53] | 3.60 | 4.10 | 2.73 | 3.48 | - | - | - | - | - | - |
| RetinaFace [12] ★ | 4.07 | 6.42 | 2.97 | 4.49 | - | - | - | - | - | - |
| FDN [48] | 4.52 | 4.70 | 2.56 | 3.93 | - | - | - | - | - | - |
| MNN [43] | 3.98 | 4.61 | 2.39 | 3.66 | - | - | - | - | - | - |
| TriNet [3] | 3.05 | 4.76 | 4.11 | 3.97 | - | - | - | - | - | - |
| 6DRepNet [21] | 3.24 | 4.48 | 2.68 | 3.47 | - | - | - | - | - | - |
| Cao et al. [4] | 4.21 | 3.52 | 3.10 | 3.61 | - | - | - | - | - | - |
| TokenHPE [47] | 3.95 | 4.51 | 2.71 | 3.72 | - | - | - | - | - | - |
| Cobo et al. [9] | 4.58 | 4.65 | 2.71 | 3.98 | 7.30 | - | - | - | - | - |
| img2pose [1] ★ | 4.57 | 3.55 | 3.24 | 3.79 | 7.10 | - | - | - | - | - |
| Direct 6DoF Regress [25] | 16.49 | 14.03 | 5.81 | 12.11 | - | 62.36 | 85.01 | 366.52 | 171.30 | 562.38 |
| Refined Pix2Pose [25,34] | 5.75 | 5.06 | 11.23 | 7.35 | - | 16.82 | 21.30 | 255.36 | 97.83 | 356.32 |
| MICA [56] ★ | 5.40 | 7.17 | 3.80 | 5.46 | - | 9.32 | 13.66 | 60.13 | 27.70 | 68.03 |
| JMLR [18] † ★ | 6.31 | 6.17 | 3.72 | 5.40 | 8.61 | 8.66 | 7.27 | 32.63 | 16.19 | 39.71 |
| PerspNet [25] | 3.10 | 3.37 | 2.38 | 2.95 | 5.61 | 4.15 | 6.43 | 46.69 | 19.09 | 100.09 |
| TRG (Ours) | 3.28 | 3.52 | 1.87 | 2.89 | 5.68 | 8.41 | 7.38 | 27.13 | 14.31 | 32.10 |
| TRG (Ours) ★ | 3.04 | 3.44 | 1.78 | 2.75 | 5.35 | 7.83 | 6.99 | 24.07 | 12.97 | 29.46 |
+ +estimate intrinsics becomes essential. This aspect of requiring camera intrinsics for depth calculations highlights an area for further exploration and adaptation in our method, especially when intrinsic parameters are not readily available. + +# 5 Conclusion + +This study proposed a novel approach by introducing the TRG to predict a 6DoF head pose from a single image. Through extensive experimentation, we demonstrated the effectiveness of the explicit bidirectional interaction between the 6DoF head pose and the dense 3D face landmarks, a core feature of the TRG architecture. We further established that our method of estimating the correction parameters significantly enhances the generalizability of the model in cross-dataset evaluations. Evaluation on the ARKitFace and BIWI datasets showed TRG's superior performance in head pose estimation compared to existing state-of-the-art methods. Our extensive experiments have also highlighted the strength of TRG's depth-aware landmark prediction structure, particularly in images heavily influenced by perspective transformation, facilitating accurate estimation of face geometry. Based on these findings, our future work will focus on accurately reconstructing detailed facial geometries from close-up facial photos, such as selfies, further pushing the boundaries of facial analysis technology. + +# Acknowledgement + +This work was partly supported by Institute of Information & Communications Technology Planning & Evaluation (IITP) grant funded by the Korea government (MSIT) (No. RS-2023-00219700, Development of FACS-compatible Facial Expression Style Transfer Technology for Digital Human, $90\%$ ) and National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIT) (No. NRF-2022R1F1A1066170, Physically valid 3D human motion reconstruction from multi-view videos, $10\%$ ). + +# References + +1. Albiero, V., Chen, X., Yin, X., Pang, G., Hassner, T.: img2pose: Face alignment and detection via 6dof, face pose estimation. In: CVPR (2021) +2. Belhumeur, P.N., Jacobs, D.W., Kriegman, D.J., Kumar, N.: Localizing parts of faces using a consensus of exemplars. IEEE TPAMI 35(12), 2930-2940 (2013) +3. Cao, Z., Chu, Z., Liu, D., Chen, Y.: A vector-based representation to enhance head pose estimation. In: WACV (2021) +4. Cao, Z., Liu, D., Wang, Q., Chen, Y.: Towards unbiased label distribution learning for facial pose estimation using anisotropic spherical gaussian. In: ECCV (2022) +5. Chai, Z., Zhang, T., He, T., Tan, X., Baltrusaitis, T., Wu, H., Li, R., Zhao, S., Yuan, C., Bian, J.: Hiface: High-fidelity 3d face reconstruction by learning static and dynamic details. In: ICCV (2023) +6. Cho, J., Youwang, K., Oh, T.H.: Cross-attention of disentangled modalities for 3d human mesh recovery with transformers. In: ECCV (2022) +7. Chun, S., Park, S., Chang, J.Y.: Learnable human mesh triangulation for 3d human pose and shape estimation. In: WACV (2023) +8. Chun, S., Park, S., Chang, J.Y.: Representation learning of vertex heatmaps for 3d human mesh reconstruction from multi-view images. In: ICIP (2023) +9. Cobo, A., Valle, R., Buenaposada, J.M., Baumela, L.: On the representation and methodology for wide and short range head pose estimation. PR 149, 110263 (2024) +10. Danecek, R., Black, M.J., Bolkart, T.: EMOCA: Emotion driven monocular face capture and animation. In: CVPR (2022) +1. Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: A large-scale hierarchical image database. In: CVPR (2009) +2. 
Deng, J., Guo, J., Ververas, E., Kotsia, I., Zafeiriou, S.: Retinaface: Single-shot multi-level face localisation in the wild. In: CVPR (2020) +3. Deng, Y., Yang, J., Xu, S., Chen, D., Jia, Y., Tong, X.: Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In: CVPRW (2019) +4. Dhingra, N.: Lwposr: Lightweight efficient fine grained head pose estimation. In: WACV (2022) +5. Fanelli, G., Dantone, M., Gall, J., Fossati, A., Van Gool, L.: Random forests for real time 3d face analysis. IJCV 101, 437-458 (2013) +6. Feng, Y., Feng, H., Black, M.J., Bolkart, T.: Learning an animatable detailed 3d face model from in-the-wild images. ACM TOG 40(4), 1-13 (2021) +7. Feng, Y., Wu, F., Shao, X., Wang, Y., Zhou, X.: Joint 3d face reconstruction and dense alignment with position map regression network. In: ECCV (2018) + +18. Guo, J., Yu, J., Lattas, A., Deng, J.: Perspective reconstruction of human faces by joint mesh and landmark regression. In: ECCVW (2022) +19. Guo, J., Zhu, X., Yang, Y., Yang, F., Lei, Z., Li, S.Z.: Towards fast, accurate and stable 3d dense face alignment. In: ECCV (2020) +20. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016) +21. Hempel, T., Abdelrahman, A.A., Al-Hamadi, A.: 6d rotation representation for unconstrained head pose estimation. In: ICIP (2022) +22. Hinterstoisser, S., Lepetit, V., Ilic, S., Holzer, S., Bradski, G., Konolige, K., Navab, N.: Model based training, detection and pose estimation of texture-less 3d objects in heavily cluttered scenes. In: ACCV (2013) +23. Hsu, H.W., Wu, T.Y., Wan, S., Wong, W.H., Lee, C.Y.: Quantnet: Quaternion-based head pose estimation with multiregression loss. IEEE TMM 21(4), 1035-1046 (2018) +24. Huang, B., Chen, R., Xu, W., Zhou, Q.: Improving head pose estimation using two-stage ensembles with top-k regression. IVC 93, 103827 (2020) +25. Kao, Y., Pan, B., Xu, M., Lyu, J., Zhu, X., Chang, Y., Li, X., Lei, Z.: Toward 3d face reconstruction in perspective projection: Estimating 6 dof face pose from monocular image. IEEE TIP 32, 3080-3091 (2023) +26. Kazemi, V., Sullivan, J.: One millisecond face alignment with an ensemble of regression trees. In: CVPR (2014) +27. Kumar, A., Alavi, A., Chellappa, R.: Kepler: Keypoint and pose estimation of unconstrained faces by learning efficient h-cnn regressors. In: FG (2017) +28. Li, H., Wang, B., Cheng, Y., Kankanhalli, M., Tan, R.T.: Dsfnet: Dual space fusion network for occlusion-robust 3d dense face alignment. In: CVPR (2023) +29. Li, Z., Liu, J., Zhang, Z., Xu, S., Yan, Y.: Cliff: Carrying location information in full frames into human pose and shape estimation. In: ECCV (2022) +30. Lin, K., Wang, L., Liu, Z.: End-to-end human pose and mesh reconstruction with transformers. In: CVPR (2021) +31. Liu, Z., Chen, Z., Bai, J., Li, S., Lian, S.: Facial pose estimation by deep learning from label distributions. In: ICCVW (2019) +32. Maas, A.L., Hannun, A.Y., Ng, A.Y., et al.: Rectifier nonlinearities improve neural network acoustic models. In: ICML (2013) +33. Moon, G., Lee, K.M.: I2l-meshnet: Image-to-lixel prediction network for accurate 3d human pose and mesh estimation from a single rgb image. In: ECCV (2020) +34. Park, K., Patten, T., Vincze, M.: Pix2pose: Pixel-wise coordinate regression of objects for 6d pose estimation. In: ICCV (2019) +35. Paysan, P., Knothe, R., Amberg, B., Romdhani, S., Vetter, T.: A 3d face model for pose and illumination invariant face recognition. In: AVSS (2009) +36. 
Ranjan, A., Bolkart, T., Sanyal, S., Black, M.J.: Generating 3d faces using convolutional mesh autoencoders. In: ECCV (2018) +37. Ranjan, R., Patel, V.M., Chellappa, R.: Hyperface: A deep multi-task learning framework for face detection, landmark localization, pose estimation, and gender recognition. IEEE TPAMI 41(1), 121-135 (2019) +38. Ruiz, N., Chong, E., Rehg, J.M.: Fine-grained head pose estimation without keypoints. In: CVPRW (2018) +39. Sagonas, C., Tzimiropoulos, G., Zafeiriou, S., Pantic, M.: 300 faces in-the-wild challenge: The first facial landmark localization challenge. In: ICCVW (2013) +40. Sagonas, C., Tzimiropoulos, G., Zafeiriou, S., Pantic, M.: 300 faces in-the-wild challenge: The first facial landmark localization challenge. In: ICCVW (2013) + +41. Shao, M., Sun, Z., Ozay, M., Okatani, T.: Improving head pose estimation with a combined loss and bounding box margin adjustment. In: FG (2019) +42. Sun, X., Xiao, B., Wei, F., Liang, S., Wei, Y.: Integral human pose regression. In: ECCV (2018) +43. Valle, R., Buenaposada, J.M., Baumela, L.: Multi-task head pose estimation inthe-wild. IEEE TPAMI 43(8), 2874-2881 (2020) +44. Wu, C.Y., Xu, Q., Neumann, U.: Synergy between 3dmm and 3d landmarks for accurate 3d facial geometry. In: 3DV (2021) +45. Xin, M., Mo, S., Lin, Y.: Eva-gcn: Head pose estimation based on graph convolutional networks. In: CVPR (2021) +46. Yang, T.Y., Chen, Y.T., Lin, Y.Y., Chuang, Y.Y.: Fsa-net: Learning fine-grained structure aggregation for head pose estimation from a single image. In: CVPR (2019) +47. Zhang, C., Liu, H., Deng, Y., Xie, B., Li, Y.: Tokenhpe: Learning orientation tokens for efficient head pose estimation via transformers. In: CVPR (2023) +48. Zhang, H., Wang, M., Liu, Y., Yuan, Y.: Fdn: Feature decoupling network for head pose estimation. In: AAAI (2020) +49. Zhang, H., Tian, Y., Zhang, Y., Li, M., An, L., Sun, Z., Liu, Y.: Pymaf-x: Towards well-aligned full-body model regression from monocular images. IEEE TPAMI (2023) +50. Zhang, H., Tian, Y., Zhou, X., Ouyang, W., Liu, Y., Wang, L., Sun, Z.: Pymaf: 3d human pose and shape regression with pyramidal mesh alignment feedback loop. In: ICCV (2021) +51. Zhou, E., Fan, H., Cao, Z., Jiang, Y., Yin, Q.: Extensive facial landmark localization with coarse-to-fine convolutional network cascade. In: ICCVW (2013) +52. Zhou, Y., Barnes, C., Lu, J., Yang, J., Li, H.: On the continuity of rotation representations in neural networks. In: CVPR (2019) +53. Zhou, Y., Gregson, J.: Whenet: Real-time fine-grained estimation for wide range head pose. arXiv preprint arXiv:2005.10353 (2020) +54. Zhu, X., Ramanan, D.: Face detection, pose estimation, and landmark localization in the wild. In: CVPR (2012) +55. Zhu, X., Lei, Z., Liu, X., Shi, H., Li, S.Z.: Face alignment across large poses: A 3d solution. In: CVPR (2016) +56. Zielonka, W., Bolkart, T., Thies, J.: Towards metrical reconstruction of human faces. 
In: ECCV (2022) \ No newline at end of file diff --git a/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/images.zip b/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..df06e1d1d630785c8a92176f5fae802f78ffa746 --- /dev/null +++ b/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac62bcbfd64fab1cef90fe6ffea9362871ab838862c3da4cd32e8dc42763d452 +size 417607 diff --git a/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/layout.json b/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..2ff9373520ab6b0d332fae645b47e5caf1ea1dbc --- /dev/null +++ b/2024/6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry/layout.json @@ -0,0 +1,11263 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 148, + 111, + 465, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 111, + 465, + 148 + ], + "spans": [ + { + "bbox": [ + 148, + 111, + 465, + 148 + ], + "type": "text", + "content": "6DoF Head Pose Estimation through Explicit Bidirectional Interaction with Face Geometry" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 223, + 168, + 389, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 168, + 389, + 180 + ], + "spans": [ + { + "bbox": [ + 223, + 168, + 389, + 180 + ], + "type": "text", + "content": "Sungho Chun and Ju Yong Chang" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 201, + 190, + 412, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 190, + 412, + 213 + ], + "spans": [ + { + "bbox": [ + 201, + 190, + 412, + 213 + ], + "type": "text", + "content": "Department of ECE, Kwangwoon University, Korea {asw9161, jychang}@kw.ac.kr" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 160, + 240, + 452, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 240, + 452, + 437 + ], + "spans": [ + { + "bbox": [ + 160, + 240, + 452, + 437 + ], + "type": "text", + "content": "Abstract. This study addresses the nuanced challenge of estimating head translations within the context of six-degrees-of-freedom (6DoF) head pose estimation, placing emphasis on this aspect over the more commonly studied head rotations. Identifying a gap in existing methodologies, we recognized the underutilized potential synergy between facial geometry and head translation. To bridge this gap, we propose a novel approach called the head Translation, Rotation, and face Geometry network (TRG), which stands out for its explicit bidirectional interaction structure. This structure has been carefully designed to leverage the complementary relationship between face geometry and head translation, marking a significant advancement in the field of head pose estimation. Our contributions also include the development of a strategy for estimating bounding box correction parameters and a technique for aligning landmarks to image. Both of these innovations demonstrate superior performance in 6DoF head pose estimation tasks. 
Extensive experiments conducted on ARKitFace and BIWI datasets confirm that the proposed method outperforms current state-of-the-art techniques. Codes are released at https://github.com/asn91666/TRG-Release." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 160, + 449, + 473, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 449, + 473, + 471 + ], + "spans": [ + { + "bbox": [ + 160, + 449, + 473, + 471 + ], + "type": "text", + "content": "Keywords: 6DoF head pose estimation " + }, + { + "bbox": [ + 160, + 449, + 473, + 471 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 449, + 473, + 471 + ], + "type": "text", + "content": " bidirectional interaction " + }, + { + "bbox": [ + 160, + 449, + 473, + 471 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 449, + 473, + 471 + ], + "type": "text", + "content": " landmark-based approach" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 133, + 509, + 230, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 509, + 230, + 521 + ], + "spans": [ + { + "bbox": [ + 133, + 509, + 230, + 521 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 533, + 482, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 628 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 628 + ], + "type": "text", + "content": "Six-degrees-of-freedom (6DoF) head pose estimation is a crucial concern in both computer vision and graphics communities owing to its broad applications in augmented/virtual reality, vehicular monitoring systems, and sports analytics. Despite its prominence, existing studies [3,4,21,27,38,41,45-47] have primarily focused on estimating head orientation, whereas research on head translation estimation has not received as much attention. Some studies [1,44] have estimated pseudo-depth calculated from fitted data [55] without exploring methods to estimate the actual distance between the camera and head." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 630, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 630, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 630, + 482, + 666 + ], + "type": "text", + "content": "Estimating head translation from a single image using learning-based methods poses significant challenges, which can be attributed to roughly two reasons. First, head translation estimation depends on real-scale face geometry. However," + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 134, + 115, + 479, + 235 + ], + "blocks": [ + { + "bbox": [ + 134, + 115, + 479, + 235 + ], + "lines": [ + { + "bbox": [ + 134, + 115, + 479, + 235 + ], + "spans": [ + { + "bbox": [ + 134, + 115, + 479, + 235 + ], + "type": "image", + "image_path": "d458188cd0318d3f363853cfc5116e0e91c3bbd1894de8b9a41299a2f65f93e9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 243, + 482, + 299 + ], + "lines": [ + { + "bbox": [ + 130, + 243, + 482, + 299 + ], + "spans": [ + { + "bbox": [ + 130, + 243, + 482, + 299 + ], + "type": "text", + "content": "Fig. 1: Methods of inferring 6DoF head pose. The landmark-free approach [1] directly calculates the head pose from the image. 
Optimization-based methods [18,25,56] first predict face geometry, and then calculate the head pose. In contrast, TRG simultaneously estimates both face geometry and head pose to leverage the synergy between them." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 331, + 482, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 331, + 482, + 415 + ], + "spans": [ + { + "bbox": [ + 130, + 331, + 482, + 415 + ], + "type": "text", + "content": "the estimation of real-scale face geometry suffers from head translation ambiguities. In other words, the estimation of head translation and the estimation of actual size face geometry are strongly correlated, and there exists ambiguity due to their mutual absence. Second, learning-based head translation estimation encounters severe generalization issues with out-of-distribution data. Unlike head rotation, the range of head translation is infinite, necessitating a generalization strategy to address it." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 418, + 482, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 418, + 482, + 538 + ], + "spans": [ + { + "bbox": [ + 130, + 418, + 482, + 538 + ], + "type": "text", + "content": "However, existing works [1, 18, 25, 56] do not address the aforementioned issues. Fig. 1 provides an overview of the 6DoF head pose estimation methods used by existing models. In [18, 25, 56], face geometry is first inferred from an image, followed by the calculation of the 6DoF head pose using an optimization-based method. In other words, these methods [18,25,56] do not model the transfer of information from head pose to face geometry. This unidirectional information transfer method may face difficulties in predicting the actual size face geometry due to the absence of depth information. Consequently, the resulting face prior could create a vicious cycle, further reducing the accuracy of head translation prediction." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 542, + 482, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 542, + 482, + 602 + ], + "spans": [ + { + "bbox": [ + 130, + 542, + 482, + 602 + ], + "type": "text", + "content": "Landmark-free approach [1] estimates head translation directly from an image using a learning-based method; however, it does not utilize face geometry information during the inference process. Directly estimating head depth from an image is highly non-linear, making the landmark-free approach challenging for estimating head translation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "content": "To overcome the limitations of existing models [1,18,25,56], we propose a head Translation, Rotation, and face Geometry network (TRG), which is a landmark-based method for estimating a 6DoF head pose. The TRG is designed with an explicit bidirectional interaction structure that leverages the complementary characteristics between the 6DoF head pose and face geometry. 
Specifically, we" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 165, + 91, + 271, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 91, + 271, + 102 + ], + "spans": [ + { + "bbox": [ + 165, + 91, + 271, + 102 + ], + "type": "text", + "content": "S. Chun and J. Y. Chang" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": "propose a method that simultaneously estimates the head pose and dense 3D landmarks, using each other's information to iteratively improve one another." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 140, + 482, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 140, + 482, + 295 + ], + "spans": [ + { + "bbox": [ + 130, + 140, + 482, + 295 + ], + "type": "text", + "content": "To achieve generalizable head translation estimation, TRG does not directly estimate depth, but utilizes the position and size information of the bounding box. The center coordinates of the bounding box are typically well-aligned with the coordinates of the head center, and the size of the bounding box inversely reflects the head's depth. These relationships make the bounding box a useful tool for estimating head translation in 3D space. However, reliance on the bounding box alone is insufficient. This is due to potential misalignments between the bounding box center and the head center, and the bounding box size being influenced by factors beyond depth, such as face size and head rotation. To address these discrepancies, we propose to estimate bounding box correction parameters and calculate head translation using these parameters and bounding box information. The proposed method has been found to achieve high accuracy and to be robust even for out-of-distribution data." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 296, + 482, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 296, + 482, + 390 + ], + "spans": [ + { + "bbox": [ + 130, + 296, + 482, + 390 + ], + "type": "text", + "content": "Additionally, TRG aligns the estimated 3D landmarks with the image through perspective projection. By iterating this process, TRG not only enhances the performance of head translation estimation but also improves head rotation accuracy. This landmark-to-image alignment framework is inspired by the architecture of PyMAF [49, 50], which is a model used to reconstruct a human mesh. However, PyMAF is not designed to estimate the camera-to-human distance and fundamentally differs from TRG as it does not leverage the synergy between real-scale human geometry and depth." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 391, + 482, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 391, + 482, + 475 + ], + "spans": [ + { + "bbox": [ + 130, + 391, + 482, + 475 + ], + "type": "text", + "content": "Furthermore, we discovered that TRG can accurately predict 3D face landmarks from a single image, even when strongly affected by perspective distortions, such as in selfies. This accuracy is attributed to the TRG's depth-aware landmark prediction architecture, which actively utilizes head translation information during the landmark prediction process. This finding further supports our main idea that head translation estimation should be conducted simultaneously with facial geometry estimation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 146, + 475, + 446, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 475, + 446, + 487 + ], + "spans": [ + { + "bbox": [ + 146, + 475, + 446, + 487 + ], + "type": "text", + "content": "The main contributions of this study can be summarized as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 137, + 490, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 138, + 490, + 480, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 490, + 480, + 548 + ], + "spans": [ + { + "bbox": [ + 138, + 490, + 480, + 548 + ], + "type": "text", + "content": "- We propose TRG for 6DoF head pose estimation. To the best of our knowledge, this is the first study to introduce an explicit bidirectional interaction structure between head translation and face geometry. Through this innovative structure, TRG simultaneously mitigates ambiguity concerning head depth and face size." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 549, + 480, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 549, + 480, + 583 + ], + "spans": [ + { + "bbox": [ + 138, + 549, + 480, + 583 + ], + "type": "text", + "content": "- The proposed strategy for estimating correction parameters for the bounding box demonstrates stable generalization performance on out-of-distribution data in terms of head translation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 137, + 584, + 479, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 584, + 479, + 607 + ], + "spans": [ + { + "bbox": [ + 137, + 584, + 479, + 607 + ], + "type": "text", + "content": "- The landmark-to-image alignment strategy demonstrates high accuracy not only in terms of head translation but also regarding head rotation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 137, + 607, + 480, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 607, + 480, + 640 + ], + "spans": [ + { + "bbox": [ + 137, + 607, + 480, + 640 + ], + "type": "text", + "content": "- TRG's depth-aware landmark prediction architecture exhibits high landmark prediction accuracy, even in images heavily influenced by perspective transformation, such as selfies." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 137, + 641, + 480, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 641, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 137, + 641, + 480, + 665 + ], + "type": "text", + "content": "- Extensive experimental results on the benchmark datasets ARKitFace [25] and BIWI [15] show that TRG outperforms current SotA methods." 
+ } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "type": "text", + "content": "TRG" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 114, + 243, + 127 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 114, + 243, + 127 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 243, + 127 + ], + "type": "text", + "content": "2 Related Works" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 140, + 288, + 153 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 140, + 288, + 153 + ], + "spans": [ + { + "bbox": [ + 132, + 140, + 288, + 153 + ], + "type": "text", + "content": "2.1 Landmark-free Approach" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 160, + 479, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 160, + 479, + 207 + ], + "spans": [ + { + "bbox": [ + 130, + 160, + 479, + 207 + ], + "type": "text", + "content": "The landmark-free approach [1,3,4,14,21,31,47] aims to estimate head pose directly from input image without relying on landmarks. However, most landmark-free approaches [3,4,14,21,31,47] only estimate head rotation and do not consider head translation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 208, + 480, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 208, + 480, + 327 + ], + "spans": [ + { + "bbox": [ + 130, + 208, + 480, + 327 + ], + "type": "text", + "content": "Among them, img2pose [1] not only estimates head rotation but also head translation. It calculates head translation from a proposal and employs a local-to-global transformation strategy to convert the estimated local pose into a global image space. Infrinsics are utilized during the conversion of the local head pose into the global head pose. However, img2pose does not use intrinsics when calculating head translation from a proposal, leading to inaccurate local head poses. This is because utilizing intrinsics is essential when calculating depth from an image, even when dealing with a cropped image. Furthermore, [1] does not utilize face geometry information during inference, which can exacerbate depth ambiguity." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 328, + 481, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 328, + 481, + 423 + ], + "spans": [ + { + "bbox": [ + 130, + 328, + 481, + 423 + ], + "type": "text", + "content": "In contrast to landmark-free approaches, our proposed method explicitly utilizes facial geometry information. Specifically, TRG simultaneously mitigates ambiguity regarding face size and head translation through a bidirectional interaction structure. Additionally, TRG does not directly calculate head translation from cropped images but infers bounding box correction parameters instead. It then computes head translation using the inferred correction parameters and intrinsics. 
The proposed bounding box correction parameter strategy enables stable and accurate inference of head translation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 441, + 297, + 454 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 441, + 297, + 454 + ], + "spans": [ + { + "bbox": [ + 132, + 441, + 297, + 454 + ], + "type": "text", + "content": "2.2 Landmark-based Approach" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 462, + 480, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 462, + 480, + 533 + ], + "spans": [ + { + "bbox": [ + 130, + 462, + 480, + 533 + ], + "type": "text", + "content": "Numerous landmark-based approaches have been proposed [18,25,27,37,43-45, 56] for estimating a 6DoF head pose or 3D head rotation. [27,37,43] proposed methods that simultaneously estimate 2D face landmarks and 3D head rotation by leveraging the synergy between them using learning-based approaches. However, these studies have not explored the synergy between 3D face geometry and head translation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 533, + 481, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 481, + 640 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 481, + 640 + ], + "type": "text", + "content": "SynergyNet [44] demonstrated that the parameters for shape and expression [35] can improve 3D sparse landmarks, and these enhanced landmarks can, in turn, improve the 3DMM parameters and head rotation during training. However, during the test time, it utilized a unidirectional information transfer architecture, which does not refine the 3DMM parameters and head rotation from the improved landmarks. Furthermore, SynergyNet is a model based on weak-perspective projections, similar to those in [5, 10, 16, 19, 28, 55]. Such models fundamentally do not compute the actual distance between the camera and the face." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 641, + 481, + 665 + ], + "type": "text", + "content": "MICA [56], JMLR [18], and PerspNet [25] employ unidirectional information transfer methods that first estimate face geometry and then calculate head pose." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 271, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 271, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 271, + 102 + ], + "type": "text", + "content": "S. Chun and J. Y. 
Chang" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 136, + 114, + 478, + 256 + ], + "blocks": [ + { + "bbox": [ + 136, + 114, + 478, + 256 + ], + "lines": [ + { + "bbox": [ + 136, + 114, + 478, + 256 + ], + "spans": [ + { + "bbox": [ + 136, + 114, + 478, + 256 + ], + "type": "image", + "image_path": "ac978229c07763e29da1561556884b0c36888ac27e0c7790a31a51a8f6d729e5.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 206, + 264, + 406, + 277 + ], + "lines": [ + { + "bbox": [ + 206, + 264, + 406, + 277 + ], + "spans": [ + { + "bbox": [ + 206, + 264, + 406, + 277 + ], + "type": "text", + "content": "Fig. 2: Overall pipeline of the proposed method." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 301, + 479, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 301, + 479, + 350 + ], + "spans": [ + { + "bbox": [ + 130, + 301, + 479, + 350 + ], + "type": "text", + "content": "However, these methods are limited in their ability to reconstruct real-scale face geometry due to depth ambiguity. Furthermore, calculating the 6DoF head pose based on these inaccurate geometry priors makes it difficult to achieve high accuracy." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 350, + 480, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 350, + 480, + 422 + ], + "spans": [ + { + "bbox": [ + 130, + 350, + 480, + 422 + ], + "type": "text", + "content": "To address the aforementioned issues, we propose, for the first time, an explicit bidirectional interaction structure between the 6DoF head pose and face geometry. Additionally, unlike other landmark-based approaches, the proposed structure actively utilizes head depth information during the landmark estimation process. This approach demonstrates accurate geometry estimation even for images with strong perspective distortions, such as selfies." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 446, + 261, + 460 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 446, + 261, + 460 + ], + "spans": [ + { + "bbox": [ + 132, + 446, + 261, + 460 + ], + "type": "text", + "content": "3 Proposed Method" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 131, + 477, + 335, + 490 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 477, + 335, + 490 + ], + "spans": [ + { + "bbox": [ + 131, + 477, + 335, + 490 + ], + "type": "text", + "content": "3.1 Overview of the Proposed Method" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 131, + 502, + 480, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 502, + 480, + 540 + ], + "spans": [ + { + "bbox": [ + 131, + 502, + 480, + 540 + ], + "type": "text", + "content": "TRG is designed to iteratively regress head translation " + }, + { + "bbox": [ + 131, + 502, + 480, + 540 + ], + "type": "inline_equation", + "content": "\\{T_t \\in \\mathbb{R}^3\\}_{t=1}^3" + }, + { + "bbox": [ + 131, + 502, + 480, + 540 + ], + "type": "text", + "content": " and rotation " + }, + { + "bbox": [ + 131, + 502, + 480, + 540 + ], + "type": "inline_equation", + "content": "\\{R_t \\in \\mathbb{R}^6\\}_{t=1}^3" + }, + { + "bbox": [ + 131, + 502, + 480, + 540 + ], + "type": "text", + "content": " from a single image " + }, + { + "bbox": [ + 131, + 502, + 480, + 540 + ], + "type": "inline_equation", + "content": "I \\in \\mathbb{R}^{3 \\times 192 \\times 192}" + }, + { + "bbox": [ + 131, + 502, + 480, + 540 + ], + "type": "text", + "content": ", while also providing the auxiliary output of dense 3D landmarks " + }, + { + "bbox": [ + 131, + 502, + 480, + 540 + ], + "type": "inline_equation", + "content": "\\{V_t \\in \\mathbb{R}^{3 \\times N^V}\\}_{t=1}^3" + }, + { + "bbox": [ + 131, + 502, + 480, + 540 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "text", + "content": "Fig. 
2 illustrates the comprehensive structure of TRG, which comprises a feature extractor that generates multi-scale feature maps " + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "inline_equation", + "content": "\\{\\phi_t\\in \\mathbb{R}^{256\\times H_t\\times W_t}\\}_{t = 1}^3" + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "text", + "content": ", a feature sampler that extracts a landmark-aligned feature vector " + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "inline_equation", + "content": "\\phi_t^p\\in \\mathbb{R}^{5N_{t - 1}^P}" + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "text", + "content": " from the feature map " + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "inline_equation", + "content": "\\phi_t" + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "text", + "content": ", and a face regressor that regresses head translation " + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "inline_equation", + "content": "T_{t}" + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "text", + "content": ", rotation " + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "inline_equation", + "content": "R_{t}" + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "text", + "content": ", and dense landmarks " + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "inline_equation", + "content": "V_{t}" + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "inline_equation", + "content": "\\phi_t^p" + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "inline_equation", + "content": "N_{t - 1}^P" + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "inline_equation", + "content": "N^V" + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "text", + "content": " denote the number of sampling points " + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "inline_equation", + "content": "P_{t - 1}\\in \\mathbb{R}^{2\\times N_{t - 1}^P}" + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "text", + "content": " and the number of 3D dense landmarks " + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "inline_equation", + "content": "V_{t}" + }, + { + "bbox": [ + 130, + 541, + 482, + 665 + ], + "type": "text", + "content": ", respectively. Each of these components—feature extractor, feature sampler, and face regressor—is described in detail in Sections 3.2, 3.3, and 3.4, respectively. Additionally, the loss functions employed in the training are discussed in Section 3.5." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "type": "text", + "content": "TRG" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 251, + 127 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 251, + 127 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 251, + 127 + ], + "type": "text", + "content": "3.2 Feature Extractor" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "spans": [ + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "text", + "content": "The feature extractor computes multi-scale feature maps " + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "inline_equation", + "content": "\\{\\phi_t\\}_{t=1}^3" + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "text", + "content": " and 2D sparse landmarks " + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "inline_equation", + "content": "L \\in \\mathbb{R}^{2 \\times N^L}" + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "text", + "content": " from a single image " + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "inline_equation", + "content": "N^L" + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "text", + "content": " denote the number of sparse landmarks. The feature extractor comprises ResNet18 [20], three deconvolution layers, a " + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "text", + "content": " convolution layer, and a soft-argmax operation [42]. ResNet18 is initialized with pre-trained weights on ImageNet [11] and is used after removing the final classification layer and the pooling layer. The " + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "inline_equation", + "content": "\\phi_t" + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "text", + "content": " is computed from the " + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "text", + "content": "-th deconvolution layer and fed into the feature sampler. 
Additionally, the last feature map, " + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "inline_equation", + "content": "\\phi_3" + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "text", + "content": " undergoes a transformation into 2D heatmaps through the " + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "text", + "content": " convolution layer. The soft-argmax operation computes " + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "text", + "content": " from the resultant heatmaps. These computed landmarks, along with the ground-truth landmarks " + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "inline_equation", + "content": "L^* \\in \\mathbb{R}^{2 \\times N^L}" + }, + { + "bbox": [ + 130, + 136, + 482, + 271 + ], + "type": "text", + "content": ", are incorporated into the loss function." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 289, + 244, + 301 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 289, + 244, + 301 + ], + "spans": [ + { + "bbox": [ + 132, + 289, + 244, + 301 + ], + "type": "text", + "content": "3.3 Feature Sampler" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "spans": [ + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "text", + "content": "The feature sampler computes the landmark-aligned feature vector " + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "inline_equation", + "content": "\\phi_t^p \\in \\mathbb{R}^{5N_{t-1}^P}" + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "text", + "content": " from the feature map " + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "inline_equation", + "content": "\\phi_t" + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "text", + "content": " and the corresponding sampling points " + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "inline_equation", + "content": "P_{t-1} \\in \\mathbb{R}^{2 \\times N_{t-1}^P}" + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "text", + "content": ". Sampling points " + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "inline_equation", + "content": "P_{t-1}" + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "text", + "content": " are used to extract point-wise features from the feature map " + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "inline_equation", + "content": "\\phi_t" + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "text", + "content": ". Here, " + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "inline_equation", + "content": "P_0" + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "text", + "content": " is set to 2D grid coordinates. 
For " + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "inline_equation", + "content": "t > 0" + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "inline_equation", + "content": "P_t" + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "text", + "content": " is computed using the " + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 130, + 308, + 481, + 384 + ], + "type": "text", + "content": "-th face regressor. The methodology for deriving these sampling points from the face regressor is described in Section 3.4." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "spans": [ + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "text", + "content": "The point-wise feature vector " + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "inline_equation", + "content": "\\phi_t(p_{t-1,n}) \\in \\mathbb{R}^{256}" + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "text", + "content": " is obtained using bilinear sampling at the location specified by the point " + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "inline_equation", + "content": "p_{t-1,n} \\in \\mathbb{R}^2" + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "inline_equation", + "content": "\\phi_t" + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "text", + "content": ". Here, " + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "inline_equation", + "content": "p_{t-1,n}" + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "text", + "content": "-th column vector of the sampling points " + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "inline_equation", + "content": "P_{t-1}" + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "text", + "content": ". The " + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "inline_equation", + "content": "N_{t-1}^P" + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "text", + "content": " point-wise features, denoted as " + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "inline_equation", + "content": "\\{\\phi_t(p_{t-1,n})\\}_{n=1}^{N_{t-1}^P}" + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "text", + "content": ", are then transformed into 5D vectors using a dimension reduction layer " + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "inline_equation", + "content": "\\mathcal{F}(\\cdot)" + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "text", + "content": ". 
These vectors are subsequently concatenated to form the landmark-aligned feature vector " + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "inline_equation", + "content": "\\phi_t^p" + }, + { + "bbox": [ + 130, + 384, + 482, + 462 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 238, + 470, + 481, + 491 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 470, + 481, + 491 + ], + "spans": [ + { + "bbox": [ + 238, + 470, + 481, + 491 + ], + "type": "interline_equation", + "content": "\\phi_ {t} ^ {p} = \\bigoplus \\left(\\left\\{\\mathcal {F} \\left(\\phi_ {t} \\left(p _ {t - 1, n}\\right)\\right) \\right\\} _ {n = 1} ^ {N _ {t - 1} ^ {P}}\\right), \\tag {1}", + "image_path": "652c6528198b45ded4f94f826ec1db01fc3fdb4b5609566779854a35008d5c1b.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 499, + 481, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 499, + 481, + 548 + ], + "spans": [ + { + "bbox": [ + 130, + 499, + 481, + 548 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 499, + 481, + 548 + ], + "type": "inline_equation", + "content": "\\bigoplus (\\cdot)" + }, + { + "bbox": [ + 130, + 499, + 481, + 548 + ], + "type": "text", + "content": " denotes concatenation. The dimension reduction layer, " + }, + { + "bbox": [ + 130, + 499, + 481, + 548 + ], + "type": "inline_equation", + "content": "\\mathcal{F}(\\cdot)" + }, + { + "bbox": [ + 130, + 499, + 481, + 548 + ], + "type": "text", + "content": ", is structured as a multilayer perceptron (MLP), which comprises three fully connected layers and two Leaky ReLU activations [32,50]. The obtained landmark-aligned feature vector " + }, + { + "bbox": [ + 130, + 499, + 481, + 548 + ], + "type": "inline_equation", + "content": "\\phi_t^p" + }, + { + "bbox": [ + 130, + 499, + 481, + 548 + ], + "type": "text", + "content": " is then fed into the face regressor." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 566, + 236, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 566, + 236, + 578 + ], + "spans": [ + { + "bbox": [ + 132, + 566, + 236, + 578 + ], + "type": "text", + "content": "3.4 Face Regressor" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 586, + 498, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 586, + 498, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 586, + 498, + 666 + ], + "type": "text", + "content": "The face regressor comprises an MLP " + }, + { + "bbox": [ + 130, + 586, + 498, + 666 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_t(\\cdot)" + }, + { + "bbox": [ + 130, + 586, + 498, + 666 + ], + "type": "text", + "content": " to calculate the head rotation, bounding box correction parameters, and dense landmarks " + }, + { + "bbox": [ + 130, + 586, + 498, + 666 + ], + "type": "inline_equation", + "content": "\\Theta_t = \\{R_t \\in \\mathbb{R}^6, c_t \\in \\mathbb{R}^3, V_t \\in \\mathbb{R}^{3 \\times N^V}\\}" + }, + { + "bbox": [ + 130, + 586, + 498, + 666 + ], + "type": "text", + "content": ", a function that computes the head translation " + }, + { + "bbox": [ + 130, + 586, + 498, + 666 + ], + "type": "inline_equation", + "content": "T_t = \\{T_t^x, T_t^y, T_t^z\\} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 130, + 586, + 498, + 666 + ], + "type": "text", + "content": " based on the bounding box information " + }, + { + "bbox": [ + 130, + 586, + 498, + 666 + ], + "type": "inline_equation", + "content": "I_{\\mathrm{bbox}} = \\{\\frac{\\tau^{x,\\mathrm{bbox}}}{f}, \\frac{\\tau^{y,\\mathrm{bbox}}}{f}, \\frac{b}{f}\\} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 130, + 586, + 498, + 666 + ], + "type": "text", + "content": " and the correction parameter " + }, + { + "bbox": [ + 130, + 586, + 498, + 666 + ], + "type": "inline_equation", + "content": "c_t = \\{s_t, \\tilde{\\tau}_t^{x,\\mathrm{face}}, \\tilde{\\tau}_t^{y,\\mathrm{face}}\\}" + }, + { + "bbox": [ + 130, + 586, + 498, + 666 + ], + "type": "text", + "content": ", and a perspective projection function that calculates the image coordinates of the dense landmarks" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 271, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 271, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 271, + 102 + ], + "type": "text", + "content": "S. Chun and J. Y. Chang" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "spans": [ + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "V_{t}^{img} \\in \\mathbb{R}^{2 \\times N^{V}}" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": " and the sampling points " + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "P_{t}" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "V_{t}" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "R_{t}" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": " denote the 3D coordinates of the dense landmarks defined in the head space and the head rotation expressed in a 6D representation [52], respectively. " + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "T_{t}^{x}, T_{t}^{y}" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "T_{t}^{z}" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": " represent the head translations along the " + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": "-, " + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": "-, and " + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": "-axes in the camera space, respectively. " + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "\\tau^{x,\\mathrm{bbox}}" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "\\tau^{y,\\mathrm{bbox}}" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": " denote the " + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": "- and " + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": "-coordinates of the bounding box center relative to the center of the uncropped image, the size of the bounding box, and the focal length, respectively. 
" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "s_{t}" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "\\tilde{\\tau}_{t}^{x,\\mathrm{face}}" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "\\tilde{\\tau}_{t}^{y,\\mathrm{face}}" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": " respectively denote the bounding box scale factor and the normalized offset of the head center relative to the bounding box center in the " + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": "- and " + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 130, + 114, + 479, + 223 + ], + "type": "text", + "content": "-directions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "spans": [ + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "text", + "content": "The MLP " + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_t(\\cdot)" + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "text", + "content": " estimates the residual for calculating " + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "inline_equation", + "content": "\\Theta_t" + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "text", + "content": " from the landmark-aligned feature " + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "inline_equation", + "content": "\\phi_t^p" + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "text", + "content": ", the previously iterated output " + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "inline_equation", + "content": "\\Theta_{t - 1}^{sub} = \\{R_{t - 1},c_{t - 1},V_{t - 1}^{sub}\\in \\mathbb{R}^{3\\times 305}\\}" + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "text", + "content": ", and the bounding box information " + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "inline_equation", + "content": "I_{\\mathrm{bbox}}" + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "text", + "content": " [29,49,50]. " + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "inline_equation", + "content": "\\Theta_t" + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "text", + "content": " is computed by adding the residual estimated by " + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_t(\\cdot)" + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "inline_equation", + "content": "\\Theta_{t - 1}" + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "inline_equation", + "content": "V_{t - 1}^{sub}" + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "text", + "content": " represents the landmarks obtained by subsampling " + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "inline_equation", + "content": "V_{t - 1}" + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "text", + "content": " [36]. The use of " + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "inline_equation", + "content": "V_{t - 1}^{sub}" + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_t(\\cdot)" + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "text", + "content": " instead of " + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "inline_equation", + "content": "V_{t - 1}" + }, + { + "bbox": [ + 130, + 224, + 480, + 308 + ], + "type": "text", + "content": " reduces the redundancy of the dense landmarks, which improves the performance of the proposed model [6-8,30,50]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "spans": [ + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "text", + "content": "We model a real human face as being enclosed within a box " + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "text", + "content": " of size " + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "inline_equation", + "content": "0.2m \\times 0.2m" + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "text", + "content": " denoting meters. The size of this box, when projected into the image space, is represented by " + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "text", + "content": ". However, since the assumption about the face size is typically imprecise, " + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_t(\\cdot)" + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "text", + "content": " estimates a scale factor " + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "inline_equation", + "content": "s_t" + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "text", + "content": " to adjust the size of " + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "text", + "content": ". 
Furthermore, " + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_t(\\cdot)" + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "text", + "content": " is responsible for determining the normalized offsets of the head center " + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "inline_equation", + "content": "\\tilde{\\tau}_t^{x,\\mathrm{face}}" + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "inline_equation", + "content": "\\tilde{\\tau}_t^{y,\\mathrm{face}}" + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "text", + "content": ". These offsets represent the values obtained by normalizing the image space translation from the bounding box center to the head center with " + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "text", + "content": ". The calculation of " + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "inline_equation", + "content": "T_t" + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "inline_equation", + "content": "c_t" + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "inline_equation", + "content": "I_{\\mathrm{bbox}}" + }, + { + "bbox": [ + 130, + 308, + 480, + 406 + ], + "type": "text", + "content": " is expressed as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 203, + 415, + 480, + 460 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 203, + 415, + 480, + 460 + ], + "spans": [ + { + "bbox": [ + 203, + 415, + 480, + 460 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} T _ {t} ^ {x} = \\frac {0 . 2 s _ {t}}{b} \\tau^ {x, \\mathrm {b b o x}} + 0. 2 s _ {t} \\tilde {\\tau} _ {t} ^ {x, \\mathrm {f a c e}}, \\\\ T _ {t} ^ {y} = \\frac {0 . 2 s _ {t}}{b} \\tau^ {y, \\mathrm {b b o x}} + 0. 2 s _ {t} \\tilde {\\tau} _ {t} ^ {y, \\text {f a c e}}, \\quad T _ {t} ^ {z} = \\frac {0 . 2 s _ {t}}{b} f. \\tag {2} \\\\ \\end{array}", + "image_path": "8e9224e5afa4016091a20cccbb67f7a3daca3ae871e2dbf04ca2f73f4fc224fc.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 470, + 479, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 470, + 479, + 505 + ], + "spans": [ + { + "bbox": [ + 130, + 470, + 479, + 505 + ], + "type": "text", + "content": "The derivation of Eq. 2 can be found in the supplementary material. 
The image coordinates of the dense landmarks, " + }, + { + "bbox": [ + 130, + 470, + 479, + 505 + ], + "type": "inline_equation", + "content": "V_{t}^{img}" + }, + { + "bbox": [ + 130, + 470, + 479, + 505 + ], + "type": "text", + "content": ", are computed by projecting " + }, + { + "bbox": [ + 130, + 470, + 479, + 505 + ], + "type": "inline_equation", + "content": "V_{t}" + }, + { + "bbox": [ + 130, + 470, + 479, + 505 + ], + "type": "text", + "content": ", as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 254, + 506, + 480, + 520 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 254, + 506, + 480, + 520 + ], + "spans": [ + { + "bbox": [ + 254, + 506, + 480, + 520 + ], + "type": "interline_equation", + "content": "V _ {t} ^ {i m g} = \\Pi \\left(V _ {t}, R _ {t}, T _ {t}, K\\right), \\tag {3}", + "image_path": "5a9513190619ede4bc4b5a74fd1cacf3058a2d9dd92b467d4920046bde088a34.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 526, + 479, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 526, + 479, + 563 + ], + "spans": [ + { + "bbox": [ + 130, + 526, + 479, + 563 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 526, + 479, + 563 + ], + "type": "inline_equation", + "content": "\\varPi(\\cdot)" + }, + { + "bbox": [ + 130, + 526, + 479, + 563 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 526, + 479, + 563 + ], + "type": "inline_equation", + "content": "K\\in \\mathbb{R}^{3\\times 3}" + }, + { + "bbox": [ + 130, + 526, + 479, + 563 + ], + "type": "text", + "content": " denote the perspective projection and the intrinsic camera parameters, respectively. The sampling points " + }, + { + "bbox": [ + 130, + 526, + 479, + 563 + ], + "type": "inline_equation", + "content": "P_{t}" + }, + { + "bbox": [ + 130, + 526, + 479, + 563 + ], + "type": "text", + "content": " are obtained by subsampling " + }, + { + "bbox": [ + 130, + 526, + 479, + 563 + ], + "type": "inline_equation", + "content": "V_{t}^{img}" + }, + { + "bbox": [ + 130, + 526, + 479, + 563 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 131, + 582, + 234, + 594 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 582, + 234, + 594 + ], + "spans": [ + { + "bbox": [ + 131, + 582, + 234, + 594 + ], + "type": "text", + "content": "3.5 Loss Functions" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 605, + 479, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 479, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 479, + 665 + ], + "type": "text", + "content": "We detail the loss functions employed to train TRG, ensuring accurate predictions of face geometry and head pose. 
The training process utilizes several loss functions for dense landmarks: head space coordinate loss " + }, + { + "bbox": [ + 130, + 605, + 479, + 665 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{head}}" + }, + { + "bbox": [ + 130, + 605, + 479, + 665 + ], + "type": "text", + "content": ", camera space coordinate loss " + }, + { + "bbox": [ + 130, + 605, + 479, + 665 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{cam}}" + }, + { + "bbox": [ + 130, + 605, + 479, + 665 + ], + "type": "text", + "content": ", and image space coordinate loss " + }, + { + "bbox": [ + 130, + 605, + 479, + 665 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{img}}" + }, + { + "bbox": [ + 130, + 605, + 479, + 665 + ], + "type": "text", + "content": ". For a precise estimation of head rotation, a head rotation loss " + }, + { + "bbox": [ + 130, + 605, + 479, + 665 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{rot}}" + }, + { + "bbox": [ + 130, + 605, + 479, + 665 + ], + "type": "text", + "content": " is also adopted. As iteration" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "type": "text", + "content": "TRG" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 480, + 99 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 480, + 99 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 480, + 99 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 363, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 363, + 128 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 363, + 128 + ], + "type": "text", + "content": "progresses, the loss functions are doubled as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 209, + 137, + 384, + 169 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 137, + 384, + 169 + ], + "spans": [ + { + "bbox": [ + 209, + 137, + 384, + 169 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {h e a d}} = \\sum_ {t = 1} ^ {3} 2 ^ {t - 3} \\left(\\frac {1}{N ^ {V}} \\sum_ {n = 1} ^ {N ^ {V}} \\| V _ {t, n} - V _ {n} ^ {*} \\| _ {1}\\right),", + "image_path": "71f5eab6a10eca893ea8b2b7e048b7735242062dd62ff6a3686c3a53155f95a2.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 209, + 173, + 481, + 212 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 173, + 481, + 212 + ], + "spans": [ + { + "bbox": [ + 209, + 173, + 481, + 212 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {c a m}} = \\sum_ {t = 1} ^ {3} 2 ^ {t - 3} \\left(\\frac {1}{N ^ {V}} \\sum_ {n = 1} ^ {N ^ {V}} \\| V _ {t, n} ^ {\\text {c a m}} - V _ {n} ^ {* , \\text {c a m}} \\| _ {1}\\right), \\tag {4}", + "image_path": "894c004f7941c2f3de4d88d9c93217c334732f58cbfa6f116b77b9a6674433cb.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 209, + 209, + 403, + 242 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 209, + 403, + 242 + ], + "spans": [ + { + "bbox": [ + 209, + 209, + 403, + 242 + ], + "type": "interline_equation", + 
"content": "\\mathcal {L} _ {\\mathrm {i m g}} = \\sum_ {t = 1} ^ {3} 2 ^ {t - 3} \\left(\\frac {1}{N ^ {V}} \\sum_ {n = 1} ^ {N ^ {V}} \\| V _ {t, n} ^ {i m g} - V _ {n} ^ {*, i m g} \\| _ {1}\\right),", + "image_path": "66c8ed0df1ab7b666873367a910c65e2633b52fa9c15ffbce634543db22d0c42.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 209, + 247, + 365, + 276 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 247, + 365, + 276 + ], + "spans": [ + { + "bbox": [ + 209, + 247, + 365, + 276 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {r o t}} = \\sum_ {t = 1} ^ {3} 2 ^ {t - 3} (\\| R _ {t} ^ {m a t} - R ^ {*, m a t} \\| _ {F}),", + "image_path": "07e42768e99d0bd4bf735b9a1c2b2d91ccf3ed87cf225e0a16de380267256239.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "spans": [ + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "inline_equation", + "content": "*" + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "inline_equation", + "content": "V_{t,n}" + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "text", + "content": " represent the ground truth and the " + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "text", + "content": "-th column vector of " + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "inline_equation", + "content": "V_{t}" + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "text", + "content": ", respectively. " + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "inline_equation", + "content": "V_{t}^{cam} = R_{t}^{mat}V_{t} + T_{t} \\in \\mathbb{R}^{3 \\times N^{V}}" + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "inline_equation", + "content": "V_{t}^{img}" + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "text", + "content": " represent the camera space coordinates and the image space coordinates of the " + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "text", + "content": "-th dense landmarks, respectively. " + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "inline_equation", + "content": "R_{t}^{mat} \\in \\mathbb{R}^{3 \\times 3}" + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "text", + "content": " represents the 3D head rotation in matrix form, and " + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "inline_equation", + "content": "\\| \\cdot \\|_{F}" + }, + { + "bbox": [ + 132, + 283, + 481, + 344 + ], + "type": "text", + "content": " denotes the Frobenius norm." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 345, + 481, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 345, + 481, + 404 + ], + "spans": [ + { + "bbox": [ + 132, + 345, + 481, + 404 + ], + "type": "text", + "content": "If connectivity between dense landmarks is defined in the dataset, we utilize this information to apply an edge length loss. We empirically found that applying the edge length loss " + }, + { + "bbox": [ + 132, + 345, + 481, + 404 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{ed}}" + }, + { + "bbox": [ + 132, + 345, + 481, + 404 + ], + "type": "text", + "content": " [18, 33] to " + }, + { + "bbox": [ + 132, + 345, + 481, + 404 + ], + "type": "inline_equation", + "content": "V_{3}" + }, + { + "bbox": [ + 132, + 345, + 481, + 404 + ], + "type": "text", + "content": ", estimated by the final face regressor, improves the model's performance in estimating face geometry. The edge length loss " + }, + { + "bbox": [ + 132, + 345, + 481, + 404 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{ed}}" + }, + { + "bbox": [ + 132, + 345, + 481, + 404 + ], + "type": "text", + "content": " can be written as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 197, + 413, + 480, + 437 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 413, + 480, + 437 + ], + "spans": [ + { + "bbox": [ + 197, + 413, + 480, + 437 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {e d}} = \\sum_ {M} \\sum_ {\\{n, m \\} \\subset M} | \\| V _ {3, n} - V _ {3, m} \\| _ {2} - \\| V _ {n} ^ {*} - V _ {m} ^ {*} \\| _ {2} |, \\tag {5}", + "image_path": "f526b0cc98256c239d8e4c5eefe1283da18b73146e90d9db66de19726bfe5a53.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 445, + 481, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 445, + 481, + 480 + ], + "spans": [ + { + "bbox": [ + 132, + 445, + 481, + 480 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 132, + 445, + 481, + 480 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 132, + 445, + 481, + 480 + ], + "type": "text", + "content": " denotes a triangle. Additionally, to improve the quality of the feature map, we apply the sparse 2D landmark loss " + }, + { + "bbox": [ + 132, + 445, + 481, + 480 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_L" + }, + { + "bbox": [ + 132, + 445, + 481, + 480 + ], + "type": "text", + "content": " to the landmarks " + }, + { + "bbox": [ + 132, + 445, + 481, + 480 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 132, + 445, + 481, + 480 + ], + "type": "text", + "content": " obtained from " + }, + { + "bbox": [ + 132, + 445, + 481, + 480 + ], + "type": "inline_equation", + "content": "\\phi_3" + }, + { + "bbox": [ + 132, + 445, + 481, + 480 + ], + "type": "text", + "content": " as follows:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 249, + 482, + 480, + 514 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 482, + 480, + 514 + ], + "spans": [ + { + "bbox": [ + 249, + 482, + 480, + 514 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {L} = \\frac {1}{N ^ {L}} \\sum_ {n = 1} ^ {N ^ {L}} \\| L _ {n} - L _ {n} ^ {*} \\| _ {1}. 
\\tag {6}", + "image_path": "925f76077db1192ea11e0a0684225aa1e5fdd3b8c6bf694d77262e8fecdb578c.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 146, + 517, + 392, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 517, + 392, + 529 + ], + "spans": [ + { + "bbox": [ + 146, + 517, + 392, + 529 + ], + "type": "text", + "content": "The final loss function to train TRG can be written as:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 151, + 538, + 480, + 550 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 538, + 480, + 550 + ], + "spans": [ + { + "bbox": [ + 151, + 538, + 480, + 550 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\lambda_ {\\text {h e a d}} \\mathcal {L} _ {\\text {h e a d}} + \\lambda_ {\\text {c a m}} \\mathcal {L} _ {\\text {c a m}} + \\lambda_ {\\text {i m g}} \\mathcal {L} _ {\\text {i m g}} + \\lambda_ {\\text {r o t}} \\mathcal {L} _ {\\text {r o t}} + \\lambda_ {\\text {e d}} \\mathcal {L} _ {\\text {e d}} + \\lambda_ {L} \\mathcal {L} _ {L}, \\tag {7}", + "image_path": "4ef26bf332139391a153ad6ad700d4b4435b562449af92acbccb03a36915c0e5.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 558, + 481, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 558, + 481, + 581 + ], + "spans": [ + { + "bbox": [ + 132, + 558, + 481, + 581 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 132, + 558, + 481, + 581 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 132, + 558, + 481, + 581 + ], + "type": "text", + "content": "s represent the weights of the loss functions. " + }, + { + "bbox": [ + 132, + 558, + 481, + 581 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{head}}" + }, + { + "bbox": [ + 132, + 558, + 481, + 581 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 132, + 558, + 481, + 581 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{cam}}" + }, + { + "bbox": [ + 132, + 558, + 481, + 581 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 132, + 558, + 481, + 581 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{img}}" + }, + { + "bbox": [ + 132, + 558, + 481, + 581 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 132, + 558, + 481, + 581 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{rot}}" + }, + { + "bbox": [ + 132, + 558, + 481, + 581 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 132, + 558, + 481, + 581 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{ed}}" + }, + { + "bbox": [ + 132, + 558, + 481, + 581 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 132, + 558, + 481, + 581 + ], + "type": "inline_equation", + "content": "\\lambda_L" + }, + { + "bbox": [ + 132, + 558, + 481, + 581 + ], + "type": "text", + "content": " are set to 20, 2, 0.01, 10, 2, and 1.25, respectively." 
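To make the remaining terms concrete: a sketch of the edge-length loss of Eq. (5), which compares predicted and ground-truth edge lengths over every vertex pair within each mesh triangle, and the weighted sum of Eq. (7) with the weights quoted above. The (M, 3) triangle index array and the dict interface are assumptions for illustration.

```python
import numpy as np

def edge_length_loss(V3, V_gt, triangles):
    """Eq. (5): sum of |predicted edge length - ground-truth edge length|.

    V3, V_gt  : (3, N) predicted / ground-truth dense landmarks.
    triangles : (M, 3) integer indices of the landmarks forming each triangle.
    """
    loss = 0.0
    for a, b, c in triangles:
        for n, m in ((a, b), (b, c), (a, c)):   # the {n, m} pairs within a triangle
            pred = np.linalg.norm(V3[:, n] - V3[:, m])
            gt = np.linalg.norm(V_gt[:, n] - V_gt[:, m])
            loss += abs(pred - gt)
    return loss

# Eq. (7): weighted combination, using the loss weights given in the text.
WEIGHTS = {"head": 20.0, "cam": 2.0, "img": 0.01, "rot": 10.0, "ed": 2.0, "L": 1.25}

def total_loss(terms):
    """terms: dict mapping 'head', 'cam', 'img', 'rot', 'ed', 'L' to loss values."""
    return sum(WEIGHTS[k] * terms[k] for k in WEIGHTS)
```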
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 599, + 282, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 599, + 282, + 612 + ], + "spans": [ + { + "bbox": [ + 132, + 599, + 282, + 612 + ], + "type": "text", + "content": "4 Experimental Results" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 623, + 280, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 623, + 280, + 635 + ], + "spans": [ + { + "bbox": [ + 132, + 623, + 280, + 635 + ], + "type": "text", + "content": "4.1 Implementation Details" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 640, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 640, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 132, + 640, + 481, + 666 + ], + "type": "text", + "content": "The spatial dimensions " + }, + { + "bbox": [ + 132, + 640, + 481, + 666 + ], + "type": "inline_equation", + "content": "H_{t}, W_{t}" + }, + { + "bbox": [ + 132, + 640, + 481, + 666 + ], + "type": "text", + "content": " of the feature map " + }, + { + "bbox": [ + 132, + 640, + 481, + 666 + ], + "type": "inline_equation", + "content": "\\phi_t" + }, + { + "bbox": [ + 132, + 640, + 481, + 666 + ], + "type": "text", + "content": " were set to " + }, + { + "bbox": [ + 132, + 640, + 481, + 666 + ], + "type": "inline_equation", + "content": "\\frac{192}{2^{5 - t}}" + }, + { + "bbox": [ + 132, + 640, + 481, + 666 + ], + "type": "text", + "content": ". The number of sampling points " + }, + { + "bbox": [ + 132, + 640, + 481, + 666 + ], + "type": "inline_equation", + "content": "N_{t}^{P}" + }, + { + "bbox": [ + 132, + 640, + 481, + 666 + ], + "type": "text", + "content": " was set to " + }, + { + "bbox": [ + 132, + 640, + 481, + 666 + ], + "type": "inline_equation", + "content": "18 \\times 18 = 324" + }, + { + "bbox": [ + 132, + 640, + 481, + 666 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 132, + 640, + 481, + 666 + ], + "type": "inline_equation", + "content": "t = 0" + }, + { + "bbox": [ + 132, + 640, + 481, + 666 + ], + "type": "text", + "content": ", and to 305" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 271, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 271, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 271, + 102 + ], + "type": "text", + "content": "S. Chun and J. Y. Chang" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 115, + 479, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 115, + 479, + 236 + ], + "spans": [ + { + "bbox": [ + 130, + 115, + 479, + 236 + ], + "type": "text", + "content": "when " + }, + { + "bbox": [ + 130, + 115, + 479, + 236 + ], + "type": "inline_equation", + "content": "t > 0" + }, + { + "bbox": [ + 130, + 115, + 479, + 236 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 130, + 115, + 479, + 236 + ], + "type": "inline_equation", + "content": "N^V" + }, + { + "bbox": [ + 130, + 115, + 479, + 236 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 115, + 479, + 236 + ], + "type": "inline_equation", + "content": "N^L" + }, + { + "bbox": [ + 130, + 115, + 479, + 236 + ], + "type": "text", + "content": " were set to 1220 and 68, respectively. For the ARKitFace training dataset [25], we selected a random sample and used its corresponding ground-truth dense 3D landmarks and head rotation as the initial landmarks " + }, + { + "bbox": [ + 130, + 115, + 479, + 236 + ], + "type": "inline_equation", + "content": "V_0" + }, + { + "bbox": [ + 130, + 115, + 479, + 236 + ], + "type": "text", + "content": " and head rotation " + }, + { + "bbox": [ + 130, + 115, + 479, + 236 + ], + "type": "inline_equation", + "content": "R_0" + }, + { + "bbox": [ + 130, + 115, + 479, + 236 + ], + "type": "text", + "content": " for the TRG. The initial correction parameter " + }, + { + "bbox": [ + 130, + 115, + 479, + 236 + ], + "type": "inline_equation", + "content": "c_0" + }, + { + "bbox": [ + 130, + 115, + 479, + 236 + ], + "type": "text", + "content": " was set to " + }, + { + "bbox": [ + 130, + 115, + 479, + 236 + ], + "type": "inline_equation", + "content": "\\{s_0 = 1, \\tilde{\\tau}_0^{x,\\mathrm{face}} = 0, \\tilde{\\tau}_0^{y,\\mathrm{face}} = 0\\}" + }, + { + "bbox": [ + 130, + 115, + 479, + 236 + ], + "type": "text", + "content": ". For the TRG training, both the ARKitFace training data [25] and 300W-LP [55] were utilized. Unless otherwise stated, the performances of models trained using both datasets are presented. When a fair comparison with the state-of-the-art methods is required, results from models trained solely on the ARKitFace training dataset are also provided. Please refer to the supplementary material for more implementation details." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 250, + 205, + 261 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 250, + 205, + 261 + ], + "spans": [ + { + "bbox": [ + 132, + 250, + 205, + 261 + ], + "type": "text", + "content": "4.2 Datasets" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 266, + 479, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 266, + 479, + 337 + ], + "spans": [ + { + "bbox": [ + 130, + 266, + 479, + 337 + ], + "type": "text", + "content": "ARKitFace [25] is a dataset that provides the 6DoF head poses, the dense 3D landmarks, and intrinsic camera parameters. It is collected from selfie scenarios, with data gathered at a camera-to-face distance ranging from 0.3 to 0.9 meters, resulting in images significantly influenced by strong perspective transformations. Following previous work [25], we used 717,840 frames from 400 subjects for training, and 184,884 frames from 100 subjects for testing." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 338, + 479, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 338, + 479, + 396 + ], + "spans": [ + { + "bbox": [ + 130, + 338, + 479, + 396 + ], + "type": "text", + "content": "300W-LP [55] is an extended synthetic dataset derived from the 300W [39], which itself is composed of several standardized datasets, including AFW [54], HELEN [51], IBUG [40], and LFPW [2]. Through face profiling, the 300W-LP dataset provides 122,450 synthesized images from approximately 4,000 original pictures." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 397, + 479, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 397, + 479, + 469 + ], + "spans": [ + { + "bbox": [ + 130, + 397, + 479, + 469 + ], + "type": "text", + "content": "BIWI [15] provides 6DoF head poses, a 3D neutral face mesh for each subject, and intrinsic camera parameters. Since BIWI does not provide ground-truth face meshes for each frame, our evaluation focuses solely on the head poses. BIWI serves exclusively as test data to assess the effectiveness of our method. We evaluated the performance of our proposed model by following the protocol used in previous studies [25,46]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 483, + 257, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 483, + 257, + 495 + ], + "spans": [ + { + "bbox": [ + 132, + 483, + 257, + 495 + ], + "type": "text", + "content": "4.3 Evaluation Metrics" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "spans": [ + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "text", + "content": "For head rotation accuracy assessment, we follow the approach used in previous studies [1,23,25,46], measuring rotation errors separately for roll, pitch, and yaw. Additionally, to provide a comprehensive understanding of the head rotation estimation performance, we also present the mean absolute error " + }, + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "inline_equation", + "content": "(\\mathrm{MAE}_r)" + }, + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "text", + "content": " and geodesic error (GE) [9]. For evaluating the accuracy of head translation, we calculate the errors for translation along the " + }, + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "inline_equation", + "content": "x-" + }, + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "inline_equation", + "content": "y-" + }, + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "text", + "content": "-axes, represented as " + }, + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "inline_equation", + "content": "t_x" + }, + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "inline_equation", + "content": "t_y" + }, + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "inline_equation", + "content": "t_z" + }, + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "text", + "content": " errors, respectively. Similar to head rotation, we present the mean absolute error performance for head translation, denoted as " + }, + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "inline_equation", + "content": "\\mathrm{MAE}_t" + }, + { + "bbox": [ + 130, + 500, + 479, + 630 + ], + "type": "text", + "content": ". 
Following previous research [25], we utilize the average 3D distance (ADD) metric [22] to present a holistic evaluation of the method's performance in estimating both rotation and translation:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 186, + 635, + 479, + 667 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 635, + 479, + 667 + ], + "spans": [ + { + "bbox": [ + 186, + 635, + 479, + 667 + ], + "type": "interline_equation", + "content": "\\mathrm {A D D} = \\frac {1}{N ^ {V}} \\sum_ {n = 1} ^ {N ^ {V}} \\| \\left(R _ {3} ^ {\\text {m a t}} V _ {n} ^ {*} + T _ {3}\\right) - \\left(R ^ {*}, \\text {m a t} V _ {n} ^ {*} + T ^ {*}\\right) \\| _ {2}. \\tag {8}", + "image_path": "af3d0bce6f965cba69560c9ec5bb8c882428240d3d2fba28b4359a4a90960d5a.jpg" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "type": "text", + "content": "TRG" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 132, + 180, + 483, + 267 + ], + "blocks": [ + { + "bbox": [ + 130, + 114, + 482, + 169 + ], + "lines": [ + { + "bbox": [ + 130, + 114, + 482, + 169 + ], + "spans": [ + { + "bbox": [ + 130, + 114, + 482, + 169 + ], + "type": "text", + "content": "Table 1: Ablation study of TRG on ARKitFace and BIWI. We explored the effects of the bidirectional interaction structure and utilizing the correction parameter. We also investigated the importance of utilizing face geometry in the 6DoF head pose estimation process and the effectiveness of the landmark-to-image alignment method. \"MS\" means multi-scale features." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 132, + 180, + 483, + 267 + ], + "lines": [ + { + "bbox": [ + 132, + 180, + 483, + 267 + ], + "spans": [ + { + "bbox": [ + 132, + 180, + 483, + 267 + ], + "type": "table", + "html": "
<table><tr><td rowspan=2>Method</td><td colspan=4>ARKitFace</td><td colspan=3>BIWI</td></tr>
<tr><td>Mean ↓</td><td>MAEr ↓</td><td>MAEt ↓</td><td>ADD ↓</td><td>MAEr ↓</td><td>MAEt ↓</td><td>ADD ↓</td></tr>
<tr><td>1-iter (w/o MS)</td><td>1.69</td><td>1.00</td><td>3.70</td><td>8.93</td><td>3.28</td><td>13.74</td><td>32.28</td></tr>
<tr><td>2-iter (w/o MS)</td><td>1.66</td><td>0.89</td><td>3.61</td><td>8.72</td><td>2.95</td><td>13.77</td><td>31.28</td></tr>
<tr><td>3-iter (w/o MS)</td><td>1.57</td><td>0.88</td><td>3.63</td><td>8.71</td><td>2.59</td><td>13.67</td><td>31.52</td></tr>
<tr><td>Tt-prediction</td><td>1.66</td><td>0.92</td><td>4.64</td><td>11.66</td><td>8.81</td><td>1.7K</td><td>5.1K</td></tr>
<tr><td>Landmark-free baseline</td><td>-</td><td>1.03</td><td>3.86</td><td>9.34</td><td>3.87</td><td>18.42</td><td>42.22</td></tr>
<tr><td>Grid sampled baseline</td><td>1.59</td><td>0.95</td><td>3.74</td><td>8.99</td><td>2.98</td><td>14.58</td><td>35.04</td></tr>
<tr><td>TRG (Ours)</td><td>1.58</td><td>0.91</td><td>3.62</td><td>8.68</td><td>2.75</td><td>12.97</td><td>29.46</td></tr></table>
", + "image_path": "4f7ef870c6fd99d10d92019b206a766d4bb689132b53304337eaae115f239c90.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 291, + 482, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 291, + 482, + 363 + ], + "spans": [ + { + "bbox": [ + 130, + 291, + 482, + 363 + ], + "type": "text", + "content": "To assess the 3D landmark prediction accuracy of our proposed method, we evaluate the median and average distances between the estimated and ground-truth dense landmarks [25]. The effectiveness of our method is evaluated based on the estimated values of " + }, + { + "bbox": [ + 130, + 291, + 482, + 363 + ], + "type": "inline_equation", + "content": "V_{3}" + }, + { + "bbox": [ + 130, + 291, + 482, + 363 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 291, + 482, + 363 + ], + "type": "inline_equation", + "content": "R_{3}" + }, + { + "bbox": [ + 130, + 291, + 482, + 363 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 291, + 482, + 363 + ], + "type": "inline_equation", + "content": "T_{3}" + }, + { + "bbox": [ + 130, + 291, + 482, + 363 + ], + "type": "text", + "content": " from the final face regressor at " + }, + { + "bbox": [ + 130, + 291, + 482, + 363 + ], + "type": "inline_equation", + "content": "t = 3" + }, + { + "bbox": [ + 130, + 291, + 482, + 363 + ], + "type": "text", + "content": ". The unit for median, mean, translation error, and ADD is in millimeters, and the unit for rotation error is in degrees." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 381, + 272, + 393 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 381, + 272, + 393 + ], + "spans": [ + { + "bbox": [ + 131, + 381, + 272, + 393 + ], + "type": "text", + "content": "4.4 Ablation Experiments" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 402, + 482, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 402, + 482, + 568 + ], + "spans": [ + { + "bbox": [ + 130, + 402, + 482, + 568 + ], + "type": "text", + "content": "Effectiveness of bidirectional interaction structure. In this experiment, we delve into the significance of explicit bidirectional interaction between the 6DoF head pose and face geometry. To investigate this, we observe the model's performance variations based on the number of interactions between these two types of information. For our experiments, we designed 1-iteration, 2-iteration, and 3-iteration baselines and then compared their performance. The 1-iteration baseline model simultaneously regresses the face geometry and head pose using " + }, + { + "bbox": [ + 130, + 402, + 482, + 568 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_1(\\cdot)" + }, + { + "bbox": [ + 130, + 402, + 482, + 568 + ], + "type": "text", + "content": " but without an iterative inference process. The 2- and 3-iteration baseline models enhance this process by incorporating the iterative inference approach. They project the predicted dense landmarks onto the image feature, with all other aspects remaining consistent with the 1-iteration baseline. Similar to the 1-iteration baseline, they utilize only " + }, + { + "bbox": [ + 130, + 402, + 482, + 568 + ], + "type": "inline_equation", + "content": "\\phi_1" + }, + { + "bbox": [ + 130, + 402, + 482, + 568 + ], + "type": "text", + "content": " and do not employ multi-scale features. 
The key distinction between the 3-iteration models and TRG lies in the utilization of multi-scale features." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 570, + 482, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 570, + 482, + 640 + ], + "spans": [ + { + "bbox": [ + 130, + 570, + 482, + 640 + ], + "type": "text", + "content": "The evaluation on the ARKitFace test data, as presented in Table 1, indicates that the performance in estimating the face geometry and head pose improves with the increasing number of iterations. This improvement is attributed to the reduction in ambiguity between the face geometry and 6DoF head pose as the number of bidirectional interactions increases. The BIWI evaluation results further corroborate the effectiveness of the bidirectional interaction method." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 641, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 482, + 665 + ], + "type": "text", + "content": "Use of correction parameter. In this experiment, we investigate the rationale behind estimating the correction parameter " + }, + { + "bbox": [ + 130, + 641, + 482, + 665 + ], + "type": "inline_equation", + "content": "c_{t}" + }, + { + "bbox": [ + 130, + 641, + 482, + 665 + ], + "type": "text", + "content": " instead of directly estimating head" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 270, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 270, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 270, + 102 + ], + "type": "text", + "content": "S. Chun and J. Y. Chang" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 134, + 117, + 220, + 193 + ], + "blocks": [ + { + "bbox": [ + 134, + 117, + 220, + 193 + ], + "lines": [ + { + "bbox": [ + 134, + 117, + 220, + 193 + ], + "spans": [ + { + "bbox": [ + 134, + 117, + 220, + 193 + ], + "type": "image", + "image_path": "66254c28f96a6499a9c6266840f9b6e98a0ffebd9d53ed0c634c01aac60a66cc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 205, + 480, + 248 + ], + "lines": [ + { + "bbox": [ + 130, + 205, + 480, + 248 + ], + "spans": [ + { + "bbox": [ + 130, + 205, + 480, + 248 + ], + "type": "text", + "content": "Fig. 3: The distribution of ground-truth translation and correction parameters in ARKitFace and BIWI. The colors blue, green, and brown represent the distributions of the ARKitFace training data, ARKitFace test data, and BIWI dataset, respectively. The symbol * denotes ground-truth." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 220, + 118, + 304, + 194 + ], + "blocks": [ + { + "bbox": [ + 220, + 118, + 304, + 194 + ], + "lines": [ + { + "bbox": [ + 220, + 118, + 304, + 194 + ], + "spans": [ + { + "bbox": [ + 220, + 118, + 304, + 194 + ], + "type": "image", + "image_path": "e6323f87d6abbb622c81d4b20ac09fd9d86aee026003b5c9d1dbf099c8aad482.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 309, + 118, + 394, + 194 + ], + "blocks": [ + { + "bbox": [ + 309, + 118, + 394, + 194 + ], + "lines": [ + { + "bbox": [ + 309, + 118, + 394, + 194 + ], + "spans": [ + { + "bbox": [ + 309, + 118, + 394, + 194 + ], + "type": "image", + "image_path": "1f64105f660563317f21db533cf64207e32da044e977fd7f4140d6b07108d03b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 395, + 118, + 479, + 194 + ], + "blocks": [ + { + "bbox": [ + 395, + 118, + 479, + 194 + ], + "lines": [ + { + "bbox": [ + 395, + 118, + 479, + 194 + ], + "spans": [ + { + "bbox": [ + 395, + 118, + 479, + 194 + ], + "type": "image", + "image_path": "453f97d3caa55cc24482cc928b1a496b441d257a96f90c31ff438dcaf90468eb.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 277, + 480, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 277, + 480, + 348 + ], + "spans": [ + { + "bbox": [ + 130, + 277, + 480, + 348 + ], + "type": "text", + "content": "translation " + }, + { + "bbox": [ + 130, + 277, + 480, + 348 + ], + "type": "inline_equation", + "content": "T_{t}" + }, + { + "bbox": [ + 130, + 277, + 480, + 348 + ], + "type": "text", + "content": ". To elucidate this, we compare the performance of two models: the " + }, + { + "bbox": [ + 130, + 277, + 480, + 348 + ], + "type": "inline_equation", + "content": "T_{t}" + }, + { + "bbox": [ + 130, + 277, + 480, + 348 + ], + "type": "text", + "content": "-prediction baseline, which directly estimates head translation " + }, + { + "bbox": [ + 130, + 277, + 480, + 348 + ], + "type": "inline_equation", + "content": "T_{t}" + }, + { + "bbox": [ + 130, + 277, + 480, + 348 + ], + "type": "text", + "content": " and TRG. According to Table 1, while the " + }, + { + "bbox": [ + 130, + 277, + 480, + 348 + ], + "type": "inline_equation", + "content": "T_{t}" + }, + { + "bbox": [ + 130, + 277, + 480, + 348 + ], + "type": "text", + "content": "-prediction baseline demonstrates accurate estimation of head translation on the ARKitFace test data, its performance significantly declines on the BIWI dataset. We attribute this discrepancy to the differing translation distributions between the ARKitFace and BIWI datasets." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 350, + 480, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 350, + 480, + 456 + ], + "spans": [ + { + "bbox": [ + 130, + 350, + 480, + 456 + ], + "type": "text", + "content": "The first and second columns of Fig. 3 illustrate the ground-truth head translation distributions for ARKitFace and BIWI. While the translation distribution in the ARKitFace training data closely matches its test data, it significantly differs from that of BIWI. 
This discrepancy is particularly noticeable in the " + }, + { + "bbox": [ + 130, + 350, + 480, + 456 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 130, + 350, + 480, + 456 + ], + "type": "text", + "content": "-axis translations, indicating substantial divergence between the ARKitFace training data and BIWI. To achieve generalization from the ARKitFace training data to BIWI, a model must effectively extrapolate the " + }, + { + "bbox": [ + 130, + 350, + 480, + 456 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 130, + 350, + 480, + 456 + ], + "type": "text", + "content": "-axis translation. However, as evidenced by Table 1, this extrapolation poses a significant challenge for the direct translation estimation model." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 460, + 480, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 460, + 480, + 555 + ], + "spans": [ + { + "bbox": [ + 130, + 460, + 480, + 555 + ], + "type": "text", + "content": "The third and fourth columns of Fig. 3 visualize the distribution of the ground-truth correction parameters for both ARKitFace and BIWI datasets. A key observation here is that the variation in the correction parameter distribution is significantly smaller compared to the translation distribution. Based on these observations, we can conclude that shifting the estimation target from " + }, + { + "bbox": [ + 130, + 460, + 480, + 555 + ], + "type": "inline_equation", + "content": "T_{t}" + }, + { + "bbox": [ + 130, + 460, + 480, + 555 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 130, + 460, + 480, + 555 + ], + "type": "inline_equation", + "content": "c_{t}" + }, + { + "bbox": [ + 130, + 460, + 480, + 555 + ], + "type": "text", + "content": " effectively reduces distribution discrepancies. This strategic redefinition enhances the model's generalizability, particularly for data that fall outside the training distribution, as evidenced in Table 1." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 558, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 558, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 558, + 481, + 665 + ], + "type": "text", + "content": "The importance of utilizing facial geometry and the effectiveness of landmark-to-image alignment technique. For the purpose of our experiment, we designed a landmark-free baseline that does not estimate facial geometry " + }, + { + "bbox": [ + 130, + 558, + 481, + 665 + ], + "type": "inline_equation", + "content": "\\{V_t\\}_{t=1}^3" + }, + { + "bbox": [ + 130, + 558, + 481, + 665 + ], + "type": "text", + "content": ". Given the absence of facial geometry information, the landmark-free baseline is unable to utilize landmark-to-image alignment techniques. Consequently, it extracts grid sampled features from " + }, + { + "bbox": [ + 130, + 558, + 481, + 665 + ], + "type": "inline_equation", + "content": "\\{\\phi_t\\}_{t=1}^3" + }, + { + "bbox": [ + 130, + 558, + 481, + 665 + ], + "type": "text", + "content": " and inputs them into a face regressor. However, due to significant structural differences from TRG, we mitigate these differences by also designing a grid sampled baseline for incremental comparison. 
This grid sampled baseline is similar to the TRG, except" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "type": "text", + "content": "TRG" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 480, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 114, + 219, + 171 + ], + "blocks": [ + { + "bbox": [ + 133, + 114, + 219, + 171 + ], + "lines": [ + { + "bbox": [ + 133, + 114, + 219, + 171 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 219, + 171 + ], + "type": "image", + "image_path": "e79d402db21b487599533f739557577ec0f8519bee357cce556ce22f0d9355c7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 133, + 172, + 219, + 223 + ], + "blocks": [ + { + "bbox": [ + 133, + 172, + 219, + 223 + ], + "lines": [ + { + "bbox": [ + 133, + 172, + 219, + 223 + ], + "spans": [ + { + "bbox": [ + 133, + 172, + 219, + 223 + ], + "type": "image", + "image_path": "78ce7ea9e3785344dc870c8edb47fe95dc9bc0c852fba119ca612f211cf900a6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 232, + 482, + 288 + ], + "lines": [ + { + "bbox": [ + 130, + 232, + 482, + 288 + ], + "spans": [ + { + "bbox": [ + 130, + 232, + 482, + 288 + ], + "type": "text", + "content": "Fig. 4: Qualitative comparison on the ARKitFace and BIWI datasets. The first and second rows show visualized results for ARKitFace and BIWI, respectively. The colors cyan, pink, gold, and gray represent JMLR, PerspNet, TRG, and ground truth, respectively. The red, green, and blue axes respectively represent the X, Y, and Z axes of the camera coordinate system." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 219, + 114, + 306, + 171 + ], + "blocks": [ + { + "bbox": [ + 219, + 114, + 306, + 171 + ], + "lines": [ + { + "bbox": [ + 219, + 114, + 306, + 171 + ], + "spans": [ + { + "bbox": [ + 219, + 114, + 306, + 171 + ], + "type": "image", + "image_path": "6ce119f5711db7a66b8b31dbbb8bc31bd39cb6202aaa6115cd73dcb0ad484f29.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 220, + 172, + 306, + 223 + ], + "blocks": [ + { + "bbox": [ + 220, + 172, + 306, + 223 + ], + "lines": [ + { + "bbox": [ + 220, + 172, + 306, + 223 + ], + "spans": [ + { + "bbox": [ + 220, + 172, + 306, + 223 + ], + "type": "image", + "image_path": "6a05fca9497ca74bfd4b8f6a85b2ac2d4c0ab130b2adc41152d986eb5459a871.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 306, + 114, + 392, + 171 + ], + "blocks": [ + { + "bbox": [ + 306, + 114, + 392, + 171 + ], + "lines": [ + { + "bbox": [ + 306, + 114, + 392, + 171 + ], + "spans": [ + { + "bbox": [ + 306, + 114, + 392, + 171 + ], + "type": "image", + "image_path": "c2ef4dcc973780196eac307ca6b0499787ab0edc17b95715d15ad2bd02309ac3.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 306, + 172, + 392, + 223 + ], + "blocks": [ + { + "bbox": [ + 306, + 172, + 392, + 223 + ], + "lines": [ + { + "bbox": [ + 306, + 172, + 392, + 223 + ], + "spans": [ + { + "bbox": [ + 306, + 172, + 392, + 223 + ], + "type": "image", + "image_path": "e6fcdc5b7e0156603dde8ba5e1339e4f8821166c928365ebb8161bd87f47b58b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 392, + 114, + 480, + 171 + ], + "blocks": [ + { + "bbox": [ + 392, + 114, + 480, + 171 + ], + "lines": [ + { + "bbox": [ + 392, + 114, + 480, + 171 + ], + "spans": [ + { + "bbox": [ + 392, + 114, + 480, + 171 + ], + "type": "image", + "image_path": "03c7031213aff0cb3e4db5f04098e945fcb6f13c77bafe0fd3df20fc4817bf5d.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 392, + 172, + 480, + 223 + ], + "blocks": [ + { + "bbox": [ + 392, + 172, + 480, + 223 + ], + "lines": [ + { + "bbox": [ + 392, + 172, + 480, + 223 + ], + "spans": [ + { + "bbox": [ + 392, + 172, + 480, + 223 + ], + "type": "image", + "image_path": "457fe3e7ca3c6af4b5a678cd486d61ca8a69f028ad5647c7912cc64af05ff18e.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 312, + 480, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 312, + 480, + 347 + ], + "spans": [ + { + "bbox": [ + 130, + 312, + 480, + 347 + ], + "type": "text", + "content": "it does not employ the landmark-to-image alignment method, indicating that the primary distinction from the landmark-free baseline lies in whether facial geometry is estimated." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 348, + 480, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 348, + 480, + 420 + ], + "spans": [ + { + "bbox": [ + 130, + 348, + 480, + 420 + ], + "type": "text", + "content": "According to our findings, as presented in the Table 1, the landmark-free baseline underperforms compared to the grid sampled baseline. This supports our hypothesis that landmark information should be integrated during the 6DoF head pose estimation process. Furthermore, our results demonstrate that TRG outperforms the grid sampled baseline, affirming the superiority of our landmark-to-image alignment strategy." + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 133, + 507, + 329, + 578 + ], + "blocks": [ + { + "bbox": [ + 130, + 441, + 326, + 498 + ], + "lines": [ + { + "bbox": [ + 130, + 441, + 326, + 498 + ], + "spans": [ + { + "bbox": [ + 130, + 441, + 326, + 498 + ], + "type": "text", + "content": "Table 2: Comparison with previous methods for 6DoF head pose estimation on ARKit-Face test dataset. Models trained with multiple datasets are marked with the symbol " + }, + { + "bbox": [ + 130, + 441, + 326, + 498 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 130, + 441, + 326, + 498 + ], + "type": "text", + "content": ", and retrained models are indicated by the symbol " + }, + { + "bbox": [ + 130, + 441, + 326, + 498 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 130, + 441, + 326, + 498 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 133, + 507, + 329, + 578 + ], + "lines": [ + { + "bbox": [ + 133, + 507, + 329, + 578 + ], + "spans": [ + { + "bbox": [ + 133, + 507, + 329, + 578 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>MAEr</td><td>GE</td><td>MAEt</td><td>ADD</td></tr>
<tr><td>img2pose [1,25]</td><td>5.55</td><td>-</td><td>7.02</td><td>20.54</td></tr>
<tr><td>Direct 6DoF Regress [25]</td><td>1.87</td><td>-</td><td>9.06</td><td>21.39</td></tr>
<tr><td>Refined Pix2Pose [25,34]</td><td>2.35</td><td>-</td><td>14.00</td><td>36.44</td></tr>
<tr><td>JMLR [18] †</td><td>1.16</td><td>2.39</td><td>4.86</td><td>11.87</td></tr>
<tr><td>PerspNet [25]</td><td>0.99</td><td>1.81</td><td>4.18</td><td>10.01</td></tr>
<tr><td>TRG (Ours)</td><td>0.92</td><td>1.80</td><td>3.64</td><td>8.74</td></tr>
<tr><td>TRG (Ours) †</td><td>0.91</td><td>1.84</td><td>3.62</td><td>8.68</td></tr></table>
", + "image_path": "b8e87098f183a5ea68b757514e6365ead151676aaab98ee8f689f6ce2f1f7f9b.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 342, + 502, + 479, + 572 + ], + "blocks": [ + { + "bbox": [ + 339, + 448, + 482, + 491 + ], + "lines": [ + { + "bbox": [ + 339, + 448, + 482, + 491 + ], + "spans": [ + { + "bbox": [ + 339, + 448, + 482, + 491 + ], + "type": "text", + "content": "Table 3: Comparison with previous methods for dense 3D landmark estimation on ARKitFace test dataset." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 342, + 502, + 479, + 572 + ], + "lines": [ + { + "bbox": [ + 342, + 502, + 479, + 572 + ], + "spans": [ + { + "bbox": [ + 342, + 502, + 479, + 572 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Median</td><td>Mean</td></tr>
<tr><td>PRNet [17]</td><td>1.97</td><td>2.05</td></tr>
<tr><td>3DDFA-v2 [19]</td><td>2.35</td><td>2.31</td></tr>
<tr><td>Deng et al. [13]</td><td>2.46</td><td>2.55</td></tr>
<tr><td>JMLR [18] † *</td><td>1.86</td><td>1.94</td></tr>
<tr><td>PerspNet [25]</td><td>1.72</td><td>1.76</td></tr>
<tr><td>TRG (Ours)</td><td>1.55</td><td>1.61</td></tr>
<tr><td>TRG (Ours) *</td><td>1.55</td><td>1.58</td></tr></table>
", + "image_path": "9e6b1e6e6005fc0f46708575be99a2186ebb2bd614039bd7042ddb7bbc139ca1.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 595, + 380, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 595, + 380, + 608 + ], + "spans": [ + { + "bbox": [ + 132, + 595, + 380, + 608 + ], + "type": "text", + "content": "4.5 Comparison with State-of-the-Art Methods" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "type": "text", + "content": "In this experiment, we conducted a benchmark of our proposed method against existing approaches for 6DoF head pose estimation. The evaluation results on the ARKitFace and BIWI datasets are presented in Tables 2, 3 and 4. Model retrained for this comparison is marked with the symbol " + }, + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 130, + 617, + 482, + 666 + ], + "type": "text", + "content": ". Multiple datasets" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 271, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 271, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 271, + 102 + ], + "type": "text", + "content": "S. Chun and J. Y. Chang" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 186 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 186 + ], + "type": "text", + "content": "were used for the model, which could be trained on multiple datasets. However, PerspNet was trained exclusively using the ARKitFace train dataset due to the difficulty of using two datasets [25,55] with differing 3D face mesh topologies. To ensure a fair comparison, we also present the results of TRG trained solely on the ARKitFace train dataset. Models trained on multiple datasets are denoted with the symbol " + }, + { + "bbox": [ + 130, + 116, + 479, + 186 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 130, + 116, + 479, + 186 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 188, + 479, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 188, + 479, + 235 + ], + "spans": [ + { + "bbox": [ + 130, + 188, + 479, + 235 + ], + "type": "text", + "content": "Evaluation on ARKitFace [25]. Img2pose directly infers the 6DoF head pose from images without utilizing face geometry information. However, the absence of face geometry information can lead to increased face size ambiguity, potentially worsening the performance of head pose inference, as can be seen in Table 2." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 236, + 481, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 236, + 481, + 437 + ], + "spans": [ + { + "bbox": [ + 130, + 236, + 481, + 437 + ], + "type": "text", + "content": "JMLR and PerspNet do not incorporate head pose information during the face geometry inference process. The predicted face geometry, derived without considering head pose information, is relatively inaccurate (Table 3). Consequently, methods that predict the 6DoF head pose based on this relatively imprecise geometry yield inaccurate results (Table 2). In contrast, TRG actively integrates face geometry information into the head pose estimation process. According to Table 2, TRG achieves state-of-the-art in head pose estimation, attributed to its explicit bidirectional interaction structure. Furthermore, owing to its depth-aware landmark prediction architecture, TRG maintains stable face landmark prediction accuracy even in selfie scenarios, as shown in Table 3. Fig. 4 visually illustrates the performance of TRG and existing models [18,25] for head pose estimation and face landmark prediction. When the geometries predicted by each model are aligned with the image, they appear to be well-aligned. However, a stark contrast in model performance becomes evident when comparing the ground-truth geometry with the predicted geometries in the 3D camera space. JMLR and PerspNet struggle to accurately predict the actual size of a human face, resulting in high translation errors." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 439, + 481, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 439, + 481, + 559 + ], + "spans": [ + { + "bbox": [ + 130, + 439, + 481, + 559 + ], + "type": "text", + "content": "Evaluation on BIWI [15]. According to Table 4, TRG significantly outperforms existing optimization-based methods [18,25,56] in head translation estimation. This superior performance is attributed to TRG's design, which effectively leverages the synergy between face geometry and head translation. Furthermore, TRG's landmark-to-image alignment method enables it to achieve high head rotation estimation accuracy, surpassing even methods that solely estimate 3D head rotation. Fig. 4 qualitatively demonstrates TRG's exceptional head pose estimation performance. To visualize how closely the predicted head pose matches the ground-truth pose, we utilized the ground-truth neutral mesh and the predicted head pose." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 575, + 219, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 575, + 219, + 586 + ], + "spans": [ + { + "bbox": [ + 132, + 575, + 219, + 586 + ], + "type": "text", + "content": "4.6 Limitations" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 594, + 479, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 479, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 479, + 665 + ], + "type": "text", + "content": "In the process of deriving depth from images using the proposed method, the requirement for camera intrinsics emerges as a necessary component. This necessity indicates that, in the absence of camera intrinsics, while it is still possible to estimate relative depth among faces in an image, achieving precise depth measurement poses a challenge. 
To address this challenge and ensure accurate depth determination between the face and the camera, incorporating algorithms that" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "type": "text", + "content": "TRG" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 136, + 168, + 477, + 402 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 480, + 159 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 480, + 159 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 480, + 159 + ], + "type": "text", + "content": "Table 4: Comparison with previous methods for 6DoF head pose estimation on BIWI dataset. The models were evaluated using BIWI solely for testing purposes, without utilizing it as training data. We used the camera intrinsics provided by BIWI for the evaluation of the head pose estimation performance of MICA [56]." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 136, + 168, + 477, + 402 + ], + "lines": [ + { + "bbox": [ + 136, + 168, + 477, + 402 + ], + "spans": [ + { + "bbox": [ + 136, + 168, + 477, + 402 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>Yaw</td><td>Pitch</td><td>Roll</td><td>MAEr</td><td>GE</td><td>tx</td><td>ty</td><td>tz</td><td>MAEt</td><td>ADD</td></tr>
<tr><td>Dlib [26]</td><td>11.86</td><td>13.00</td><td>19.56</td><td>14.81</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>3DDFA [55]</td><td>5.50</td><td>41.90</td><td>13.22</td><td>19.07</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>EVA-GCN [45]</td><td>4.01</td><td>4.78</td><td>2.98</td><td>3.92</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>HopeNet [38]</td><td>4.81</td><td>6.61</td><td>3.27</td><td>4.89</td><td>9.53</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>QuatNet [23]</td><td>4.01</td><td>5.49</td><td>2.94</td><td>4.15</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>Liu et al. [31]</td><td>4.12</td><td>5.61</td><td>3.15</td><td>4.29</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>FSA-Net [46]</td><td>4.27</td><td>4.96</td><td>2.76</td><td>4.00</td><td>7.64</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>HPE [24]</td><td>4.57</td><td>5.18</td><td>3.12</td><td>4.29</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>WHENet-V [53]</td><td>3.60</td><td>4.10</td><td>2.73</td><td>3.48</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>RetinaFace [12] ★</td><td>4.07</td><td>6.42</td><td>2.97</td><td>4.49</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>FDN [48]</td><td>4.52</td><td>4.70</td><td>2.56</td><td>3.93</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>MNN [43]</td><td>3.98</td><td>4.61</td><td>2.39</td><td>3.66</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>TriNet [3]</td><td>3.05</td><td>4.76</td><td>4.11</td><td>3.97</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>6DRepNet [21]</td><td>3.24</td><td>4.48</td><td>2.68</td><td>3.47</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>Cao et al. [4]</td><td>4.21</td><td>3.52</td><td>3.10</td><td>3.61</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>TokenHPE [47]</td><td>3.95</td><td>4.51</td><td>2.71</td><td>3.72</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>Cobo et al. [9]</td><td>4.58</td><td>4.65</td><td>2.71</td><td>3.98</td><td>7.30</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>img2pose [1] ★</td><td>4.57</td><td>3.55</td><td>3.24</td><td>3.79</td><td>7.10</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr>
<tr><td>Direct 6DoF Regress [25]</td><td>16.49</td><td>14.03</td><td>5.81</td><td>12.11</td><td>-</td><td>62.36</td><td>85.01</td><td>366.52</td><td>171.30</td><td>562.38</td></tr>
<tr><td>Refined Pix2Pose [25,34]</td><td>5.75</td><td>5.06</td><td>11.23</td><td>7.35</td><td>-</td><td>16.82</td><td>21.30</td><td>255.36</td><td>97.83</td><td>356.32</td></tr>
<tr><td>MICA [56] ★</td><td>5.40</td><td>7.17</td><td>3.80</td><td>5.46</td><td>-</td><td>9.32</td><td>13.66</td><td>60.13</td><td>27.70</td><td>68.03</td></tr>
<tr><td>JMLR [18] † ★</td><td>6.31</td><td>6.17</td><td>3.72</td><td>5.40</td><td>8.61</td><td>8.66</td><td>7.27</td><td>32.63</td><td>16.19</td><td>39.71</td></tr>
<tr><td>PerspNet [25]</td><td>3.10</td><td>3.37</td><td>2.38</td><td>2.95</td><td>5.61</td><td>4.15</td><td>6.43</td><td>46.69</td><td>19.09</td><td>100.09</td></tr>
<tr><td>TRG (Ours)</td><td>3.28</td><td>3.52</td><td>1.87</td><td>2.89</td><td>5.68</td><td>8.41</td><td>7.38</td><td>27.13</td><td>14.31</td><td>32.10</td></tr>
<tr><td>TRG (Ours) ★</td><td>3.04</td><td>3.44</td><td>1.78</td><td>2.75</td><td>5.35</td><td>7.83</td><td>6.99</td><td>24.07</td><td>12.97</td><td>29.46</td></tr></table>
", + "image_path": "4a44840d4fe5a0235369a2d89747c55d678e5901adaa6d00e7c69fc03a04abc2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 423, + 480, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 423, + 480, + 459 + ], + "spans": [ + { + "bbox": [ + 132, + 423, + 480, + 459 + ], + "type": "text", + "content": "estimate intrinsics becomes essential. This aspect of requiring camera intrinsics for depth calculations highlights an area for further exploration and adaptation in our method, especially when intrinsic parameters are not readily available." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 133, + 475, + 219, + 487 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 475, + 219, + 487 + ], + "spans": [ + { + "bbox": [ + 133, + 475, + 219, + 487 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 498, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 498, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 498, + 481, + 665 + ], + "type": "text", + "content": "This study proposed a novel approach by introducing the TRG to predict a 6DoF head pose from a single image. Through extensive experimentation, we demonstrated the effectiveness of the explicit bidirectional interaction between the 6DoF head pose and the dense 3D face landmarks, a core feature of the TRG architecture. We further established that our method of estimating the correction parameters significantly enhances the generalizability of the model in cross-dataset evaluations. Evaluation on the ARKitFace and BIWI datasets showed TRG's superior performance in head pose estimation compared to existing state-of-the-art methods. Our extensive experiments have also highlighted the strength of TRG's depth-aware landmark prediction structure, particularly in images heavily influenced by perspective transformation, facilitating accurate estimation of face geometry. Based on these findings, our future work will focus on accurately reconstructing detailed facial geometries from close-up facial photos, such as selfies, further pushing the boundaries of facial analysis technology." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 270, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 270, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 270, + 102 + ], + "type": "text", + "content": "S. Chun and J. Y. 
Chang" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 114, + 240, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 114, + 240, + 129 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 240, + 129 + ], + "type": "text", + "content": "Acknowledgement" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 139, + 482, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 139, + 482, + 224 + ], + "spans": [ + { + "bbox": [ + 130, + 139, + 482, + 224 + ], + "type": "text", + "content": "This work was partly supported by Institute of Information & Communications Technology Planning & Evaluation (IITP) grant funded by the Korea government (MSIT) (No. RS-2023-00219700, Development of FACS-compatible Facial Expression Style Transfer Technology for Digital Human, " + }, + { + "bbox": [ + 130, + 139, + 482, + 224 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 130, + 139, + 482, + 224 + ], + "type": "text", + "content": ") and National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIT) (No. NRF-2022R1F1A1066170, Physically valid 3D human motion reconstruction from multi-view videos, " + }, + { + "bbox": [ + 130, + 139, + 482, + 224 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 130, + 139, + 482, + 224 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 240, + 197, + 252 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 240, + 197, + 252 + ], + "spans": [ + { + "bbox": [ + 133, + 240, + 197, + 252 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 263, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 138, + 263, + 480, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 263, + 480, + 285 + ], + "spans": [ + { + "bbox": [ + 138, + 263, + 480, + 285 + ], + "type": "text", + "content": "1. Albiero, V., Chen, X., Yin, X., Pang, G., Hassner, T.: img2pose: Face alignment and detection via 6dof, face pose estimation. In: CVPR (2021)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 286, + 480, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 286, + 480, + 307 + ], + "spans": [ + { + "bbox": [ + 138, + 286, + 480, + 307 + ], + "type": "text", + "content": "2. Belhumeur, P.N., Jacobs, D.W., Kriegman, D.J., Kumar, N.: Localizing parts of faces using a consensus of exemplars. IEEE TPAMI 35(12), 2930-2940 (2013)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 308, + 480, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 308, + 480, + 329 + ], + "spans": [ + { + "bbox": [ + 138, + 308, + 480, + 329 + ], + "type": "text", + "content": "3. Cao, Z., Chu, Z., Liu, D., Chen, Y.: A vector-based representation to enhance head pose estimation. In: WACV (2021)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 330, + 480, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 330, + 480, + 350 + ], + "spans": [ + { + "bbox": [ + 138, + 330, + 480, + 350 + ], + "type": "text", + "content": "4. 
Cao, Z., Liu, D., Wang, Q., Chen, Y.: Towards unbiased label distribution learning for facial pose estimation using anisotropic spherical gaussian. In: ECCV (2022)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 351, + 480, + 383 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 351, + 480, + 383 + ], + "spans": [ + { + "bbox": [ + 138, + 351, + 480, + 383 + ], + "type": "text", + "content": "5. Chai, Z., Zhang, T., He, T., Tan, X., Baltrusaitis, T., Wu, H., Li, R., Zhao, S., Yuan, C., Bian, J.: Hiface: High-fidelity 3d face reconstruction by learning static and dynamic details. In: ICCV (2023)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 384, + 480, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 384, + 480, + 404 + ], + "spans": [ + { + "bbox": [ + 138, + 384, + 480, + 404 + ], + "type": "text", + "content": "6. Cho, J., Youwang, K., Oh, T.H.: Cross-attention of disentangled modalities for 3d human mesh recovery with transformers. In: ECCV (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 405, + 480, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 405, + 480, + 426 + ], + "spans": [ + { + "bbox": [ + 138, + 405, + 480, + 426 + ], + "type": "text", + "content": "7. Chun, S., Park, S., Chang, J.Y.: Learnable human mesh triangulation for 3d human pose and shape estimation. In: WACV (2023)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 427, + 480, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 427, + 480, + 448 + ], + "spans": [ + { + "bbox": [ + 138, + 427, + 480, + 448 + ], + "type": "text", + "content": "8. Chun, S., Park, S., Chang, J.Y.: Representation learning of vertex heatmaps for 3d human mesh reconstruction from multi-view images. In: ICIP (2023)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 449, + 480, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 449, + 480, + 480 + ], + "spans": [ + { + "bbox": [ + 138, + 449, + 480, + 480 + ], + "type": "text", + "content": "9. Cobo, A., Valle, R., Buenaposada, J.M., Baumela, L.: On the representation and methodology for wide and short range head pose estimation. PR 149, 110263 (2024)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 481, + 480, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 481, + 480, + 503 + ], + "spans": [ + { + "bbox": [ + 138, + 481, + 480, + 503 + ], + "type": "text", + "content": "10. Danecek, R., Black, M.J., Bolkart, T.: EMOCA: Emotion driven monocular face capture and animation. In: CVPR (2022)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 503, + 480, + 524 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 503, + 480, + 524 + ], + "spans": [ + { + "bbox": [ + 138, + 503, + 480, + 524 + ], + "type": "text", + "content": "1. Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: A large-scale hierarchical image database. In: CVPR (2009)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 525, + 480, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 525, + 480, + 545 + ], + "spans": [ + { + "bbox": [ + 138, + 525, + 480, + 545 + ], + "type": "text", + "content": "2. Deng, J., Guo, J., Ververas, E., Kotsia, I., Zafeiriou, S.: Retinaface: Single-shot multi-level face localisation in the wild. 
In: CVPR (2020)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 546, + 480, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 546, + 480, + 578 + ], + "spans": [ + { + "bbox": [ + 138, + 546, + 480, + 578 + ], + "type": "text", + "content": "3. Deng, Y., Yang, J., Xu, S., Chen, D., Jia, Y., Tong, X.: Accurate 3d face reconstruction with weakly-supervised learning: From single image to image set. In: CVPRW (2019)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 138, + 579, + 480, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 579, + 480, + 600 + ], + "spans": [ + { + "bbox": [ + 138, + 579, + 480, + 600 + ], + "type": "text", + "content": "4. Dhingra, N.: Lwposr: Lightweight efficient fine grained head pose estimation. In: WACV (2022)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 138, + 601, + 480, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 601, + 480, + 621 + ], + "spans": [ + { + "bbox": [ + 138, + 601, + 480, + 621 + ], + "type": "text", + "content": "5. Fanelli, G., Dantone, M., Gall, J., Fossati, A., Van Gool, L.: Random forests for real time 3d face analysis. IJCV 101, 437-458 (2013)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 138, + 622, + 480, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 622, + 480, + 643 + ], + "spans": [ + { + "bbox": [ + 138, + 622, + 480, + 643 + ], + "type": "text", + "content": "6. Feng, Y., Feng, H., Black, M.J., Bolkart, T.: Learning an animatable detailed 3d face model from in-the-wild images. ACM TOG 40(4), 1-13 (2021)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 138, + 644, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 644, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 644, + 480, + 665 + ], + "type": "text", + "content": "7. Feng, Y., Wu, F., Shao, X., Wang, Y., Zhou, X.: Joint 3d face reconstruction and dense alignment with position map regression network. In: ECCV (2018)" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "type": "text", + "content": "TRG" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 117, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 133, + 117, + 480, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 117, + 480, + 138 + ], + "spans": [ + { + "bbox": [ + 133, + 117, + 480, + 138 + ], + "type": "text", + "content": "18. Guo, J., Yu, J., Lattas, A., Deng, J.: Perspective reconstruction of human faces by joint mesh and landmark regression. 
In: ECCVW (2022)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 140, + 480, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 140, + 480, + 160 + ], + "spans": [ + { + "bbox": [ + 133, + 140, + 480, + 160 + ], + "type": "text", + "content": "19. Guo, J., Zhu, X., Yang, Y., Yang, F., Lei, Z., Li, S.Z.: Towards fast, accurate and stable 3d dense face alignment. In: ECCV (2020)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 161, + 480, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 161, + 480, + 182 + ], + "spans": [ + { + "bbox": [ + 133, + 161, + 480, + 182 + ], + "type": "text", + "content": "20. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 183, + 480, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 183, + 480, + 205 + ], + "spans": [ + { + "bbox": [ + 132, + 183, + 480, + 205 + ], + "type": "text", + "content": "21. Hempel, T., Abdelrahman, A.A., Al-Hamadi, A.: 6d rotation representation for unconstrained head pose estimation. In: ICIP (2022)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 205, + 480, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 205, + 480, + 237 + ], + "spans": [ + { + "bbox": [ + 132, + 205, + 480, + 237 + ], + "type": "text", + "content": "22. Hinterstoisser, S., Lepetit, V., Ilic, S., Holzer, S., Bradski, G., Konolige, K., Navab, N.: Model based training, detection and pose estimation of texture-less 3d objects in heavily cluttered scenes. In: ACCV (2013)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 238, + 480, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 238, + 480, + 270 + ], + "spans": [ + { + "bbox": [ + 132, + 238, + 480, + 270 + ], + "type": "text", + "content": "23. Hsu, H.W., Wu, T.Y., Wan, S., Wong, W.H., Lee, C.Y.: Quantnet: Quaternion-based head pose estimation with multiregression loss. IEEE TMM 21(4), 1035-1046 (2018)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 271, + 480, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 271, + 480, + 293 + ], + "spans": [ + { + "bbox": [ + 132, + 271, + 480, + 293 + ], + "type": "text", + "content": "24. Huang, B., Chen, R., Xu, W., Zhou, Q.: Improving head pose estimation using two-stage ensembles with top-k regression. IVC 93, 103827 (2020)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 293, + 480, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 293, + 480, + 325 + ], + "spans": [ + { + "bbox": [ + 132, + 293, + 480, + 325 + ], + "type": "text", + "content": "25. Kao, Y., Pan, B., Xu, M., Lyu, J., Zhu, X., Chang, Y., Li, X., Lei, Z.: Toward 3d face reconstruction in perspective projection: Estimating 6 dof face pose from monocular image. IEEE TIP 32, 3080-3091 (2023)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 326, + 480, + 347 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 326, + 480, + 347 + ], + "spans": [ + { + "bbox": [ + 132, + 326, + 480, + 347 + ], + "type": "text", + "content": "26. Kazemi, V., Sullivan, J.: One millisecond face alignment with an ensemble of regression trees. 
In: CVPR (2014)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 348, + 480, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 348, + 480, + 369 + ], + "spans": [ + { + "bbox": [ + 132, + 348, + 480, + 369 + ], + "type": "text", + "content": "27. Kumar, A., Alavi, A., Chellappa, R.: Kepler: Keypoint and pose estimation of unconstrained faces by learning efficient h-cnn regressors. In: FG (2017)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 369, + 480, + 391 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 369, + 480, + 391 + ], + "spans": [ + { + "bbox": [ + 132, + 369, + 480, + 391 + ], + "type": "text", + "content": "28. Li, H., Wang, B., Cheng, Y., Kankanhalli, M., Tan, R.T.: Dsfnet: Dual space fusion network for occlusion-robust 3d dense face alignment. In: CVPR (2023)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 392, + 480, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 392, + 480, + 413 + ], + "spans": [ + { + "bbox": [ + 132, + 392, + 480, + 413 + ], + "type": "text", + "content": "29. Li, Z., Liu, J., Zhang, Z., Xu, S., Yan, Y.: Cliff: Carrying location information in full frames into human pose and shape estimation. In: ECCV (2022)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 414, + 480, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 414, + 480, + 435 + ], + "spans": [ + { + "bbox": [ + 132, + 414, + 480, + 435 + ], + "type": "text", + "content": "30. Lin, K., Wang, L., Liu, Z.: End-to-end human pose and mesh reconstruction with transformers. In: CVPR (2021)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 436, + 480, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 436, + 480, + 456 + ], + "spans": [ + { + "bbox": [ + 132, + 436, + 480, + 456 + ], + "type": "text", + "content": "31. Liu, Z., Chen, Z., Bai, J., Li, S., Lian, S.: Facial pose estimation by deep learning from label distributions. In: ICCVW (2019)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 457, + 480, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 457, + 480, + 479 + ], + "spans": [ + { + "bbox": [ + 132, + 457, + 480, + 479 + ], + "type": "text", + "content": "32. Maas, A.L., Hannun, A.Y., Ng, A.Y., et al.: Rectifier nonlinearities improve neural network acoustic models. In: ICML (2013)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 479, + 480, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 479, + 480, + 501 + ], + "spans": [ + { + "bbox": [ + 132, + 479, + 480, + 501 + ], + "type": "text", + "content": "33. Moon, G., Lee, K.M.: I2l-meshnet: Image-to-lixel prediction network for accurate 3d human pose and mesh estimation from a single rgb image. In: ECCV (2020)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 502, + 480, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 502, + 480, + 522 + ], + "spans": [ + { + "bbox": [ + 132, + 502, + 480, + 522 + ], + "type": "text", + "content": "34. Park, K., Patten, T., Vincze, M.: Pix2pose: Pixel-wise coordinate regression of objects for 6d pose estimation. 
In: ICCV (2019)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 132, + 523, + 480, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 523, + 480, + 544 + ], + "spans": [ + { + "bbox": [ + 132, + 523, + 480, + 544 + ], + "type": "text", + "content": "35. Paysan, P., Knothe, R., Amberg, B., Romdhani, S., Vetter, T.: A 3d face model for pose and illumination invariant face recognition. In: AVSS (2009)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 132, + 545, + 480, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 545, + 480, + 567 + ], + "spans": [ + { + "bbox": [ + 132, + 545, + 480, + 567 + ], + "type": "text", + "content": "36. Ranjan, A., Bolkart, T., Sanyal, S., Black, M.J.: Generating 3d faces using convolutional mesh autoencoders. In: ECCV (2018)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 132, + 567, + 480, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 567, + 480, + 599 + ], + "spans": [ + { + "bbox": [ + 132, + 567, + 480, + 599 + ], + "type": "text", + "content": "37. Ranjan, R., Patel, V.M., Chellappa, R.: Hyperface: A deep multi-task learning framework for face detection, landmark localization, pose estimation, and gender recognition. IEEE TPAMI 41(1), 121-135 (2019)" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 132, + 600, + 480, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 600, + 480, + 621 + ], + "spans": [ + { + "bbox": [ + 132, + 600, + 480, + 621 + ], + "type": "text", + "content": "38. Ruiz, N., Chong, E., Rehg, J.M.: Fine-grained head pose estimation without keypoints. In: CVPRW (2018)" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 132, + 622, + 480, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 622, + 480, + 643 + ], + "spans": [ + { + "bbox": [ + 132, + 622, + 480, + 643 + ], + "type": "text", + "content": "39. Sagonas, C., Tzimiropoulos, G., Zafeiriou, S., Pantic, M.: 300 faces in-the-wild challenge: The first facial landmark localization challenge. In: ICCVW (2013)" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 132, + 644, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 644, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 644, + 480, + 665 + ], + "type": "text", + "content": "40. Sagonas, C., Tzimiropoulos, G., Zafeiriou, S., Pantic, M.: 300 faces in-the-wild challenge: The first facial landmark localization challenge. In: ICCVW (2013)" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 270, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 270, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 270, + 102 + ], + "type": "text", + "content": "S. Chun and J. Y. 
Chang" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 500 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 138 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 138 + ], + "type": "text", + "content": "41. Shao, M., Sun, Z., Ozay, M., Okatani, T.: Improving head pose estimation with a combined loss and bounding box margin adjustment. In: FG (2019)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 138, + 482, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 138, + 482, + 159 + ], + "spans": [ + { + "bbox": [ + 130, + 138, + 482, + 159 + ], + "type": "text", + "content": "42. Sun, X., Xiao, B., Wei, F., Liang, S., Wei, Y.: Integral human pose regression. In: ECCV (2018)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 160, + 482, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 160, + 482, + 182 + ], + "spans": [ + { + "bbox": [ + 130, + 160, + 482, + 182 + ], + "type": "text", + "content": "43. Valle, R., Buenaposada, J.M., Baumela, L.: Multi-task head pose estimation inthe-wild. IEEE TPAMI 43(8), 2874-2881 (2020)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 182, + 482, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 182, + 482, + 205 + ], + "spans": [ + { + "bbox": [ + 130, + 182, + 482, + 205 + ], + "type": "text", + "content": "44. Wu, C.Y., Xu, Q., Neumann, U.: Synergy between 3dmm and 3d landmarks for accurate 3d facial geometry. In: 3DV (2021)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 205, + 482, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 205, + 482, + 226 + ], + "spans": [ + { + "bbox": [ + 130, + 205, + 482, + 226 + ], + "type": "text", + "content": "45. Xin, M., Mo, S., Lin, Y.: Eva-gcn: Head pose estimation based on graph convolutional networks. In: CVPR (2021)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 226, + 482, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 226, + 482, + 258 + ], + "spans": [ + { + "bbox": [ + 130, + 226, + 482, + 258 + ], + "type": "text", + "content": "46. Yang, T.Y., Chen, Y.T., Lin, Y.Y., Chuang, Y.Y.: Fsa-net: Learning fine-grained structure aggregation for head pose estimation from a single image. In: CVPR (2019)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 259, + 482, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 259, + 482, + 281 + ], + "spans": [ + { + "bbox": [ + 130, + 259, + 482, + 281 + ], + "type": "text", + "content": "47. Zhang, C., Liu, H., Deng, Y., Xie, B., Li, Y.: Tokenhpe: Learning orientation tokens for efficient head pose estimation via transformers. In: CVPR (2023)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 281, + 482, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 281, + 482, + 303 + ], + "spans": [ + { + "bbox": [ + 130, + 281, + 482, + 303 + ], + "type": "text", + "content": "48. Zhang, H., Wang, M., Liu, Y., Yuan, Y.: Fdn: Feature decoupling network for head pose estimation. 
In: AAAI (2020)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 303, + 482, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 303, + 482, + 335 + ], + "spans": [ + { + "bbox": [ + 130, + 303, + 482, + 335 + ], + "type": "text", + "content": "49. Zhang, H., Tian, Y., Zhang, Y., Li, M., An, L., Sun, Z., Liu, Y.: Pymaf-x: Towards well-aligned full-body model regression from monocular images. IEEE TPAMI (2023)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 335, + 482, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 335, + 482, + 369 + ], + "spans": [ + { + "bbox": [ + 130, + 335, + 482, + 369 + ], + "type": "text", + "content": "50. Zhang, H., Tian, Y., Zhou, X., Ouyang, W., Liu, Y., Wang, L., Sun, Z.: Pymaf: 3d human pose and shape regression with pyramidal mesh alignment feedback loop. In: ICCV (2021)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 369, + 482, + 391 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 369, + 482, + 391 + ], + "spans": [ + { + "bbox": [ + 130, + 369, + 482, + 391 + ], + "type": "text", + "content": "51. Zhou, E., Fan, H., Cao, Z., Jiang, Y., Yin, Q.: Extensive facial landmark localization with coarse-to-fine convolutional network cascade. In: ICCVW (2013)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 391, + 482, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 391, + 482, + 413 + ], + "spans": [ + { + "bbox": [ + 130, + 391, + 482, + 413 + ], + "type": "text", + "content": "52. Zhou, Y., Barnes, C., Lu, J., Yang, J., Li, H.: On the continuity of rotation representations in neural networks. In: CVPR (2019)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 130, + 413, + 482, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 413, + 482, + 434 + ], + "spans": [ + { + "bbox": [ + 130, + 413, + 482, + 434 + ], + "type": "text", + "content": "53. Zhou, Y., Gregson, J.: Whenet: Real-time fine-grained estimation for wide range head pose. arXiv preprint arXiv:2005.10353 (2020)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 434, + 482, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 434, + 482, + 456 + ], + "spans": [ + { + "bbox": [ + 130, + 434, + 482, + 456 + ], + "type": "text", + "content": "54. Zhu, X., Ramanan, D.: Face detection, pose estimation, and landmark localization in the wild. In: CVPR (2012)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 130, + 456, + 482, + 478 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 456, + 482, + 478 + ], + "spans": [ + { + "bbox": [ + 130, + 456, + 482, + 478 + ], + "type": "text", + "content": "55. Zhu, X., Lei, Z., Liu, X., Shi, H., Li, S.Z.: Face alignment across large poses: A 3d solution. In: CVPR (2016)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 130, + 479, + 482, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 479, + 482, + 500 + ], + "spans": [ + { + "bbox": [ + 130, + 479, + 482, + 500 + ], + "type": "text", + "content": "56. Zielonka, W., Bolkart, T., Thies, J.: Towards metrical reconstruction of human faces. 
In: ECCV (2022)" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 425, + 91, + 447, + 100 + ], + "type": "text", + "content": "TRG" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/2cca7425-9c6a-47c2-b889-8be913ae41cc_content_list.json b/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/2cca7425-9c6a-47c2-b889-8be913ae41cc_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..ab4edf6f9f0930dbc0f14f3415a6a7305cebf4e7 --- /dev/null +++ b/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/2cca7425-9c6a-47c2-b889-8be913ae41cc_content_list.json @@ -0,0 +1,1685 @@ +[ + { + "type": "text", + "text": "A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image", + "text_level": 1, + "bbox": [ + 222, + 140, + 779, + 209 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Chao Dai $^{1\\dagger}$ , Yang Wang $^{2\\dagger(\\boxtimes)}$ , Chaolin Huang $^{3\\dagger}$ , Jiakai Zhou $^{4}$ , Qilin Xu $^{5}$ , and Minpeng Xu $^{1}$", + "bbox": [ + 223, + 233, + 779, + 265 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Tianjin University", + "bbox": [ + 431, + 277, + 570, + 290 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2 Anhui University of Technology", + "bbox": [ + 387, + 291, + 612, + 305 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{3}$ Jiangxi University of Science and Technology", + "bbox": [ + 343, + 305, + 658, + 318 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{4}$ Nanjing University of Aeronautics and Astronautics", + "bbox": [ + 321, + 319, + 679, + 332 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "5 West Anhui University", + "bbox": [ + 419, + 333, + 584, + 345 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. Accurate detection of cephalometric landmarks is crucial for orthodontic diagnosis and treatment planning. Current methods rely on a cascading form of multiple models to achieve higher accuracy, which greatly complicates both training and deployment processes. In this paper, we introduce a novel regression paradigm capable of simultaneously detecting all cephalometric landmarks in high-resolution X-ray images. Our approach only utilizes the encoder module from the transformer to design a dual-encoder architecture, enabling precise detection of cephalometric landmark positions from coarse to fine. Specifically, the entire model architecture comprises three main components: a feature extractor module, a reference encoder module, and a fine-tune encoder module. 
These components are respectively responsible for feature extraction and fusion for X-ray images, coarse localization of cephalometric landmark, and fine-tuning of cephalometric landmark positioning. Notably, our framework is fully end-to-end differentiable and innately learns to exploit the interdependencies among cephalometric landmarks. Experiments demonstrate that our method significantly surpasses the current state-of-the-art methods in Mean Radical Error (MRE) and the 2mm Success Detection Rate (SDR) metrics, while also reducing computational resource consumption. The code is available at https://github.com/huang229/D-CeLR", + "bbox": [ + 259, + 382, + 743, + 674 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: Cephalometric landmark $\\cdot$ High-resolution $\\cdot$ Dual-encoder $\\cdot$ Reference encoder $\\cdot$ Finetune encoder", + "bbox": [ + 259, + 686, + 740, + 714 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 739, + 375, + 755 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Cephalometric analysis represents a pivotal diagnostic tool extensively utilized in orthodontics and orthognathic surgery. This analysis involves the annotation of dental, skeletal, and soft tissue structures in lateral cephalometric radiographs.", + "bbox": [ + 212, + 770, + 787, + 816 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$\\dagger$ Equal contribution. (Corresponding authors (youngnuaa@gmail.com).", + "bbox": [ + 230, + 824, + 725, + 840 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/ddb02340701773010dc447c6e4272a3d159550f84f95fcae76d431a00355ce03.jpg", + "image_caption": [ + "(a) Cephalometric landmark coordi-(b) Cephalometric landmark medical name nate positions.", + "Fig. 1: Cephalometric landmark visualization. (a) Cephalometric landmark coordinate positions. Red indicates hard tissue points and blue indicates soft tissue points. (b) Cephalometric landmark medical name." + ], + "image_footnote": [], + "bbox": [ + 272, + 146, + 480, + 323 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/dd8d394755327c8bc04e69e994a1fb50ab02fef8c50e49f75d806d831c4db2cf.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
19 Landmarks in Cephalometric
1 | Sella | 11 | Lower Incisor Tip
2 | Nasion | 12 | Upper Incisor Tip
3 | Orbitale | 13 | Labrale superius
4 | Porion | 14 | Labrale inferius
5 | Upper Incisor Apex | 15 | Subnasale
6 | B-point | 16 | Soft Tissue Pogonion
7 | Pogonion | 17 | Posterior Nasal Spine
8 | Menton | 18 | Anterior Nasal Spine
9 | Gnathion | 19 | Articulare
10 | Gonion | |
", + "bbox": [ + 480, + 145, + 730, + 323 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As illustrated in Figure 1, these cephalometric landmarks are core to the analysis, providing reference points for subsequent qualitative assessments of angles and distances. However, the manual annotation of these landmarks is a laborious, time-consuming, and highly subjective task, impacting the accuracy of the annotations. Consequently, a precise and robust automated method for annotating cephalometric landmarks holds significant importance for effective treatment planning [1,6,7,14,23].", + "bbox": [ + 212, + 431, + 784, + 537 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Existing methods for cephalometric landmark detection can be broadly classified into two categories: heatmap-based and regression-based approaches. The heatmap-based approach involves predicting a heatmap that indicates the probability of each pixel in a region corresponding to various cephalometric landmarks. This modality has seen extensive applications in the detection of cephalometric landmarks. For example, Chen et al. [6] introduced a feature pyramid fusion-based heatmap method for simultaneous landmark detection, achieving impressive results. Qian J et al. [26] advanced the accuracy of cephalometric landmark detection by designing a multi-head attention module and a novel regional loss function. However, heatmap-based methods exhibit certain disadvantages. 1). The ground truth requires manual design and heuristic adjustments, with inevitable noise impacting the final outcomes [13,29,40]. 2). post-processing operations are necessary to locate single maximum values in heatmaps. These operations are typically heuristic and non-differentiable, undermining the model's capacity for end-to-end training. 3). models generally adopt a U-net structure [27,28,41], while processing high-resolution X-ray images, consumes more computational resources and is prone to missing cephalometric landmarks.", + "bbox": [ + 212, + 537, + 787, + 792 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Regression-based methods directly map the input image to the coordinates of cephalometric landmarks, typically employing a feedforward network (FFN) for prediction. The regression-based methods is considerably more streamlined", + "bbox": [ + 212, + 795, + 785, + 839 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "C. Dai, Y. Wang et al.", + "bbox": [ + 271, + 114, + 423, + 128 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "compared to heatmap-based methods, as the prediction of cephalometric landmarks is inherently a process of determining a set of coordinate values. Numerous regression-based techniques exist for predicting cephalometric landmarks. For example, Song Y et al. [29] utilizes a base network for coarse localization of cephalometric landmarks, followed by region-specific cropping and refined positioning using a secondary model. Gilmour L et al. [11] constructs individual models for each landmark to predict their locations. Regression-based methods circumvent the necessity for non-maximum suppression, heatmap generation, and quantization error correction. 
However, to achieve higher precision on high-resolution X-ray images, current approaches predominantly rely on cascading multiple models, which compromises the inherent advantages of end-to-end training and prediction for regression-based methods.", + "bbox": [ + 212, + 146, + 787, + 327 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To address these issues, we introduce a novel regression paradigm that exclusively utilizes the encoder module of transformer for the one-time detection of all cephalometric landmarks on high-resolution X-ray images. Specifically, we design a feature extraction module based on Convolutional Neural Networks (CNN) to accomplish feature extraction and fusion for X-ray images. Subsequently, the extracted features are fed into a reference encoder module for the coarse localization of cephalometric landmarks. Finally, the coarsely localized cephalometric landmarks, along with the fused features, are inputted into a finetune encoder module, which iteratively refines the positioning of the cephalometric landmarks from coarse to fine detail. Moreover, our method pioneers the complete end-to-end training and deployment for the detection of cephalometric landmarks on high-resolution X-ray images. Extensive experiments demonstrate that our approach achieves state-of-the-art performance on popular benchmarks with a ResNet-34 backbone. Specifically, we achieve a Mean Radial Error (MRE) of $1.01\\mathrm{mm}$ , $1.27\\mathrm{mm}$ , and $0.9372\\mathrm{mm}$ on the ISBI2015 test1, ISBI2015 test2, and ISBI2023 test datasets, respectively. Furthermore, our method significantly reduces GFLOPs, by $132\\%$ compared to the previously best method [11].", + "bbox": [ + 212, + 330, + 787, + 589 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The main contributions of this work are as follows:", + "bbox": [ + 238, + 593, + 607, + 606 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose an innovative regression paradigm for high-resolution X-ray images, which enables the prediction of all cephalometric landmarks through a single model. Moreover, our method facilitates end-to-end training and prediction, which not only improves efficiency but also enhances the feasibility of the model in practical applications.", + "- We have designed a dual-encoder structure, comprising a reference encoder module and a finetune encoder module. The reference encoder module accomplishes coarse localization of cephalometric landmarks, while the finetune encoder module refines this localization in a layer-by-layer updating manner.", + "- Our proposed regression approach significantly enhances the precision of cephalometric landmark detection. Compared to state-of-the-art methods, we achieve superior performance on both the ISBI2015 and ISBI2023 test datasets." + ], + "bbox": [ + 223, + 635, + 784, + 835 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Du-CeLR", + "bbox": [ + 663, + 114, + 730, + 126 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 215, + 143, + 387, + 160 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "With the seminal work of Lee et al. [19], which first introduced the use of deep learning for cephalometric landmark detection. 
Deep learning-based methods [2,17,34] have fully surpassed traditional pattern matching [4,10] and random forest regression-based methods [3,22] in terms of accuracy for cephalometric landmark detection. This section primarily focuses on two deep learning-based approaches for cephalometric landmark detection and the transformer architectures for regression of keypoints.", + "bbox": [ + 212, + 175, + 787, + 282 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.1 Heatmap-Based Methods", + "text_level": 1, + "bbox": [ + 215, + 301, + 472, + 316 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Heatmap-based methods predict the likelihood of each pixel in the image corresponding to each cephalometric landmark. King C H et al. [17] utilized object detection techniques and designed a multitask loss without bounding box constraints to optimize landmark acquisition in the model. Chen R et al. [6] proposed a heatmap detection method based on feature pyramid fusion to complete all cephalometric landmark detection, surpassing other methods in effectiveness, but their multi-scale feature pyramid fusion is highly memory-intensive. Zhong Z et al. [40] adopted a two-stage landmark detection approach, which not only reduces memory consumption but also allows for fine-tuning of coarse landmark detection results on local image regions. Qian J et al. [26] enhanced the accuracy to new heights in the ISBI 2015 dataset by designing a multi-head attention module and a new regional loss function, while Ao Y et al. [2] developed a multiscale feature aggregation (MSFA) module and multi-head loss function. Although heatmap-based cephalometric landmark detection achieves high accuracy, its application to high-resolution X-ray images and the common use of U-net structures in models result in substantial memory resource consumption. Moreover, the post-processing required in heatmap-based methods disrupts the integrity of end-to-end training and deployment of the model.", + "bbox": [ + 212, + 325, + 787, + 599 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 Regression-Based Methods", + "text_level": 1, + "bbox": [ + 215, + 619, + 483, + 635 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Currently, the majority of regression methods for cephalometric landmark detection on high-resolution X-ray images utilize multi-stage or multi-model strategies. Song Yet et al. [29,30] proposed a method combining traditional regression algorithms with deep learning for coarse localization of landmarks, followed by cropping the region of interest in the original image to create a new image for refined localization using a secondary model. However, their accuracy is substantially lower than that achieved by heatmap-based methods [2, 26]. Zeng M et al. [36] introduced a three-tier cascading neural network for cephalometric landmark regression, akin to the concept used in the MTCNN model [38] for face detection. This approach significantly reduced memory resource consumption but did not achieve the desired level of accuracy. Gilmour L et al. trained 19 distinct models to predict each cephalometric landmark position, attaining accuracy on the ISBI 2015 cephalometric dataset comparable to heatmap-based", + "bbox": [ + 212, + 643, + 787, + 840 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 230, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "C. Dai, Y. 
Wang et al.", + "bbox": [ + 271, + 114, + 423, + 128 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "methods [26,34]. This greatly encouraged the use of low-memory-consuming regression methods in landmark detection. However, the necessity of maintaining a separate model for each landmark adds complexity to training and deployment. While some regression methods have reached heatmap-based method accuracy levels, they typically involve designing multiple network models for predictions. Moreover, these methods have also not achieved end-to-end training and deployment.", + "bbox": [ + 212, + 146, + 782, + 252 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.3 Transformer-based architectures", + "text_level": 1, + "bbox": [ + 214, + 275, + 526, + 287 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The Transformer [31], proposed by Vaswani et al., originally designed for natural language processing tasks, employs an encoder-decoder architecture based on self-attention and feed-forward networks. Recently, Transformer-based models have demonstrated significant potential in computer vision tasks [5,9], including various works applying the Transformer structure to keypoint estimation. Such as TransPose [33] and HRFormer [35] utilized the encoder-decoder structure of transformers for human keypoint regression. Poseur [24] and DTLD [20] have adopted the latest deformable transformer architecture for efficient regression of human keypoints and facial landmarks. Despite the high performance achieved by transformer-based methods in keypoint regression tasks, they present certain challenges: 1) They are primarily used for low-resolution images; 2) The deformable transformer architecture is more complex for deployment. In contrast, our method addresses these issues and achieves significantly higher performance.", + "bbox": [ + 212, + 299, + 784, + 496 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 214, + 518, + 328, + 535 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The overall architecture, as illustrated in Figure 2, presents our proposed dual-encoder model which progressively predicts cephalometric landmark coordinate from coarse to fine on high-resolution X-ray images. It comprises a feature extractor for image feature extraction, a reference encoder for coarse cephalometric landmark localization, and a finetune encoder for precise cephalometric landmark localization. For the input image, we initially obtain multi-scale features (S2, S3, S4, and S5) and a fused feature $F_{u}$ through the feature extractor (Sec.3.1). The feature map S5 is flattened to produce the image context queries $V_{FR}^{C}$ , and coarse landmark content queries $V_{LR}^{C}$ are initialized randomly. The image context queries $V_{FR}^{C}$ and coarse landmark context queries $V_{LR}^{C}$ are fed into the reference encoder along with their position queries $V_{R}^{P}$ , updating to corresponding context queries $V_{LR}^{C'}$ and $V_{FR}^{C'}$ . Subsequently, the context queries $V_{LR}^{C'}$ are utilized to predict the coarse coordinate of cephalometric landmark $\\mu_{R} \\in R^{K \\times 2}$ and coarse distribution $\\sigma_{R} \\in R^{K \\times 1}$ via FFN (Sec.3.2). Next, the fused feature map Fu is also flattened to generate image context queries $V_{FA}^{C}$ , and fine landmark content queries $V_{LA}^{C}$ are initialized. 
Unlike the reference encoder module, which solely uses content and position queries as input, the coarse landmark coordinates $\\mu_{R}$ and feature map $F_{u}$ are also fed into the finetune encoder module to update the content queries $V_{LA}^{C'}$ and $V_{FA}^{C'}$ . Finally, the content queries $V_{LA}^{C'}$ is operated", + "bbox": [ + 212, + 551, + 785, + 843 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Du-CeLR", + "bbox": [ + 663, + 114, + 730, + 126 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/ffd05bc043a830e7a326b8f26e4a2e5a0d7eea9fb2b9ecbd02ae9a1c96a7bda1.jpg", + "image_caption": [ + "Fig. 2: The overview architecture of our method, which contains (a) feature extractor module, (b) reference encoder module and (c) finetune encoder module." + ], + "image_footnote": [], + "bbox": [ + 217, + 145, + 782, + 364 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "by the FFN to produce cephalometric landmark coordinate $\\mu_A \\in R^{K \\times 2}$ and distribution $\\sigma_A \\in R^{K \\times 1}$ (Sec.3.3). In addition, different loss functions are employed for supervising the training of various modules. For the feature extractor module, Dice loss and Mean Squared Error (MSE) loss are utilized to aid model optimization. For the reference encoder and finetune encoder modules, Residual Log-likelihood Estimation(RLE) loss is applied to optimize the model's output cephalometric landmark coordinates $\\mu$ and distribution $\\sigma$ (Sec.3.4).", + "bbox": [ + 212, + 431, + 787, + 540 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1 Feature Extractor", + "text_level": 1, + "bbox": [ + 214, + 559, + 410, + 573 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ResNet34 [16] is utilized as the backbone in our model, from which multi-level feature maps [39] are extracted, as illustrated in Figure 2. Initially, we apply downsampling operations to scale the feature maps $S2$ , $S3$ , and $S4$ to the same dimension and size as the feature map $S5$ . Subsequently, the feature maps outputted by the backbone are summed with their respective positional maps (Pos) to yield new feature maps $F2$ , $F3$ , $F4$ , and $F5$ . These feature maps $F2$ , $F3$ , $F4$ , and $F5$ are aggregated to generate the fused feature map $F_{u}$ . The feature map $S5$ is directly fed into the reference encoder module to coarse locate cephalometric landmark, while the fused feature map $F_{u}$ is fed into the finetune encoder module to precise locate cephalometric landmark. Moreover, to enhance the model's performance, the feature map $S5$ is processed through convolution to generate a heatmap, which is optimized by Dice loss and MSE loss.", + "bbox": [ + 212, + 583, + 787, + 765 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 Reference Encoder", + "text_level": 1, + "bbox": [ + 214, + 785, + 418, + 800 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The reference encoder aims to establish the relationship between cephalometric landmark queries and feature maps, thereby facilitating the coarse prediction of", + "bbox": [ + 212, + 809, + 787, + 840 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "C. Dai, Y. 
Wang et al.", + "bbox": [ + 271, + 114, + 423, + 128 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/0808e49b6de9810a9e2818388b5de4665ff31f8f3b5e9d18e4583ab83d8d7bb5.jpg", + "image_caption": [ + "(a) reference encoder module", + "Fig. 3: The detailed illustration of (a) reference encoder module and (b) finetune encoder module." + ], + "image_footnote": [], + "bbox": [ + 320, + 148, + 483, + 344 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/b0ae459d84061ac1761c757f913cc663354387780db2f166156eab90a690907e.jpg", + "image_caption": [ + "(b) finetune encoder module" + ], + "image_footnote": [], + "bbox": [ + 491, + 148, + 684, + 343 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "cephalometric landmark. As illustrated in Figures 2b and Figure 3a, the reference encoder module follows the typical transformer encoder paradigm. It comprises $N$ identical layers within the encoder, each layer consisting of Layer Normalization (LN), Multi-Head Self-Attention (MHSA), and Feed-Forward Networks (FFN). Specifically, we initialize $K$ cephalometric landmark content queries $V_{CL}^{R}$ and utilize the feature map $S5$ as the image content queries $V_{CF}^{R}$ . Drawing inspiration from the positional encoding of the BERT [8], we generate the positional queries $V_{P}^{R}$ . These content and positional queries are fed into the reference encoder. After $N$ layers of iteration, the reference encoder outputs the updated cephalometric landmark content queries $V_{LR}^{C'}$ . These content queries are calculated by FFN layer to predict the coarse cephalometric landmark coordinates $\\mu_{R}$ and distribution $\\sigma_{R}$ .", + "bbox": [ + 212, + 439, + 787, + 623 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3 Finetune Encoder", + "text_level": 1, + "bbox": [ + 215, + 645, + 410, + 659 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The finetune encoder employs a layer-to-layer update mechanism to achieve more precise cephalometric landmark detection. The structure of the finetune encoder, as shown in Figure 2c and Figure 3b, also adheres to the typical transformer encoder paradigm, consisting of $M$ identical layers within the encoder. Unlike the reference encoder module, cephalometric landmark coordinate $\\mu_{R}$ is continually updated in each layer of the finetune encoder module. Specifically, we initialize $K$ cephalometric landmark content queries $V_{LA}^{C}$ and flatten the fused feature map $F_{u}$ to serve as the image content queries $V_{FA}^{C}$ . Drawing inspiration from the positional encoding of the BERT, we generate position queries $V_{A}^{P}$ . Five parameters are fed into the finetune encoder module, namely fine landmark context queries $V_{LA}^{C}$ , image context queries $V_{FA}^{C}$ , position queries $V_{A}^{P}$ , the fused feature map $F_{u}$ ,", + "bbox": [ + 212, + 672, + 787, + 843 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Du-CeLR", + "bbox": [ + 665, + 114, + 730, + 126 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 114, + 784, + 125 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "and coarse landmark coordinates $\\mu_R$ . Within the finetune encoder module, we first sample feature vectors on the fused feature map $F_u$ using coarse cephalometric landmark coordinates $\\mu_R$ , then add it to the fine landmark queries $V_{LA}^{C}$ . 
We combine content and position queries and feed them into the encoder to calculate the relationships among fine landmark and image context queries. Next, to adjust the landmark positions, we use the updated cephalometric landmark content queries $V_{LA}^{C'}$ to calculate the $(\\Delta x, \\Delta y)$ offsets by FFN layer and add them back to the previous cephalometric landmark coordinates $\\mu_R$ . In this way, the finetune encoder module refines the content queries progressively by stacking multiple aforementioned layers, outputting $V_{LA}^{C'}$ and $V_{FA}^{C'}$ . Finally, the cephalometric landmark content queries $V_{LA}^{C'}$ , followed by FFN layer, predicts the fine cephalometric landmark coordinates $\\mu_A$ and distribution $\\sigma_A$ .", + "bbox": [ + 212, + 146, + 787, + 330 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.4 Loss Function", + "text_level": 1, + "bbox": [ + 215, + 348, + 377, + 363 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As shown in Figure 2, the loss function of our method is composed of two key components: 1) The heatmap loss of the feature extraction module, 2) The cephalometric landmark regression loss for both the reference encoder and fine-tune encoder modules. The overall loss function of our method can be formulated as follows:", + "bbox": [ + 212, + 369, + 787, + 446 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nL = \\lambda_ {H M} L _ {H M} + \\lambda_ {R E} L _ {R E} + \\lambda_ {F E} L _ {F E} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 366, + 460, + 785, + 477 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $L_{HM}$ , $L_{RE}$ and $L_{FE}$ represent feature extraction, reference encoder, and finetune encoder module loss functions respectively. $\\lambda_{HM}$ , $\\lambda_{RE}$ , and, $\\lambda_{FE}$ are the hyper-parameters used to balance the three losses, and they are set to 1.0, 1.0, and 1.0, respectively. $L_{HM}$ consists of the Dice loss and the MSE loss. $L_{HM}$ is defined as follows:", + "bbox": [ + 214, + 481, + 787, + 556 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nL _ {H M} = D i c e \\left(\\stackrel {\\wedge} {P} _ {h p}, P _ {h p}\\right) + M s e \\left(\\stackrel {\\wedge} {P} _ {h p}, P _ {h p}\\right) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 354, + 561, + 785, + 585 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\hat{P}_{hp}$ and $P_{hp}$ are the prediction heatmap and ground truth heatmap respectively. For the cephalometric landmark regression loss of the reference encoder module, we adopt Residual Log-likelihood Estimation(RLE) loss. The loss is defined as follows:", + "bbox": [ + 212, + 595, + 785, + 657 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nL _ {R E} = R L E \\left(\\mu_ {R}, \\sigma_ {R}; \\mu_ {g}\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 413, + 659, + 785, + 676 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\mu_R$ and $\\sigma_R$ are coarse cephalometric landmark coordinate and distribution output by the reference encoder module. $\\mu_g$ is cephalometric landmark ground truth coordinate. For the cephalometric landmark regression loss of the finetune encoder module, we also adopt RLE loss. 
The loss is defined as follows:", + "bbox": [ + 212, + 679, + 787, + 742 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nL _ {F E} = \\sum_ {i = 1} ^ {M} R L E \\left(\\mu_ {A, i}, \\sigma_ {A, i}; \\mu_ {g}\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 392, + 747, + 785, + 787 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $M$ is number of finetune encoder layer. $\\mu_{A,i}$ and $\\sigma_{A,i}$ represent the cephalometric landmark coordinate and distribution output by the i-th layer reference encoder module.", + "bbox": [ + 212, + 794, + 787, + 839 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "C. Dai, Y. Wang et al.", + "bbox": [ + 271, + 114, + 423, + 128 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 215, + 145, + 375, + 162 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this section, we assess our method on some benchmarks for cephalometric landmark detection task. We first perform several ablation studies to underline the advantage of our proposed methods and to establish the optimal setting for hyperparameters. Finally, we compare the performance of our model with state-of-the-art methods.", + "bbox": [ + 212, + 179, + 787, + 253 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.1 Implementation Details", + "text_level": 1, + "bbox": [ + 215, + 277, + 457, + 292 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Our model is built on the PyTorch framework. We use ResNet-34, pre-trained on ImageNet, as the backbone. Our architecture includes 4 layers for both the reference encoder and finetune encoder module. All additional layers that we introduce are initialized randomly. The model training and testing are performed on one NVIDIA 3060(12GB) GPU. For model optimization, we use Adam [18], with parameters $\\beta 1 = 0.9$ , $\\beta 2 = 0.999$ , and a weight decay of $10^{-4}$ . The batch size is set to 4. The model is trained for 1000 epoch. The initial learning rate is $2 \\times 10^{-4}$ , and dynamically updated the learning rate using the cosine strategy during the training process. Data augmentation techniques are employed, encompassing random cropping and random rotation. For the random cropping operation, all cephalometric landmarks are preserved during each cropping process. Regarding the random rotation operation, we select a rotation angle range of [-30, 30] degrees. Ultimately, the image is scaled to $1024 \\times 1024$ for both training and inference of the model.", + "bbox": [ + 212, + 304, + 787, + 517 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.2 Dataset and Evaluation Metric", + "text_level": 1, + "bbox": [ + 215, + 541, + 517, + 555 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ISBI 2015 Challenge Dataset [37]. This is a widely utilized benchmark dataset in the field of cephalometric landmark detection. This dataset comprises 400 cephalometric images, of which 150 are designated for training, 150 for Test 1, and the remaining images for Test 2. Each image has been annotated with 19 landmarks by two experienced medical practitioners, and the average of these annotations is taken as the ground truth. 
This dataset provides a rich array of annotated data, enabling researchers to effectively train and evaluate their cephalometric landmark detection methods.", + "bbox": [ + 212, + 566, + 787, + 686 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ISBI 2023 Challenge Dataset [25]. This is a recently introduced cephalometric landmark detection dataset, collected from seven distinct imaging devices. Following the training strategy in reference [15], we randomly selected 500 images as training data, with the remaining 200 images utilized for evaluating model performance. Experiments were conducted with k-fold $(k = 10)$ method cross-validation, and the average results were considered as the final outcome. This dataset provides 29 landmarks, but only the same 19 landmarks as in the ISBI 2015 dataset are used, ensuring a fair comparison with other methods. This new dataset offers researchers a more challenging scenario to test the generalization capabilities of their methods across various imaging devices.", + "bbox": [ + 212, + 688, + 787, + 840 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Du-CeLR", + "bbox": [ + 663, + 114, + 730, + 126 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 785, + 126 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Evaluation metric. The evaluation of cephalometric landmark detection models typically employs the Mean Radial Error (MRE) and the Successful Detection Rate (SDR) [7]. MRE is used to calculate the distance error between the predicted cephalometric landmarks and the ground truth, commonly serving as a measure of detection accuracy. The calculation method for MRE is defined as follows:", + "bbox": [ + 212, + 146, + 787, + 234 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\nR _ {i} ^ {j} = \\parallel \\mu_ {A} ^ {j} \\left(x _ {i}, y _ {i}\\right) - \\mu_ {g} ^ {j} \\left(x _ {i}, y _ {i}\\right) \\parallel_ {2} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 387, + 238, + 785, + 258 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\nM R E = \\frac {1}{T K} \\sum_ {i = 1} ^ {T} \\sum_ {j = 1} ^ {K} R _ {i} ^ {j} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 413, + 281, + 785, + 324 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "where $R_{i}^{j}$ denotes the radial error of the $i - th$ landmark in the $j - th$ image. $\\mu_A^j (x_i,y_i)$ represents the coordinates of the $i - th$ cephalometric landmark predicted for the $j$ -th image. $\\mu_g^j (x_i,y_i)$ denotes the ground truth coordinates of the $i - th$ cephalometric landmark in the $j - th$ image. $T$ represents the number of test images, and $K$ denotes the number of cephalometric landmark in each image. SDR is employed to quantify the discrepancy between the predicted cephalometric landmark and the ground-truth. If the radial error $R_{i}^{j}$ is no greater than $z\\mathrm{mm}$ (where $z = 2\\mathrm{mm}$ , $2.5\\mathrm{mm}$ , $3\\mathrm{mm}$ , $4\\mathrm{mm}$ ), the detection is considered as a successful one (Usually, $2\\mathrm{mm}$ range is acceptable in medical analysis [32,40]). 
The SDR is defined as follows:", + "bbox": [ + 212, + 335, + 787, + 488 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\nS D R _ {i} = \\frac {1}{T K} \\sum_ {j = 1} ^ {T} \\sum_ {j = 1} ^ {K} \\left\\{R _ {i} ^ {j} < z \\right\\} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 500, + 785, + 542 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.3 Ablation Study", + "text_level": 1, + "bbox": [ + 215, + 568, + 388, + 583 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this section, we perform several ablation studies on ISBI 2015 Challenge dataset to illustrate the effectiveness of the proposed component.", + "bbox": [ + 212, + 599, + 785, + 630 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/2355e2e4c65371c831a6a8f5941f04333ec58eaf5171138b3362d5ff0896044c.jpg", + "table_caption": [ + "Table 1: Varying different model structures. \"MF\" denotes Multi-level Features. \"HP\" denotes Heatmap. \"RE\" denotes Reference Encoder. \"RL\" denotes RLE Loss. \"FE\" denotes Finetune Encoder." + ], + "table_footnote": [], + "table_body": "
ID BaselineFeature Extractor moduleReference Encoder moduleFinetune Encoder moduleMRE(mm)2mm(SDR%)
MFHPRERLFERL
12.897454.75
22.258661.91
32.569857.07
42.012565.01
51.674574.04
61.243483.65
71.146886.84
81.151486.31
91.023088.12
101.008889.51
", + "bbox": [ + 225, + 712, + 781, + 839 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "C. Dai, Y. Wang et al.", + "bbox": [ + 271, + 114, + 423, + 128 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/9b23a2860ab6266ea539399f6be4a473cb4b3a9bb16667c261b2968306e0883d.jpg", + "image_caption": [ + "Fig. 4: Headmap visualization. The attention heatmap come from the feature extraction module." + ], + "image_footnote": [], + "bbox": [ + 220, + 147, + 359, + 255 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/ee135c863857534f12a34beb6e7e2166a632a2b99df0302be0a671f6f1734b16.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 361, + 148, + 500, + 256 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/3e16d6d65242bfddf8cf98e1a4804110176c3bf7fb99b2badc7791559221a80e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 148, + 640, + 256 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/d7dff5e6889794f02041e3f7a4e59ecc2b7fc3bf7ead126dd5f7e62531ca3ed0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 642, + 148, + 781, + 255 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Varying the model structures. We conduct experiments to verify the different model structures. All experimental results are presented in Table 1. Regarding the feature extractor module, the combination of the multi-level feature (MF) module improves the baseline in MRE and $2\\mathrm{mm}$ SDR indicators by $0.3276\\mathrm{mm}$ and $7.16\\%$ respectively, while the introduction of the heatmap (HP) module improves the baseline by $0.3276\\mathrm{mm}$ and $2.32\\%$ . When both MF and HP modules are integrated, there is $0.8849\\mathrm{mm}$ and $10.26\\%$ enhancement over the Baseline, underscoring the significant role of the feature extractor module in accuracy improvement. For the reference encoder module, the addition of reference encoder (RE) components and RLE Loss (RL) elements on the baseline foundation yielded $1.654\\mathrm{mm}$ and $28.9\\%$ accuracy improvement. When used in conjunction with the feature extractor module, the model's accuracy further increased by $0.0996\\mathrm{mm}$ and $3.19\\%$ . Regarding the finetune encoder module, its combined use with the feature extractor module led to a $1.8744\\mathrm{mm}$ and $33.37\\%$ improvement in model accuracy. The highest accuracy, reaching $1.0088\\mathrm{mm}$ and $89.51\\%$ , was achieved when the finetune encoder module was used in combination with both the reference encoder module and the feature extractor module. This underscores the significant impact of the three proposed modules on enhancing model accuracy. Finally, we visualize the attention heatmap in Figure 4. The heatmap is highly responsive at locations near the cephalometric landmarks.", + "bbox": [ + 217, + 325, + 785, + 627 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Varying the levels of fuse feature map. We explore the impact of feeding different levels of fuse feature maps into the proposed finetune encoder. 
As shown in Table 2, the performance grows consistently with more levels of fuse feature maps, e.g., $89.20\\%$ , $89.33\\%$ , $89.42\\%$ , $89.51\\%$ for 2, 3, 4, 5 levels of feature maps on 2mm SDR, respectively.", + "bbox": [ + 217, + 628, + 785, + 703 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Varying parameter of encoder module. We study the impact of encoder module on model performance from two aspects: the number of layers and feature dimensions. To simple the validation approach, the reference encoder and finetune encoder modules are set to the same number of layers and dimensions. First, we investigate the effects of altering the dimension of the encoder module. As illustrated in Table 3, there is a discernible enhancement in model efficacy concomitant with an increase in the dimensions of encoder layers. The peak performance of the model is attained when the dimension is augmented to 512. Furthermore, we conduct experiments by varying the number of encoder layers.", + "bbox": [ + 217, + 705, + 785, + 839 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Du-CeLR", + "bbox": [ + 665, + 114, + 730, + 126 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 116, + 782, + 126 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "As shown in Table 4, the performance grows at the first four layers and saturates at the fifth decoder layer.", + "bbox": [ + 215, + 146, + 784, + 176 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/e3c60ae3c5bf1d035e965205e899c8d3e23d18e366f942610ae9542ba17a2414.jpg", + "table_caption": [ + "Table 2: Varying the scale levels of fuse Table 3: Varying feature queries dimension feature map for feature extraction module. sions of encoder module." + ], + "table_footnote": [], + "table_body": "
F5 F4 F3 F2 MRE(mm)SDR%
2mm2.5mm3mm4mm
1.018189.2093.4896.18 98.41
✓ ✓1.015689.3393.4996.32 98.41
✓ ✓ ✓1.011389.4293.5096.38 98.54
✓ ✓ ✓ ✓1.008889.5193.5496.42 98.56
", + "bbox": [ + 225, + 215, + 500, + 292 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/67e5ac0f3468fc60502a5c0f1ba4ab7e8c0265e84d210091f64d17b1958abd94.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dim MRE(mm)SDR%
2mm2.5mm3mm4mm
1281.020189.0393.2695.7698.17
2561.019489.3293.3596.0298.32
5121.008889.5193.5896.4298.56
7681.009189.4793.6196.3998.53
", + "bbox": [ + 532, + 215, + 764, + 292 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Varying the input image resolutions. We undertake experimental investigations to ascertain the robustness of our method across varying input resolutions. As depicted in Table 5, there is a significant enhancement in the performance of the model concomitant with an increase in the resolution of input images. When the input image resolution is $1024 \\times 1024$ , the model reaches $1.0088\\mathrm{mm}$ and $89.51\\%$ in MRE and $2\\mathrm{mm}$ SDR metrics respectively. A further escalation in input image resolution results in a decline for model performance.", + "bbox": [ + 212, + 304, + 784, + 409 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/713b35652cc43b1d0a753ed70339a27d4a60a459b6f9b5a3902bd58615474bea.jpg", + "table_caption": [ + "Table 4: Varying the numbers of encoder Table 5: Varying the input image resolutions." + ], + "table_footnote": [], + "table_body": "
NumMRE(mm)SDR%
2mm2.5mm3mm4mm
11.083587.2393.0595.5197.96
21.024788.9893.3195.9298.32
31.013789.4693.4796.2898.47
41.008889.5193.5496.4298.56
51.009189.4893.5496.4598.59
", + "bbox": [ + 240, + 449, + 483, + 542 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/98a72a0cd8fc6ac7f623e8063485da54b37faa63df7861b26fbca06c899ca41f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ResolutionMRE(mm)SDR%
2mm2.5mm3mm4mm
256×2561.201284.5691.7995.4498.49
512×5121.067488.0793.3096.2598.57
768×7681.012989.4093.3396.0798.60
1024×10241.008889.5193.5496.4298.56
1280×12801.015389.3193.5195.4498.32
", + "bbox": [ + 506, + 449, + 787, + 542 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.4 Main Result", + "text_level": 1, + "bbox": [ + 215, + 560, + 364, + 574 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We evaluated our method on two cephalometric landmark datasets: ISBI 2015 Challenge [37] and ISBI 2023 Challenge datasets [25]. The final results are presented in Tables 6,7,8. The proposed approach achieved the least Mean Radical Error (MRE) and the highest $2\\mathrm{mm}$ Success Detection Rate (SDR), which is considered as the clinically accepted. Moreover, our method achieves end-to-end training and prediction for cephalometric landmarks.", + "bbox": [ + 212, + 583, + 784, + 672 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "ISBI 2015 Challenge test1. Table 6 presents the evaluation results for the ISBI 2015 Challenge test1 dataset. These state-of-the-art methods can be categorized into heatmap-based and regression-based methods. Our method demonstrates clear superiority over heatmap-based methods. Compared to the best heatmap-based method [2], our method achieves improvements of $0.11\\mathrm{mm}$ and $1.48\\%$ respectively in MRE and the $2\\mathrm{mm}$ SDR metrics. Additionally, compared to the best regression-based method, our method achieves improvements of $1.19\\%$ on the $2\\mathrm{mm}$ SDR metrics. Moreover, compared to the best approach, our method exhibits a significant advantage in terms of GFLOPs. In addition, compared to other low-resolution methods, our method has the lowest GFLOPs of only 23.0, while the $2\\mathrm{mm}$ SDR reaches $88.07\\%$ , which is superior to the other", + "bbox": [ + 212, + 674, + 785, + 840 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "C. Dai, Y. Wang et al.", + "bbox": [ + 271, + 114, + 421, + 128 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/f84675045c7467992bfbf029391e321cab81bb3bee095df7e9ec5d81b656dc83.jpg", + "image_caption": [ + "(a) ISBI 2015 Challenge test1" + ], + "image_footnote": [], + "bbox": [ + 222, + 143, + 344, + 271 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/96a60d3f51f97697365f9129c7e16828285d38b9b52a19b1ff374c982cfc87aa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 344, + 145, + 419, + 271 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/69c709f56919e3106140e022a2d76da06f1a9b513b71628d8a9015e9ebb88024.jpg", + "image_caption": [ + "Fig. 5: Qualitative detection results on ISBI 2015 and 2023 Challenge datasets. (a) and (b) correspond the detection results for the ISBI 2015 Challenge test1 and test2. (c) depicts the detection outcomes for the ISBI 2023 Challenge. The blue landmarks represent results annotated by medical professionals, while the red landmarks indicate the outcomes predicted by the model." 
+ ], + "image_footnote": [], + "bbox": [ + 421, + 143, + 527, + 271 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/1bc7c22a5cad2e8f9544b93a63a134fb58d958eca77bcc026b41b7409bd9cff0.jpg", + "image_caption": [ + "(b) ISBI 2015 Challenge test2" + ], + "image_footnote": [], + "bbox": [ + 527, + 145, + 602, + 271 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/55fb6e7001b9ed6cb435b90622122a995f57f3aea94b8d5906abf1e5a28e9e78.jpg", + "image_caption": [ + "(c) ISBI 2023 Challenge" + ], + "image_footnote": [], + "bbox": [ + 602, + 145, + 705, + 271 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/b1cbf1edff9f9048417490340bbfbda62f8e196527928a3ebc5e0734975aa7b8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 705, + 145, + 779, + 271 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/e8357a8061d49af14fc430c2ceada9674cd33b0afb98b0f0a52e4218fe3df37c.jpg", + "table_caption": [ + "Table 6: Quantitative results on the ISBI 2015 Challenge test1 dataset. * denotes other methods we implemented. Bold represents the best result." + ], + "table_footnote": [], + "table_body": "
MethodBackboneResolutionGFLOPs MRE(mm)SDR%
2mm2.5mm3mm4mm
Heatmap-based Methods
Chen R et al. [6]ResNet50800×640215.71.1786.6792.6795.5498.53
Zhong Z et al. [40]U-Net290×290+19×100×10092.21.1286.9191.8294.8897.90
CephaNN [26]ResNeXt50800×640982.81.1587.6193.1696.3598.74
Yao J et al. [34]ResNet18576×512+19×96×9640.11.1486.8493.0295.4398.95
Ao Y et al. [2]Densenet121800×640157.21.1288.0392.7395.9698.48
Huang K et al. [13]---1.0987.8792.4595.5498.59
SimCC* [21]HRNet48800×640164.91.1287.1691.9695.3798.18
Regression-based Methods
Gilmour L et al. [11]ResNet342432×1920220.21.0188.3293.1296.1498.63
Song Y et al. [29]ResNet50256×256+19×256×256102.51.0886.4091.7094.8097.80
Song Y et al. [30]U-Net480×387286.81.1985.2091.2094.4097.20
Zeng M et al. [36]---1.3481.3789.0993.7997.86
King C H et al. [17]---1.1786.1491.7294.9197.96
Hong W et al. [12]---1.1285.2690.6793.5497.19
Poseur* [24]ResNet50800×64046.11.1486.5691.0994.0097.23
OursResNet34512×51223.01.0788.0793.3096.2598.57
OursResNet341024×102495.01.0189.5193.5496.4298.56
", + "bbox": [ + 220, + 422, + 789, + 656 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "methods. The qualitative detection results of the ISBI 2015 Challenge test1 dataset are displayed in Figure 4.", + "bbox": [ + 212, + 686, + 782, + 717 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "ISBI 2015 Challenge test2. The evaluation results for the ISBI 2015 Challenge test2 dataset are presented in Table 7. Our method outperforms heatmap-based methods by significant margins. Compared to best method [13], our method achieves an increase of $0.07\\mathrm{mm}$ in MRE and $0.48\\%$ in $2\\mathrm{mm}$ SDR. In addition, We introduce an end-to-end human keypoint detection method into the cephalometric landmark detection task, which is implemented based on the deformable decoder architecture. Experiments show that our method is significantly better than the human keypoint method in accuracy. Moreover, our", + "bbox": [ + 212, + 718, + 784, + 839 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Du-CeLR", + "bbox": [ + 665, + 114, + 730, + 126 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "method is more convenient to deploy. Finally, the performance of the released methods on ISBI 2015 Challenge Test1 dataset are all better than Test2. It seems that the data distribution of Test1 dataset is more consistent with Train dataset. Qualitative detection results of our method on the ISBI 2015 Challenge test2 dataset can be found in Figure 5b.", + "bbox": [ + 212, + 146, + 787, + 223 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/e850726578b7f0fbe1d8386c72eafd277cd432170d298e1f4b86aedaceef7f8d.jpg", + "table_caption": [ + "Table 7: Quantitative results on the ISBI 2015 Challenge test2 dataset." + ], + "table_footnote": [], + "table_body": "
MethodMRE(mm)SDR%
2mm2.5mm3mm
Heatmap-based Methods
Chen R et al. [6]1.4875.0582.8488.5395.05
Zhong Z et al. [40]1.4276.0082.9088.7494.32
CephaNN [26]1.4376.3282.9587.9594.63
Yao J et al. [34]1.4875.4482.0386.6595.12
Ao Y et al. [2]1.4277.0084.4289.4795.21
Huang K et al. [13]1.3479.0587.9589.7995.05
SimCC* [21]1.5474.1680.6886.3294.05
Regression-based Methods
Gilmour L et al. [11]1.3377.0583.1688.8494.89
Song Y et al. [29]1.5474.0081.3087.5094.30
Song Y et al. [30]1.6472.2079.5085.0093.50
Zeng M et al. [36]1.6470.5879.5386.0593.32
King C H et al. [17]1.5074.5881.7487.2694.73
Hong W et al. [12]1.2879.2485.3290.4796.32
Poseur* [24]1.4874.4281.3786.6893.63
Ours1.2779.5386.4791.1196.32
", + "bbox": [ + 253, + 262, + 519, + 441 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/5269c341cafec6381c72a670de5c3ea1e56df004f25ba699681e5b8dc28cf6e1.jpg", + "table_caption": [ + "Table 8: Quantitative results on the ISBI 2023 Challenge." + ], + "table_footnote": [], + "table_body": "
methodMRE(mm)SDR%
2mm2.5mm3mm4mm
Jin H et al. [15]1.220083.7689.7192.7996.08
Poseur* [24]0.998288.5192.8295.3797.79
SimCC* [21]1.079588.3993.1295.3197.81
Huang K et al.* [13]1.074787.8792.5294.8797.42
Gilmour L et al.* [11]0.979389.3793.4795.9797.42
Ours0.937290.6894.2495.9797.89
", + "bbox": [ + 531, + 311, + 802, + 392 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "ISBI 2023 Challenge test. Regarding the ISBI 2023 Challenge test dataset, as illustrated in Table 8, Our method achieves the best performance on all metrics. Compared to the best-performing method [11], our approach significantly reduces the Mean Relative Error (MRE) from $0.9793\\mathrm{mm}$ to $0.9372\\mathrm{mm}$ and enhances the $2\\mathrm{mm}$ Success Detection Rate (SDR) from $89.37\\%$ to $90.68\\%$ . Moreover, in comparison with transformer-based methods, our approach demonstrates a lead of $0.061\\mathrm{mm}$ in MRE and $2.17\\%$ in $2\\mathrm{mm}$ SDR, respectively. Lastly, the qualitative detection results of our method on the ISBI 2023 Challenge test dataset are depicted in Figure 5c.", + "bbox": [ + 212, + 468, + 787, + 604 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 215, + 627, + 359, + 643 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this paper, we propose a novel regression model for cephalometric landmark detection for high-resolution X-ray image. This model only employs the encoder module within the transformer framework to construct the relationship between landmark features and image features. It is capable of regressing cephalometric landmark coordinate from coarse to fine and completes end-to-end training. Moreover, our model, compared to heatmap-based method, boasts low memory consumption and robustness against missing landmark. It offers a more straightforward end-to-end design compared to current regression-based method, performing one-time landmark detection on high-resolution X-ray images. Extensive experiments on the ISBI2015 and ISBI2023 datasets demonstrate that our method can achieve state-of-the-art performance compare with regression-based and heatmap-based methods.", + "bbox": [ + 212, + 657, + 787, + 840 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "C. Dai, Y. Wang et al.", + "bbox": [ + 271, + 113, + 423, + 128 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 217, + 143, + 401, + 162 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This work was supported by the National Natural Science Foundation of China (62122059, 82330064).", + "bbox": [ + 215, + 176, + 784, + 205 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 217, + 229, + 321, + 244 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Albarakati, S., Kula, K., Ghoneima, A.: The reliability and reproducibility of cephalometric measurements: a comparison of conventional and digital methods. Dentomaxillofacial Radiology 41(1), 11-17 (2012)", + "2. Ao Y, W.H.: Feature aggregation and refinement network for 2d anatomical landmark detection. Journal of Digital Imaging 36(2), 547-561 (2023)", + "3. B. Ibragimov, B. Likar, F.P., Vrtovec, T.: Automatic cephalometric x-ray landmark detection by applying game theory and random forests. In Proc. ISBI Int. Symp. on Biomedical Imaging (2014)", + "4. Cardillo, J., Sid-Ahmed, M.A.: An image processing system for locating craniofacial landmarks. IEEE transactions on medical imaging 13(2), 275-289 (1994)", + "5. 
Carion N, Massa F, S.G.e.a.: End-to-end object detection with transformers. European conference on computer vision. Cham: Springer International Publishing pp. 213-229 (2020)", + "6. Chen, R., Ma, Y., Chen, N., Lee, D., Wang, W.: Cephalometric landmark detection by attentive feature pyramid fusion and regression-voting. In: Medical Image Computing and Computer Assisted Intervention-MICCAI 2019: 22nd International Conference, Shenzhen, China, October 13-17, 2019, Proceedings, Part III 22. pp. 873-881. Springer (2019)", + "7. Devereux, L., Moles, D., Cunningham, S.J., McKnight, M.: How important are lateral cephalometric radiographs in orthodontic treatment planning? American Journal of Orthodontics and Dentofacial Orthopedics 139(2), e175-e181 (2011)", + "8. Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)", + "9. Dosovitskiy A, Beyer L, K.A.e.a.: An image is worth 16x16 words: Transformers for image recognition at scale. ArXiv preprint arXiv:2010.11929 (2020)", + "0. El-Feghi, M.S.A., Ahmadi, M.: Automatic localization of craniofacial landmarks for assisted cephalometry. Pattern Recognition 37(3), 609-621 (2004)", + "1. Gilmour L, R.N.: Locating cephalometric x-ray landmarks with foveated pyramid attention. Medical Imaging With Deep Learning. PMLR pp. 262-276 (2020)", + "2. Hong W, Kim S M, C.J.e.a.: Deep reinforcement learning using a multi-scale agent with a normalized reward strategy for automatic cephalometric landmark detection. 2023 4th International Conference on Big Data Analytics and Practices pp. 1-6 (2023)", + "3. Huang K, F.F.: An intelligent shooting reward learning network scheme for medical image landmark detection. Applied Sciences 12(20), 10190 (2022)", + "4. Indermun S, Shaik S, N.C.J.K.M.R.: Human examination and artificial intelligence in cephalometric landmark detection—is ai ready to take over? Dentomaxillofac Radiol 10.1259/dmfr.20220362 (2023)", + "5. Jin H, Che H, C.H.: Unsupervised domain adaptation for anatomical landmark detection. International Conference on Medical Image Computing and Computer-Assisted Intervention. Cham: Springer Nature Switzerland pp. 695-705 (2023)" + ], + "bbox": [ + 225, + 258, + 785, + 840 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Du-CeLR", + "bbox": [ + 663, + 114, + 730, + 126 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "16. Kaiming He, Xiangyu Zhang, S.R.J.S.: Deep residual learning for image recognition. ArXiv preprint arXiv:1512.03385 (2015)", + "17. King C H, Wang Y L, L.W.Y.e.a.: Automatic cephalometric landmark detection on x-ray images using object detection. 2022 IEEE 19th International Symposium on Biomedical Imaging (ISBI) pp. 1-4 (2022)", + "18. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. ArXiv preprint arXiv:1412.6980 (2014)", + "19. Lee H, Park M, K.J.: Cephalometric landmark detection in dental x-ray images using convolutional neural networks. Medical imaging 2017: Computer-aided diagnosis 10134, 494-499 (2017)", + "20. Li, H., Guo, Z., Rhee, S.M., Han, S., Han, J.J.: Towards accurate facial landmark detection via cascaded transformers. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4176-4185 (2022)", + "21. 
Li Y, Yang S, L.P.e.a.: Simcc: A simple coordinate classification perspective for human pose estimation. European Conference on Computer Vision. Cham: Springer Nature Switzerland 89-106 (2022)", + "22. Lindner, C., Cootes, T.: Fully automatic cephalometric evaluation using random forest regression-voting. IEEE International Symposium on Biomedical Imaging (ISBI) (2015)", + "23. Mamta Juneja, Poojita Garg, R.K.e.a.: A review on cephalometric landmark detection techniques. Biomedical Signal Processing and Control 66(102486) (2021)", + "24. Mao, W., Ge, Y., Shen, C., Tian, Z., Wang, X., Wang, Z., Hengel, A.v.d.: Poseur: Direct human pose regression with transformers. Proceedings of the European Conference on Computer Vision (ECCV) (October 2022)", + "25. Muhammad Anwaar Khalid, K.Z.e.a.: Cepha29: Automatic cephalometric landmark detection challenge 2023. ArXiv preprint arXiv:2212.04808 (2022)", + "26. Qian J, Luo W, C.M.e.a.: Cephann: a multi-head attention network for cephalometric landmark detection. IEEE Access 8, 112633-112641 (2020)", + "27. Ronneberger O, Fischer P, B.T.: U-net: Convolutional networks for biomedical image segmentation. Medical Image Computing and Computer-Assisted Intervention-MICCAI 2015: 18th International Conference, Munich, Germany, October pp. 5-9", + "28. Shaker A, Maaz M, R.H.e.a.: Unetr++: delving into efficient and accurate 3d medical image segmentation. ArXiv preprint arXiv:2212.04497 (2022)", + "29. Song, Y., Qiao, X., Iwamoto, Y., Chen, Y.w.: Automatic cephalometric landmark detection on x-ray images using a deep-learning method. Applied Sciences 10(7), 2547 (2020)", + "30. Song Y, Qiao X, I.Y.e.a.: An efficient deep learning based coarse-to-fine cephalometric landmark detection method. IEICE TRANSACTIONS on Information and Systems 104(8), 1359-1366 (2021)", + "31. Vaswani A, Shazeer N, P.N.e.a.: Attention is all you need. Advances in neural information processing systems (2017)", + "32. Wang, C.W., Huang, C.T., Hsieh, M.C., Li, C.H., Chang, S.W., Li, W.C., Vandaele, R., Marée, R., Jodogne, S., Geurts, P., et al.: Evaluation and comparison of anatomical landmark detection methods for cephalometric x-ray images: a grand challenge. IEEE transactions on medical imaging 34(9), 1890-1900 (2015)", + "33. Yang, S., Quan, Z., Nie, M., Yang, W.: Transpose: Keypoint localization via transformer. IEEE/CVF International Conference on Computer Vision (ICCV) (2021)", + "34. Yao J, Zeng W, H.T.e.a.: Automatic localization of cephalometric landmarks based on convolutional neural network. American journal of orthodontics and dentofacial orthopedics 161(3), e250-e259 (2022)" + ], + "bbox": [ + 215, + 146, + 784, + 839 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "C. Dai, Y. Wang et al.", + "bbox": [ + 271, + 114, + 421, + 127 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "35. Yuhui Yuan, Rao Fu, L.H.W.L.C.Z.X.C.J.W.: Hrformer: High-resolution transformer for dense prediction. ArXiv preprint arXiv:2110.09408 (2021)", + "36. Zeng M, Yan Z, L.S.e.a.: Cascaded convolutional networks for automatic cephalometric landmark detection. Medical Image Analysis 68, 101904 (2021)", + "37. Zhang H, Zhang J, L.C.S.E.S.P.N.T.G.S.W.Y.M.M.: All-net: Anatomical information lesion-wise loss function integrated into neural network for multiple sclerosis lesion segmentation. Neuroimage Clin 32(102854) (2021)", + "38. 
Zhang K, Zhang Z, L.Z.e.a.: Joint face detection and alignment using multitask cascaded convolutional networks. IEEE signal processing letters 23(10), 1499-1503 (2016)", + "39. Zhao, T., Wu, X.: Pyramid feature attention network for saliency detection. CVPR (2019)", + "40. Zhong Z, Li J, Z.Z.e.a.: An attention-guided deep regression model for landmark detection in cephalograms. Medical Image Computing and Computer Assisted Intervention-MICCAI 2019: 22nd International Conference, Shenzhen, China p. 13-17 (October 2019)", + "41. Ziyang Ye, H.Y., Li, B.: Uncertainty-aware u-net for medical landmark detection. Arxiv preprint arXiv:2303.10349v1 (2023)" + ], + "bbox": [ + 212, + 146, + 787, + 397 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Du-CeLR", + "bbox": [ + 663, + 114, + 730, + 126 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 16 + } +] \ No newline at end of file diff --git a/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/2cca7425-9c6a-47c2-b889-8be913ae41cc_model.json b/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/2cca7425-9c6a-47c2-b889-8be913ae41cc_model.json new file mode 100644 index 0000000000000000000000000000000000000000..76be83e2c5136cc34bad5cb4a4786b24e7e75093 --- /dev/null +++ b/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/2cca7425-9c6a-47c2-b889-8be913ae41cc_model.json @@ -0,0 +1,2225 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.223, + 0.141, + 0.78, + 0.21 + ], + "angle": 0, + "content": "A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image" + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.234, + 0.78, + 0.266 + ], + "angle": 0, + "content": "Chao Dai\\(^{1\\dagger}\\), Yang Wang\\(^{2\\dagger(\\boxtimes)}\\), Chaolin Huang\\(^{3\\dagger}\\), Jiakai Zhou\\(^{4}\\), Qilin Xu\\(^{5}\\), and Minpeng Xu\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.433, + 0.278, + 0.571, + 0.291 + ], + "angle": 0, + "content": "\\(^{1}\\) Tianjin University" + }, + { + "type": "text", + "bbox": [ + 0.388, + 0.292, + 0.614, + 0.306 + ], + "angle": 0, + "content": "2 Anhui University of Technology" + }, + { + "type": "text", + "bbox": [ + 0.344, + 0.306, + 0.659, + 0.319 + ], + "angle": 0, + "content": "\\(^{3}\\) Jiangxi University of Science and Technology" + }, + { + "type": "text", + "bbox": [ + 0.323, + 0.32, + 0.681, + 0.333 + ], + "angle": 0, + "content": "\\(^{4}\\) Nanjing University of Aeronautics and Astronautics" + }, + { + "type": "text", + "bbox": [ + 0.42, + 0.334, + 0.585, + 0.347 + ], + "angle": 0, + "content": "5 West Anhui University" + }, + { + "type": "text", + "bbox": [ + 0.261, + 0.383, + 0.744, + 0.675 + ], + "angle": 0, + "content": "Abstract. Accurate detection of cephalometric landmarks is crucial for orthodontic diagnosis and treatment planning. Current methods rely on a cascading form of multiple models to achieve higher accuracy, which greatly complicates both training and deployment processes. In this paper, we introduce a novel regression paradigm capable of simultaneously detecting all cephalometric landmarks in high-resolution X-ray images. Our approach only utilizes the encoder module from the transformer to design a dual-encoder architecture, enabling precise detection of cephalometric landmark positions from coarse to fine. 
Specifically, the entire model architecture comprises three main components: a feature extractor module, a reference encoder module, and a fine-tune encoder module. These components are respectively responsible for feature extraction and fusion for X-ray images, coarse localization of cephalometric landmark, and fine-tuning of cephalometric landmark positioning. Notably, our framework is fully end-to-end differentiable and innately learns to exploit the interdependencies among cephalometric landmarks. Experiments demonstrate that our method significantly surpasses the current state-of-the-art methods in Mean Radical Error (MRE) and the 2mm Success Detection Rate (SDR) metrics, while also reducing computational resource consumption. The code is available at https://github.com/huang229/D-CeLR" + }, + { + "type": "text", + "bbox": [ + 0.261, + 0.687, + 0.741, + 0.715 + ], + "angle": 0, + "content": "Keywords: Cephalometric landmark \\(\\cdot\\) High-resolution \\(\\cdot\\) Dual-encoder \\(\\cdot\\) Reference encoder \\(\\cdot\\) Finetune encoder" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.74, + 0.377, + 0.756 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.771, + 0.788, + 0.817 + ], + "angle": 0, + "content": "Cephalometric analysis represents a pivotal diagnostic tool extensively utilized in orthodontics and orthognathic surgery. This analysis involves the annotation of dental, skeletal, and soft tissue structures in lateral cephalometric radiographs." + }, + { + "type": "page_footnote", + "bbox": [ + 0.231, + 0.825, + 0.727, + 0.841 + ], + "angle": 0, + "content": "\\(\\dagger\\) Equal contribution. (Corresponding authors (youngnuaa@gmail.com)." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.424, + 0.129 + ], + "angle": 0, + "content": "C. Dai, Y. Wang et al." + }, + { + "type": "image", + "bbox": [ + 0.273, + 0.147, + 0.481, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.271, + 0.326, + 0.732, + 0.348 + ], + "angle": 0, + "content": "(a) Cephalometric landmark coordi-(b) Cephalometric landmark medical name nate positions." + }, + { + "type": "table", + "bbox": [ + 0.482, + 0.146, + 0.732, + 0.324 + ], + "angle": 0, + "content": "
19 Landmarks in Cephalometric
1Sella11Lower Incisor Tip
2Nasion12Upper Incisor Tip
3Orbitale13Labrale superius
4Porion14Labrale inferius
5Upper Incisor Apex15Subnasale
6B-point16Soft Tissue Pogonion
7Pogonion17Posterior Nasal Spine
8Menton18Anterior Nasal Spine
9Gnathion19Articulare
10Gonion
" + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.36, + 0.788, + 0.403 + ], + "angle": 0, + "content": "Fig. 1: Cephalometric landmark visualization. (a) Cephalometric landmark coordinate positions. Red indicates hard tissue points and blue indicates soft tissue points. (b) Cephalometric landmark medical name." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.432, + 0.785, + 0.538 + ], + "angle": 0, + "content": "As illustrated in Figure 1, these cephalometric landmarks are core to the analysis, providing reference points for subsequent qualitative assessments of angles and distances. However, the manual annotation of these landmarks is a laborious, time-consuming, and highly subjective task, impacting the accuracy of the annotations. Consequently, a precise and robust automated method for annotating cephalometric landmarks holds significant importance for effective treatment planning [1,6,7,14,23]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.539, + 0.788, + 0.794 + ], + "angle": 0, + "content": "Existing methods for cephalometric landmark detection can be broadly classified into two categories: heatmap-based and regression-based approaches. The heatmap-based approach involves predicting a heatmap that indicates the probability of each pixel in a region corresponding to various cephalometric landmarks. This modality has seen extensive applications in the detection of cephalometric landmarks. For example, Chen et al. [6] introduced a feature pyramid fusion-based heatmap method for simultaneous landmark detection, achieving impressive results. Qian J et al. [26] advanced the accuracy of cephalometric landmark detection by designing a multi-head attention module and a novel regional loss function. However, heatmap-based methods exhibit certain disadvantages. 1). The ground truth requires manual design and heuristic adjustments, with inevitable noise impacting the final outcomes [13,29,40]. 2). post-processing operations are necessary to locate single maximum values in heatmaps. These operations are typically heuristic and non-differentiable, undermining the model's capacity for end-to-end training. 3). models generally adopt a U-net structure [27,28,41], while processing high-resolution X-ray images, consumes more computational resources and is prone to missing cephalometric landmarks." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.796, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Regression-based methods directly map the input image to the coordinates of cephalometric landmarks, typically employing a feedforward network (FFN) for prediction. The regression-based methods is considerably more streamlined" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.665, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "Du-CeLR" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.328 + ], + "angle": 0, + "content": "compared to heatmap-based methods, as the prediction of cephalometric landmarks is inherently a process of determining a set of coordinate values. Numerous regression-based techniques exist for predicting cephalometric landmarks. For example, Song Y et al. [29] utilizes a base network for coarse localization of cephalometric landmarks, followed by region-specific cropping and refined positioning using a secondary model. Gilmour L et al. [11] constructs individual models for each landmark to predict their locations. 
Regression-based methods circumvent the necessity for non-maximum suppression, heatmap generation, and quantization error correction. However, to achieve higher precision on high-resolution X-ray images, current approaches predominantly rely on cascading multiple models, which compromises the inherent advantages of end-to-end training and prediction for regression-based methods." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.332, + 0.788, + 0.59 + ], + "angle": 0, + "content": "To address these issues, we introduce a novel regression paradigm that exclusively utilizes the encoder module of transformer for the one-time detection of all cephalometric landmarks on high-resolution X-ray images. Specifically, we design a feature extraction module based on Convolutional Neural Networks (CNN) to accomplish feature extraction and fusion for X-ray images. Subsequently, the extracted features are fed into a reference encoder module for the coarse localization of cephalometric landmarks. Finally, the coarsely localized cephalometric landmarks, along with the fused features, are inputted into a finetune encoder module, which iteratively refines the positioning of the cephalometric landmarks from coarse to fine detail. Moreover, our method pioneers the complete end-to-end training and deployment for the detection of cephalometric landmarks on high-resolution X-ray images. Extensive experiments demonstrate that our approach achieves state-of-the-art performance on popular benchmarks with a ResNet-34 backbone. Specifically, we achieve a Mean Radial Error (MRE) of \\(1.01\\mathrm{mm}\\), \\(1.27\\mathrm{mm}\\), and \\(0.9372\\mathrm{mm}\\) on the ISBI2015 test1, ISBI2015 test2, and ISBI2023 test datasets, respectively. Furthermore, our method significantly reduces GFLOPs, by \\(132\\%\\) compared to the previously best method [11]." + }, + { + "type": "text", + "bbox": [ + 0.24, + 0.594, + 0.608, + 0.607 + ], + "angle": 0, + "content": "The main contributions of this work are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.636, + 0.784, + 0.71 + ], + "angle": 0, + "content": "- We propose an innovative regression paradigm for high-resolution X-ray images, which enables the prediction of all cephalometric landmarks through a single model. Moreover, our method facilitates end-to-end training and prediction, which not only improves efficiency but also enhances the feasibility of the model in practical applications." + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.716, + 0.785, + 0.772 + ], + "angle": 0, + "content": "- We have designed a dual-encoder structure, comprising a reference encoder module and a finetune encoder module. The reference encoder module accomplishes coarse localization of cephalometric landmarks, while the finetune encoder module refines this localization in a layer-by-layer updating manner." + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.78, + 0.785, + 0.836 + ], + "angle": 0, + "content": "- Our proposed regression approach significantly enhances the precision of cephalometric landmark detection. Compared to state-of-the-art methods, we achieve superior performance on both the ISBI2015 and ISBI2023 test datasets." 
+ }, + { + "type": "list", + "bbox": [ + 0.225, + 0.636, + 0.785, + 0.836 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.232, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.424, + 0.129 + ], + "angle": 0, + "content": "C. Dai, Y. Wang et al." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.145, + 0.388, + 0.161 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.176, + 0.788, + 0.283 + ], + "angle": 0, + "content": "With the seminal work of Lee et al. [19], which first introduced the use of deep learning for cephalometric landmark detection. Deep learning-based methods [2,17,34] have fully surpassed traditional pattern matching [4,10] and random forest regression-based methods [3,22] in terms of accuracy for cephalometric landmark detection. This section primarily focuses on two deep learning-based approaches for cephalometric landmark detection and the transformer architectures for regression of keypoints." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.303, + 0.473, + 0.318 + ], + "angle": 0, + "content": "2.1 Heatmap-Based Methods" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.327, + 0.789, + 0.6 + ], + "angle": 0, + "content": "Heatmap-based methods predict the likelihood of each pixel in the image corresponding to each cephalometric landmark. King C H et al. [17] utilized object detection techniques and designed a multitask loss without bounding box constraints to optimize landmark acquisition in the model. Chen R et al. [6] proposed a heatmap detection method based on feature pyramid fusion to complete all cephalometric landmark detection, surpassing other methods in effectiveness, but their multi-scale feature pyramid fusion is highly memory-intensive. Zhong Z et al. [40] adopted a two-stage landmark detection approach, which not only reduces memory consumption but also allows for fine-tuning of coarse landmark detection results on local image regions. Qian J et al. [26] enhanced the accuracy to new heights in the ISBI 2015 dataset by designing a multi-head attention module and a new regional loss function, while Ao Y et al. [2] developed a multiscale feature aggregation (MSFA) module and multi-head loss function. Although heatmap-based cephalometric landmark detection achieves high accuracy, its application to high-resolution X-ray images and the common use of U-net structures in models result in substantial memory resource consumption. Moreover, the post-processing required in heatmap-based methods disrupts the integrity of end-to-end training and deployment of the model." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.62, + 0.485, + 0.636 + ], + "angle": 0, + "content": "2.2 Regression-Based Methods" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.644, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Currently, the majority of regression methods for cephalometric landmark detection on high-resolution X-ray images utilize multi-stage or multi-model strategies. Song Yet et al. [29,30] proposed a method combining traditional regression algorithms with deep learning for coarse localization of landmarks, followed by cropping the region of interest in the original image to create a new image for refined localization using a secondary model. However, their accuracy is substantially lower than that achieved by heatmap-based methods [2, 26]. Zeng M et al. 
[36] introduced a three-tier cascading neural network for cephalometric landmark regression, akin to the concept used in the MTCNN model [38] for face detection. This approach significantly reduced memory resource consumption but did not achieve the desired level of accuracy. Gilmour L et al. trained 19 distinct models to predict each cephalometric landmark position, attaining accuracy on the ISBI 2015 cephalometric dataset comparable to heatmap-based" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.665, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "Du-CeLR" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.253 + ], + "angle": 0, + "content": "methods [26,34]. This greatly encouraged the use of low-memory-consuming regression methods in landmark detection. However, the necessity of maintaining a separate model for each landmark adds complexity to training and deployment. While some regression methods have reached heatmap-based method accuracy levels, they typically involve designing multiple network models for predictions. Moreover, these methods have also not achieved end-to-end training and deployment." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.276, + 0.527, + 0.289 + ], + "angle": 0, + "content": "2.3 Transformer-based architectures" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.3, + 0.785, + 0.497 + ], + "angle": 0, + "content": "The Transformer [31], proposed by Vaswani et al., originally designed for natural language processing tasks, employs an encoder-decoder architecture based on self-attention and feed-forward networks. Recently, Transformer-based models have demonstrated significant potential in computer vision tasks [5,9], including various works applying the Transformer structure to keypoint estimation. Such as TransPose [33] and HRFormer [35] utilized the encoder-decoder structure of transformers for human keypoint regression. Poseur [24] and DTLD [20] have adopted the latest deformable transformer architecture for efficient regression of human keypoints and facial landmarks. Despite the high performance achieved by transformer-based methods in keypoint regression tasks, they present certain challenges: 1) They are primarily used for low-resolution images; 2) The deformable transformer architecture is more complex for deployment. In contrast, our method addresses these issues and achieves significantly higher performance." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.52, + 0.33, + 0.536 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.552, + 0.787, + 0.844 + ], + "angle": 0, + "content": "The overall architecture, as illustrated in Figure 2, presents our proposed dual-encoder model which progressively predicts cephalometric landmark coordinate from coarse to fine on high-resolution X-ray images. It comprises a feature extractor for image feature extraction, a reference encoder for coarse cephalometric landmark localization, and a finetune encoder for precise cephalometric landmark localization. For the input image, we initially obtain multi-scale features (S2, S3, S4, and S5) and a fused feature \\( F_{u} \\) through the feature extractor (Sec.3.1). The feature map S5 is flattened to produce the image context queries \\( V_{FR}^{C} \\), and coarse landmark content queries \\( V_{LR}^{C} \\) are initialized randomly. 
The image context queries \\( V_{FR}^{C} \\) and coarse landmark context queries \\( V_{LR}^{C} \\) are fed into the reference encoder along with their position queries \\( V_{R}^{P} \\), updating to corresponding context queries \\( V_{LR}^{C'} \\) and \\( V_{FR}^{C'} \\). Subsequently, the context queries \\( V_{LR}^{C'} \\) are utilized to predict the coarse coordinate of cephalometric landmark \\( \\mu_{R} \\in R^{K \\times 2} \\) and coarse distribution \\( \\sigma_{R} \\in R^{K \\times 1} \\) via FFN (Sec.3.2). Next, the fused feature map Fu is also flattened to generate image context queries \\( V_{FA}^{C} \\), and fine landmark content queries \\( V_{LA}^{C} \\) are initialized. Unlike the reference encoder module, which solely uses content and position queries as input, the coarse landmark coordinates \\( \\mu_{R} \\) and feature map \\( F_{u} \\) are also fed into the finetune encoder module to update the content queries \\( V_{LA}^{C'} \\) and \\( V_{FA}^{C'} \\). Finally, the content queries \\( V_{LA}^{C'} \\) is operated" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.424, + 0.129 + ], + "angle": 0, + "content": "C. Dai, Y. Wang et al." + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.146, + 0.784, + 0.365 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.376, + 0.788, + 0.406 + ], + "angle": 0, + "content": "Fig. 2: The overview architecture of our method, which contains (a) feature extractor module, (b) reference encoder module and (c) finetune encoder module." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.433, + 0.788, + 0.541 + ], + "angle": 0, + "content": "by the FFN to produce cephalometric landmark coordinate \\(\\mu_A \\in R^{K \\times 2}\\) and distribution \\(\\sigma_A \\in R^{K \\times 1}\\) (Sec.3.3). In addition, different loss functions are employed for supervising the training of various modules. For the feature extractor module, Dice loss and Mean Squared Error (MSE) loss are utilized to aid model optimization. For the reference encoder and finetune encoder modules, Residual Log-likelihood Estimation(RLE) loss is applied to optimize the model's output cephalometric landmark coordinates \\(\\mu\\) and distribution \\(\\sigma\\) (Sec.3.4)." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.56, + 0.411, + 0.574 + ], + "angle": 0, + "content": "3.1 Feature Extractor" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.584, + 0.789, + 0.766 + ], + "angle": 0, + "content": "ResNet34 [16] is utilized as the backbone in our model, from which multi-level feature maps [39] are extracted, as illustrated in Figure 2. Initially, we apply downsampling operations to scale the feature maps \\( S2 \\), \\( S3 \\), and \\( S4 \\) to the same dimension and size as the feature map \\( S5 \\). Subsequently, the feature maps outputted by the backbone are summed with their respective positional maps (Pos) to yield new feature maps \\( F2 \\), \\( F3 \\), \\( F4 \\), and \\( F5 \\). These feature maps \\( F2 \\), \\( F3 \\), \\( F4 \\), and \\( F5 \\) are aggregated to generate the fused feature map \\( F_{u} \\). The feature map \\( S5 \\) is directly fed into the reference encoder module to coarse locate cephalometric landmark, while the fused feature map \\( F_{u} \\) is fed into the finetune encoder module to precise locate cephalometric landmark. 
Moreover, to enhance the model's performance, the feature map \( S5 \) is processed through convolution to generate a heatmap, which is optimized by Dice loss and MSE loss." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.786, + 0.419, + 0.801 + ], + "angle": 0, + "content": "3.2 Reference Encoder" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.789, + 0.842 + ], + "angle": 0, + "content": "The reference encoder aims to establish the relationship between cephalometric landmark queries and feature maps, thereby facilitating the coarse prediction of" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.666, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "Du-CeLR" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.785, + 0.126 + ], + "angle": 0, + "content": "7" + }, + { + "type": "image", + "bbox": [ + 0.321, + 0.149, + 0.484, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.318, + 0.347, + 0.489, + 0.359 + ], + "angle": 0, + "content": "(a) reference encoder module" + }, + { + "type": "image", + "bbox": [ + 0.493, + 0.15, + 0.685, + 0.344 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.501, + 0.347, + 0.668, + 0.359 + ], + "angle": 0, + "content": "(b) finetune encoder module" + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.38, + 0.784, + 0.409 + ], + "angle": 0, + "content": "Fig. 3: The detailed illustration of (a) reference encoder module and (b) finetune encoder module." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.44, + 0.788, + 0.624 + ], + "angle": 0, + "content": "cephalometric landmarks. As illustrated in Figure 2b and Figure 3a, the reference encoder module follows the typical transformer encoder paradigm. It comprises \(N\) identical layers within the encoder, each layer consisting of Layer Normalization (LN), Multi-Head Self-Attention (MHSA), and Feed-Forward Networks (FFN). Specifically, we initialize \(K\) cephalometric landmark content queries \(V_{LR}^{C}\) and utilize the flattened feature map \(S5\) as the image content queries \(V_{FR}^{C}\). Drawing inspiration from the positional encoding of BERT [8], we generate the positional queries \(V_{R}^{P}\). These content and positional queries are fed into the reference encoder. After \(N\) layers of iteration, the reference encoder outputs the updated cephalometric landmark content queries \(V_{LR}^{C'}\). These content queries are passed through an FFN layer to predict the coarse cephalometric landmark coordinates \(\mu_{R}\) and distribution \(\sigma_{R}\)." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.646, + 0.411, + 0.66 + ], + "angle": 0, + "content": "3.3 Finetune Encoder" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.674, + 0.789, + 0.844 + ], + "angle": 0, + "content": "The finetune encoder employs a layer-to-layer update mechanism to achieve more precise cephalometric landmark detection. The structure of the finetune encoder, as shown in Figure 2c and Figure 3b, also adheres to the typical transformer encoder paradigm, consisting of \(M\) identical layers within the encoder. Unlike in the reference encoder module, the cephalometric landmark coordinates \(\mu_{R}\) are continually updated in each layer of the finetune encoder module. Specifically, we initialize \(K\) cephalometric landmark content queries \(V_{LA}^{C}\) and flatten the fused feature map \(F_{u}\) to serve as the image content queries \(V_{FA}^{C}\).
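Both encoder modules stack the LN/MHSA/FFN layer recipe described above; a minimal PyTorch sketch of one such layer follows. The pre-norm residual wiring and sizes are assumptions, not the paper's code.

```python
import torch
import torch.nn as nn

class EncoderLayer(nn.Module):
    """One encoder layer built from LN, MHSA, and FFN (Sec. 3.2 recipe)."""
    def __init__(self, dim: int = 256, heads: int = 8, ffn_dim: int = 1024):
        super().__init__()
        self.ln1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.ln2 = nn.LayerNorm(dim)
        self.ffn = nn.Sequential(nn.Linear(dim, ffn_dim), nn.ReLU(),
                                 nn.Linear(ffn_dim, dim))

    def forward(self, content: torch.Tensor, pos: torch.Tensor) -> torch.Tensor:
        # Position queries are added to the normalized content queries before
        # attention; landmark and image tokens travel in one joint sequence,
        # e.g. content = torch.cat([landmark_tokens, image_tokens], dim=1).
        x = self.ln1(content)
        q = k = x + pos
        content = content + self.attn(q, k, x, need_weights=False)[0]
        content = content + self.ffn(self.ln2(content))
        return content
```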
Drawing inspiration from the positional encoding of BERT, we generate position queries \(V_{A}^{P}\). Five inputs are fed into the finetune encoder module, namely the fine landmark content queries \(V_{LA}^{C}\), image content queries \(V_{FA}^{C}\), position queries \(V_{A}^{P}\), the fused feature map \(F_{u}\)," + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.424, + 0.129 + ], + "angle": 0, + "content": "C. Dai, Y. Wang et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.332 + ], + "angle": 0, + "content": "and the coarse landmark coordinates \(\mu_R\). Within the finetune encoder module, we first sample feature vectors from the fused feature map \(F_u\) at the coarse cephalometric landmark coordinates \(\mu_R\) and add them to the fine landmark queries \(V_{LA}^{C}\). We combine content and position queries and feed them into the encoder to calculate the relationships between the fine landmark and image content queries. Next, to adjust the landmark positions, we use the updated cephalometric landmark content queries \(V_{LA}^{C'}\) to calculate \((\Delta x, \Delta y)\) offsets via an FFN layer and add them back to the previous cephalometric landmark coordinates \(\mu_R\). In this way, the finetune encoder module refines the content queries progressively by stacking multiple such layers, outputting \(V_{LA}^{C'}\) and \(V_{FA}^{C'}\). Finally, the cephalometric landmark content queries \(V_{LA}^{C'}\), followed by an FFN layer, predict the fine cephalometric landmark coordinates \(\mu_A\) and distribution \(\sigma_A\)." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.349, + 0.379, + 0.364 + ], + "angle": 0, + "content": "3.4 Loss Function" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.371, + 0.788, + 0.448 + ], + "angle": 0, + "content": "As shown in Figure 2, the loss function of our method is composed of two key components: 1) the heatmap loss of the feature extraction module, and 2) the cephalometric landmark regression loss for both the reference encoder and finetune encoder modules. The overall loss function of our method can be formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.367, + 0.462, + 0.786, + 0.478 + ], + "angle": 0, + "content": "\[\nL = \lambda_{HM} L_{HM} + \lambda_{RE} L_{RE} + \lambda_{FE} L_{FE} \tag{1}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.482, + 0.788, + 0.557 + ], + "angle": 0, + "content": "where \( L_{HM} \), \( L_{RE} \) and \( L_{FE} \) represent the feature extraction, reference encoder, and finetune encoder module loss functions respectively. \( \lambda_{HM} \), \( \lambda_{RE} \), and \( \lambda_{FE} \) are the hyper-parameters used to balance the three losses, and they are all set to 1.0.
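For concreteness, a minimal sketch of how this weighted objective and the heatmap term defined in Eq. (2) below could be assembled; the soft-Dice smoothing constant and function names are assumptions, not the paper's code.

```python
import torch
import torch.nn.functional as F

def dice_loss(pred_hm: torch.Tensor, gt_hm: torch.Tensor,
              eps: float = 1e-6) -> torch.Tensor:
    # Soft Dice over predicted and ground-truth heatmaps (eps is assumed).
    inter = (pred_hm * gt_hm).sum()
    return 1.0 - (2.0 * inter + eps) / (pred_hm.sum() + gt_hm.sum() + eps)

def heatmap_loss(pred_hm: torch.Tensor, gt_hm: torch.Tensor) -> torch.Tensor:
    # L_HM = Dice + MSE, as in Eq. (2) below.
    return dice_loss(pred_hm, gt_hm) + F.mse_loss(pred_hm, gt_hm)

def total_loss(l_hm, l_re, l_fe, weights=(1.0, 1.0, 1.0)):
    # L = lambda_HM * L_HM + lambda_RE * L_RE + lambda_FE * L_FE, Eq. (1).
    return weights[0] * l_hm + weights[1] * l_re + weights[2] * l_fe
```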
\( L_{HM} \) consists of the Dice loss and the MSE loss and is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.355, + 0.562, + 0.786, + 0.587 + ], + "angle": 0, + "content": "\[\nL_{HM} = \mathrm{Dice}\left(\hat{P}_{hp}, P_{hp}\right) + \mathrm{MSE}\left(\hat{P}_{hp}, P_{hp}\right) \tag{2}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.597, + 0.787, + 0.659 + ], + "angle": 0, + "content": "where \(\hat{P}_{hp}\) and \(P_{hp}\) are the predicted heatmap and ground-truth heatmap respectively. For the cephalometric landmark regression loss of the reference encoder module, we adopt the Residual Log-likelihood Estimation (RLE) loss. The loss is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.414, + 0.66, + 0.786, + 0.677 + ], + "angle": 0, + "content": "\[\nL_{RE} = \mathrm{RLE}\left(\mu_{R}, \sigma_{R}; \mu_{g}\right) \tag{3}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.68, + 0.788, + 0.743 + ], + "angle": 0, + "content": "where \(\mu_R\) and \(\sigma_R\) are the coarse cephalometric landmark coordinates and distribution output by the reference encoder module, and \(\mu_g\) denotes the ground-truth cephalometric landmark coordinates. For the cephalometric landmark regression loss of the finetune encoder module, we also adopt the RLE loss. The loss is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.393, + 0.748, + 0.786, + 0.789 + ], + "angle": 0, + "content": "\[\nL_{FE} = \sum_{i = 1}^{M} \mathrm{RLE}\left(\mu_{A, i}, \sigma_{A, i}; \mu_{g}\right) \tag{4}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.788, + 0.84 + ], + "angle": 0, + "content": "where \(M\) is the number of finetune encoder layers, and \(\mu_{A,i}\) and \(\sigma_{A,i}\) represent the cephalometric landmark coordinates and distribution output by the \(i\)-th layer of the finetune encoder module." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.665, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "Du-CeLR" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.117, + 0.786, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.146, + 0.377, + 0.163 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.18, + 0.788, + 0.254 + ], + "angle": 0, + "content": "In this section, we assess our method on standard benchmarks for the cephalometric landmark detection task. We first perform several ablation studies to underline the advantages of our proposed methods and to establish the optimal settings for hyperparameters. Finally, we compare the performance of our model with state-of-the-art methods." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.279, + 0.458, + 0.294 + ], + "angle": 0, + "content": "4.1 Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.305, + 0.789, + 0.518 + ], + "angle": 0, + "content": "Our model is built on the PyTorch framework. We use ResNet-34, pre-trained on ImageNet, as the backbone. Our architecture includes 4 layers for both the reference encoder and finetune encoder modules. All additional layers that we introduce are initialized randomly. Model training and testing are performed on one NVIDIA RTX 3060 (12 GB) GPU. For model optimization, we use Adam [18], with parameters \(\beta_1 = 0.9\), \(\beta_2 = 0.999\), and a weight decay of \(10^{-4}\). The batch size is set to 4. The model is trained for 1000 epochs.
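A minimal sketch of this optimization setup; the stand-in model and the epoch-level scheduler stepping are assumptions, not the paper's code.

```python
import torch

model = torch.nn.Linear(256, 2)  # stand-in for the full network (assumption)

# Adam with the stated hyper-parameters; the learning rate (2e-4, see below)
# is decayed with a cosine schedule over the 1000 training epochs.
optimizer = torch.optim.Adam(model.parameters(), lr=2e-4,
                             betas=(0.9, 0.999), weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=1000)

for epoch in range(1000):
    # ... forward/backward passes over the loader with batch size 4 ...
    scheduler.step()
```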
The initial learning rate is \(2 \times 10^{-4}\) and is dynamically updated with a cosine schedule during training. Data augmentation techniques are employed, encompassing random cropping and random rotation. For the random cropping operation, all cephalometric landmarks are preserved during each cropping process. For the random rotation operation, we select a rotation angle range of [-30, 30] degrees. Ultimately, the image is scaled to \(1024 \times 1024\) for both training and inference of the model." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.542, + 0.519, + 0.556 + ], + "angle": 0, + "content": "4.2 Dataset and Evaluation Metric" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.568, + 0.788, + 0.688 + ], + "angle": 0, + "content": "ISBI 2015 Challenge Dataset [37]. This is a widely utilized benchmark dataset in the field of cephalometric landmark detection. The dataset comprises 400 cephalometric images, of which 150 are designated for training, 150 for Test 1, and the remaining images for Test 2. Each image has been annotated with 19 landmarks by two experienced medical practitioners, and the average of these annotations is taken as the ground truth. This dataset provides a rich array of annotated data, enabling researchers to effectively train and evaluate their cephalometric landmark detection methods." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.689, + 0.789, + 0.841 + ], + "angle": 0, + "content": "ISBI 2023 Challenge Dataset [25]. This is a recently introduced cephalometric landmark detection dataset, collected from seven distinct imaging devices. Following the training strategy in reference [15], we randomly selected 500 images as training data, with the remaining 200 images utilized for evaluating model performance. Experiments were conducted with \(k\)-fold cross-validation \((k = 10)\), and the average results were taken as the final outcome. This dataset provides 29 landmarks, but only the same 19 landmarks as in the ISBI 2015 dataset are used, ensuring a fair comparison with other methods. This new dataset offers researchers a more challenging scenario to test the generalization capabilities of their methods across various imaging devices." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.424, + 0.129 + ], + "angle": 0, + "content": "C. Dai, Y. Wang et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.235 + ], + "angle": 0, + "content": "Evaluation metric. The evaluation of cephalometric landmark detection models typically employs the Mean Radial Error (MRE) and the Successful Detection Rate (SDR) [7]. MRE measures the distance error between the predicted cephalometric landmarks and the ground truth, commonly serving as a measure of detection accuracy.
The calculation of MRE is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.388, + 0.239, + 0.786, + 0.259 + ], + "angle": 0, + "content": "\[\nR_{i}^{j} = \left\| \mu_{A}^{j}\left(x_{i}, y_{i}\right) - \mu_{g}^{j}\left(x_{i}, y_{i}\right) \right\|_{2} \tag{5}\n\]" + }, + { + "type": "equation", + "bbox": [ + 0.414, + 0.282, + 0.786, + 0.325 + ], + "angle": 0, + "content": "\[\n\mathrm{MRE} = \frac{1}{TK} \sum_{i = 1}^{T} \sum_{j = 1}^{K} R_{i}^{j} \tag{6}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.336, + 0.789, + 0.489 + ], + "angle": 0, + "content": "where \( R_{i}^{j} \) denotes the radial error of the \( j \)-th landmark in the \( i \)-th image, \( \mu_A^j (x_i,y_i) \) represents the predicted coordinates of the \( j \)-th cephalometric landmark in the \( i \)-th image, and \( \mu_g^j (x_i,y_i) \) denotes the corresponding ground-truth coordinates. \( T \) represents the number of test images, and \( K \) denotes the number of cephalometric landmarks in each image. SDR is employed to quantify the discrepancy between the predicted cephalometric landmarks and the ground truth. If the radial error \( R_{i}^{j} \) is no greater than \( z\,\mathrm{mm} \) (where \( z = 2\mathrm{mm} \), \( 2.5\mathrm{mm} \), \( 3\mathrm{mm} \), \( 4\mathrm{mm} \)), the detection is considered successful (usually, the \( 2\mathrm{mm} \) range is considered acceptable in medical analysis [32,40]). The SDR is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.392, + 0.501, + 0.787, + 0.543 + ], + "angle": 0, + "content": "\[\n\mathrm{SDR}_{z} = \frac{1}{TK} \sum_{i = 1}^{T} \sum_{j = 1}^{K} \mathbb{1}\left\{R_{i}^{j} \leq z \right\} \tag{7}\n\]" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.569, + 0.39, + 0.584 + ], + "angle": 0, + "content": "4.3 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.6, + 0.787, + 0.631 + ], + "angle": 0, + "content": "In this section, we perform several ablation studies on the ISBI 2015 Challenge dataset to illustrate the effectiveness of the proposed components." + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.659, + 0.788, + 0.701 + ], + "angle": 0, + "content": "Table 1: Varying different model structures. \"MF\" denotes Multi-level Features. \"HP\" denotes Heatmap. \"RE\" denotes Reference Encoder. \"RL\" denotes RLE Loss. \"FE\" denotes Finetune Encoder." + }, + { + "type": "table", + "bbox": [ + 0.227, + 0.713, + 0.782, + 0.84 + ], + "angle": 0, + "content": "
| ID | MRE(mm) | 2mm SDR(%) |
| --- | --- | --- |
| 1 | 2.8974 | 54.75 |
| 2 | 2.2586 | 61.91 |
| 3 | 2.5698 | 57.07 |
| 4 | 2.0125 | 65.01 |
| 5 | 1.6745 | 74.04 |
| 6 | 1.2434 | 83.65 |
| 7 | 1.1468 | 86.84 |
| 8 | 1.1514 | 86.31 |
| 9 | 1.0230 | 88.12 |
| 10 | 1.0088 | 89.51 |
" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.666, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "Du-CeLR" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "image", + "bbox": [ + 0.221, + 0.148, + 0.36, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.362, + 0.149, + 0.501, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.149, + 0.642, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.643, + 0.149, + 0.782, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.217, + 0.268, + 0.785, + 0.294 + ], + "angle": 0, + "content": "Fig. 4: Headmap visualization. The attention heatmap come from the feature extraction module." + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.327, + 0.786, + 0.628 + ], + "angle": 0, + "content": "Varying the model structures. We conduct experiments to verify the different model structures. All experimental results are presented in Table 1. Regarding the feature extractor module, the combination of the multi-level feature (MF) module improves the baseline in MRE and \\(2\\mathrm{mm}\\) SDR indicators by \\(0.3276\\mathrm{mm}\\) and \\(7.16\\%\\) respectively, while the introduction of the heatmap (HP) module improves the baseline by \\(0.3276\\mathrm{mm}\\) and \\(2.32\\%\\). When both MF and HP modules are integrated, there is \\(0.8849\\mathrm{mm}\\) and \\(10.26\\%\\) enhancement over the Baseline, underscoring the significant role of the feature extractor module in accuracy improvement. For the reference encoder module, the addition of reference encoder (RE) components and RLE Loss (RL) elements on the baseline foundation yielded \\(1.654\\mathrm{mm}\\) and \\(28.9\\%\\) accuracy improvement. When used in conjunction with the feature extractor module, the model's accuracy further increased by \\(0.0996\\mathrm{mm}\\) and \\(3.19\\%\\). Regarding the finetune encoder module, its combined use with the feature extractor module led to a \\(1.8744\\mathrm{mm}\\) and \\(33.37\\%\\) improvement in model accuracy. The highest accuracy, reaching \\(1.0088\\mathrm{mm}\\) and \\(89.51\\%\\), was achieved when the finetune encoder module was used in combination with both the reference encoder module and the feature extractor module. This underscores the significant impact of the three proposed modules on enhancing model accuracy. Finally, we visualize the attention heatmap in Figure 4. The heatmap is highly responsive at locations near the cephalometric landmarks." + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.63, + 0.786, + 0.704 + ], + "angle": 0, + "content": "Varying the levels of fuse feature map. We explore the impact of feeding different levels of fuse feature maps into the proposed finetune encoder. As shown in Table 2, the performance grows consistently with more levels of fuse feature maps, e.g., \\(89.20\\%\\), \\(89.33\\%\\), \\(89.42\\%\\), \\(89.51\\%\\) for 2, 3, 4, 5 levels of feature maps on 2mm SDR, respectively." + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.706, + 0.786, + 0.84 + ], + "angle": 0, + "content": "Varying parameter of encoder module. We study the impact of encoder module on model performance from two aspects: the number of layers and feature dimensions. 
To simplify validation, the reference encoder and finetune encoder modules are set to the same number of layers and dimensions. First, we investigate the effects of altering the dimension of the encoder module. As illustrated in Table 3, there is a discernible enhancement in model efficacy concomitant with an increase in the dimensions of the encoder layers. The peak performance of the model is attained when the dimension is augmented to 512. Furthermore, we conduct experiments by varying the number of encoder layers." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.423, + 0.129 + ], + "angle": 0, + "content": "C. Dai, Y. Wang et al." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.147, + 0.785, + 0.178 + ], + "angle": 0, + "content": "As shown in Table 4, the performance grows over the first four layers and saturates at the fifth encoder layer." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.189, + 0.789, + 0.216 + ], + "angle": 0, + "content": "Table 2: Varying the scale levels of the fused feature map for the feature extraction module. Table 3: Varying feature queries dimensions of the encoder module." + }, + { + "type": "table", + "bbox": [ + 0.226, + 0.217, + 0.501, + 0.294 + ], + "angle": 0, + "content": "
| F5 | F4 | F3 | F2 | MRE(mm) | 2mm SDR(%) | 2.5mm SDR(%) | 3mm SDR(%) | 4mm SDR(%) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| ✓ |  |  |  | 1.0181 | 89.20 | 93.48 | 96.18 | 98.41 |
| ✓ | ✓ |  |  | 1.0156 | 89.33 | 93.49 | 96.32 | 98.41 |
| ✓ | ✓ | ✓ |  | 1.0113 | 89.42 | 93.50 | 96.38 | 98.54 |
| ✓ | ✓ | ✓ | ✓ | 1.0088 | 89.51 | 93.54 | 96.42 | 98.56 |
" + }, + { + "type": "table", + "bbox": [ + 0.534, + 0.217, + 0.765, + 0.294 + ], + "angle": 0, + "content": "
| Dim | MRE(mm) | 2mm SDR(%) | 2.5mm SDR(%) | 3mm SDR(%) | 4mm SDR(%) |
| --- | --- | --- | --- | --- | --- |
| 128 | 1.0201 | 89.03 | 93.26 | 95.76 | 98.17 |
| 256 | 1.0194 | 89.32 | 93.35 | 96.02 | 98.32 |
| 512 | 1.0088 | 89.51 | 93.58 | 96.42 | 98.56 |
| 768 | 1.0091 | 89.47 | 93.61 | 96.39 | 98.53 |
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.305, + 0.785, + 0.41 + ], + "angle": 0, + "content": "Varying the input image resolutions. We undertake experimental investigations to ascertain the robustness of our method across varying input resolutions. As depicted in Table 5, there is a significant enhancement in the performance of the model concomitant with an increase in the resolution of input images. When the input image resolution is \\(1024 \\times 1024\\), the model reaches \\(1.0088\\mathrm{mm}\\) and \\(89.51\\%\\) in MRE and \\(2\\mathrm{mm}\\) SDR metrics respectively. A further escalation in input image resolution results in a decline for model performance." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.422, + 0.789, + 0.448 + ], + "angle": 0, + "content": "Table 4: Varying the numbers of encoder Table 5: Varying the input image resolutions." + }, + { + "type": "table", + "bbox": [ + 0.241, + 0.45, + 0.485, + 0.543 + ], + "angle": 0, + "content": "
| Num | MRE(mm) | 2mm SDR(%) | 2.5mm SDR(%) | 3mm SDR(%) | 4mm SDR(%) |
| --- | --- | --- | --- | --- | --- |
| 1 | 1.0835 | 87.23 | 93.05 | 95.51 | 97.96 |
| 2 | 1.0247 | 88.98 | 93.31 | 95.92 | 98.32 |
| 3 | 1.0137 | 89.46 | 93.47 | 96.28 | 98.47 |
| 4 | 1.0088 | 89.51 | 93.54 | 96.42 | 98.56 |
| 5 | 1.0091 | 89.48 | 93.54 | 96.45 | 98.59 |
" + }, + { + "type": "table", + "bbox": [ + 0.507, + 0.45, + 0.788, + 0.543 + ], + "angle": 0, + "content": "
| Resolution | MRE(mm) | 2mm SDR(%) | 2.5mm SDR(%) | 3mm SDR(%) | 4mm SDR(%) |
| --- | --- | --- | --- | --- | --- |
| 256×256 | 1.2012 | 84.56 | 91.79 | 95.44 | 98.49 |
| 512×512 | 1.0674 | 88.07 | 93.30 | 96.25 | 98.57 |
| 768×768 | 1.0129 | 89.40 | 93.33 | 96.07 | 98.60 |
| 1024×1024 | 1.0088 | 89.51 | 93.54 | 96.42 | 98.56 |
| 1280×1280 | 1.0153 | 89.31 | 93.51 | 95.44 | 98.32 |
" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.561, + 0.365, + 0.575 + ], + "angle": 0, + "content": "4.4 Main Result" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.584, + 0.785, + 0.674 + ], + "angle": 0, + "content": "We evaluated our method on two cephalometric landmark datasets: ISBI 2015 Challenge [37] and ISBI 2023 Challenge datasets [25]. The final results are presented in Tables 6,7,8. The proposed approach achieved the least Mean Radical Error (MRE) and the highest \\(2\\mathrm{mm}\\) Success Detection Rate (SDR), which is considered as the clinically accepted. Moreover, our method achieves end-to-end training and prediction for cephalometric landmarks." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.675, + 0.787, + 0.841 + ], + "angle": 0, + "content": "ISBI 2015 Challenge test1. Table 6 presents the evaluation results for the ISBI 2015 Challenge test1 dataset. These state-of-the-art methods can be categorized into heatmap-based and regression-based methods. Our method demonstrates clear superiority over heatmap-based methods. Compared to the best heatmap-based method [2], our method achieves improvements of \\(0.11\\mathrm{mm}\\) and \\(1.48\\%\\) respectively in MRE and the \\(2\\mathrm{mm}\\) SDR metrics. Additionally, compared to the best regression-based method, our method achieves improvements of \\(1.19\\%\\) on the \\(2\\mathrm{mm}\\) SDR metrics. Moreover, compared to the best approach, our method exhibits a significant advantage in terms of GFLOPs. In addition, compared to other low-resolution methods, our method has the lowest GFLOPs of only 23.0, while the \\(2\\mathrm{mm}\\) SDR reaches \\(88.07\\%\\), which is superior to the other" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.666, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "Du-CeLR" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.145, + 0.346, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.236, + 0.273, + 0.408, + 0.284 + ], + "angle": 0, + "content": "(a) ISBI 2015 Challenge test1" + }, + { + "type": "image", + "bbox": [ + 0.346, + 0.146, + 0.421, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.423, + 0.145, + 0.528, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.425, + 0.273, + 0.599, + 0.285 + ], + "angle": 0, + "content": "(b) ISBI 2015 Challenge test2" + }, + { + "type": "image", + "bbox": [ + 0.529, + 0.146, + 0.603, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.603, + 0.146, + 0.706, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.621, + 0.273, + 0.762, + 0.285 + ], + "angle": 0, + "content": "(c) ISBI 2023 Challenge" + }, + { + "type": "image", + "bbox": [ + 0.706, + 0.146, + 0.781, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.216, + 0.296, + 0.785, + 0.366 + ], + "angle": 0, + "content": "Fig. 5: Qualitative detection results on ISBI 2015 and 2023 Challenge datasets. (a) and (b) correspond the detection results for the ISBI 2015 Challenge test1 and test2. (c) depicts the detection outcomes for the ISBI 2023 Challenge. The blue landmarks represent results annotated by medical professionals, while the red landmarks indicate the outcomes predicted by the model." 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.383, + 0.785, + 0.412 + ], + "angle": 0, + "content": "Table 6: Quantitative results on the ISBI 2015 Challenge test1 dataset. * denotes other methods we implemented. Bold represents the best result." + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.424, + 0.79, + 0.657 + ], + "angle": 0, + "content": "
| Method | Backbone | Resolution | GFLOPs | MRE(mm) | 2mm SDR(%) | 2.5mm SDR(%) | 3mm SDR(%) | 4mm SDR(%) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Heatmap-based Methods |  |  |  |  |  |  |  |  |
| Chen R et al. [6] | ResNet50 | 800×640 | 215.7 | 1.17 | 86.67 | 92.67 | 95.54 | 98.53 |
| Zhong Z et al. [40] | U-Net | 290×290+19×100×100 | 92.2 | 1.12 | 86.91 | 91.82 | 94.88 | 97.90 |
| CephaNN [26] | ResNeXt50 | 800×640 | 982.8 | 1.15 | 87.61 | 93.16 | 96.35 | 98.74 |
| Yao J et al. [34] | ResNet18 | 576×512+19×96×96 | 40.1 | 1.14 | 86.84 | 93.02 | 95.43 | 98.95 |
| Ao Y et al. [2] | Densenet121 | 800×640 | 157.2 | 1.12 | 88.03 | 92.73 | 95.96 | 98.48 |
| Huang K et al. [13] | - | - | - | 1.09 | 87.87 | 92.45 | 95.54 | 98.59 |
| SimCC* [21] | HRNet48 | 800×640 | 164.9 | 1.12 | 87.16 | 91.96 | 95.37 | 98.18 |
| Regression-based Methods |  |  |  |  |  |  |  |  |
| Gilmour L et al. [11] | ResNet34 | 2432×1920 | 220.2 | 1.01 | 88.32 | 93.12 | 96.14 | 98.63 |
| Song Y et al. [29] | ResNet50 | 256×256+19×256×256 | 102.5 | 1.08 | 86.40 | 91.70 | 94.80 | 97.80 |
| Song Y et al. [30] | U-Net | 480×387 | 286.8 | 1.19 | 85.20 | 91.20 | 94.40 | 97.20 |
| Zeng M et al. [36] | - | - | - | 1.34 | 81.37 | 89.09 | 93.79 | 97.86 |
| King C H et al. [17] | - | - | - | 1.17 | 86.14 | 91.72 | 94.91 | 97.96 |
| Hong W et al. [12] | - | - | - | 1.12 | 85.26 | 90.67 | 93.54 | 97.19 |
| Poseur* [24] | ResNet50 | 800×640 | 46.1 | 1.14 | 86.56 | 91.09 | 94.00 | 97.23 |
| Ours | ResNet34 | 512×512 | 23.0 | 1.07 | 88.07 | 93.30 | 96.25 | 98.57 |
| Ours | ResNet34 | 1024×1024 | 95.0 | 1.01 | 89.51 | 93.54 | 96.42 | 98.56 |
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.688, + 0.784, + 0.718 + ], + "angle": 0, + "content": "methods. The qualitative detection results of the ISBI 2015 Challenge test1 dataset are displayed in Figure 4." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.719, + 0.785, + 0.84 + ], + "angle": 0, + "content": "ISBI 2015 Challenge test2. The evaluation results for the ISBI 2015 Challenge test2 dataset are presented in Table 7. Our method outperforms heatmap-based methods by significant margins. Compared to best method [13], our method achieves an increase of \\(0.07\\mathrm{mm}\\) in MRE and \\(0.48\\%\\) in \\(2\\mathrm{mm}\\) SDR. In addition, We introduce an end-to-end human keypoint detection method into the cephalometric landmark detection task, which is implemented based on the deformable decoder architecture. Experiments show that our method is significantly better than the human keypoint method in accuracy. Moreover, our" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.114, + 0.424, + 0.129 + ], + "angle": 0, + "content": "C. Dai, Y. Wang et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.224 + ], + "angle": 0, + "content": "method is more convenient to deploy. Finally, the performance of the released methods on ISBI 2015 Challenge Test1 dataset are all better than Test2. It seems that the data distribution of Test1 dataset is more consistent with Train dataset. Qualitative detection results of our method on the ISBI 2015 Challenge test2 dataset can be found in Figure 5b." + }, + { + "type": "table_caption", + "bbox": [ + 0.24, + 0.234, + 0.528, + 0.262 + ], + "angle": 0, + "content": "Table 7: Quantitative results on the ISBI 2015 Challenge test2 dataset." + }, + { + "type": "table", + "bbox": [ + 0.254, + 0.263, + 0.52, + 0.442 + ], + "angle": 0, + "content": "
| Method | MRE(mm) | 2mm SDR(%) | 2.5mm SDR(%) | 3mm SDR(%) | 4mm SDR(%) |
| --- | --- | --- | --- | --- | --- |
| Heatmap-based Methods |  |  |  |  |  |
| Chen R et al. [6] | 1.48 | 75.05 | 82.84 | 88.53 | 95.05 |
| Zhong Z et al. [40] | 1.42 | 76.00 | 82.90 | 88.74 | 94.32 |
| CephaNN [26] | 1.43 | 76.32 | 82.95 | 87.95 | 94.63 |
| Yao J et al. [34] | 1.48 | 75.44 | 82.03 | 86.65 | 95.12 |
| Ao Y et al. [2] | 1.42 | 77.00 | 84.42 | 89.47 | 95.21 |
| Huang K et al. [13] | 1.34 | 79.05 | 87.95 | 89.79 | 95.05 |
| SimCC* [21] | 1.54 | 74.16 | 80.68 | 86.32 | 94.05 |
| Regression-based Methods |  |  |  |  |  |
| Gilmour L et al. [11] | 1.33 | 77.05 | 83.16 | 88.84 | 94.89 |
| Song Y et al. [29] | 1.54 | 74.00 | 81.30 | 87.50 | 94.30 |
| Song Y et al. [30] | 1.64 | 72.20 | 79.50 | 85.00 | 93.50 |
| Zeng M et al. [36] | 1.64 | 70.58 | 79.53 | 86.05 | 93.32 |
| King C H et al. [17] | 1.50 | 74.58 | 81.74 | 87.26 | 94.73 |
| Hong W et al. [12] | 1.28 | 79.24 | 85.32 | 90.47 | 96.32 |
| Poseur* [24] | 1.48 | 74.42 | 81.37 | 86.68 | 93.63 |
| Ours | 1.27 | 79.53 | 86.47 | 91.11 | 96.32 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.529, + 0.285, + 0.76, + 0.312 + ], + "angle": 0, + "content": "Table 8: Quantitative results on the ISBI 2023 Challenge." + }, + { + "type": "table", + "bbox": [ + 0.532, + 0.313, + 0.803, + 0.393 + ], + "angle": 0, + "content": "
| Method | MRE(mm) | 2mm SDR(%) | 2.5mm SDR(%) | 3mm SDR(%) | 4mm SDR(%) |
| --- | --- | --- | --- | --- | --- |
| Jin H et al. [15] | 1.2200 | 83.76 | 89.71 | 92.79 | 96.08 |
| Poseur* [24] | 0.9982 | 88.51 | 92.82 | 95.37 | 97.79 |
| SimCC* [21] | 1.0795 | 88.39 | 93.12 | 95.31 | 97.81 |
| Huang K et al.* [13] | 1.0747 | 87.87 | 92.52 | 94.87 | 97.42 |
| Gilmour L et al.* [11] | 0.9793 | 89.37 | 93.47 | 95.97 | 97.42 |
| Ours | 0.9372 | 90.68 | 94.24 | 95.97 | 97.89 |
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.469, + 0.788, + 0.606 + ], + "angle": 0, + "content": "ISBI 2023 Challenge test. Regarding the ISBI 2023 Challenge test dataset, as illustrated in Table 8, Our method achieves the best performance on all metrics. Compared to the best-performing method [11], our approach significantly reduces the Mean Relative Error (MRE) from \\(0.9793\\mathrm{mm}\\) to \\(0.9372\\mathrm{mm}\\) and enhances the \\(2\\mathrm{mm}\\) Success Detection Rate (SDR) from \\(89.37\\%\\) to \\(90.68\\%\\). Moreover, in comparison with transformer-based methods, our approach demonstrates a lead of \\(0.061\\mathrm{mm}\\) in MRE and \\(2.17\\%\\) in \\(2\\mathrm{mm}\\) SDR, respectively. Lastly, the qualitative detection results of our method on the ISBI 2023 Challenge test dataset are depicted in Figure 5c." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.628, + 0.36, + 0.644 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.659, + 0.788, + 0.841 + ], + "angle": 0, + "content": "In this paper, we propose a novel regression model for cephalometric landmark detection for high-resolution X-ray image. This model only employs the encoder module within the transformer framework to construct the relationship between landmark features and image features. It is capable of regressing cephalometric landmark coordinate from coarse to fine and completes end-to-end training. Moreover, our model, compared to heatmap-based method, boasts low memory consumption and robustness against missing landmark. It offers a more straightforward end-to-end design compared to current regression-based method, performing one-time landmark detection on high-resolution X-ray images. Extensive experiments on the ISBI2015 and ISBI2023 datasets demonstrate that our method can achieve state-of-the-art performance compare with regression-based and heatmap-based methods." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.665, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "Du-CeLR" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.145, + 0.403, + 0.163 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.177, + 0.785, + 0.207 + ], + "angle": 0, + "content": "This work was supported by the National Natural Science Foundation of China (62122059, 82330064)." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.23, + 0.323, + 0.245 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.26, + 0.786, + 0.302 + ], + "angle": 0, + "content": "1. Albarakati, S., Kula, K., Ghoneima, A.: The reliability and reproducibility of cephalometric measurements: a comparison of conventional and digital methods. Dentomaxillofacial Radiology 41(1), 11-17 (2012)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.302, + 0.786, + 0.33 + ], + "angle": 0, + "content": "2. Ao Y, W.H.: Feature aggregation and refinement network for 2d anatomical landmark detection. Journal of Digital Imaging 36(2), 547-561 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.33, + 0.786, + 0.372 + ], + "angle": 0, + "content": "3. B. Ibragimov, B. Likar, F.P., Vrtovec, T.: Automatic cephalometric x-ray landmark detection by applying game theory and random forests. In Proc. ISBI Int. Symp. 
on Biomedical Imaging (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.372, + 0.786, + 0.399 + ], + "angle": 0, + "content": "4. Cardillo, J., Sid-Ahmed, M.A.: An image processing system for locating craniofacial landmarks. IEEE transactions on medical imaging 13(2), 275-289 (1994)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.4, + 0.786, + 0.44 + ], + "angle": 0, + "content": "5. Carion N, Massa F, S.G.e.a.: End-to-end object detection with transformers. European conference on computer vision. Cham: Springer International Publishing pp. 213-229 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.441, + 0.786, + 0.509 + ], + "angle": 0, + "content": "6. Chen, R., Ma, Y., Chen, N., Lee, D., Wang, W.: Cephalometric landmark detection by attentive feature pyramid fusion and regression-voting. In: Medical Image Computing and Computer Assisted Intervention-MICCAI 2019: 22nd International Conference, Shenzhen, China, October 13-17, 2019, Proceedings, Part III 22. pp. 873-881. Springer (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.51, + 0.786, + 0.551 + ], + "angle": 0, + "content": "7. Devereux, L., Moles, D., Cunningham, S.J., McKnight, M.: How important are lateral cephalometric radiographs in orthodontic treatment planning? American Journal of Orthodontics and Dentofacial Orthopedics 139(2), e175-e181 (2011)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.551, + 0.786, + 0.592 + ], + "angle": 0, + "content": "8. Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.593, + 0.786, + 0.62 + ], + "angle": 0, + "content": "9. Dosovitskiy A, Beyer L, K.A.e.a.: An image is worth 16x16 words: Transformers for image recognition at scale. ArXiv preprint arXiv:2010.11929 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.62, + 0.786, + 0.647 + ], + "angle": 0, + "content": "10. El-Feghi, M.S.A., Ahmadi, M.: Automatic localization of craniofacial landmarks for assisted cephalometry. Pattern Recognition 37(3), 609-621 (2004)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.648, + 0.786, + 0.675 + ], + "angle": 0, + "content": "11. Gilmour L, R.N.: Locating cephalometric x-ray landmarks with foveated pyramid attention. Medical Imaging With Deep Learning. PMLR pp. 262-276 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.675, + 0.786, + 0.731 + ], + "angle": 0, + "content": "12. Hong W, Kim S M, C.J.e.a.: Deep reinforcement learning using a multi-scale agent with a normalized reward strategy for automatic cephalometric landmark detection. 2023 4th International Conference on Big Data Analytics and Practices pp. 1-6 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.731, + 0.786, + 0.758 + ], + "angle": 0, + "content": "13. Huang K, F.F.: An intelligent shooting reward learning network scheme for medical image landmark detection. Applied Sciences 12(20), 10190 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.759, + 0.786, + 0.799 + ], + "angle": 0, + "content": "14. Indermun S, Shaik S, N.C.J.K.M.R.: Human examination and artificial intelligence in cephalometric landmark detection—is ai ready to take over? Dentomaxillofac Radiol 10.1259/dmfr.20220362 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.799, + 0.786, + 0.841 + ], + "angle": 0, + "content": "15. 
Jin H, Che H, C.H.: Unsupervised domain adaptation for anatomical landmark detection. International Conference on Medical Image Computing and Computer-Assisted Intervention. Cham: Springer Nature Switzerland pp. 695-705 (2023)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.26, + 0.786, + 0.841 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.423, + 0.128 + ], + "angle": 0, + "content": "C. Dai, Y. Wang et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.147, + 0.785, + 0.175 + ], + "angle": 0, + "content": "16. Kaiming He, Xiangyu Zhang, S.R.J.S.: Deep residual learning for image recognition. ArXiv preprint arXiv:1512.03385 (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.175, + 0.785, + 0.217 + ], + "angle": 0, + "content": "17. King C H, Wang Y L, L.W.Y.e.a.: Automatic cephalometric landmark detection on x-ray images using object detection. 2022 IEEE 19th International Symposium on Biomedical Imaging (ISBI) pp. 1-4 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.217, + 0.785, + 0.243 + ], + "angle": 0, + "content": "18. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. ArXiv preprint arXiv:1412.6980 (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.243, + 0.785, + 0.284 + ], + "angle": 0, + "content": "19. Lee H, Park M, K.J.: Cephalometric landmark detection in dental x-ray images using convolutional neural networks. Medical imaging 2017: Computer-aided diagnosis 10134, 494-499 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.284, + 0.785, + 0.324 + ], + "angle": 0, + "content": "20. Li, H., Guo, Z., Rhee, S.M., Han, S., Han, J.J.: Towards accurate facial landmark detection via cascaded transformers. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4176-4185 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.325, + 0.785, + 0.365 + ], + "angle": 0, + "content": "21. Li Y, Yang S, L.P.e.a.: Simcc: A simple coordinate classification perspective for human pose estimation. European Conference on Computer Vision. Cham: Springer Nature Switzerland 89-106 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.365, + 0.785, + 0.406 + ], + "angle": 0, + "content": "22. Lindner, C., Cootes, T.: Fully automatic cephalometric evaluation using random forest regression-voting. IEEE International Symposium on Biomedical Imaging (ISBI) (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.406, + 0.785, + 0.433 + ], + "angle": 0, + "content": "23. Mamta Juneja, Poojita Garg, R.K.e.a.: A review on cephalometric landmark detection techniques. Biomedical Signal Processing and Control 66(102486) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.433, + 0.785, + 0.473 + ], + "angle": 0, + "content": "24. Mao, W., Ge, Y., Shen, C., Tian, Z., Wang, X., Wang, Z., Hengel, A.v.d.: Poseur: Direct human pose regression with transformers. Proceedings of the European Conference on Computer Vision (ECCV) (October 2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.473, + 0.785, + 0.501 + ], + "angle": 0, + "content": "25. Muhammad Anwaar Khalid, K.Z.e.a.: Cepha29: Automatic cephalometric landmark detection challenge 2023. 
ArXiv preprint arXiv:2212.04808 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.501, + 0.785, + 0.528 + ], + "angle": 0, + "content": "26. Qian J, Luo W, C.M.e.a.: CephaNN: a multi-head attention network for cephalometric landmark detection. IEEE Access 8, 112633-112641 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.528, + 0.785, + 0.582 + ], + "angle": 0, + "content": "27. Ronneberger O, Fischer P, B.T.: U-net: Convolutional networks for biomedical image segmentation. Medical Image Computing and Computer-Assisted Intervention-MICCAI 2015: 18th International Conference, Munich, Germany, October 5-9, 2015" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.582, + 0.785, + 0.609 + ], + "angle": 0, + "content": "28. Shaker A, Maaz M, R.H.e.a.: Unetr++: delving into efficient and accurate 3d medical image segmentation. ArXiv preprint arXiv:2212.04497 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.609, + 0.785, + 0.65 + ], + "angle": 0, + "content": "29. Song, Y., Qiao, X., Iwamoto, Y., Chen, Y.w.: Automatic cephalometric landmark detection on x-ray images using a deep-learning method. Applied Sciences 10(7), 2547 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.65, + 0.785, + 0.691 + ], + "angle": 0, + "content": "30. Song Y, Qiao X, I.Y.e.a.: An efficient deep learning based coarse-to-fine cephalometric landmark detection method. IEICE TRANSACTIONS on Information and Systems 104(8), 1359-1366 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.691, + 0.785, + 0.718 + ], + "angle": 0, + "content": "31. Vaswani A, Shazeer N, P.N.e.a.: Attention is all you need. Advances in neural information processing systems (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.718, + 0.785, + 0.772 + ], + "angle": 0, + "content": "32. Wang, C.W., Huang, C.T., Hsieh, M.C., Li, C.H., Chang, S.W., Li, W.C., Vandaele, R., Marée, R., Jodogne, S., Geurts, P., et al.: Evaluation and comparison of anatomical landmark detection methods for cephalometric x-ray images: a grand challenge. IEEE transactions on medical imaging 34(9), 1890-1900 (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.772, + 0.785, + 0.8 + ], + "angle": 0, + "content": "33. Yang, S., Quan, Z., Nie, M., Yang, W.: Transpose: Keypoint localization via transformer. IEEE/CVF International Conference on Computer Vision (ICCV) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.8, + 0.785, + 0.84 + ], + "angle": 0, + "content": "34. Yao J, Zeng W, H.T.e.a.: Automatic localization of cephalometric landmarks based on convolutional neural network. American journal of orthodontics and dentofacial orthopedics 161(3), e250-e259 (2022)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.665, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "Du-CeLR" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.175 + ], + "angle": 0, + "content": "35. Yuhui Yuan, Rao Fu, L.H.W.L.C.Z.X.C.J.W.: HRFormer: High-resolution transformer for dense prediction. ArXiv preprint arXiv:2110.09408 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.175, + 0.788, + 0.203 + ], + "angle": 0, + "content": "36. 
Zeng M, Yan Z, L.S.e.a.: Cascaded convolutional networks for automatic cephalometric landmark detection. Medical Image Analysis 68, 101904 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.203, + 0.788, + 0.245 + ], + "angle": 0, + "content": "37. Zhang H, Zhang J, L.C.S.E.S.P.N.T.G.S.W.Y.M.M.: All-net: Anatomical information lesion-wise loss function integrated into neural network for multiple sclerosis lesion segmentation. Neuroimage Clin 32(102854) (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.245, + 0.788, + 0.286 + ], + "angle": 0, + "content": "38. Zhang K, Zhang Z, L.Z.e.a.: Joint face detection and alignment using multitask cascaded convolutional networks. IEEE signal processing letters 23(10), 1499-1503 (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.286, + 0.788, + 0.314 + ], + "angle": 0, + "content": "39. Zhao, T., Wu, X.: Pyramid feature attention network for saliency detection. CVPR (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.314, + 0.788, + 0.369 + ], + "angle": 0, + "content": "40. Zhong Z, Li J, Z.Z.e.a.: An attention-guided deep regression model for landmark detection in cephalograms. Medical Image Computing and Computer Assisted Intervention-MICCAI 2019: 22nd International Conference, Shenzhen, China p. 13-17 (October 2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.37, + 0.788, + 0.398 + ], + "angle": 0, + "content": "41. Ziyang Ye, H.Y., Li, B.: Uncertainty-aware u-net for medical landmark detection. Arxiv preprint arXiv:2303.10349v1 (2023)" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.398 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/2cca7425-9c6a-47c2-b889-8be913ae41cc_origin.pdf b/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/2cca7425-9c6a-47c2-b889-8be913ae41cc_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4f921bb57225607c6d023e78d1014019aeabd27f --- /dev/null +++ b/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/2cca7425-9c6a-47c2-b889-8be913ae41cc_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b83313e4b708400c8f4090eb5d872fd045c2279dc66469ac676612ebb50ad6af +size 6215386 diff --git a/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/full.md b/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/full.md new file mode 100644 index 0000000000000000000000000000000000000000..59e4d72ccf6041c58549505722f2ad6f54fab173 --- /dev/null +++ b/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/full.md @@ -0,0 +1,286 @@ +# A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image + +Chao Dai $^{1\dagger}$ , Yang Wang $^{2\dagger(\boxtimes)}$ , Chaolin Huang $^{3\dagger}$ , Jiakai Zhou $^{4}$ , Qilin Xu $^{5}$ , and Minpeng Xu $^{1}$ + +$^{1}$ Tianjin University + +2 Anhui University of Technology + +$^{3}$ Jiangxi University of Science and Technology + +$^{4}$ Nanjing University of Aeronautics and Astronautics + +5 West Anhui University + +Abstract. Accurate detection of cephalometric landmarks is crucial for orthodontic diagnosis and treatment planning. 
Current methods rely on a cascading form of multiple models to achieve higher accuracy, which greatly complicates both training and deployment. In this paper, we introduce a novel regression paradigm capable of simultaneously detecting all cephalometric landmarks in high-resolution X-ray images. Our approach utilizes only the encoder module from the transformer to design a dual-encoder architecture, enabling precise detection of cephalometric landmark positions from coarse to fine. Specifically, the entire model architecture comprises three main components: a feature extractor module, a reference encoder module, and a finetune encoder module. These components are respectively responsible for feature extraction and fusion for X-ray images, coarse localization of cephalometric landmarks, and fine-tuning of cephalometric landmark positions. Notably, our framework is fully end-to-end differentiable and innately learns to exploit the interdependencies among cephalometric landmarks. Experiments demonstrate that our method significantly surpasses the current state-of-the-art methods in the Mean Radial Error (MRE) and 2mm Success Detection Rate (SDR) metrics, while also reducing computational resource consumption. The code is available at https://github.com/huang229/D-CeLR

Keywords: Cephalometric landmark $\cdot$ High-resolution $\cdot$ Dual-encoder $\cdot$ Reference encoder $\cdot$ Finetune encoder

# 1 Introduction

Cephalometric analysis represents a pivotal diagnostic tool extensively utilized in orthodontics and orthognathic surgery. This analysis involves the annotation of dental, skeletal, and soft tissue structures in lateral cephalometric radiographs.

![](images/ddb02340701773010dc447c6e4272a3d159550f84f95fcae76d431a00355ce03.jpg)
(a) Cephalometric landmark coordinate positions. (b) Cephalometric landmark medical names.
Fig. 1: Cephalometric landmark visualization. (a) Cephalometric landmark coordinate positions. Red indicates hard tissue points and blue indicates soft tissue points. (b) Cephalometric landmark medical names.
19 Landmarks in Cephalometric

| No. | Landmark | No. | Landmark |
| --- | --- | --- | --- |
| 1 | Sella | 11 | Lower Incisor Tip |
| 2 | Nasion | 12 | Upper Incisor Tip |
| 3 | Orbitale | 13 | Labrale superius |
| 4 | Porion | 14 | Labrale inferius |
| 5 | Upper Incisor Apex | 15 | Subnasale |
| 6 | B-point | 16 | Soft Tissue Pogonion |
| 7 | Pogonion | 17 | Posterior Nasal Spine |
| 8 | Menton | 18 | Anterior Nasal Spine |
| 9 | Gnathion | 19 | Articulare |
| 10 | Gonion |  |  |

As illustrated in Figure 1, these cephalometric landmarks are core to the analysis, providing reference points for subsequent qualitative assessments of angles and distances. However, the manual annotation of these landmarks is a laborious, time-consuming, and highly subjective task, impacting the accuracy of the annotations. Consequently, a precise and robust automated method for annotating cephalometric landmarks holds significant importance for effective treatment planning [1,6,7,14,23].

Existing methods for cephalometric landmark detection can be broadly classified into two categories: heatmap-based and regression-based approaches. The heatmap-based approach involves predicting a heatmap that indicates the probability of each pixel in a region corresponding to various cephalometric landmarks. This modality has seen extensive application in the detection of cephalometric landmarks. For example, Chen et al. [6] introduced a feature-pyramid-fusion-based heatmap method for simultaneous landmark detection, achieving impressive results. Qian J et al. [26] advanced the accuracy of cephalometric landmark detection by designing a multi-head attention module and a novel regional loss function. However, heatmap-based methods exhibit certain disadvantages: 1) the ground truth requires manual design and heuristic adjustments, with inevitable noise impacting the final outcomes [13,29,40]; 2) post-processing operations are necessary to locate single maximum values in heatmaps, and these operations are typically heuristic and non-differentiable, undermining the model's capacity for end-to-end training; 3) models generally adopt a U-net structure [27,28,41], which, when processing high-resolution X-ray images, consumes more computational resources and is prone to missing cephalometric landmarks.

Regression-based methods directly map the input image to the coordinates of cephalometric landmarks, typically employing a feedforward network (FFN) for prediction. Regression-based methods are considerably more streamlined than heatmap-based methods, as the prediction of cephalometric landmarks is inherently a process of determining a set of coordinate values. Numerous regression-based techniques exist for predicting cephalometric landmarks. For example, Song Y et al. [29] utilize a base network for coarse localization of cephalometric landmarks, followed by region-specific cropping and refined positioning using a secondary model. Gilmour L et al. [11] construct individual models for each landmark to predict their locations. Regression-based methods circumvent the necessity for non-maximum suppression, heatmap generation, and quantization error correction. However, to achieve higher precision on high-resolution X-ray images, current approaches predominantly rely on cascading multiple models, which compromises the inherent advantage of end-to-end training and prediction for regression-based methods.

To address these issues, we introduce a novel regression paradigm that exclusively utilizes the encoder module of the transformer for the one-time detection of all cephalometric landmarks on high-resolution X-ray images. Specifically, we design a feature extraction module based on Convolutional Neural Networks (CNN) to accomplish feature extraction and fusion for X-ray images. Subsequently, the extracted features are fed into a reference encoder module for the coarse localization of cephalometric landmarks.
Finally, the coarsely localized cephalometric landmarks, along with the fused features, are fed into a finetune encoder module, which iteratively refines the positions of the cephalometric landmarks from coarse to fine. Moreover, our method pioneers complete end-to-end training and deployment for the detection of cephalometric landmarks on high-resolution X-ray images. Extensive experiments demonstrate that our approach achieves state-of-the-art performance on popular benchmarks with a ResNet-34 backbone. Specifically, we achieve a Mean Radial Error (MRE) of $1.01\mathrm{mm}$, $1.27\mathrm{mm}$, and $0.9372\mathrm{mm}$ on the ISBI2015 test1, ISBI2015 test2, and ISBI2023 test datasets, respectively. Furthermore, our method significantly reduces computation: the previously best method [11] requires 132% more GFLOPs than ours.

The main contributions of this work are as follows:

- We propose an innovative regression paradigm for high-resolution X-ray images, which enables the prediction of all cephalometric landmarks through a single model. Moreover, our method facilitates end-to-end training and prediction, which not only improves efficiency but also enhances the feasibility of the model in practical applications.
- We have designed a dual-encoder structure, comprising a reference encoder module and a finetune encoder module. The reference encoder module accomplishes coarse localization of cephalometric landmarks, while the finetune encoder module refines this localization in a layer-by-layer updating manner.
- Our proposed regression approach significantly enhances the precision of cephalometric landmark detection. Compared to state-of-the-art methods, we achieve superior performance on both the ISBI2015 and ISBI2023 test datasets.

# 2 Related Work

Since the seminal work of Lee et al. [19], which first introduced the use of deep learning for cephalometric landmark detection, deep-learning-based methods [2,17,34] have fully surpassed traditional pattern matching [4,10] and random forest regression-based methods [3,22] in terms of accuracy for cephalometric landmark detection. This section primarily focuses on the two deep-learning-based approaches for cephalometric landmark detection and on transformer architectures for keypoint regression.

# 2.1 Heatmap-Based Methods

Heatmap-based methods predict the likelihood of each pixel in the image corresponding to each cephalometric landmark. King C H et al. [17] utilized object detection techniques and designed a multitask loss without bounding box constraints to optimize landmark acquisition in the model. Chen R et al. [6] proposed a heatmap detection method based on feature pyramid fusion to complete all cephalometric landmark detection, surpassing other methods in effectiveness, but their multi-scale feature pyramid fusion is highly memory-intensive. Zhong Z et al. [40] adopted a two-stage landmark detection approach, which not only reduces memory consumption but also allows for fine-tuning of coarse landmark detection results on local image regions. Qian J et al. [26] raised the accuracy to new heights on the ISBI 2015 dataset by designing a multi-head attention module and a new regional loss function, while Ao Y et al. [2] developed a multiscale feature aggregation (MSFA) module and multi-head loss function.
Although heatmap-based cephalometric landmark detection achieves high accuracy, its application to high-resolution X-ray images and the common use of U-Net structures result in substantial memory consumption. Moreover, the post-processing required by heatmap-based methods breaks fully end-to-end training and deployment of the model. + +# 2.2 Regression-Based Methods + +Currently, the majority of regression methods for cephalometric landmark detection on high-resolution X-ray images adopt multi-stage or multi-model strategies. Song Y et al. [29,30] proposed a method combining traditional regression algorithms with deep learning for coarse localization of landmarks, followed by cropping the region of interest from the original image to form a new image for refined localization with a secondary model. However, their accuracy is substantially lower than that achieved by heatmap-based methods [2,26]. Zeng M et al. [36] introduced a three-tier cascaded neural network for cephalometric landmark regression, akin to the concept used in the MTCNN model [38] for face detection. This approach significantly reduced memory consumption but did not achieve the desired level of accuracy. Gilmour L et al. [11] trained 19 distinct models, one per cephalometric landmark, attaining accuracy on the ISBI 2015 cephalometric dataset comparable to heatmap-based methods [26,34]. This greatly encouraged the use of low-memory regression methods in landmark detection. However, the need to maintain a separate model for each landmark adds complexity to training and deployment. While some regression methods have reached the accuracy of heatmap-based methods, they typically rely on multiple network models for prediction and, moreover, have not achieved end-to-end training and deployment. + +# 2.3 Transformer-based architectures + +The Transformer [31], proposed by Vaswani et al. and originally designed for natural language processing tasks, employs an encoder-decoder architecture based on self-attention and feed-forward networks. Recently, Transformer-based models have demonstrated significant potential in computer vision tasks [5,9], including various works applying the Transformer structure to keypoint estimation. For example, TransPose [33] and HRFormer [35] applied transformer architectures to human keypoint regression, while Poseur [24] and DTLD [20] adopted the more recent deformable transformer architecture for efficient regression of human keypoints and facial landmarks. Despite the high performance achieved by transformer-based methods in keypoint regression tasks, they present certain challenges: 1) they are primarily used on low-resolution images; 2) the deformable transformer architecture is more complex to deploy. In contrast, our method addresses these issues and achieves significantly higher performance. + +# 3 Method + +The overall architecture, illustrated in Figure 2, is a dual-encoder model that progressively predicts cephalometric landmark coordinates from coarse to fine on high-resolution X-ray images. It comprises a feature extractor for image feature extraction, a reference encoder for coarse cephalometric landmark localization, and a finetune encoder for precise cephalometric landmark localization. For the input image, we first obtain multi-scale features (S2, S3, S4, and S5) and a fused feature $F_{u}$ through the feature extractor (Sec.3.1).
The feature map S5 is flattened to produce the image content queries $V_{FR}^{C}$ , and coarse landmark content queries $V_{LR}^{C}$ are initialized randomly. The image content queries $V_{FR}^{C}$ and coarse landmark content queries $V_{LR}^{C}$ are fed into the reference encoder along with their position queries $V_{R}^{P}$ and are updated to the corresponding content queries $V_{LR}^{C'}$ and $V_{FR}^{C'}$ . Subsequently, the content queries $V_{LR}^{C'}$ are used to predict the coarse cephalometric landmark coordinates $\mu_{R} \in R^{K \times 2}$ and coarse distribution $\sigma_{R} \in R^{K \times 1}$ via an FFN (Sec.3.2). Next, the fused feature map $F_{u}$ is also flattened to generate image content queries $V_{FA}^{C}$ , and fine landmark content queries $V_{LA}^{C}$ are initialized. Unlike the reference encoder module, which solely uses content and position queries as input, the finetune encoder module additionally receives the coarse landmark coordinates $\mu_{R}$ and the feature map $F_{u}$ to update the content queries $V_{LA}^{C'}$ and $V_{FA}^{C'}$ . Finally, the content queries $V_{LA}^{C'}$ are processed by an FFN to produce the cephalometric landmark coordinates $\mu_A \in R^{K \times 2}$ and distribution $\sigma_A \in R^{K \times 1}$ (Sec.3.3). In addition, different loss functions are employed to supervise the training of the various modules. For the feature extractor module, Dice loss and Mean Squared Error (MSE) loss are utilized to aid model optimization. For the reference encoder and finetune encoder modules, Residual Log-likelihood Estimation (RLE) loss is applied to optimize the model's output cephalometric landmark coordinates $\mu$ and distribution $\sigma$ (Sec.3.4). + +![](images/ffd05bc043a830e7a326b8f26e4a2e5a0d7eea9fb2b9ecbd02ae9a1c96a7bda1.jpg) +Fig. 2: The overview architecture of our method, which contains (a) feature extractor module, (b) reference encoder module and (c) finetune encoder module. + +# 3.1 Feature Extractor + +ResNet34 [16] is utilized as the backbone of our model, from which multi-level feature maps [39] are extracted, as illustrated in Figure 2. Initially, we apply downsampling operations to scale the feature maps $S2$ , $S3$ , and $S4$ to the same dimension and size as the feature map $S5$ . Subsequently, the feature maps output by the backbone are summed with their respective positional maps (Pos) to yield new feature maps $F2$ , $F3$ , $F4$ , and $F5$ . These feature maps $F2$ , $F3$ , $F4$ , and $F5$ are aggregated to generate the fused feature map $F_{u}$ . The feature map $S5$ is fed directly into the reference encoder module for coarse localization of cephalometric landmarks, while the fused feature map $F_{u}$ is fed into the finetune encoder module for precise localization. Moreover, to enhance the model's performance, the feature map $S5$ is processed by a convolution to generate a heatmap, which is optimized with Dice loss and MSE loss. + +# 3.2 Reference Encoder + +The reference encoder aims to establish the relationship between cephalometric landmark queries and feature maps, thereby facilitating the coarse prediction of cephalometric landmarks. + +![](images/0808e49b6de9810a9e2818388b5de4665ff31f8f3b5e9d18e4583ab83d8d7bb5.jpg) +(a) reference encoder module + +![](images/b0ae459d84061ac1761c757f913cc663354387780db2f166156eab90a690907e.jpg) +(b) finetune encoder module + +Fig. 3: The detailed illustration of (a) reference encoder module and (b) finetune encoder module.
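+ +Before detailing the two encoder modules, the following compact PyTorch-style sketch summarizes the coarse-to-fine flow of Figure 2. It is a simplified illustration under stated assumptions, not our released implementation: positional queries, the distribution heads for $\sigma$ , the auxiliary heatmap branch, and the feature sampling at $\mu_{R}$ (shown separately in Sec.3.3) are omitted, and all names and sizes are illustrative.

```python
import torch
import torch.nn as nn


class DualEncoderSketch(nn.Module):
    """Minimal coarse-to-fine flow of Figure 2; module internals are simplified."""

    def __init__(self, num_landmarks=19, dim=512, ref_layers=4, fine_layers=4):
        super().__init__()
        self.landmark_queries = nn.Parameter(torch.randn(num_landmarks, dim))
        enc_layer = lambda: nn.TransformerEncoderLayer(dim, nhead=8, batch_first=True)
        self.reference_encoder = nn.TransformerEncoder(enc_layer(), num_layers=ref_layers)
        self.finetune_layers = nn.ModuleList(enc_layer() for _ in range(fine_layers))
        self.coarse_head = nn.Linear(dim, 2)   # predicts mu_R
        self.offset_head = nn.Linear(dim, 2)   # predicts per-layer (dx, dy)

    def forward(self, s5_tokens, fu_tokens):
        # s5_tokens / fu_tokens: flattened feature maps of shape (B, N, dim).
        B, K = s5_tokens.size(0), self.landmark_queries.size(0)
        queries = self.landmark_queries.expand(B, -1, -1)

        # Reference encoder: joint self-attention over landmark and image tokens.
        ref_out = self.reference_encoder(torch.cat([queries, s5_tokens], dim=1))
        mu = self.coarse_head(ref_out[:, :K]).sigmoid()        # coarse mu_R in [0, 1]

        # Finetune encoder: each layer predicts an offset that updates mu.
        fine_q = self.landmark_queries.expand(B, -1, -1)
        for layer in self.finetune_layers:
            out = layer(torch.cat([fine_q, fu_tokens], dim=1))
            fine_q = out[:, :K]
            mu = mu + self.offset_head(fine_q)                  # layer-by-layer update
        return mu
```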
As illustrated in Figure 2b and Figure 3a, the reference encoder module follows the typical transformer encoder paradigm. It comprises $N$ identical layers, each consisting of Layer Normalization (LN), Multi-Head Self-Attention (MHSA), and a Feed-Forward Network (FFN). Specifically, we initialize $K$ cephalometric landmark content queries $V_{LR}^{C}$ and utilize the flattened feature map $S5$ as the image content queries $V_{FR}^{C}$ . Drawing inspiration from the positional encoding of BERT [8], we generate the position queries $V_{R}^{P}$ . These content and position queries are fed into the reference encoder. After $N$ layers of iteration, the reference encoder outputs the updated cephalometric landmark content queries $V_{LR}^{C'}$ . These content queries are passed through an FFN layer to predict the coarse cephalometric landmark coordinates $\mu_{R}$ and distribution $\sigma_{R}$ . + +# 3.3 Finetune Encoder + +The finetune encoder employs a layer-to-layer update mechanism to achieve more precise cephalometric landmark detection. The structure of the finetune encoder, shown in Figure 2c and Figure 3b, also adheres to the typical transformer encoder paradigm and consists of $M$ identical layers. Unlike in the reference encoder module, the cephalometric landmark coordinates $\mu_{R}$ are continually updated in each layer of the finetune encoder module. Specifically, we initialize $K$ cephalometric landmark content queries $V_{LA}^{C}$ and flatten the fused feature map $F_{u}$ to serve as the image content queries $V_{FA}^{C}$ . Drawing inspiration from the positional encoding of BERT, we generate the position queries $V_{A}^{P}$ . Five inputs are fed into the finetune encoder module, namely the fine landmark content queries $V_{LA}^{C}$ , the image content queries $V_{FA}^{C}$ , the position queries $V_{A}^{P}$ , the fused feature map $F_{u}$ , and the coarse landmark coordinates $\mu_R$ . Within the finetune encoder module, we first sample feature vectors from the fused feature map $F_u$ at the coarse cephalometric landmark coordinates $\mu_R$ and add them to the fine landmark queries $V_{LA}^{C}$ . We then combine content and position queries and feed them into the encoder to model the relationships among the fine landmark and image content queries. Next, to adjust the landmark positions, we use the updated cephalometric landmark content queries $V_{LA}^{C'}$ to compute $(\Delta x, \Delta y)$ offsets with an FFN layer and add them back to the previous cephalometric landmark coordinates $\mu_R$ . In this way, the finetune encoder module refines the content queries progressively by stacking multiple such layers, outputting $V_{LA}^{C'}$ and $V_{FA}^{C'}$ . Finally, the cephalometric landmark content queries $V_{LA}^{C'}$ , followed by an FFN layer, predict the fine cephalometric landmark coordinates $\mu_A$ and distribution $\sigma_A$ ; a minimal sketch of one such refinement step is given below.
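+ +The refinement step above can be sketched as follows. This is a minimal illustration under stated assumptions: bilinear grid sampling is used as one possible realization of the feature sampling at $\mu_{R}$ , position queries are omitted, and all function and argument names are hypothetical rather than taken from the released code.

```python
import torch
import torch.nn.functional as F


def finetune_step(fu, queries, mu, encoder_layer, offset_ffn):
    """One illustrative refinement step of the finetune encoder.

    fu:      (B, C, H, W) fused feature map F_u
    queries: (B, K, C)    fine landmark content queries
    mu:      (B, K, 2)    current landmark coordinates, normalized (x, y) in [0, 1]
    """
    # Sample a feature vector from F_u at each current coordinate.
    # grid_sample expects (x, y) coordinates in [-1, 1].
    grid = (mu * 2.0 - 1.0).unsqueeze(2)                     # (B, K, 1, 2)
    sampled = F.grid_sample(fu, grid, align_corners=False)   # (B, C, K, 1)
    sampled = sampled.squeeze(-1).transpose(1, 2)            # (B, K, C)
    queries = queries + sampled                               # inject location evidence

    # Joint self-attention over [landmark queries; flattened image tokens].
    image_tokens = fu.flatten(2).transpose(1, 2)             # (B, H*W, C)
    out = encoder_layer(torch.cat([queries, image_tokens], dim=1))
    queries = out[:, : queries.size(1)]

    # Predict (dx, dy) offsets and update the coordinates.
    mu = mu + offset_ffn(queries)
    return queries, mu
```

Here `encoder_layer` can be any layer operating on batch-first token sequences (e.g., `torch.nn.TransformerEncoderLayer(d_model=C, nhead=8, batch_first=True)`), and `offset_ffn` is a small MLP mapping the query dimension to 2; stacking $M$ such steps yields the layer-by-layer update described above.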
+ +# 3.4 Loss Function + +As shown in Figure 2, the loss function of our method is composed of two key components: 1) the heatmap loss of the feature extraction module, and 2) the cephalometric landmark regression losses of the reference encoder and finetune encoder modules. The overall loss function can be formulated as follows: + +$$ +L = \lambda_{HM} L_{HM} + \lambda_{RE} L_{RE} + \lambda_{FE} L_{FE} \tag{1} +$$ + +where $L_{HM}$ , $L_{RE}$ and $L_{FE}$ denote the feature extraction, reference encoder, and finetune encoder module losses, respectively. $\lambda_{HM}$ , $\lambda_{RE}$ , and $\lambda_{FE}$ are hyper-parameters used to balance the three losses; all three are set to 1.0. $L_{HM}$ consists of the Dice loss and the MSE loss and is defined as follows: + +$$ +L_{HM} = \mathrm{Dice}\left(\hat{P}_{hp}, P_{hp}\right) + \mathrm{MSE}\left(\hat{P}_{hp}, P_{hp}\right) \tag{2} +$$ + +where $\hat{P}_{hp}$ and $P_{hp}$ are the predicted heatmap and the ground-truth heatmap, respectively. For the cephalometric landmark regression loss of the reference encoder module, we adopt the Residual Log-likelihood Estimation (RLE) loss: + +$$ +L_{RE} = \mathrm{RLE}\left(\mu_{R}, \sigma_{R}; \mu_{g}\right) \tag{3} +$$ + +where $\mu_R$ and $\sigma_R$ are the coarse cephalometric landmark coordinates and distribution output by the reference encoder module, and $\mu_g$ denotes the ground-truth landmark coordinates. For the cephalometric landmark regression loss of the finetune encoder module, we also adopt the RLE loss: + +$$ +L_{FE} = \sum_{i=1}^{M} \mathrm{RLE}\left(\mu_{A,i}, \sigma_{A,i}; \mu_{g}\right) \tag{4} +$$ + +where $M$ is the number of finetune encoder layers, and $\mu_{A,i}$ and $\sigma_{A,i}$ are the cephalometric landmark coordinates and distribution output by the i-th layer of the finetune encoder module. + +# 4 Experiments + +In this section, we assess our method on standard benchmarks for the cephalometric landmark detection task. We first perform several ablation studies to underline the advantages of our proposed components and to establish the optimal hyperparameter settings. Finally, we compare the performance of our model with state-of-the-art methods. + +# 4.1 Implementation Details + +Our model is built on the PyTorch framework. We use ResNet-34, pre-trained on ImageNet, as the backbone. Our architecture includes 4 layers for both the reference encoder and the finetune encoder module. All additional layers that we introduce are initialized randomly. Model training and testing are performed on one NVIDIA 3060 (12GB) GPU. For model optimization, we use Adam [18] with parameters $\beta_1 = 0.9$ , $\beta_2 = 0.999$ , and a weight decay of $10^{-4}$ . The batch size is set to 4. The model is trained for 1000 epochs. The initial learning rate is $2 \times 10^{-4}$ and is dynamically updated with a cosine schedule during training. Data augmentation techniques are employed, encompassing random cropping and random rotation. For the random cropping operation, all cephalometric landmarks are preserved in each crop. For the random rotation operation, we select a rotation angle range of [-30, 30] degrees. Finally, the image is scaled to $1024 \times 1024$ for both training and inference. + +# 4.2 Dataset and Evaluation Metric + +ISBI 2015 Challenge Dataset [37]. This is a widely used benchmark dataset in the field of cephalometric landmark detection. It comprises 400 cephalometric images, of which 150 are designated for training, 150 for Test 1, and the remaining 100 images for Test 2. Each image has been annotated with 19 landmarks by two experienced medical practitioners, and the average of these annotations is taken as the ground truth. This dataset provides a rich array of annotated data, enabling researchers to effectively train and evaluate their cephalometric landmark detection methods. + +
ISBI 2023 Challenge Dataset [25]. This is a recently introduced cephalometric landmark detection dataset collected from seven distinct imaging devices. Following the training strategy of reference [15], we randomly select 500 images as training data and use the remaining 200 images to evaluate model performance. Experiments are conducted with k-fold cross-validation $(k = 10)$ , and the averaged results are taken as the final outcome. This dataset provides 29 landmarks, but only the same 19 landmarks as in the ISBI 2015 dataset are used, ensuring a fair comparison with other methods. This new dataset offers researchers a more challenging scenario for testing the generalization capability of their methods across various imaging devices. + +Evaluation metric. The evaluation of cephalometric landmark detection models typically employs the Mean Radial Error (MRE) and the Successful Detection Rate (SDR) [7]. MRE measures the distance error between the predicted cephalometric landmarks and the ground truth and commonly serves as a measure of detection accuracy. MRE is defined as follows: + +$$ +R_{i}^{j} = \left\| \mu_{A}^{j}\left(x_{i}, y_{i}\right) - \mu_{g}^{j}\left(x_{i}, y_{i}\right) \right\|_{2} \tag{5} +$$ + +$$ +MRE = \frac{1}{TK} \sum_{j=1}^{T} \sum_{i=1}^{K} R_{i}^{j} \tag{6} +$$ + +where $R_{i}^{j}$ denotes the radial error of the $i$ -th landmark in the $j$ -th image, $\mu_A^j (x_i,y_i)$ represents the predicted coordinates of the $i$ -th cephalometric landmark in the $j$ -th image, and $\mu_g^j (x_i,y_i)$ denotes the corresponding ground-truth coordinates. $T$ is the number of test images, and $K$ is the number of cephalometric landmarks in each image. SDR quantifies the proportion of successfully detected landmarks: if the radial error $R_{i}^{j}$ is no greater than $z\mathrm{mm}$ (where $z = 2\mathrm{mm}$ , $2.5\mathrm{mm}$ , $3\mathrm{mm}$ , $4\mathrm{mm}$ ), the detection is considered successful (usually, a $2\mathrm{mm}$ range is acceptable in medical analysis [32,40]). The SDR is defined as follows: + +$$ +SDR_{z} = \frac{1}{TK} \sum_{j=1}^{T} \sum_{i=1}^{K} \left\{ R_{i}^{j} \leq z \right\} \tag{7} +$$ + +A small computational sketch of these two metrics is given after Table 1. + +# 4.3 Ablation Study + +In this section, we perform several ablation studies on the ISBI 2015 Challenge dataset to illustrate the effectiveness of the proposed components. + +Table 1: Varying different model structures. "MF" denotes Multi-level Features. "HP" denotes Heatmap. "RE" denotes Reference Encoder. "RL" denotes RLE Loss. "FE" denotes Finetune Encoder. + +
ID BaselineFeature Extractor moduleReference Encoder moduleFinetune Encoder moduleMRE(mm)2mm(SDR%)
MFHPRERLFERL
12.897454.75
22.258661.91
32.569857.07
42.012565.01
51.674574.04
61.243483.65
71.146886.84
81.151486.31
91.023088.12
101.008889.51
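+ +For reference, the MRE and SDR values reported in Table 1 and the following tables can be computed as in the small NumPy sketch below, following Eqs. (5)-(7); the pixel spacing and the example data are illustrative assumptions, not the exact evaluation script.

```python
import numpy as np

def mre_and_sdr(pred, gt, pixel_spacing_mm=0.1, thresholds_mm=(2.0, 2.5, 3.0, 4.0)):
    """Mean Radial Error (mm) and Success Detection Rates (%), Eqs. (5)-(7).

    pred, gt: arrays of shape (T, K, 2) holding landmark (x, y) positions in pixels.
    pixel_spacing_mm: assumed isotropic pixel size used to convert pixels to mm.
    """
    radial_err = np.linalg.norm(pred - gt, axis=-1) * pixel_spacing_mm   # (T, K)
    mre = float(radial_err.mean())
    sdr = {z: float((radial_err <= z).mean() * 100.0) for z in thresholds_mm}
    return mre, sdr

# Toy example with fabricated coordinates (illustrative only).
pred = np.random.rand(150, 19, 2) * 2000.0
gt = pred + np.random.randn(150, 19, 2) * 10.0
mre, sdr = mre_and_sdr(pred, gt)
print(f"MRE = {mre:.4f} mm, SDR = {sdr}")
```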
+ +![](images/9b23a2860ab6266ea539399f6be4a473cb4b3a9bb16667c261b2968306e0883d.jpg) +Fig. 4: Heatmap visualization. The attention heatmaps come from the feature extraction module. + +![](images/ee135c863857534f12a34beb6e7e2166a632a2b99df0302be0a671f6f1734b16.jpg) + +![](images/3e16d6d65242bfddf8cf98e1a4804110176c3bf7fb99b2badc7791559221a80e.jpg) + +![](images/d7dff5e6889794f02041e3f7a4e59ecc2b7fc3bf7ead126dd5f7e62531ca3ed0.jpg) + +Varying the model structures. We conduct experiments to verify the different model structures; all results are presented in Table 1. Regarding the feature extractor module, adding the multi-level feature (MF) module improves the baseline by $0.6388\mathrm{mm}$ in MRE and $7.16\%$ in $2\mathrm{mm}$ SDR, while introducing the heatmap (HP) module improves the baseline by $0.3276\mathrm{mm}$ and $2.32\%$ . When both the MF and HP modules are integrated, there is an enhancement of $0.8849\mathrm{mm}$ and $10.26\%$ over the baseline, underscoring the significant role of the feature extractor module in accuracy improvement. For the reference encoder module, adding the reference encoder (RE) and RLE loss (RL) components on top of the baseline yields an improvement of $1.654\mathrm{mm}$ and $28.9\%$ . When used in conjunction with the feature extractor module, the model's accuracy further increases by $0.0996\mathrm{mm}$ and $3.19\%$ . Regarding the finetune encoder module, its combined use with the feature extractor module leads to an improvement of $1.8744\mathrm{mm}$ and $33.37\%$ . The highest accuracy, $1.0088\mathrm{mm}$ and $89.51\%$ , is achieved when the finetune encoder module is combined with both the reference encoder module and the feature extractor module. This underscores the significant impact of the three proposed modules on model accuracy. Finally, we visualize the attention heatmaps in Figure 4. The heatmaps are highly responsive at locations near the cephalometric landmarks. + +Varying the levels of the fused feature map. We explore the impact of feeding different levels of fused feature maps into the proposed finetune encoder. As shown in Table 2, the performance grows consistently with more levels of fused feature maps, e.g., $89.20\%$ , $89.33\%$ , $89.42\%$ , and $89.51\%$ 2mm SDR for 2, 3, 4, and 5 levels of feature maps, respectively. + +Varying parameters of the encoder module. We study the impact of the encoder modules on model performance from two aspects: the number of layers and the feature dimension. To simplify the validation procedure, the reference encoder and finetune encoder modules are set to the same number of layers and the same dimension. First, we investigate the effect of varying the dimension of the encoder module. As illustrated in Table 3, model performance improves as the dimension of the encoder layers increases, peaking at a dimension of 512. Furthermore, we conduct experiments varying the number of encoder layers. As shown in Table 4, the performance grows over the first four layers and saturates at the fifth encoder layer. + +Table 2: Varying the scale levels of the fused feature map for the feature extraction module. Table 3: Varying the feature query dimensions of the encoder module. + +
F5 F4 F3 F2 MRE(mm)SDR%
2mm2.5mm3mm4mm
1.018189.2093.4896.18 98.41
✓ ✓1.015689.3393.4996.32 98.41
✓ ✓ ✓1.011389.4293.5096.38 98.54
✓ ✓ ✓ ✓1.008889.5193.5496.42 98.56
+ +
Dim MRE(mm)SDR%
2mm2.5mm3mm4mm
1281.020189.0393.2695.7698.17
2561.019489.3293.3596.0298.32
5121.008889.5193.5896.4298.56
7681.009189.4793.6196.3998.53
+ +Varying the input image resolutions. We undertake experimental investigations to ascertain the robustness of our method across varying input resolutions. As depicted in Table 5, there is a significant enhancement in the performance of the model concomitant with an increase in the resolution of input images. When the input image resolution is $1024 \times 1024$ , the model reaches $1.0088\mathrm{mm}$ and $89.51\%$ in MRE and $2\mathrm{mm}$ SDR metrics respectively. A further escalation in input image resolution results in a decline for model performance. + +Table 4: Varying the numbers of encoder Table 5: Varying the input image resolutions. + +
NumMRE(mm)SDR%
2mm2.5mm3mm4mm
11.083587.2393.0595.5197.96
21.024788.9893.3195.9298.32
31.013789.4693.4796.2898.47
41.008889.5193.5496.4298.56
51.009189.4893.5496.4598.59
+ +
ResolutionMRE(mm)SDR%
2mm2.5mm3mm4mm
256×2561.201284.5691.7995.4498.49
512×5121.067488.0793.3096.2598.57
768×7681.012989.4093.3396.0798.60
1024×10241.008889.5193.5496.4298.56
1280×12801.015389.3193.5195.4498.32
+ +# 4.4 Main Result + +We evaluate our method on two cephalometric landmark datasets: the ISBI 2015 Challenge [37] and ISBI 2023 Challenge [25] datasets. The final results are presented in Tables 6, 7, and 8. The proposed approach achieves the lowest Mean Radial Error (MRE) and the highest $2\mathrm{mm}$ Success Detection Rate (SDR), which is considered the clinically accepted threshold. Moreover, our method achieves end-to-end training and prediction for cephalometric landmarks. + +ISBI 2015 Challenge test1. Table 6 presents the evaluation results for the ISBI 2015 Challenge test1 dataset. The compared state-of-the-art methods can be categorized into heatmap-based and regression-based methods. Our method demonstrates clear superiority over heatmap-based methods. Compared to the best heatmap-based method [2], our method achieves improvements of $0.11\mathrm{mm}$ and $1.48\%$ in the MRE and $2\mathrm{mm}$ SDR metrics, respectively. Additionally, compared to the best regression-based method, our method achieves an improvement of $1.19\%$ in the $2\mathrm{mm}$ SDR metric. Moreover, compared to the best previous approach, our method exhibits a significant advantage in terms of GFLOPs. In addition, compared to other low-resolution methods, our method has the lowest GFLOPs of only 23.0, while its $2\mathrm{mm}$ SDR reaches $88.07\%$ , which is superior to the other + +![](images/f84675045c7467992bfbf029391e321cab81bb3bee095df7e9ec5d81b656dc83.jpg) +(a) ISBI 2015 Challenge test1 + +![](images/96a60d3f51f97697365f9129c7e16828285d38b9b52a19b1ff374c982cfc87aa.jpg) + +![](images/69c709f56919e3106140e022a2d76da06f1a9b513b71628d8a9015e9ebb88024.jpg) +Fig. 5: Qualitative detection results on the ISBI 2015 and 2023 Challenge datasets. (a) and (b) correspond to the detection results for the ISBI 2015 Challenge test1 and test2. (c) depicts the detection outcomes for the ISBI 2023 Challenge. The blue landmarks represent results annotated by medical professionals, while the red landmarks indicate the outcomes predicted by the model. + +![](images/1bc7c22a5cad2e8f9544b93a63a134fb58d958eca77bcc026b41b7409bd9cff0.jpg) +(b) ISBI 2015 Challenge test2 + +![](images/55fb6e7001b9ed6cb435b90622122a995f57f3aea94b8d5906abf1e5a28e9e78.jpg) +(c) ISBI 2023 Challenge + +![](images/b1cbf1edff9f9048417490340bbfbda62f8e196527928a3ebc5e0734975aa7b8.jpg) + +Table 6: Quantitative results on the ISBI 2015 Challenge test1 dataset. * denotes other methods we implemented. Bold represents the best result. + +
MethodBackboneResolutionGFLOPs MRE(mm)SDR%
2mm2.5mm3mm4mm
Heatmap-based Methods
Chen R et al. [6]ResNet50800×640215.71.1786.6792.6795.5498.53
Zhong Z et al. [40]U-Net290×290+19×100×10092.21.1286.9191.8294.8897.90
CephaNN [26]ResNeXt50800×640982.81.1587.6193.1696.3598.74
Yao J et al. [34]ResNet18576×512+19×96×9640.11.1486.8493.0295.4398.95
Ao Y et al. [2]Densenet121800×640157.21.1288.0392.7395.9698.48
Huang K et al. [13]---1.0987.8792.4595.5498.59
SimCC* [21]HRNet48800×640164.91.1287.1691.9695.3798.18
Regression-based Methods
Gilmour L et al. [11]ResNet342432×1920220.21.0188.3293.1296.1498.63
Song Y et al. [29]ResNet50256×256+19×256×256102.51.0886.4091.7094.8097.80
Song Y et al. [30]U-Net480×387286.81.1985.2091.2094.4097.20
Zeng M et al. [36]---1.3481.3789.0993.7997.86
King C H et al. [17]---1.1786.1491.7294.9197.96
Hong W et al. [12]---1.1285.2690.6793.5497.19
Poseur* [24]ResNet50800×64046.11.1486.5691.0994.0097.23
OursResNet34512×51223.01.0788.0793.3096.2598.57
OursResNet341024×102495.01.0189.5193.5496.4298.56
+ +methods. The qualitative detection results on the ISBI 2015 Challenge test1 dataset are displayed in Figure 5a. + +ISBI 2015 Challenge test2. The evaluation results for the ISBI 2015 Challenge test2 dataset are presented in Table 7. Our method outperforms heatmap-based methods by significant margins. Compared to the best existing method [13], our method reduces the MRE by $0.07\mathrm{mm}$ and increases the $2\mathrm{mm}$ SDR by $0.48\%$ . In addition, we introduce an end-to-end human keypoint detection method, implemented with the deformable decoder architecture, into the cephalometric landmark detection task. Experiments show that our method is significantly more accurate than this human keypoint method, and our method is also more convenient to deploy. Finally, all published methods perform better on the ISBI 2015 Challenge Test1 dataset than on Test2; it appears that the data distribution of Test1 is more consistent with that of the training set. Qualitative detection results of our method on the ISBI 2015 Challenge test2 dataset can be found in Figure 5b. + +Table 7: Quantitative results on the ISBI 2015 Challenge test2 dataset. + +
MethodMRE(mm)SDR%
2mm2.5mm3mm
Heatmap-based Methods
Chen R et al. [6]1.4875.0582.8488.5395.05
Zhong Z et al. [40]1.4276.0082.9088.7494.32
CephaNN [26]1.4376.3282.9587.9594.63
Yao J et al. [34]1.4875.4482.0386.6595.12
Ao Y et al. [2]1.4277.0084.4289.4795.21
Huang K et al. [13]1.3479.0587.9589.7995.05
SimCC* [21]1.5474.1680.6886.3294.05
Regression-based Methods
Gilmour L et al. [11]1.3377.0583.1688.8494.89
Song Y et al. [29]1.5474.0081.3087.5094.30
Song Y et al. [30]1.6472.2079.5085.0093.50
Zeng M et al. [36]1.6470.5879.5386.0593.32
King C H et al. [17]1.5074.5881.7487.2694.73
Hong W et al. [12]1.2879.2485.3290.4796.32
Poseur* [24]1.4874.4281.3786.6893.63
Ours1.2779.5386.4791.1196.32
+ +Table 8: Quantitative results on the ISBI 2023 Challenge. + +
methodMRE(mm)SDR%
2mm2.5mm3mm4mm
Jin H et al. [15]1.220083.7689.7192.7996.08
Poseur* [24]0.998288.5192.8295.3797.79
SimCC* [21]1.079588.3993.1295.3197.81
Huang K et al.* [13]1.074787.8792.5294.8797.42
Gilmour L et al.* [11]0.979389.3793.4795.9797.42
Ours0.937290.6894.2495.9797.89
+ +ISBI 2023 Challenge test. Regarding the ISBI 2023 Challenge test dataset, as shown in Table 8, our method achieves the best performance on all metrics. Compared to the best-performing method [11], our approach significantly reduces the Mean Radial Error (MRE) from $0.9793\mathrm{mm}$ to $0.9372\mathrm{mm}$ and raises the $2\mathrm{mm}$ Success Detection Rate (SDR) from $89.37\%$ to $90.68\%$ . Moreover, in comparison with transformer-based methods, our approach leads by $0.061\mathrm{mm}$ in MRE and $2.17\%$ in $2\mathrm{mm}$ SDR. Lastly, the qualitative detection results of our method on the ISBI 2023 Challenge test dataset are depicted in Figure 5c. + +# 5 Conclusion + +In this paper, we propose a novel regression model for cephalometric landmark detection on high-resolution X-ray images. The model employs only the encoder module of the transformer framework to construct the relationship between landmark features and image features. It regresses cephalometric landmark coordinates from coarse to fine and supports complete end-to-end training. Moreover, compared to heatmap-based methods, our model boasts low memory consumption and robustness against missing landmarks. It also offers a more straightforward end-to-end design than current regression-based methods, performing one-time landmark detection on high-resolution X-ray images. Extensive experiments on the ISBI2015 and ISBI2023 datasets demonstrate that our method achieves state-of-the-art performance compared with regression-based and heatmap-based methods. + +# Acknowledgements + +This work was supported by the National Natural Science Foundation of China (62122059, 82330064). + +# References + +1. Albarakati, S., Kula, K., Ghoneima, A.: The reliability and reproducibility of cephalometric measurements: a comparison of conventional and digital methods. Dentomaxillofacial Radiology 41(1), 11-17 (2012) +2. Ao Y, W.H.: Feature aggregation and refinement network for 2d anatomical landmark detection. Journal of Digital Imaging 36(2), 547-561 (2023) +3. B. Ibragimov, B. Likar, F.P., Vrtovec, T.: Automatic cephalometric x-ray landmark detection by applying game theory and random forests. In: Proc. ISBI Int. Symp. on Biomedical Imaging (2014) +4. Cardillo, J., Sid-Ahmed, M.A.: An image processing system for locating craniofacial landmarks. IEEE Transactions on Medical Imaging 13(2), 275-289 (1994) +5. Carion N, Massa F, S.G.e.a.: End-to-end object detection with transformers. European Conference on Computer Vision. Cham: Springer International Publishing pp. 213-229 (2020) +6. Chen, R., Ma, Y., Chen, N., Lee, D., Wang, W.: Cephalometric landmark detection by attentive feature pyramid fusion and regression-voting. In: Medical Image Computing and Computer Assisted Intervention-MICCAI 2019: 22nd International Conference, Shenzhen, China, October 13-17, 2019, Proceedings, Part III 22. pp. 873-881. Springer (2019) +7. Devereux, L., Moles, D., Cunningham, S.J., McKnight, M.: How important are lateral cephalometric radiographs in orthodontic treatment planning? American Journal of Orthodontics and Dentofacial Orthopedics 139(2), e175-e181 (2011) +8. Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018) +9. Dosovitskiy A, Beyer L, K.A.e.a.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020) +10.
El-Feghi, M.S.A., Ahmadi, M.: Automatic localization of craniofacial landmarks for assisted cephalometry. Pattern Recognition 37(3), 609-621 (2004) +11. Gilmour L, R.N.: Locating cephalometric x-ray landmarks with foveated pyramid attention. Medical Imaging with Deep Learning. PMLR pp. 262-276 (2020) +12. Hong W, Kim S M, C.J.e.a.: Deep reinforcement learning using a multi-scale agent with a normalized reward strategy for automatic cephalometric landmark detection. 2023 4th International Conference on Big Data Analytics and Practices pp. 1-6 (2023) +13. Huang K, F.F.: An intelligent shooting reward learning network scheme for medical image landmark detection. Applied Sciences 12(20), 10190 (2022) +14. Indermun S, Shaik S, N.C.J.K.M.R.: Human examination and artificial intelligence in cephalometric landmark detection - is AI ready to take over? Dentomaxillofacial Radiology, 10.1259/dmfr.20220362 (2023) +15. Jin H, Che H, C.H.: Unsupervised domain adaptation for anatomical landmark detection. International Conference on Medical Image Computing and Computer-Assisted Intervention. Cham: Springer Nature Switzerland pp. 695-705 (2023) + +16. Kaiming He, Xiangyu Zhang, S.R.J.S.: Deep residual learning for image recognition. arXiv preprint arXiv:1512.03385 (2015) +17. King C H, Wang Y L, L.W.Y.e.a.: Automatic cephalometric landmark detection on x-ray images using object detection. 2022 IEEE 19th International Symposium on Biomedical Imaging (ISBI) pp. 1-4 (2022) +18. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014) +19. Lee H, Park M, K.J.: Cephalometric landmark detection in dental x-ray images using convolutional neural networks. Medical Imaging 2017: Computer-Aided Diagnosis 10134, 494-499 (2017) +20. Li, H., Guo, Z., Rhee, S.M., Han, S., Han, J.J.: Towards accurate facial landmark detection via cascaded transformers. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4176-4185 (2022) +21. Li Y, Yang S, L.P.e.a.: SimCC: A simple coordinate classification perspective for human pose estimation. European Conference on Computer Vision. Cham: Springer Nature Switzerland pp. 89-106 (2022) +22. Lindner, C., Cootes, T.: Fully automatic cephalometric evaluation using random forest regression-voting. IEEE International Symposium on Biomedical Imaging (ISBI) (2015) +23. Mamta Juneja, Poojita Garg, R.K.e.a.: A review on cephalometric landmark detection techniques. Biomedical Signal Processing and Control 66, 102486 (2021) +24. Mao, W., Ge, Y., Shen, C., Tian, Z., Wang, X., Wang, Z., Hengel, A.v.d.: Poseur: Direct human pose regression with transformers. Proceedings of the European Conference on Computer Vision (ECCV) (October 2022) +25. Muhammad Anwaar Khalid, K.Z.e.a.: Cepha29: Automatic cephalometric landmark detection challenge 2023. arXiv preprint arXiv:2212.04808 (2022) +26. Qian J, Luo W, C.M.e.a.: CephaNN: a multi-head attention network for cephalometric landmark detection. IEEE Access 8, 112633-112641 (2020) +27. Ronneberger O, Fischer P, B.T.: U-Net: Convolutional networks for biomedical image segmentation. Medical Image Computing and Computer-Assisted Intervention-MICCAI 2015: 18th International Conference, Munich, Germany, October 5-9, 2015. pp. 234-241. Springer (2015) +28. Shaker A, Maaz M, R.H.e.a.: UNETR++: delving into efficient and accurate 3d medical image segmentation. arXiv preprint arXiv:2212.04497 (2022) +29. Song, Y., Qiao, X., Iwamoto, Y., Chen, Y.w.: Automatic cephalometric landmark detection on x-ray images using a deep-learning method.
Applied Sciences 10(7), 2547 (2020) +30. Song Y, Qiao X, I.Y.e.a.: An efficient deep learning-based coarse-to-fine cephalometric landmark detection method. IEICE Transactions on Information and Systems 104(8), 1359-1366 (2021) +31. Vaswani A, Shazeer N, P.N.e.a.: Attention is all you need. Advances in Neural Information Processing Systems (2017) +32. Wang, C.W., Huang, C.T., Hsieh, M.C., Li, C.H., Chang, S.W., Li, W.C., Vandaele, R., Marée, R., Jodogne, S., Geurts, P., et al.: Evaluation and comparison of anatomical landmark detection methods for cephalometric x-ray images: a grand challenge. IEEE Transactions on Medical Imaging 34(9), 1890-1900 (2015) +33. Yang, S., Quan, Z., Nie, M., Yang, W.: TransPose: Keypoint localization via transformer. IEEE/CVF International Conference on Computer Vision (ICCV) (2021) +34. Yao J, Zeng W, H.T.e.a.: Automatic localization of cephalometric landmarks based on convolutional neural network. American Journal of Orthodontics and Dentofacial Orthopedics 161(3), e250-e259 (2022)
Arxiv preprint arXiv:2303.10349v1 (2023) \ No newline at end of file diff --git a/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/images.zip b/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..344ad120eb137056c8c5054160f6f4375423f04f --- /dev/null +++ b/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7cbdb492c7249c864959e37e966124df5e9d1a031d6883ee9ecd43541d4607e9 +size 513690 diff --git a/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/layout.json b/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..24e7c34587f4302aa3295931b827ec0eb42f4f39 --- /dev/null +++ b/2024/A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image/layout.json @@ -0,0 +1,10346 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 136, + 111, + 477, + 166 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 111, + 477, + 166 + ], + "spans": [ + { + "bbox": [ + 136, + 111, + 477, + 166 + ], + "type": "text", + "content": "A Cephalometric Landmark Regression Method based on Dual-encoder for High-resolution X-ray Image" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 137, + 185, + 477, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 185, + 477, + 210 + ], + "spans": [ + { + "bbox": [ + 137, + 185, + 477, + 210 + ], + "type": "text", + "content": "Chao Dai" + }, + { + "bbox": [ + 137, + 185, + 477, + 210 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + }, + { + "bbox": [ + 137, + 185, + 477, + 210 + ], + "type": "text", + "content": ", Yang Wang" + }, + { + "bbox": [ + 137, + 185, + 477, + 210 + ], + "type": "inline_equation", + "content": "^{2\\dagger(\\boxtimes)}" + }, + { + "bbox": [ + 137, + 185, + 477, + 210 + ], + "type": "text", + "content": ", Chaolin Huang" + }, + { + "bbox": [ + 137, + 185, + 477, + 210 + ], + "type": "inline_equation", + "content": "^{3\\dagger}" + }, + { + "bbox": [ + 137, + 185, + 477, + 210 + ], + "type": "text", + "content": ", Jiakai Zhou" + }, + { + "bbox": [ + 137, + 185, + 477, + 210 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 137, + 185, + 477, + 210 + ], + "type": "text", + "content": ", Qilin Xu" + }, + { + "bbox": [ + 137, + 185, + 477, + 210 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 137, + 185, + 477, + 210 + ], + "type": "text", + "content": ", and Minpeng Xu" + }, + { + "bbox": [ + 137, + 185, + 477, + 210 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 264, + 220, + 349, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 220, + 349, + 230 + ], + "spans": [ + { + "bbox": [ + 264, + 220, + 349, + 230 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 264, + 220, + 349, + 230 + ], + "type": "text", + "content": " Tianjin University" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 237, + 231, + 375, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, 
+ 231, + 375, + 242 + ], + "spans": [ + { + "bbox": [ + 237, + 231, + 375, + 242 + ], + "type": "text", + "content": "2 Anhui University of Technology" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 210, + 242, + 403, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 210, + 242, + 403, + 252 + ], + "spans": [ + { + "bbox": [ + 210, + 242, + 403, + 252 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 210, + 242, + 403, + 252 + ], + "type": "text", + "content": " Jiangxi University of Science and Technology" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 197, + 253, + 416, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 253, + 416, + 263 + ], + "spans": [ + { + "bbox": [ + 197, + 253, + 416, + 263 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 197, + 253, + 416, + 263 + ], + "type": "text", + "content": " Nanjing University of Aeronautics and Astronautics" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 257, + 264, + 358, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 264, + 358, + 274 + ], + "spans": [ + { + "bbox": [ + 257, + 264, + 358, + 274 + ], + "type": "text", + "content": "5 West Anhui University" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 159, + 303, + 455, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 303, + 455, + 534 + ], + "spans": [ + { + "bbox": [ + 159, + 303, + 455, + 534 + ], + "type": "text", + "content": "Abstract. Accurate detection of cephalometric landmarks is crucial for orthodontic diagnosis and treatment planning. Current methods rely on a cascading form of multiple models to achieve higher accuracy, which greatly complicates both training and deployment processes. In this paper, we introduce a novel regression paradigm capable of simultaneously detecting all cephalometric landmarks in high-resolution X-ray images. Our approach only utilizes the encoder module from the transformer to design a dual-encoder architecture, enabling precise detection of cephalometric landmark positions from coarse to fine. Specifically, the entire model architecture comprises three main components: a feature extractor module, a reference encoder module, and a fine-tune encoder module. These components are respectively responsible for feature extraction and fusion for X-ray images, coarse localization of cephalometric landmark, and fine-tuning of cephalometric landmark positioning. Notably, our framework is fully end-to-end differentiable and innately learns to exploit the interdependencies among cephalometric landmarks. Experiments demonstrate that our method significantly surpasses the current state-of-the-art methods in Mean Radical Error (MRE) and the 2mm Success Detection Rate (SDR) metrics, while also reducing computational resource consumption. 
The code is available at https://github.com/huang229/D-CeLR" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 159, + 544, + 453, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 544, + 453, + 566 + ], + "spans": [ + { + "bbox": [ + 159, + 544, + 453, + 566 + ], + "type": "text", + "content": "Keywords: Cephalometric landmark " + }, + { + "bbox": [ + 159, + 544, + 453, + 566 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 159, + 544, + 453, + 566 + ], + "type": "text", + "content": " High-resolution " + }, + { + "bbox": [ + 159, + 544, + 453, + 566 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 159, + 544, + 453, + 566 + ], + "type": "text", + "content": " Dual-encoder " + }, + { + "bbox": [ + 159, + 544, + 453, + 566 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 159, + 544, + 453, + 566 + ], + "type": "text", + "content": " Reference encoder " + }, + { + "bbox": [ + 159, + 544, + 453, + 566 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 159, + 544, + 453, + 566 + ], + "type": "text", + "content": " Finetune encoder" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 586, + 230, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 586, + 230, + 598 + ], + "spans": [ + { + "bbox": [ + 132, + 586, + 230, + 598 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 610, + 482, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 610, + 482, + 647 + ], + "spans": [ + { + "bbox": [ + 130, + 610, + 482, + 647 + ], + "type": "text", + "content": "Cephalometric analysis represents a pivotal diagnostic tool extensively utilized in orthodontics and orthognathic surgery. This analysis involves the annotation of dental, skeletal, and soft tissue structures in lateral cephalometric radiographs." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 141, + 653, + 444, + 666 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 653, + 444, + 666 + ], + "spans": [ + { + "bbox": [ + 141, + 653, + 444, + 666 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 141, + 653, + 444, + 666 + ], + "type": "text", + "content": " Equal contribution. (Corresponding authors (youngnuaa@gmail.com)." + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 167, + 116, + 294, + 256 + ], + "blocks": [ + { + "bbox": [ + 167, + 116, + 294, + 256 + ], + "lines": [ + { + "bbox": [ + 167, + 116, + 294, + 256 + ], + "spans": [ + { + "bbox": [ + 167, + 116, + 294, + 256 + ], + "type": "image", + "image_path": "ddb02340701773010dc447c6e4272a3d159550f84f95fcae76d431a00355ce03.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 165, + 258, + 447, + 275 + ], + "lines": [ + { + "bbox": [ + 165, + 258, + 447, + 275 + ], + "spans": [ + { + "bbox": [ + 165, + 258, + 447, + 275 + ], + "type": "text", + "content": "(a) Cephalometric landmark coordi-(b) Cephalometric landmark medical name nate positions." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 130, + 285, + 482, + 319 + ], + "lines": [ + { + "bbox": [ + 130, + 285, + 482, + 319 + ], + "spans": [ + { + "bbox": [ + 130, + 285, + 482, + 319 + ], + "type": "text", + "content": "Fig. 1: Cephalometric landmark visualization. (a) Cephalometric landmark coordinate positions. Red indicates hard tissue points and blue indicates soft tissue points. (b) Cephalometric landmark medical name." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 294, + 115, + 447, + 256 + ], + "blocks": [ + { + "bbox": [ + 294, + 115, + 447, + 256 + ], + "lines": [ + { + "bbox": [ + 294, + 115, + 447, + 256 + ], + "spans": [ + { + "bbox": [ + 294, + 115, + 447, + 256 + ], + "type": "table", + "html": "
19 Landmarks in Cephalometric
1Sella11Lower Incisor Tip
2Nasion12Upper Incisor Tip
3Orbitale13Labrale superius
4Porion14Labrale inferius
5Upper Incisor Apex15Subnasale
6B-point16Soft Tissue Pogonion
7Pogonion17Posterior Nasal Spine
8Menton18Anterior Nasal Spine
9Gnathion19Articulare
10Gonion
", + "image_path": "dd8d394755327c8bc04e69e994a1fb50ab02fef8c50e49f75d806d831c4db2cf.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 342, + 480, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 342, + 480, + 426 + ], + "spans": [ + { + "bbox": [ + 130, + 342, + 480, + 426 + ], + "type": "text", + "content": "As illustrated in Figure 1, these cephalometric landmarks are core to the analysis, providing reference points for subsequent qualitative assessments of angles and distances. However, the manual annotation of these landmarks is a laborious, time-consuming, and highly subjective task, impacting the accuracy of the annotations. Consequently, a precise and robust automated method for annotating cephalometric landmarks holds significant importance for effective treatment planning [1,6,7,14,23]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 426, + 482, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 426, + 482, + 628 + ], + "spans": [ + { + "bbox": [ + 130, + 426, + 482, + 628 + ], + "type": "text", + "content": "Existing methods for cephalometric landmark detection can be broadly classified into two categories: heatmap-based and regression-based approaches. The heatmap-based approach involves predicting a heatmap that indicates the probability of each pixel in a region corresponding to various cephalometric landmarks. This modality has seen extensive applications in the detection of cephalometric landmarks. For example, Chen et al. [6] introduced a feature pyramid fusion-based heatmap method for simultaneous landmark detection, achieving impressive results. Qian J et al. [26] advanced the accuracy of cephalometric landmark detection by designing a multi-head attention module and a novel regional loss function. However, heatmap-based methods exhibit certain disadvantages. 1). The ground truth requires manual design and heuristic adjustments, with inevitable noise impacting the final outcomes [13,29,40]. 2). post-processing operations are necessary to locate single maximum values in heatmaps. These operations are typically heuristic and non-differentiable, undermining the model's capacity for end-to-end training. 3). models generally adopt a U-net structure [27,28,41], while processing high-resolution X-ray images, consumes more computational resources and is prone to missing cephalometric landmarks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 630, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 630, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 630, + 481, + 665 + ], + "type": "text", + "content": "Regression-based methods directly map the input image to the coordinates of cephalometric landmarks, typically employing a feedforward network (FFN) for prediction. 
The regression-based methods is considerably more streamlined" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 259, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 259, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 259, + 102 + ], + "type": "text", + "content": "C. Dai, Y. Wang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 259 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 259 + ], + "type": "text", + "content": "compared to heatmap-based methods, as the prediction of cephalometric landmarks is inherently a process of determining a set of coordinate values. Numerous regression-based techniques exist for predicting cephalometric landmarks. For example, Song Y et al. [29] utilizes a base network for coarse localization of cephalometric landmarks, followed by region-specific cropping and refined positioning using a secondary model. Gilmour L et al. [11] constructs individual models for each landmark to predict their locations. Regression-based methods circumvent the necessity for non-maximum suppression, heatmap generation, and quantization error correction. However, to achieve higher precision on high-resolution X-ray images, current approaches predominantly rely on cascading multiple models, which compromises the inherent advantages of end-to-end training and prediction for regression-based methods." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 262, + 482, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 262, + 482, + 467 + ], + "spans": [ + { + "bbox": [ + 130, + 262, + 482, + 467 + ], + "type": "text", + "content": "To address these issues, we introduce a novel regression paradigm that exclusively utilizes the encoder module of transformer for the one-time detection of all cephalometric landmarks on high-resolution X-ray images. Specifically, we design a feature extraction module based on Convolutional Neural Networks (CNN) to accomplish feature extraction and fusion for X-ray images. Subsequently, the extracted features are fed into a reference encoder module for the coarse localization of cephalometric landmarks. Finally, the coarsely localized cephalometric landmarks, along with the fused features, are inputted into a finetune encoder module, which iteratively refines the positioning of the cephalometric landmarks from coarse to fine detail. Moreover, our method pioneers the complete end-to-end training and deployment for the detection of cephalometric landmarks on high-resolution X-ray images. Extensive experiments demonstrate that our approach achieves state-of-the-art performance on popular benchmarks with a ResNet-34 backbone. 
Specifically, we achieve a Mean Radial Error (MRE) of " + }, + { + "bbox": [ + 130, + 262, + 482, + 467 + ], + "type": "inline_equation", + "content": "1.01\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 262, + 482, + 467 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 262, + 482, + 467 + ], + "type": "inline_equation", + "content": "1.27\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 262, + 482, + 467 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 262, + 482, + 467 + ], + "type": "inline_equation", + "content": "0.9372\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 262, + 482, + 467 + ], + "type": "text", + "content": " on the ISBI2015 test1, ISBI2015 test2, and ISBI2023 test datasets, respectively. Furthermore, our method significantly reduces GFLOPs, by " + }, + { + "bbox": [ + 130, + 262, + 482, + 467 + ], + "type": "inline_equation", + "content": "132\\%" + }, + { + "bbox": [ + 130, + 262, + 482, + 467 + ], + "type": "text", + "content": " compared to the previously best method [11]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 146, + 470, + 372, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 470, + 372, + 480 + ], + "spans": [ + { + "bbox": [ + 146, + 470, + 372, + 480 + ], + "type": "text", + "content": "The main contributions of this work are as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 137, + 503, + 480, + 662 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 138, + 503, + 479, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 503, + 479, + 562 + ], + "spans": [ + { + "bbox": [ + 138, + 503, + 479, + 562 + ], + "type": "text", + "content": "- We propose an innovative regression paradigm for high-resolution X-ray images, which enables the prediction of all cephalometric landmarks through a single model. Moreover, our method facilitates end-to-end training and prediction, which not only improves efficiency but also enhances the feasibility of the model in practical applications." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 137, + 567, + 480, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 567, + 480, + 611 + ], + "spans": [ + { + "bbox": [ + 137, + 567, + 480, + 611 + ], + "type": "text", + "content": "- We have designed a dual-encoder structure, comprising a reference encoder module and a finetune encoder module. The reference encoder module accomplishes coarse localization of cephalometric landmarks, while the finetune encoder module refines this localization in a layer-by-layer updating manner." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 137, + 617, + 480, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 617, + 480, + 662 + ], + "spans": [ + { + "bbox": [ + 137, + 617, + 480, + 662 + ], + "type": "text", + "content": "- Our proposed regression approach significantly enhances the precision of cephalometric landmark detection. Compared to state-of-the-art methods, we achieve superior performance on both the ISBI2015 and ISBI2023 test datasets." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 406, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 406, + 91, + 447, + 100 + ], + "type": "text", + "content": "Du-CeLR" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 114, + 237, + 127 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 114, + 237, + 127 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 237, + 127 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 139, + 482, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 139, + 482, + 224 + ], + "spans": [ + { + "bbox": [ + 130, + 139, + 482, + 224 + ], + "type": "text", + "content": "With the seminal work of Lee et al. [19], which first introduced the use of deep learning for cephalometric landmark detection. Deep learning-based methods [2,17,34] have fully surpassed traditional pattern matching [4,10] and random forest regression-based methods [3,22] in terms of accuracy for cephalometric landmark detection. This section primarily focuses on two deep learning-based approaches for cephalometric landmark detection and the transformer architectures for regression of keypoints." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 239, + 289, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 239, + 289, + 251 + ], + "spans": [ + { + "bbox": [ + 132, + 239, + 289, + 251 + ], + "type": "text", + "content": "2.1 Heatmap-Based Methods" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 258, + 482, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 258, + 482, + 475 + ], + "spans": [ + { + "bbox": [ + 130, + 258, + 482, + 475 + ], + "type": "text", + "content": "Heatmap-based methods predict the likelihood of each pixel in the image corresponding to each cephalometric landmark. King C H et al. [17] utilized object detection techniques and designed a multitask loss without bounding box constraints to optimize landmark acquisition in the model. Chen R et al. [6] proposed a heatmap detection method based on feature pyramid fusion to complete all cephalometric landmark detection, surpassing other methods in effectiveness, but their multi-scale feature pyramid fusion is highly memory-intensive. Zhong Z et al. [40] adopted a two-stage landmark detection approach, which not only reduces memory consumption but also allows for fine-tuning of coarse landmark detection results on local image regions. Qian J et al. [26] enhanced the accuracy to new heights in the ISBI 2015 dataset by designing a multi-head attention module and a new regional loss function, while Ao Y et al. [2] developed a multiscale feature aggregation (MSFA) module and multi-head loss function. Although heatmap-based cephalometric landmark detection achieves high accuracy, its application to high-resolution X-ray images and the common use of U-net structures in models result in substantial memory resource consumption. 
Moreover, the post-processing required in heatmap-based methods disrupts the integrity of end-to-end training and deployment of the model." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 491, + 296, + 503 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 491, + 296, + 503 + ], + "spans": [ + { + "bbox": [ + 132, + 491, + 296, + 503 + ], + "type": "text", + "content": "2.2 Regression-Based Methods" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": "Currently, the majority of regression methods for cephalometric landmark detection on high-resolution X-ray images utilize multi-stage or multi-model strategies. Song Yet et al. [29,30] proposed a method combining traditional regression algorithms with deep learning for coarse localization of landmarks, followed by cropping the region of interest in the original image to create a new image for refined localization using a secondary model. However, their accuracy is substantially lower than that achieved by heatmap-based methods [2, 26]. Zeng M et al. [36] introduced a three-tier cascading neural network for cephalometric landmark regression, akin to the concept used in the MTCNN model [38] for face detection. This approach significantly reduced memory resource consumption but did not achieve the desired level of accuracy. Gilmour L et al. trained 19 distinct models to predict each cephalometric landmark position, attaining accuracy on the ISBI 2015 cephalometric dataset comparable to heatmap-based" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 141, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 141, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 141, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 259, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 259, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 259, + 102 + ], + "type": "text", + "content": "C. Dai, Y. Wang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 200 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 200 + ], + "type": "text", + "content": "methods [26,34]. This greatly encouraged the use of low-memory-consuming regression methods in landmark detection. However, the necessity of maintaining a separate model for each landmark adds complexity to training and deployment. While some regression methods have reached heatmap-based method accuracy levels, they typically involve designing multiple network models for predictions. Moreover, these methods have also not achieved end-to-end training and deployment." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 131, + 218, + 322, + 228 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 218, + 322, + 228 + ], + "spans": [ + { + "bbox": [ + 131, + 218, + 322, + 228 + ], + "type": "text", + "content": "2.3 Transformer-based architectures" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 237, + 480, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 237, + 480, + 393 + ], + "spans": [ + { + "bbox": [ + 130, + 237, + 480, + 393 + ], + "type": "text", + "content": "The Transformer [31], proposed by Vaswani et al., originally designed for natural language processing tasks, employs an encoder-decoder architecture based on self-attention and feed-forward networks. Recently, Transformer-based models have demonstrated significant potential in computer vision tasks [5,9], including various works applying the Transformer structure to keypoint estimation. Such as TransPose [33] and HRFormer [35] utilized the encoder-decoder structure of transformers for human keypoint regression. Poseur [24] and DTLD [20] have adopted the latest deformable transformer architecture for efficient regression of human keypoints and facial landmarks. Despite the high performance achieved by transformer-based methods in keypoint regression tasks, they present certain challenges: 1) They are primarily used for low-resolution images; 2) The deformable transformer architecture is more complex for deployment. In contrast, our method addresses these issues and achieves significantly higher performance." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 411, + 201, + 424 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 411, + 201, + 424 + ], + "spans": [ + { + "bbox": [ + 131, + 411, + 201, + 424 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "spans": [ + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": "The overall architecture, as illustrated in Figure 2, presents our proposed dual-encoder model which progressively predicts cephalometric landmark coordinate from coarse to fine on high-resolution X-ray images. It comprises a feature extractor for image feature extraction, a reference encoder for coarse cephalometric landmark localization, and a finetune encoder for precise cephalometric landmark localization. For the input image, we initially obtain multi-scale features (S2, S3, S4, and S5) and a fused feature " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "F_{u}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": " through the feature extractor (Sec.3.1). The feature map S5 is flattened to produce the image context queries " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "V_{FR}^{C}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": ", and coarse landmark content queries " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "V_{LR}^{C}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": " are initialized randomly. 
The image context queries " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "V_{FR}^{C}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": " and coarse landmark context queries " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "V_{LR}^{C}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": " are fed into the reference encoder along with their position queries " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "V_{R}^{P}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": ", updating to corresponding context queries " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "V_{LR}^{C'}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "V_{FR}^{C'}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": ". Subsequently, the context queries " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "V_{LR}^{C'}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": " are utilized to predict the coarse coordinate of cephalometric landmark " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "\\mu_{R} \\in R^{K \\times 2}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": " and coarse distribution " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "\\sigma_{R} \\in R^{K \\times 1}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": " via FFN (Sec.3.2). Next, the fused feature map Fu is also flattened to generate image context queries " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "V_{FA}^{C}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": ", and fine landmark content queries " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "V_{LA}^{C}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": " are initialized. Unlike the reference encoder module, which solely uses content and position queries as input, the coarse landmark coordinates " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "\\mu_{R}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": " and feature map " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "F_{u}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": " are also fed into the finetune encoder module to update the content queries " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "V_{LA}^{C'}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "V_{FA}^{C'}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": ". 
Finally, the content queries " + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "inline_equation", + "content": "V_{LA}^{C'}" + }, + { + "bbox": [ + 130, + 437, + 481, + 668 + ], + "type": "text", + "content": " is operated" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 406, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 406, + 91, + 447, + 100 + ], + "type": "text", + "content": "Du-CeLR" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 115, + 479, + 289 + ], + "blocks": [ + { + "bbox": [ + 133, + 115, + 479, + 289 + ], + "lines": [ + { + "bbox": [ + 133, + 115, + 479, + 289 + ], + "spans": [ + { + "bbox": [ + 133, + 115, + 479, + 289 + ], + "type": "image", + "image_path": "ffd05bc043a830e7a326b8f26e4a2e5a0d7eea9fb2b9ecbd02ae9a1c96a7bda1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 297, + 482, + 321 + ], + "lines": [ + { + "bbox": [ + 130, + 297, + 482, + 321 + ], + "spans": [ + { + "bbox": [ + 130, + 297, + 482, + 321 + ], + "type": "text", + "content": "Fig. 2: The overview architecture of our method, which contains (a) feature extractor module, (b) reference encoder module and (c) finetune encoder module." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 342, + 482, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 342, + 482, + 428 + ], + "spans": [ + { + "bbox": [ + 130, + 342, + 482, + 428 + ], + "type": "text", + "content": "by the FFN to produce cephalometric landmark coordinate " + }, + { + "bbox": [ + 130, + 342, + 482, + 428 + ], + "type": "inline_equation", + "content": "\\mu_A \\in R^{K \\times 2}" + }, + { + "bbox": [ + 130, + 342, + 482, + 428 + ], + "type": "text", + "content": " and distribution " + }, + { + "bbox": [ + 130, + 342, + 482, + 428 + ], + "type": "inline_equation", + "content": "\\sigma_A \\in R^{K \\times 1}" + }, + { + "bbox": [ + 130, + 342, + 482, + 428 + ], + "type": "text", + "content": " (Sec.3.3). In addition, different loss functions are employed for supervising the training of various modules. For the feature extractor module, Dice loss and Mean Squared Error (MSE) loss are utilized to aid model optimization. For the reference encoder and finetune encoder modules, Residual Log-likelihood Estimation(RLE) loss is applied to optimize the model's output cephalometric landmark coordinates " + }, + { + "bbox": [ + 130, + 342, + 482, + 428 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 130, + 342, + 482, + 428 + ], + "type": "text", + "content": " and distribution " + }, + { + "bbox": [ + 130, + 342, + 482, + 428 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 130, + 342, + 482, + 428 + ], + "type": "text", + "content": " (Sec.3.4)." 
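To make the coarse-to-fine flow just described concrete, a minimal PyTorch-style sketch of the dual-encoder pipeline is given below. It is an illustrative reconstruction under stated assumptions, not the authors' released code: the feature dimensions, the use of nn.TransformerEncoderLayer for both encoders, the grid_sample-based sampling of F_u at the coarse coordinates, and the omission of the positional queries are all simplifications.

import torch
import torch.nn as nn
import torch.nn.functional as F

class DualEncoderSketch(nn.Module):
    # Illustrative sketch only: reference encoder -> coarse (mu_R, sigma_R),
    # finetune encoder layers -> refined (mu_A, sigma_A). Positional queries omitted.
    def __init__(self, dim=256, num_landmarks=19, ref_layers=4, fine_layers=4):
        super().__init__()
        self.reference_encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(dim, nhead=8, batch_first=True), ref_layers)
        self.finetune_layers = nn.ModuleList(
            [nn.TransformerEncoderLayer(dim, nhead=8, batch_first=True) for _ in range(fine_layers)])
        self.lm_queries_ref = nn.Parameter(torch.randn(num_landmarks, dim))
        self.lm_queries_fine = nn.Parameter(torch.randn(num_landmarks, dim))
        self.coarse_head = nn.Linear(dim, 3)   # (x, y, sigma) for the coarse stage
        self.offset_heads = nn.ModuleList([nn.Linear(dim, 2) for _ in range(fine_layers)])
        self.sigma_head = nn.Linear(dim, 1)
        self.K = num_landmarks

    def forward(self, s5, fu):
        # s5: low-resolution feature map, fu: fused feature map, both (B, dim, H, W).
        b = s5.size(0)
        img_q = s5.flatten(2).transpose(1, 2)               # flatten S5 -> image context queries
        lm_q = self.lm_queries_ref.expand(b, -1, -1)        # randomly initialized landmark queries
        out = self.reference_encoder(torch.cat([lm_q, img_q], 1))
        coarse = self.coarse_head(out[:, :self.K])
        mu_r, sigma_r = coarse[..., :2].sigmoid(), coarse[..., 2:]   # coords normalized to [0, 1]

        img_q = fu.flatten(2).transpose(1, 2)               # flatten F_u for the finetune stage
        lm_q = self.lm_queries_fine.expand(b, -1, -1)
        mu = mu_r
        for layer, offset_head in zip(self.finetune_layers, self.offset_heads):
            grid = mu.unsqueeze(2) * 2.0 - 1.0               # (B, K, 1, 2), (x, y) in [-1, 1]
            sampled = F.grid_sample(fu, grid, align_corners=False).squeeze(-1)  # (B, dim, K)
            lm_q = lm_q + sampled.transpose(1, 2)            # inject features sampled at mu
            joint = layer(torch.cat([lm_q, img_q], 1))
            lm_q, img_q = joint[:, :self.K], joint[:, self.K:]
            mu = mu + offset_head(lm_q)                      # add per-layer (dx, dy) offsets
        return mu_r, sigma_r, mu, self.sigma_head(lm_q)

Under these assumptions, a call with S5 and F_u of shape (B, 256, 32, 32) returns the coarse and refined coordinates together with the per-landmark scales that feed the RLE-style regression loss described next.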
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 443, + 251, + 454 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 443, + 251, + 454 + ], + "spans": [ + { + "bbox": [ + 131, + 443, + 251, + 454 + ], + "type": "text", + "content": "3.1 Feature Extractor" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "spans": [ + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "content": "ResNet34 [16] is utilized as the backbone in our model, from which multi-level feature maps [39] are extracted, as illustrated in Figure 2. Initially, we apply downsampling operations to scale the feature maps " + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "inline_equation", + "content": "S2" + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "inline_equation", + "content": "S3" + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "inline_equation", + "content": "S4" + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "content": " to the same dimension and size as the feature map " + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "inline_equation", + "content": "S5" + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "content": ". Subsequently, the feature maps outputted by the backbone are summed with their respective positional maps (Pos) to yield new feature maps " + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "inline_equation", + "content": "F2" + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "inline_equation", + "content": "F3" + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "inline_equation", + "content": "F4" + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "inline_equation", + "content": "F5" + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "content": ". These feature maps " + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "inline_equation", + "content": "F2" + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "inline_equation", + "content": "F3" + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "inline_equation", + "content": "F4" + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "inline_equation", + "content": "F5" + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "content": " are aggregated to generate the fused feature map " + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "inline_equation", + "content": "F_{u}" + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "content": ". 
The feature map " + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "inline_equation", + "content": "S5" + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "content": " is directly fed into the reference encoder module to coarse locate cephalometric landmark, while the fused feature map " + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "inline_equation", + "content": "F_{u}" + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "content": " is fed into the finetune encoder module to precise locate cephalometric landmark. Moreover, to enhance the model's performance, the feature map " + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "inline_equation", + "content": "S5" + }, + { + "bbox": [ + 130, + 462, + 482, + 606 + ], + "type": "text", + "content": " is processed through convolution to generate a heatmap, which is optimized by Dice loss and MSE loss." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 131, + 622, + 256, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 622, + 256, + 634 + ], + "spans": [ + { + "bbox": [ + 131, + 622, + 256, + 634 + ], + "type": "text", + "content": "3.2 Reference Encoder" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 641, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 482, + 666 + ], + "type": "text", + "content": "The reference encoder aims to establish the relationship between cephalometric landmark queries and feature maps, thereby facilitating the coarse prediction of" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 259, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 259, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 259, + 102 + ], + "type": "text", + "content": "C. Dai, Y. Wang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 196, + 118, + 296, + 273 + ], + "blocks": [ + { + "bbox": [ + 196, + 118, + 296, + 273 + ], + "lines": [ + { + "bbox": [ + 196, + 118, + 296, + 273 + ], + "spans": [ + { + "bbox": [ + 196, + 118, + 296, + 273 + ], + "type": "image", + "image_path": "0808e49b6de9810a9e2818388b5de4665ff31f8f3b5e9d18e4583ab83d8d7bb5.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 194, + 274, + 299, + 284 + ], + "lines": [ + { + "bbox": [ + 194, + 274, + 299, + 284 + ], + "spans": [ + { + "bbox": [ + 194, + 274, + 299, + 284 + ], + "type": "text", + "content": "(a) reference encoder module" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 130, + 300, + 479, + 323 + ], + "lines": [ + { + "bbox": [ + 130, + 300, + 479, + 323 + ], + "spans": [ + { + "bbox": [ + 130, + 300, + 479, + 323 + ], + "type": "text", + "content": "Fig. 3: The detailed illustration of (a) reference encoder module and (b) finetune encoder module." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 301, + 118, + 419, + 272 + ], + "blocks": [ + { + "bbox": [ + 301, + 118, + 419, + 272 + ], + "lines": [ + { + "bbox": [ + 301, + 118, + 419, + 272 + ], + "spans": [ + { + "bbox": [ + 301, + 118, + 419, + 272 + ], + "type": "image", + "image_path": "b0ae459d84061ac1761c757f913cc663354387780db2f166156eab90a690907e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 306, + 274, + 408, + 284 + ], + "lines": [ + { + "bbox": [ + 306, + 274, + 408, + 284 + ], + "spans": [ + { + "bbox": [ + 306, + 274, + 408, + 284 + ], + "type": "text", + "content": "(b) finetune encoder module" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "spans": [ + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "text", + "content": "cephalometric landmark. As illustrated in Figures 2b and Figure 3a, the reference encoder module follows the typical transformer encoder paradigm. It comprises " + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "text", + "content": " identical layers within the encoder, each layer consisting of Layer Normalization (LN), Multi-Head Self-Attention (MHSA), and Feed-Forward Networks (FFN). Specifically, we initialize " + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "text", + "content": " cephalometric landmark content queries " + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "inline_equation", + "content": "V_{CL}^{R}" + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "text", + "content": " and utilize the feature map " + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "inline_equation", + "content": "S5" + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "text", + "content": " as the image content queries " + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "inline_equation", + "content": "V_{CF}^{R}" + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "text", + "content": ". Drawing inspiration from the positional encoding of the BERT [8], we generate the positional queries " + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "inline_equation", + "content": "V_{P}^{R}" + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "text", + "content": ". These content and positional queries are fed into the reference encoder. After " + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "text", + "content": " layers of iteration, the reference encoder outputs the updated cephalometric landmark content queries " + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "inline_equation", + "content": "V_{LR}^{C'}" + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "text", + "content": ". 
These content queries are calculated by FFN layer to predict the coarse cephalometric landmark coordinates " + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "inline_equation", + "content": "\\mu_{R}" + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "text", + "content": " and distribution " + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "inline_equation", + "content": "\\sigma_{R}" + }, + { + "bbox": [ + 130, + 348, + 482, + 494 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 511, + 251, + 522 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 511, + 251, + 522 + ], + "spans": [ + { + "bbox": [ + 132, + 511, + 251, + 522 + ], + "type": "text", + "content": "3.3 Finetune Encoder" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "spans": [ + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "text", + "content": "The finetune encoder employs a layer-to-layer update mechanism to achieve more precise cephalometric landmark detection. The structure of the finetune encoder, as shown in Figure 2c and Figure 3b, also adheres to the typical transformer encoder paradigm, consisting of " + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "text", + "content": " identical layers within the encoder. Unlike the reference encoder module, cephalometric landmark coordinate " + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "inline_equation", + "content": "\\mu_{R}" + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "text", + "content": " is continually updated in each layer of the finetune encoder module. Specifically, we initialize " + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "text", + "content": " cephalometric landmark content queries " + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "inline_equation", + "content": "V_{LA}^{C}" + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "text", + "content": " and flatten the fused feature map " + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "inline_equation", + "content": "F_{u}" + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "text", + "content": " to serve as the image content queries " + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "inline_equation", + "content": "V_{FA}^{C}" + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "text", + "content": ". Drawing inspiration from the positional encoding of the BERT, we generate position queries " + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "inline_equation", + "content": "V_{A}^{P}" + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "text", + "content": ". 
Five parameters are fed into the finetune encoder module, namely fine landmark context queries " + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "inline_equation", + "content": "V_{LA}^{C}" + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "text", + "content": ", image context queries " + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "inline_equation", + "content": "V_{FA}^{C}" + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "text", + "content": ", position queries " + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "inline_equation", + "content": "V_{A}^{P}" + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "text", + "content": ", the fused feature map " + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "inline_equation", + "content": "F_{u}" + }, + { + "bbox": [ + 130, + 533, + 482, + 668 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 407, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 407, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 407, + 91, + 447, + 100 + ], + "type": "text", + "content": "Du-CeLR" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 480, + 99 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 480, + 99 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 480, + 99 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "text", + "content": "and coarse landmark coordinates " + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "inline_equation", + "content": "\\mu_R" + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "text", + "content": ". Within the finetune encoder module, we first sample feature vectors on the fused feature map " + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "inline_equation", + "content": "F_u" + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "text", + "content": " using coarse cephalometric landmark coordinates " + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "inline_equation", + "content": "\\mu_R" + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "text", + "content": ", then add it to the fine landmark queries " + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "inline_equation", + "content": "V_{LA}^{C}" + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "text", + "content": ". We combine content and position queries and feed them into the encoder to calculate the relationships among fine landmark and image context queries. 
Next, to adjust the landmark positions, we use the updated cephalometric landmark content queries " + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "inline_equation", + "content": "V_{LA}^{C'}" + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "text", + "content": " to calculate the " + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "inline_equation", + "content": "(\\Delta x, \\Delta y)" + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "text", + "content": " offsets by FFN layer and add them back to the previous cephalometric landmark coordinates " + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "inline_equation", + "content": "\\mu_R" + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "text", + "content": ". In this way, the finetune encoder module refines the content queries progressively by stacking multiple aforementioned layers, outputting " + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "inline_equation", + "content": "V_{LA}^{C'}" + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "inline_equation", + "content": "V_{FA}^{C'}" + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "text", + "content": ". Finally, the cephalometric landmark content queries " + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "inline_equation", + "content": "V_{LA}^{C'}" + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "text", + "content": ", followed by FFN layer, predicts the fine cephalometric landmark coordinates " + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "inline_equation", + "content": "\\mu_A" + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "text", + "content": " and distribution " + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "inline_equation", + "content": "\\sigma_A" + }, + { + "bbox": [ + 130, + 116, + 482, + 262 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 276, + 231, + 288 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 276, + 231, + 288 + ], + "spans": [ + { + "bbox": [ + 132, + 276, + 231, + 288 + ], + "type": "text", + "content": "3.4 Loss Function" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 293, + 482, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 293, + 482, + 354 + ], + "spans": [ + { + "bbox": [ + 130, + 293, + 482, + 354 + ], + "type": "text", + "content": "As shown in Figure 2, the loss function of our method is composed of two key components: 1) The heatmap loss of the feature extraction module, 2) The cephalometric landmark regression loss for both the reference encoder and fine-tune encoder modules. 
The overall loss function of our method can be formulated as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 224, + 365, + 481, + 378 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 224, + 365, + 481, + 378 + ], + "spans": [ + { + "bbox": [ + 224, + 365, + 481, + 378 + ], + "type": "interline_equation", + "content": "L = \\lambda_ {H M} L _ {H M} + \\lambda_ {R E} L _ {R E} + \\lambda_ {F E} L _ {F E} \\tag {1}", + "image_path": "b9205f86493577680cdf2a9ac0d3d51844d8770d409770dc003282dfceeaafd5.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "spans": [ + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "inline_equation", + "content": "L_{HM}" + }, + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "inline_equation", + "content": "L_{RE}" + }, + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "inline_equation", + "content": "L_{FE}" + }, + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "text", + "content": " represent feature extraction, reference encoder, and finetune encoder module loss functions respectively. " + }, + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "inline_equation", + "content": "\\lambda_{HM}" + }, + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "inline_equation", + "content": "\\lambda_{RE}" + }, + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "text", + "content": ", and, " + }, + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "inline_equation", + "content": "\\lambda_{FE}" + }, + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "text", + "content": " are the hyper-parameters used to balance the three losses, and they are set to 1.0, 1.0, and 1.0, respectively. " + }, + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "inline_equation", + "content": "L_{HM}" + }, + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "text", + "content": " consists of the Dice loss and the MSE loss. 
" + }, + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "inline_equation", + "content": "L_{HM}" + }, + { + "bbox": [ + 131, + 381, + 482, + 441 + ], + "type": "text", + "content": " is defined as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 217, + 445, + 481, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 445, + 481, + 464 + ], + "spans": [ + { + "bbox": [ + 217, + 445, + 481, + 464 + ], + "type": "interline_equation", + "content": "L _ {H M} = D i c e \\left(\\stackrel {\\wedge} {P} _ {h p}, P _ {h p}\\right) + M s e \\left(\\stackrel {\\wedge} {P} _ {h p}, P _ {h p}\\right) \\tag {2}", + "image_path": "7b378fb28772dd7e15823a5039b8a0f70049bb7f69313fe12c4af7dda0335477.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 472, + 481, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 472, + 481, + 521 + ], + "spans": [ + { + "bbox": [ + 130, + 472, + 481, + 521 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 472, + 481, + 521 + ], + "type": "inline_equation", + "content": "\\hat{P}_{hp}" + }, + { + "bbox": [ + 130, + 472, + 481, + 521 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 472, + 481, + 521 + ], + "type": "inline_equation", + "content": "P_{hp}" + }, + { + "bbox": [ + 130, + 472, + 481, + 521 + ], + "type": "text", + "content": " are the prediction heatmap and ground truth heatmap respectively. For the cephalometric landmark regression loss of the reference encoder module, we adopt Residual Log-likelihood Estimation(RLE) loss. The loss is defined as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 253, + 522, + 481, + 536 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 522, + 481, + 536 + ], + "spans": [ + { + "bbox": [ + 253, + 522, + 481, + 536 + ], + "type": "interline_equation", + "content": "L _ {R E} = R L E \\left(\\mu_ {R}, \\sigma_ {R}; \\mu_ {g}\\right) \\tag {3}", + "image_path": "ed613c9a7d8f3b76da30fb123d1b5cd96a5306f2f9fa53be797e95f29656026d.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 538, + 482, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 538, + 482, + 588 + ], + "spans": [ + { + "bbox": [ + 130, + 538, + 482, + 588 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 538, + 482, + 588 + ], + "type": "inline_equation", + "content": "\\mu_R" + }, + { + "bbox": [ + 130, + 538, + 482, + 588 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 538, + 482, + 588 + ], + "type": "inline_equation", + "content": "\\sigma_R" + }, + { + "bbox": [ + 130, + 538, + 482, + 588 + ], + "type": "text", + "content": " are coarse cephalometric landmark coordinate and distribution output by the reference encoder module. " + }, + { + "bbox": [ + 130, + 538, + 482, + 588 + ], + "type": "inline_equation", + "content": "\\mu_g" + }, + { + "bbox": [ + 130, + 538, + 482, + 588 + ], + "type": "text", + "content": " is cephalometric landmark ground truth coordinate. For the cephalometric landmark regression loss of the finetune encoder module, we also adopt RLE loss. 
The loss is defined as follows:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 240, + 592, + 481, + 624 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 240, + 592, + 481, + 624 + ], + "spans": [ + { + "bbox": [ + 240, + 592, + 481, + 624 + ], + "type": "interline_equation", + "content": "L _ {F E} = \sum_ {i = 1} ^ {M} R L E \left(\mu_ {A, i}, \sigma_ {A, i}; \mu_ {g}\right) \tag {4}", + "image_path": "c7e8411003e4c7dd821c7a04f9c11334d6f028311ea4120e3c1dc12020c5b0d0.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "type": "text", + "content": " is the number of finetune encoder layers. " + }, + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "type": "inline_equation", + "content": "\mu_{A,i}" + }, + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "type": "inline_equation", + "content": "\sigma_{A,i}" + }, + { + "bbox": [ + 130, + 629, + 482, + 665 + ], + "type": "text", + "content": " represent the cephalometric landmark coordinates and distribution output by the i-th layer of the finetune encoder module." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 259, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 259, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 259, + 102 + ], + "type": "text", + "content": "C. Dai, Y. Wang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 115, + 230, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 115, + 230, + 129 + ], + "spans": [ + { + "bbox": [ + 132, + 115, + 230, + 129 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 142, + 482, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 142, + 482, + 201 + ], + "spans": [ + { + "bbox": [ + 130, + 142, + 482, + 201 + ], + "type": "text", + "content": "In this section, we assess our method on standard benchmarks for the cephalometric landmark detection task. We first perform several ablation studies to underline the advantage of our proposed method and to establish the optimal setting for hyperparameters. Finally, we compare the performance of our model with state-of-the-art methods." 
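For reference, the composite objective of Eqs. (1)-(4) can be sketched as follows. This is a simplified stand-in rather than the paper's implementation: in particular, the RLE terms are approximated here by a Laplace negative log-likelihood, whereas the paper uses the full Residual Log-likelihood Estimation formulation; the lambda weights default to the values stated above (all 1.0).

import torch
import torch.nn.functional as F

def dice_loss(pred, target, eps=1e-6):
    # Soft Dice over predicted/ground-truth heatmaps of shape (B, K, H, W).
    p, t = pred.flatten(1), target.flatten(1)
    inter = (p * t).sum(1)
    return 1.0 - ((2.0 * inter + eps) / (p.pow(2).sum(1) + t.pow(2).sum(1) + eps)).mean()

def nll_reg_loss(mu, sigma, gt):
    # Laplace negative log-likelihood used here as a stand-in for the RLE loss.
    sigma = sigma.clamp(min=1e-6)
    return (torch.log(sigma) + (mu - gt).abs().sum(-1, keepdim=True) / sigma).mean()

def total_loss(heatmap_pred, heatmap_gt, mu_r, sigma_r, mu_a_per_layer, sigma_a_per_layer,
               mu_gt, lam_hm=1.0, lam_re=1.0, lam_fe=1.0):
    hm = torch.sigmoid(heatmap_pred)
    # Eq. (2): Dice + MSE heatmap supervision on the feature extractor.
    l_hm = dice_loss(hm, heatmap_gt) + F.mse_loss(hm, heatmap_gt)
    # Eq. (3): coarse landmarks from the reference encoder.
    l_re = nll_reg_loss(mu_r, sigma_r, mu_gt)
    # Eq. (4): sum over the M finetune encoder layers.
    l_fe = sum(nll_reg_loss(mu, s, mu_gt) for mu, s in zip(mu_a_per_layer, sigma_a_per_layer))
    # Eq. (1): weighted combination.
    return lam_hm * l_hm + lam_re * l_re + lam_fe * l_fe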
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 220, + 280, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 220, + 280, + 232 + ], + "spans": [ + { + "bbox": [ + 132, + 220, + 280, + 232 + ], + "type": "text", + "content": "4.1 Implementation Details" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 241, + 482, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 241, + 482, + 410 + ], + "spans": [ + { + "bbox": [ + 130, + 241, + 482, + 410 + ], + "type": "text", + "content": "Our model is built on the PyTorch framework. We use ResNet-34, pre-trained on ImageNet, as the backbone. Our architecture includes 4 layers for both the reference encoder and finetune encoder module. All additional layers that we introduce are initialized randomly. The model training and testing are performed on one NVIDIA 3060(12GB) GPU. For model optimization, we use Adam [18], with parameters " + }, + { + "bbox": [ + 130, + 241, + 482, + 410 + ], + "type": "inline_equation", + "content": "\\beta 1 = 0.9" + }, + { + "bbox": [ + 130, + 241, + 482, + 410 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 241, + 482, + 410 + ], + "type": "inline_equation", + "content": "\\beta 2 = 0.999" + }, + { + "bbox": [ + 130, + 241, + 482, + 410 + ], + "type": "text", + "content": ", and a weight decay of " + }, + { + "bbox": [ + 130, + 241, + 482, + 410 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 130, + 241, + 482, + 410 + ], + "type": "text", + "content": ". The batch size is set to 4. The model is trained for 1000 epoch. The initial learning rate is " + }, + { + "bbox": [ + 130, + 241, + 482, + 410 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-4}" + }, + { + "bbox": [ + 130, + 241, + 482, + 410 + ], + "type": "text", + "content": ", and dynamically updated the learning rate using the cosine strategy during the training process. Data augmentation techniques are employed, encompassing random cropping and random rotation. For the random cropping operation, all cephalometric landmarks are preserved during each cropping process. Regarding the random rotation operation, we select a rotation angle range of [-30, 30] degrees. Ultimately, the image is scaled to " + }, + { + "bbox": [ + 130, + 241, + 482, + 410 + ], + "type": "inline_equation", + "content": "1024 \\times 1024" + }, + { + "bbox": [ + 130, + 241, + 482, + 410 + ], + "type": "text", + "content": " for both training and inference of the model." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 429, + 317, + 440 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 429, + 317, + 440 + ], + "spans": [ + { + "bbox": [ + 132, + 429, + 317, + 440 + ], + "type": "text", + "content": "4.2 Dataset and Evaluation Metric" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 449, + 482, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 449, + 482, + 544 + ], + "spans": [ + { + "bbox": [ + 130, + 449, + 482, + 544 + ], + "type": "text", + "content": "ISBI 2015 Challenge Dataset [37]. This is a widely utilized benchmark dataset in the field of cephalometric landmark detection. This dataset comprises 400 cephalometric images, of which 150 are designated for training, 150 for Test 1, and the remaining images for Test 2. 
Each image has been annotated with 19 landmarks by two experienced medical practitioners, and the average of these annotations is taken as the ground truth. This dataset provides a rich array of annotated data, enabling researchers to effectively train and evaluate their cephalometric landmark detection methods." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "type": "text", + "content": "ISBI 2023 Challenge Dataset [25]. This is a recently introduced cephalometric landmark detection dataset, collected from seven distinct imaging devices. Following the training strategy in reference [15], we randomly selected 500 images as training data, with the remaining 200 images utilized for evaluating model performance. Experiments were conducted with k-fold " + }, + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "type": "inline_equation", + "content": "(k = 10)" + }, + { + "bbox": [ + 130, + 545, + 482, + 666 + ], + "type": "text", + "content": " method cross-validation, and the average results were considered as the final outcome. This dataset provides 29 landmarks, but only the same 19 landmarks as in the ISBI 2015 dataset are used, ensuring a fair comparison with other methods. This new dataset offers researchers a more challenging scenario to test the generalization capabilities of their methods across various imaging devices." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 406, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 406, + 91, + 447, + 100 + ], + "type": "text", + "content": "Du-CeLR" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 481, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 186 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 186 + ], + "type": "text", + "content": "Evaluation metric. The evaluation of cephalometric landmark detection models typically employs the Mean Radial Error (MRE) and the Successful Detection Rate (SDR) [7]. MRE is used to calculate the distance error between the predicted cephalometric landmarks and the ground truth, commonly serving as a measure of detection accuracy. 
The calculation method for MRE is defined as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 237, + 189, + 481, + 205 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 189, + 481, + 205 + ], + "spans": [ + { + "bbox": [ + 237, + 189, + 481, + 205 + ], + "type": "interline_equation", + "content": "R _ {i} ^ {j} = \\parallel \\mu_ {A} ^ {j} \\left(x _ {i}, y _ {i}\\right) - \\mu_ {g} ^ {j} \\left(x _ {i}, y _ {i}\\right) \\parallel_ {2} \\tag {5}", + "image_path": "7acc54c960bdad9947f0d4cf97321e459a6b8bdf426e911f15e2210f6671be71.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 253, + 223, + 481, + 257 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 223, + 481, + 257 + ], + "spans": [ + { + "bbox": [ + 253, + 223, + 481, + 257 + ], + "type": "interline_equation", + "content": "M R E = \\frac {1}{T K} \\sum_ {i = 1} ^ {T} \\sum_ {j = 1} ^ {K} R _ {i} ^ {j} \\tag {6}", + "image_path": "b28bd622e827f1c6899eaf4da09b1d43850a66acfd565259de58f207dc3675b3.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "spans": [ + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "R_{i}^{j}" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": " denotes the radial error of the " + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "i - th" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": " landmark in the " + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "j - th" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": " image. " + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "\\mu_A^j (x_i,y_i)" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": " represents the coordinates of the " + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "i - th" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": " cephalometric landmark predicted for the " + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": "-th image. " + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "\\mu_g^j (x_i,y_i)" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": " denotes the ground truth coordinates of the " + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "i - th" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": " cephalometric landmark in the " + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "j - th" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": " image. 
" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": " represents the number of test images, and " + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": " denotes the number of cephalometric landmark in each image. SDR is employed to quantify the discrepancy between the predicted cephalometric landmark and the ground-truth. If the radial error " + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "R_{i}^{j}" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": " is no greater than " + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "z\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": " (where " + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "z = 2\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "2.5\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "3\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "4\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": "), the detection is considered as a successful one (Usually, " + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "inline_equation", + "content": "2\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 266, + 482, + 387 + ], + "type": "text", + "content": " range is acceptable in medical analysis [32,40]). The SDR is defined as follows:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 239, + 396, + 481, + 430 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 396, + 481, + 430 + ], + "spans": [ + { + "bbox": [ + 239, + 396, + 481, + 430 + ], + "type": "interline_equation", + "content": "S D R _ {i} = \\frac {1}{T K} \\sum_ {j = 1} ^ {T} \\sum_ {j = 1} ^ {K} \\left\\{R _ {i} ^ {j} < z \\right\\} \\tag {7}", + "image_path": "3eb3e75cc83f3e32db9236a05013d7c854eebbe7660e9b19478f48c341359196.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 450, + 238, + 462 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 450, + 238, + 462 + ], + "spans": [ + { + "bbox": [ + 132, + 450, + 238, + 462 + ], + "type": "text", + "content": "4.3 Ablation Study" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 475, + 481, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 475, + 481, + 499 + ], + "spans": [ + { + "bbox": [ + 130, + 475, + 481, + 499 + ], + "type": "text", + "content": "In this section, we perform several ablation studies on ISBI 2015 Challenge dataset to illustrate the effectiveness of the proposed component." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 138, + 564, + 478, + 665 + ], + "blocks": [ + { + "bbox": [ + 130, + 521, + 482, + 555 + ], + "lines": [ + { + "bbox": [ + 130, + 521, + 482, + 555 + ], + "spans": [ + { + "bbox": [ + 130, + 521, + 482, + 555 + ], + "type": "text", + "content": "Table 1: Varying different model structures. \"MF\" denotes Multi-level Features. \"HP\" denotes Heatmap. \"RE\" denotes Reference Encoder. \"RL\" denotes RLE Loss. \"FE\" denotes Finetune Encoder." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 138, + 564, + 478, + 665 + ], + "lines": [ + { + "bbox": [ + 138, + 564, + 478, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 564, + 478, + 665 + ], + "type": "table", + "html": "
ID | Baseline | Feature Extractor module | Reference Encoder module | Finetune Encoder module | MRE(mm) | 2mm(SDR%)
MF | HP | RE | RL | FE | RL
1 | 2.8974 | 54.75
2 | 2.2586 | 61.91
3 | 2.5698 | 57.07
4 | 2.0125 | 65.01
5 | 1.6745 | 74.04
6 | 1.2434 | 83.65
7 | 1.1468 | 86.84
8 | 1.1514 | 86.31
9 | 1.0230 | 88.12
10 | 1.0088 | 89.51
", + "image_path": "2355e2e4c65371c831a6a8f5941f04333ec58eaf5171138b3362d5ff0896044c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 259, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 259, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 259, + 102 + ], + "type": "text", + "content": "C. Dai, Y. Wang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 135, + 117, + 220, + 202 + ], + "blocks": [ + { + "bbox": [ + 135, + 117, + 220, + 202 + ], + "lines": [ + { + "bbox": [ + 135, + 117, + 220, + 202 + ], + "spans": [ + { + "bbox": [ + 135, + 117, + 220, + 202 + ], + "type": "image", + "image_path": "9b23a2860ab6266ea539399f6be4a473cb4b3a9bb16667c261b2968306e0883d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 212, + 480, + 232 + ], + "lines": [ + { + "bbox": [ + 132, + 212, + 480, + 232 + ], + "spans": [ + { + "bbox": [ + 132, + 212, + 480, + 232 + ], + "type": "text", + "content": "Fig. 4: Headmap visualization. The attention heatmap come from the feature extraction module." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 221, + 118, + 306, + 203 + ], + "blocks": [ + { + "bbox": [ + 221, + 118, + 306, + 203 + ], + "lines": [ + { + "bbox": [ + 221, + 118, + 306, + 203 + ], + "spans": [ + { + "bbox": [ + 221, + 118, + 306, + 203 + ], + "type": "image", + "image_path": "ee135c863857534f12a34beb6e7e2166a632a2b99df0302be0a671f6f1734b16.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 307, + 118, + 392, + 203 + ], + "blocks": [ + { + "bbox": [ + 307, + 118, + 392, + 203 + ], + "lines": [ + { + "bbox": [ + 307, + 118, + 392, + 203 + ], + "spans": [ + { + "bbox": [ + 307, + 118, + 392, + 203 + ], + "type": "image", + "image_path": "3e16d6d65242bfddf8cf98e1a4804110176c3bf7fb99b2badc7791559221a80e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 393, + 118, + 478, + 202 + ], + "blocks": [ + { + "bbox": [ + 393, + 118, + 478, + 202 + ], + "lines": [ + { + "bbox": [ + 393, + 118, + 478, + 202 + ], + "spans": [ + { + "bbox": [ + 393, + 118, + 478, + 202 + ], + "type": "image", + "image_path": "d7dff5e6889794f02041e3f7a4e59ecc2b7fc3bf7ead126dd5f7e62531ca3ed0.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "spans": [ + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "text", + "content": "Varying the model structures. We conduct experiments to verify the different model structures. All experimental results are presented in Table 1. 
Regarding the feature extractor module, the combination of the multi-level feature (MF) module improves the baseline in MRE and " + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "inline_equation", + "content": "2\\mathrm{mm}" + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "text", + "content": " SDR indicators by " + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "inline_equation", + "content": "0.3276\\mathrm{mm}" + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "inline_equation", + "content": "7.16\\%" + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "text", + "content": " respectively, while the introduction of the heatmap (HP) module improves the baseline by " + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "inline_equation", + "content": "0.3276\\mathrm{mm}" + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "inline_equation", + "content": "2.32\\%" + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "text", + "content": ". When both MF and HP modules are integrated, there is " + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "inline_equation", + "content": "0.8849\\mathrm{mm}" + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "inline_equation", + "content": "10.26\\%" + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "text", + "content": " enhancement over the Baseline, underscoring the significant role of the feature extractor module in accuracy improvement. For the reference encoder module, the addition of reference encoder (RE) components and RLE Loss (RL) elements on the baseline foundation yielded " + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "inline_equation", + "content": "1.654\\mathrm{mm}" + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "inline_equation", + "content": "28.9\\%" + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "text", + "content": " accuracy improvement. When used in conjunction with the feature extractor module, the model's accuracy further increased by " + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "inline_equation", + "content": "0.0996\\mathrm{mm}" + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "inline_equation", + "content": "3.19\\%" + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "text", + "content": ". Regarding the finetune encoder module, its combined use with the feature extractor module led to a " + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "inline_equation", + "content": "1.8744\\mathrm{mm}" + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "inline_equation", + "content": "33.37\\%" + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "text", + "content": " improvement in model accuracy. 
The highest accuracy, reaching " + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "inline_equation", + "content": "1.0088\\mathrm{mm}" + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "inline_equation", + "content": "89.51\\%" + }, + { + "bbox": [ + 133, + 258, + 481, + 497 + ], + "type": "text", + "content": ", was achieved when the finetune encoder module was used in combination with both the reference encoder module and the feature extractor module. This underscores the significant impact of the three proposed modules on enhancing model accuracy. Finally, we visualize the attention heatmap in Figure 4. The heatmap is highly responsive at locations near the cephalometric landmarks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 133, + 498, + 481, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 498, + 481, + 557 + ], + "spans": [ + { + "bbox": [ + 133, + 498, + 481, + 557 + ], + "type": "text", + "content": "Varying the levels of fuse feature map. We explore the impact of feeding different levels of fuse feature maps into the proposed finetune encoder. As shown in Table 2, the performance grows consistently with more levels of fuse feature maps, e.g., " + }, + { + "bbox": [ + 133, + 498, + 481, + 557 + ], + "type": "inline_equation", + "content": "89.20\\%" + }, + { + "bbox": [ + 133, + 498, + 481, + 557 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 133, + 498, + 481, + 557 + ], + "type": "inline_equation", + "content": "89.33\\%" + }, + { + "bbox": [ + 133, + 498, + 481, + 557 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 133, + 498, + 481, + 557 + ], + "type": "inline_equation", + "content": "89.42\\%" + }, + { + "bbox": [ + 133, + 498, + 481, + 557 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 133, + 498, + 481, + 557 + ], + "type": "inline_equation", + "content": "89.51\\%" + }, + { + "bbox": [ + 133, + 498, + 481, + 557 + ], + "type": "text", + "content": " for 2, 3, 4, 5 levels of feature maps on 2mm SDR, respectively." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 133, + 559, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 559, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 133, + 559, + 481, + 665 + ], + "type": "text", + "content": "Varying parameter of encoder module. We study the impact of encoder module on model performance from two aspects: the number of layers and feature dimensions. To simple the validation approach, the reference encoder and finetune encoder modules are set to the same number of layers and dimensions. First, we investigate the effects of altering the dimension of the encoder module. As illustrated in Table 3, there is a discernible enhancement in model efficacy concomitant with an increase in the dimensions of encoder layers. The peak performance of the model is attained when the dimension is augmented to 512. Furthermore, we conduct experiments by varying the number of encoder layers." 
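As a quick cross-check, the improvement figures quoted in the ablation discussion above follow directly from the Table 1 rows (baseline: 2.8974 mm / 54.75%); the row correspondence is inferred from the matching deltas, since the per-row configuration marks did not survive extraction:
MF + HP (row 4): 2.8974 - 2.0125 = 0.8849 mm, 65.01 - 54.75 = 10.26%
RE + RL (row 6): 2.8974 - 1.2434 = 1.6540 mm, 83.65 - 54.75 = 28.90%
FE + feature extractor (row 9): 2.8974 - 1.0230 = 1.8744 mm, 88.12 - 54.75 = 33.37%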
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 407, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 407, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 407, + 91, + 447, + 100 + ], + "type": "text", + "content": "Du-CeLR" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 480, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 480, + 140 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 480, + 140 + ], + "type": "text", + "content": "As shown in Table 4, the performance grows at the first four layers and saturates at the fifth decoder layer." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 138, + 171, + 306, + 232 + ], + "blocks": [ + { + "bbox": [ + 132, + 149, + 482, + 171 + ], + "lines": [ + { + "bbox": [ + 132, + 149, + 482, + 171 + ], + "spans": [ + { + "bbox": [ + 132, + 149, + 482, + 171 + ], + "type": "text", + "content": "Table 2: Varying the scale levels of fuse Table 3: Varying feature queries dimension feature map for feature extraction module. sions of encoder module." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 138, + 171, + 306, + 232 + ], + "lines": [ + { + "bbox": [ + 138, + 171, + 306, + 232 + ], + "spans": [ + { + "bbox": [ + 138, + 171, + 306, + 232 + ], + "type": "table", + "html": "
F5 | F4 | F3 | F2 | MRE(mm) | SDR%
2mm | 2.5mm | 3mm | 4mm
1.0181 | 89.20 | 93.48 | 96.18 | 98.41
✓ ✓ | 1.0156 | 89.33 | 93.49 | 96.32 | 98.41
✓ ✓ ✓ | 1.0113 | 89.42 | 93.50 | 96.38 | 98.54
✓ ✓ ✓ ✓ | 1.0088 | 89.51 | 93.54 | 96.42 | 98.56
", + "image_path": "e3c60ae3c5bf1d035e965205e899c8d3e23d18e366f942610ae9542ba17a2414.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 326, + 171, + 468, + 232 + ], + "blocks": [ + { + "bbox": [ + 326, + 171, + 468, + 232 + ], + "lines": [ + { + "bbox": [ + 326, + 171, + 468, + 232 + ], + "spans": [ + { + "bbox": [ + 326, + 171, + 468, + 232 + ], + "type": "table", + "html": "
Dim | MRE(mm) | SDR%
2mm | 2.5mm | 3mm | 4mm
128 | 1.0201 | 89.03 | 93.26 | 95.76 | 98.17
256 | 1.0194 | 89.32 | 93.35 | 96.02 | 98.32
512 | 1.0088 | 89.51 | 93.58 | 96.42 | 98.56
768 | 1.0091 | 89.47 | 93.61 | 96.39 | 98.53
", + "image_path": "67e5ac0f3468fc60502a5c0f1ba4ab7e8c0265e84d210091f64d17b1958abd94.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 241, + 480, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 241, + 480, + 324 + ], + "spans": [ + { + "bbox": [ + 130, + 241, + 480, + 324 + ], + "type": "text", + "content": "Varying the input image resolutions. We undertake experimental investigations to ascertain the robustness of our method across varying input resolutions. As depicted in Table 5, there is a significant enhancement in the performance of the model concomitant with an increase in the resolution of input images. When the input image resolution is " + }, + { + "bbox": [ + 130, + 241, + 480, + 324 + ], + "type": "inline_equation", + "content": "1024 \\times 1024" + }, + { + "bbox": [ + 130, + 241, + 480, + 324 + ], + "type": "text", + "content": ", the model reaches " + }, + { + "bbox": [ + 130, + 241, + 480, + 324 + ], + "type": "inline_equation", + "content": "1.0088\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 241, + 480, + 324 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 241, + 480, + 324 + ], + "type": "inline_equation", + "content": "89.51\\%" + }, + { + "bbox": [ + 130, + 241, + 480, + 324 + ], + "type": "text", + "content": " in MRE and " + }, + { + "bbox": [ + 130, + 241, + 480, + 324 + ], + "type": "inline_equation", + "content": "2\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 241, + 480, + 324 + ], + "type": "text", + "content": " SDR metrics respectively. A further escalation in input image resolution results in a decline for model performance." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 147, + 356, + 296, + 430 + ], + "blocks": [ + { + "bbox": [ + 132, + 334, + 482, + 354 + ], + "lines": [ + { + "bbox": [ + 132, + 334, + 482, + 354 + ], + "spans": [ + { + "bbox": [ + 132, + 334, + 482, + 354 + ], + "type": "text", + "content": "Table 4: Varying the numbers of encoder Table 5: Varying the input image resolutions." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 147, + 356, + 296, + 430 + ], + "lines": [ + { + "bbox": [ + 147, + 356, + 296, + 430 + ], + "spans": [ + { + "bbox": [ + 147, + 356, + 296, + 430 + ], + "type": "table", + "html": "
Num | MRE(mm) | SDR%
2mm | 2.5mm | 3mm | 4mm
1 | 1.0835 | 87.23 | 93.05 | 95.51 | 97.96
2 | 1.0247 | 88.98 | 93.31 | 95.92 | 98.32
3 | 1.0137 | 89.46 | 93.47 | 96.28 | 98.47
4 | 1.0088 | 89.51 | 93.54 | 96.42 | 98.56
5 | 1.0091 | 89.48 | 93.54 | 96.45 | 98.59
", + "image_path": "713b35652cc43b1d0a753ed70339a27d4a60a459b6f9b5a3902bd58615474bea.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 310, + 356, + 482, + 430 + ], + "blocks": [ + { + "bbox": [ + 310, + 356, + 482, + 430 + ], + "lines": [ + { + "bbox": [ + 310, + 356, + 482, + 430 + ], + "spans": [ + { + "bbox": [ + 310, + 356, + 482, + 430 + ], + "type": "table", + "html": "
Resolution | MRE(mm) | SDR%
2mm | 2.5mm | 3mm | 4mm
256×256 | 1.2012 | 84.56 | 91.79 | 95.44 | 98.49
512×512 | 1.0674 | 88.07 | 93.30 | 96.25 | 98.57
768×768 | 1.0129 | 89.40 | 93.33 | 96.07 | 98.60
1024×1024 | 1.0088 | 89.51 | 93.54 | 96.42 | 98.56
1280×1280 | 1.0153 | 89.31 | 93.51 | 95.44 | 98.32
", + "image_path": "98a72a0cd8fc6ac7f623e8063485da54b37faa63df7861b26fbca06c899ca41f.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 444, + 223, + 455 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 444, + 223, + 455 + ], + "spans": [ + { + "bbox": [ + 132, + 444, + 223, + 455 + ], + "type": "text", + "content": "4.4 Main Result" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 462, + 480, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 462, + 480, + 533 + ], + "spans": [ + { + "bbox": [ + 130, + 462, + 480, + 533 + ], + "type": "text", + "content": "We evaluated our method on two cephalometric landmark datasets: ISBI 2015 Challenge [37] and ISBI 2023 Challenge datasets [25]. The final results are presented in Tables 6,7,8. The proposed approach achieved the least Mean Radical Error (MRE) and the highest " + }, + { + "bbox": [ + 130, + 462, + 480, + 533 + ], + "type": "inline_equation", + "content": "2\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 462, + 480, + 533 + ], + "type": "text", + "content": " Success Detection Rate (SDR), which is considered as the clinically accepted. Moreover, our method achieves end-to-end training and prediction for cephalometric landmarks." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 534, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 534, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 534, + 481, + 666 + ], + "type": "text", + "content": "ISBI 2015 Challenge test1. Table 6 presents the evaluation results for the ISBI 2015 Challenge test1 dataset. These state-of-the-art methods can be categorized into heatmap-based and regression-based methods. Our method demonstrates clear superiority over heatmap-based methods. Compared to the best heatmap-based method [2], our method achieves improvements of " + }, + { + "bbox": [ + 130, + 534, + 481, + 666 + ], + "type": "inline_equation", + "content": "0.11\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 534, + 481, + 666 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 534, + 481, + 666 + ], + "type": "inline_equation", + "content": "1.48\\%" + }, + { + "bbox": [ + 130, + 534, + 481, + 666 + ], + "type": "text", + "content": " respectively in MRE and the " + }, + { + "bbox": [ + 130, + 534, + 481, + 666 + ], + "type": "inline_equation", + "content": "2\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 534, + 481, + 666 + ], + "type": "text", + "content": " SDR metrics. Additionally, compared to the best regression-based method, our method achieves improvements of " + }, + { + "bbox": [ + 130, + 534, + 481, + 666 + ], + "type": "inline_equation", + "content": "1.19\\%" + }, + { + "bbox": [ + 130, + 534, + 481, + 666 + ], + "type": "text", + "content": " on the " + }, + { + "bbox": [ + 130, + 534, + 481, + 666 + ], + "type": "inline_equation", + "content": "2\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 534, + 481, + 666 + ], + "type": "text", + "content": " SDR metrics. Moreover, compared to the best approach, our method exhibits a significant advantage in terms of GFLOPs. 
In addition, compared to other low-resolution methods, our method has the lowest GFLOPs of only 23.0, while the " + }, + { + "bbox": [ + 130, + 534, + 481, + 666 + ], + "type": "inline_equation", + "content": "2\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 534, + 481, + 666 + ], + "type": "text", + "content": " SDR reaches " + }, + { + "bbox": [ + 130, + 534, + 481, + 666 + ], + "type": "inline_equation", + "content": "88.07\\%" + }, + { + "bbox": [ + 130, + 534, + 481, + 666 + ], + "type": "text", + "content": ", which is superior to the other" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 258, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 258, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 258, + 102 + ], + "type": "text", + "content": "C. Dai, Y. Wang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 136, + 114, + 211, + 215 + ], + "blocks": [ + { + "bbox": [ + 136, + 114, + 211, + 215 + ], + "lines": [ + { + "bbox": [ + 136, + 114, + 211, + 215 + ], + "spans": [ + { + "bbox": [ + 136, + 114, + 211, + 215 + ], + "type": "image", + "image_path": "f84675045c7467992bfbf029391e321cab81bb3bee095df7e9ec5d81b656dc83.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 144, + 216, + 249, + 224 + ], + "lines": [ + { + "bbox": [ + 144, + 216, + 249, + 224 + ], + "spans": [ + { + "bbox": [ + 144, + 216, + 249, + 224 + ], + "type": "text", + "content": "(a) ISBI 2015 Challenge test1" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 211, + 115, + 257, + 215 + ], + "blocks": [ + { + "bbox": [ + 211, + 115, + 257, + 215 + ], + "lines": [ + { + "bbox": [ + 211, + 115, + 257, + 215 + ], + "spans": [ + { + "bbox": [ + 211, + 115, + 257, + 215 + ], + "type": "image", + "image_path": "96a60d3f51f97697365f9129c7e16828285d38b9b52a19b1ff374c982cfc87aa.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 258, + 114, + 323, + 215 + ], + "blocks": [ + { + "bbox": [ + 258, + 114, + 323, + 215 + ], + "lines": [ + { + "bbox": [ + 258, + 114, + 323, + 215 + ], + "spans": [ + { + "bbox": [ + 258, + 114, + 323, + 215 + ], + "type": "image", + "image_path": "69c709f56919e3106140e022a2d76da06f1a9b513b71628d8a9015e9ebb88024.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 234, + 480, + 289 + ], + "lines": [ + { + "bbox": [ + 132, + 234, + 480, + 289 + ], + "spans": [ + { + "bbox": [ + 132, + 234, + 480, + 289 + ], + "type": "text", + "content": "Fig. 5: Qualitative detection results on ISBI 2015 and 2023 Challenge datasets. (a) and (b) correspond the detection results for the ISBI 2015 Challenge test1 and test2. (c) depicts the detection outcomes for the ISBI 2023 Challenge. The blue landmarks represent results annotated by medical professionals, while the red landmarks indicate the outcomes predicted by the model." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 323, + 115, + 369, + 215 + ], + "blocks": [ + { + "bbox": [ + 260, + 216, + 366, + 225 + ], + "lines": [ + { + "bbox": [ + 260, + 216, + 366, + 225 + ], + "spans": [ + { + "bbox": [ + 260, + 216, + 366, + 225 + ], + "type": "text", + "content": "(b) ISBI 2015 Challenge test2" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 323, + 115, + 369, + 215 + ], + "lines": [ + { + "bbox": [ + 323, + 115, + 369, + 215 + ], + "spans": [ + { + "bbox": [ + 323, + 115, + 369, + 215 + ], + "type": "image", + "image_path": "1bc7c22a5cad2e8f9544b93a63a134fb58d958eca77bcc026b41b7409bd9cff0.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 369, + 115, + 432, + 215 + ], + "blocks": [ + { + "bbox": [ + 369, + 115, + 432, + 215 + ], + "lines": [ + { + "bbox": [ + 369, + 115, + 432, + 215 + ], + "spans": [ + { + "bbox": [ + 369, + 115, + 432, + 215 + ], + "type": "image", + "image_path": "55fb6e7001b9ed6cb435b90622122a995f57f3aea94b8d5906abf1e5a28e9e78.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 380, + 216, + 466, + 225 + ], + "lines": [ + { + "bbox": [ + 380, + 216, + 466, + 225 + ], + "spans": [ + { + "bbox": [ + 380, + 216, + 466, + 225 + ], + "type": "text", + "content": "(c) ISBI 2023 Challenge" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 432, + 115, + 477, + 215 + ], + "blocks": [ + { + "bbox": [ + 432, + 115, + 477, + 215 + ], + "lines": [ + { + "bbox": [ + 432, + 115, + 477, + 215 + ], + "spans": [ + { + "bbox": [ + 432, + 115, + 477, + 215 + ], + "type": "image", + "image_path": "b1cbf1edff9f9048417490340bbfbda62f8e196527928a3ebc5e0734975aa7b8.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 135, + 335, + 483, + 520 + ], + "blocks": [ + { + "bbox": [ + 132, + 303, + 480, + 326 + ], + "lines": [ + { + "bbox": [ + 132, + 303, + 480, + 326 + ], + "spans": [ + { + "bbox": [ + 132, + 303, + 480, + 326 + ], + "type": "text", + "content": "Table 6: Quantitative results on the ISBI 2015 Challenge test1 dataset. * denotes other methods we implemented. Bold represents the best result." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 335, + 483, + 520 + ], + "lines": [ + { + "bbox": [ + 135, + 335, + 483, + 520 + ], + "spans": [ + { + "bbox": [ + 135, + 335, + 483, + 520 + ], + "type": "table", + "html": "
Method | Backbone | Resolution | GFLOPs | MRE(mm) | SDR%
2mm | 2.5mm | 3mm | 4mm
Heatmap-based Methods
Chen R et al. [6] | ResNet50 | 800×640 | 215.7 | 1.17 | 86.67 | 92.67 | 95.54 | 98.53
Zhong Z et al. [40] | U-Net | 290×290+19×100×100 | 92.2 | 1.12 | 86.91 | 91.82 | 94.88 | 97.90
CephaNN [26] | ResNeXt50 | 800×640 | 982.8 | 1.15 | 87.61 | 93.16 | 96.35 | 98.74
Yao J et al. [34] | ResNet18 | 576×512+19×96×96 | 40.1 | 1.14 | 86.84 | 93.02 | 95.43 | 98.95
Ao Y et al. [2] | Densenet121 | 800×640 | 157.2 | 1.12 | 88.03 | 92.73 | 95.96 | 98.48
Huang K et al. [13] | - | - | - | 1.09 | 87.87 | 92.45 | 95.54 | 98.59
SimCC* [21] | HRNet48 | 800×640 | 164.9 | 1.12 | 87.16 | 91.96 | 95.37 | 98.18
Regression-based Methods
Gilmour L et al. [11] | ResNet34 | 2432×1920 | 220.2 | 1.01 | 88.32 | 93.12 | 96.14 | 98.63
Song Y et al. [29] | ResNet50 | 256×256+19×256×256 | 102.5 | 1.08 | 86.40 | 91.70 | 94.80 | 97.80
Song Y et al. [30] | U-Net | 480×387 | 286.8 | 1.19 | 85.20 | 91.20 | 94.40 | 97.20
Zeng M et al. [36] | - | - | - | 1.34 | 81.37 | 89.09 | 93.79 | 97.86
King C H et al. [17] | - | - | - | 1.17 | 86.14 | 91.72 | 94.91 | 97.96
Hong W et al. [12] | - | - | - | 1.12 | 85.26 | 90.67 | 93.54 | 97.19
Poseur* [24] | ResNet50 | 800×640 | 46.1 | 1.14 | 86.56 | 91.09 | 94.00 | 97.23
Ours | ResNet34 | 512×512 | 23.0 | 1.07 | 88.07 | 93.30 | 96.25 | 98.57
Ours | ResNet34 | 1024×1024 | 95.0 | 1.01 | 89.51 | 93.54 | 96.42 | 98.56
", + "image_path": "e8357a8061d49af14fc430c2ceada9674cd33b0afb98b0f0a52e4218fe3df37c.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 130, + 544, + 479, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 544, + 479, + 568 + ], + "spans": [ + { + "bbox": [ + 130, + 544, + 479, + 568 + ], + "type": "text", + "content": "methods. The qualitative detection results of the ISBI 2015 Challenge test1 dataset are displayed in Figure 4." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 569, + 480, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 569, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 569, + 480, + 665 + ], + "type": "text", + "content": "ISBI 2015 Challenge test2. The evaluation results for the ISBI 2015 Challenge test2 dataset are presented in Table 7. Our method outperforms heatmap-based methods by significant margins. Compared to best method [13], our method achieves an increase of " + }, + { + "bbox": [ + 130, + 569, + 480, + 665 + ], + "type": "inline_equation", + "content": "0.07\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 569, + 480, + 665 + ], + "type": "text", + "content": " in MRE and " + }, + { + "bbox": [ + 130, + 569, + 480, + 665 + ], + "type": "inline_equation", + "content": "0.48\\%" + }, + { + "bbox": [ + 130, + 569, + 480, + 665 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 130, + 569, + 480, + 665 + ], + "type": "inline_equation", + "content": "2\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 569, + 480, + 665 + ], + "type": "text", + "content": " SDR. In addition, We introduce an end-to-end human keypoint detection method into the cephalometric landmark detection task, which is implemented based on the deformable decoder architecture. Experiments show that our method is significantly better than the human keypoint method in accuracy. Moreover, our" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 407, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 407, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 407, + 91, + 447, + 100 + ], + "type": "text", + "content": "Du-CeLR" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 177 + ], + "type": "text", + "content": "method is more convenient to deploy. Finally, the performance of the released methods on ISBI 2015 Challenge Test1 dataset are all better than Test2. It seems that the data distribution of Test1 dataset is more consistent with Train dataset. Qualitative detection results of our method on the ISBI 2015 Challenge test2 dataset can be found in Figure 5b." 
+ } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 155, + 208, + 318, + 350 + ], + "blocks": [ + { + "bbox": [ + 146, + 185, + 323, + 207 + ], + "lines": [ + { + "bbox": [ + 146, + 185, + 323, + 207 + ], + "spans": [ + { + "bbox": [ + 146, + 185, + 323, + 207 + ], + "type": "text", + "content": "Table 7: Quantitative results on the ISBI 2015 Challenge test2 dataset." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 155, + 208, + 318, + 350 + ], + "lines": [ + { + "bbox": [ + 155, + 208, + 318, + 350 + ], + "spans": [ + { + "bbox": [ + 155, + 208, + 318, + 350 + ], + "type": "table", + "html": "
Method | MRE(mm) | SDR%
2mm | 2.5mm | 3mm | 4mm
Heatmap-based Methods
Chen R et al. [6] | 1.48 | 75.05 | 82.84 | 88.53 | 95.05
Zhong Z et al. [40] | 1.42 | 76.00 | 82.90 | 88.74 | 94.32
CephaNN [26] | 1.43 | 76.32 | 82.95 | 87.95 | 94.63
Yao J et al. [34] | 1.48 | 75.44 | 82.03 | 86.65 | 95.12
Ao Y et al. [2] | 1.42 | 77.00 | 84.42 | 89.47 | 95.21
Huang K et al. [13] | 1.34 | 79.05 | 87.95 | 89.79 | 95.05
SimCC* [21] | 1.54 | 74.16 | 80.68 | 86.32 | 94.05
Regression-based Methods
Gilmour L et al. [11] | 1.33 | 77.05 | 83.16 | 88.84 | 94.89
Song Y et al. [29] | 1.54 | 74.00 | 81.30 | 87.50 | 94.30
Song Y et al. [30] | 1.64 | 72.20 | 79.50 | 85.00 | 93.50
Zeng M et al. [36] | 1.64 | 70.58 | 79.53 | 86.05 | 93.32
King C H et al. [17] | 1.50 | 74.58 | 81.74 | 87.26 | 94.73
Hong W et al. [12] | 1.28 | 79.24 | 85.32 | 90.47 | 96.32
Poseur* [24] | 1.48 | 74.42 | 81.37 | 86.68 | 93.63
Ours | 1.27 | 79.53 | 86.47 | 91.11 | 96.32
", + "image_path": "e850726578b7f0fbe1d8386c72eafd277cd432170d298e1f4b86aedaceef7f8d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 325, + 247, + 491, + 311 + ], + "blocks": [ + { + "bbox": [ + 323, + 225, + 465, + 247 + ], + "lines": [ + { + "bbox": [ + 323, + 225, + 465, + 247 + ], + "spans": [ + { + "bbox": [ + 323, + 225, + 465, + 247 + ], + "type": "text", + "content": "Table 8: Quantitative results on the ISBI 2023 Challenge." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 325, + 247, + 491, + 311 + ], + "lines": [ + { + "bbox": [ + 325, + 247, + 491, + 311 + ], + "spans": [ + { + "bbox": [ + 325, + 247, + 491, + 311 + ], + "type": "table", + "html": "
Method | MRE(mm) | SDR%
2mm | 2.5mm | 3mm | 4mm
Jin H et al. [15] | 1.2200 | 83.76 | 89.71 | 92.79 | 96.08
Poseur* [24] | 0.9982 | 88.51 | 92.82 | 95.37 | 97.79
SimCC* [21] | 1.0795 | 88.39 | 93.12 | 95.31 | 97.81
Huang K et al.* [13] | 1.0747 | 87.87 | 92.52 | 94.87 | 97.42
Gilmour L et al.* [11] | 0.9793 | 89.37 | 93.47 | 95.97 | 97.42
Ours | 0.9372 | 90.68 | 94.24 | 95.97 | 97.89
", + "image_path": "5269c341cafec6381c72a670de5c3ea1e56df004f25ba699681e5b8dc28cf6e1.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "spans": [ + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "text", + "content": "ISBI 2023 Challenge test. Regarding the ISBI 2023 Challenge test dataset, as illustrated in Table 8, Our method achieves the best performance on all metrics. Compared to the best-performing method [11], our approach significantly reduces the Mean Relative Error (MRE) from " + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "inline_equation", + "content": "0.9793\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "inline_equation", + "content": "0.9372\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "text", + "content": " and enhances the " + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "inline_equation", + "content": "2\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "text", + "content": " Success Detection Rate (SDR) from " + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "inline_equation", + "content": "89.37\\%" + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "inline_equation", + "content": "90.68\\%" + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "text", + "content": ". Moreover, in comparison with transformer-based methods, our approach demonstrates a lead of " + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "inline_equation", + "content": "0.061\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "text", + "content": " in MRE and " + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "inline_equation", + "content": "2.17\\%" + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "inline_equation", + "content": "2\\mathrm{mm}" + }, + { + "bbox": [ + 130, + 371, + 482, + 479 + ], + "type": "text", + "content": " SDR, respectively. Lastly, the qualitative detection results of our method on the ISBI 2023 Challenge test dataset are depicted in Figure 5c." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 497, + 220, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 497, + 220, + 510 + ], + "spans": [ + { + "bbox": [ + 132, + 497, + 220, + 510 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 521, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 521, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 521, + 482, + 666 + ], + "type": "text", + "content": "In this paper, we propose a novel regression model for cephalometric landmark detection for high-resolution X-ray image. This model only employs the encoder module within the transformer framework to construct the relationship between landmark features and image features. It is capable of regressing cephalometric landmark coordinate from coarse to fine and completes end-to-end training. 
Moreover, our model, compared to heatmap-based method, boasts low memory consumption and robustness against missing landmark. It offers a more straightforward end-to-end design compared to current regression-based method, performing one-time landmark detection on high-resolution X-ray images. Extensive experiments on the ISBI2015 and ISBI2023 datasets demonstrate that our method can achieve state-of-the-art performance compare with regression-based and heatmap-based methods." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 90, + 259, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 90, + 259, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 90, + 259, + 102 + ], + "type": "text", + "content": "C. Dai, Y. Wang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 140, + 480, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 140, + 480, + 163 + ], + "spans": [ + { + "bbox": [ + 132, + 140, + 480, + 163 + ], + "type": "text", + "content": "This work was supported by the National Natural Science Foundation of China (62122059, 82330064)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 182, + 197, + 194 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 182, + 197, + 194 + ], + "spans": [ + { + "bbox": [ + 133, + 182, + 197, + 194 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 205, + 481, + 666 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 138, + 205, + 481, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 205, + 481, + 239 + ], + "spans": [ + { + "bbox": [ + 138, + 205, + 481, + 239 + ], + "type": "text", + "content": "1. Albarakati, S., Kula, K., Ghoneima, A.: The reliability and reproducibility of cephalometric measurements: a comparison of conventional and digital methods. Dentomaxillofacial Radiology 41(1), 11-17 (2012)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 239, + 481, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 239, + 481, + 261 + ], + "spans": [ + { + "bbox": [ + 138, + 239, + 481, + 261 + ], + "type": "text", + "content": "2. Ao Y, W.H.: Feature aggregation and refinement network for 2d anatomical landmark detection. Journal of Digital Imaging 36(2), 547-561 (2023)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 261, + 481, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 261, + 481, + 294 + ], + "spans": [ + { + "bbox": [ + 138, + 261, + 481, + 294 + ], + "type": "text", + "content": "3. B. Ibragimov, B. Likar, F.P., Vrtovec, T.: Automatic cephalometric x-ray landmark detection by applying game theory and random forests. In Proc. ISBI Int. Symp. 
on Biomedical Imaging (2014)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 294, + 481, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 294, + 481, + 316 + ], + "spans": [ + { + "bbox": [ + 138, + 294, + 481, + 316 + ], + "type": "text", + "content": "4. Cardillo, J., Sid-Ahmed, M.A.: An image processing system for locating craniofacial landmarks. IEEE transactions on medical imaging 13(2), 275-289 (1994)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 316, + 481, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 316, + 481, + 348 + ], + "spans": [ + { + "bbox": [ + 138, + 316, + 481, + 348 + ], + "type": "text", + "content": "5. Carion N, Massa F, S.G.e.a.: End-to-end object detection with transformers. European conference on computer vision. Cham: Springer International Publishing pp. 213-229 (2020)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 349, + 481, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 349, + 481, + 403 + ], + "spans": [ + { + "bbox": [ + 138, + 349, + 481, + 403 + ], + "type": "text", + "content": "6. Chen, R., Ma, Y., Chen, N., Lee, D., Wang, W.: Cephalometric landmark detection by attentive feature pyramid fusion and regression-voting. In: Medical Image Computing and Computer Assisted Intervention-MICCAI 2019: 22nd International Conference, Shenzhen, China, October 13-17, 2019, Proceedings, Part III 22. pp. 873-881. Springer (2019)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 403, + 481, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 403, + 481, + 436 + ], + "spans": [ + { + "bbox": [ + 138, + 403, + 481, + 436 + ], + "type": "text", + "content": "7. Devereux, L., Moles, D., Cunningham, S.J., McKnight, M.: How important are lateral cephalometric radiographs in orthodontic treatment planning? American Journal of Orthodontics and Dentofacial Orthopedics 139(2), e175-e181 (2011)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 436, + 481, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 436, + 481, + 468 + ], + "spans": [ + { + "bbox": [ + 138, + 436, + 481, + 468 + ], + "type": "text", + "content": "8. Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 469, + 481, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 469, + 481, + 491 + ], + "spans": [ + { + "bbox": [ + 138, + 469, + 481, + 491 + ], + "type": "text", + "content": "9. Dosovitskiy A, Beyer L, K.A.e.a.: An image is worth 16x16 words: Transformers for image recognition at scale. ArXiv preprint arXiv:2010.11929 (2020)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 491, + 481, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 491, + 481, + 512 + ], + "spans": [ + { + "bbox": [ + 138, + 491, + 481, + 512 + ], + "type": "text", + "content": "0. El-Feghi, M.S.A., Ahmadi, M.: Automatic localization of craniofacial landmarks for assisted cephalometry. 
Pattern Recognition 37(3), 609-621 (2004)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 513, + 481, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 513, + 481, + 534 + ], + "spans": [ + { + "bbox": [ + 138, + 513, + 481, + 534 + ], + "type": "text", + "content": "1. Gilmour L, R.N.: Locating cephalometric x-ray landmarks with foveated pyramid attention. Medical Imaging With Deep Learning. PMLR pp. 262-276 (2020)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 534, + 481, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 534, + 481, + 578 + ], + "spans": [ + { + "bbox": [ + 138, + 534, + 481, + 578 + ], + "type": "text", + "content": "2. Hong W, Kim S M, C.J.e.a.: Deep reinforcement learning using a multi-scale agent with a normalized reward strategy for automatic cephalometric landmark detection. 2023 4th International Conference on Big Data Analytics and Practices pp. 1-6 (2023)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 578, + 481, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 578, + 481, + 600 + ], + "spans": [ + { + "bbox": [ + 138, + 578, + 481, + 600 + ], + "type": "text", + "content": "3. Huang K, F.F.: An intelligent shooting reward learning network scheme for medical image landmark detection. Applied Sciences 12(20), 10190 (2022)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 138, + 601, + 481, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 601, + 481, + 632 + ], + "spans": [ + { + "bbox": [ + 138, + 601, + 481, + 632 + ], + "type": "text", + "content": "4. Indermun S, Shaik S, N.C.J.K.M.R.: Human examination and artificial intelligence in cephalometric landmark detection—is ai ready to take over? Dentomaxillofac Radiol 10.1259/dmfr.20220362 (2023)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 138, + 632, + 481, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 632, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 138, + 632, + 481, + 666 + ], + "type": "text", + "content": "5. Jin H, Che H, C.H.: Unsupervised domain adaptation for anatomical landmark detection. International Conference on Medical Image Computing and Computer-Assisted Intervention. Cham: Springer Nature Switzerland pp. 695-705 (2023)" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 406, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 406, + 91, + 447, + 100 + ], + "type": "text", + "content": "Du-CeLR" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 133, + 116, + 480, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 116, + 480, + 138 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 480, + 138 + ], + "type": "text", + "content": "16. 
Kaiming He, Xiangyu Zhang, S.R.J.S.: Deep residual learning for image recognition. ArXiv preprint arXiv:1512.03385 (2015)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 138, + 480, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 138, + 480, + 171 + ], + "spans": [ + { + "bbox": [ + 133, + 138, + 480, + 171 + ], + "type": "text", + "content": "17. King C H, Wang Y L, L.W.Y.e.a.: Automatic cephalometric landmark detection on x-ray images using object detection. 2022 IEEE 19th International Symposium on Biomedical Imaging (ISBI) pp. 1-4 (2022)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 171, + 480, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 171, + 480, + 192 + ], + "spans": [ + { + "bbox": [ + 133, + 171, + 480, + 192 + ], + "type": "text", + "content": "18. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. ArXiv preprint arXiv:1412.6980 (2014)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 133, + 192, + 480, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 192, + 480, + 224 + ], + "spans": [ + { + "bbox": [ + 133, + 192, + 480, + 224 + ], + "type": "text", + "content": "19. Lee H, Park M, K.J.: Cephalometric landmark detection in dental x-ray images using convolutional neural networks. Medical imaging 2017: Computer-aided diagnosis 10134, 494-499 (2017)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 224, + 480, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 224, + 480, + 256 + ], + "spans": [ + { + "bbox": [ + 132, + 224, + 480, + 256 + ], + "type": "text", + "content": "20. Li, H., Guo, Z., Rhee, S.M., Han, S., Han, J.J.: Towards accurate facial landmark detection via cascaded transformers. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4176-4185 (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 257, + 480, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 257, + 480, + 289 + ], + "spans": [ + { + "bbox": [ + 132, + 257, + 480, + 289 + ], + "type": "text", + "content": "21. Li Y, Yang S, L.P.e.a.: Simcc: A simple coordinate classification perspective for human pose estimation. European Conference on Computer Vision. Cham: Springer Nature Switzerland 89-106 (2022)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 289, + 480, + 321 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 289, + 480, + 321 + ], + "spans": [ + { + "bbox": [ + 132, + 289, + 480, + 321 + ], + "type": "text", + "content": "22. Lindner, C., Cootes, T.: Fully automatic cephalometric evaluation using random forest regression-voting. IEEE International Symposium on Biomedical Imaging (ISBI) (2015)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 321, + 480, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 321, + 480, + 342 + ], + "spans": [ + { + "bbox": [ + 132, + 321, + 480, + 342 + ], + "type": "text", + "content": "23. Mamta Juneja, Poojita Garg, R.K.e.a.: A review on cephalometric landmark detection techniques. 
Biomedical Signal Processing and Control 66(102486) (2021)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 342, + 480, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 342, + 480, + 374 + ], + "spans": [ + { + "bbox": [ + 132, + 342, + 480, + 374 + ], + "type": "text", + "content": "24. Mao, W., Ge, Y., Shen, C., Tian, Z., Wang, X., Wang, Z., Hengel, A.v.d.: Poseur: Direct human pose regression with transformers. Proceedings of the European Conference on Computer Vision (ECCV) (October 2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 374, + 480, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 374, + 480, + 396 + ], + "spans": [ + { + "bbox": [ + 132, + 374, + 480, + 396 + ], + "type": "text", + "content": "25. Muhammad Anwaar Khalid, K.Z.e.a.: Cepha29: Automatic cephalometric landmark detection challenge 2023. ArXiv preprint arXiv:2212.04808 (2022)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 396, + 480, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 396, + 480, + 418 + ], + "spans": [ + { + "bbox": [ + 132, + 396, + 480, + 418 + ], + "type": "text", + "content": "26. Qian J, Luo W, C.M.e.a.: Cephann: a multi-head attention network for cephalometric landmark detection. IEEE Access 8, 112633-112641 (2020)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 418, + 480, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 418, + 480, + 460 + ], + "spans": [ + { + "bbox": [ + 132, + 418, + 480, + 460 + ], + "type": "text", + "content": "27. Ronneberger O, Fischer P, B.T.: U-net: Convolutional networks for biomedical image segmentation. Medical Image Computing and Computer-Assisted Intervention-MICCAI 2015: 18th International Conference, Munich, Germany, October pp. 5-9" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 460, + 480, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 460, + 480, + 482 + ], + "spans": [ + { + "bbox": [ + 132, + 460, + 480, + 482 + ], + "type": "text", + "content": "28. Shaker A, Maaz M, R.H.e.a.: Unetr++: delving into efficient and accurate 3d medical image segmentation. ArXiv preprint arXiv:2212.04497 (2022)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 482, + 480, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 482, + 480, + 514 + ], + "spans": [ + { + "bbox": [ + 132, + 482, + 480, + 514 + ], + "type": "text", + "content": "29. Song, Y., Qiao, X., Iwamoto, Y., Chen, Y.w.: Automatic cephalometric landmark detection on x-ray images using a deep-learning method. Applied Sciences 10(7), 2547 (2020)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 514, + 480, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 514, + 480, + 547 + ], + "spans": [ + { + "bbox": [ + 132, + 514, + 480, + 547 + ], + "type": "text", + "content": "30. Song Y, Qiao X, I.Y.e.a.: An efficient deep learning based coarse-to-fine cephalometric landmark detection method. IEICE TRANSACTIONS on Information and Systems 104(8), 1359-1366 (2021)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 547, + 480, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 547, + 480, + 568 + ], + "spans": [ + { + "bbox": [ + 132, + 547, + 480, + 568 + ], + "type": "text", + "content": "31. 
Vaswani A, Shazeer N, P.N.e.a.: Attention is all you need. Advances in neural information processing systems (2017)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 568, + 480, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 568, + 480, + 611 + ], + "spans": [ + { + "bbox": [ + 132, + 568, + 480, + 611 + ], + "type": "text", + "content": "32. Wang, C.W., Huang, C.T., Hsieh, M.C., Li, C.H., Chang, S.W., Li, W.C., Vandaele, R., Marée, R., Jodogne, S., Geurts, P., et al.: Evaluation and comparison of anatomical landmark detection methods for cephalometric x-ray images: a grand challenge. IEEE transactions on medical imaging 34(9), 1890-1900 (2015)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 132, + 611, + 480, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 611, + 480, + 633 + ], + "spans": [ + { + "bbox": [ + 132, + 611, + 480, + 633 + ], + "type": "text", + "content": "33. Yang, S., Quan, Z., Nie, M., Yang, W.: Transpose: Keypoint localization via transformer. IEEE/CVF International Conference on Computer Vision (ICCV) (2021)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 132, + 633, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 633, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 633, + 480, + 665 + ], + "type": "text", + "content": "34. Yao J, Zeng W, H.T.e.a.: Automatic localization of cephalometric landmarks based on convolutional neural network. American journal of orthodontics and dentofacial orthopedics 161(3), e250-e259 (2022)" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 258, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 258, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 258, + 101 + ], + "type": "text", + "content": "C. Dai, Y. Wang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 315 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 138 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 138 + ], + "type": "text", + "content": "35. Yuhui Yuan, Rao Fu, L.H.W.L.C.Z.X.C.J.W.: Hrformer: High-resolution transformer for dense prediction. ArXiv preprint arXiv:2110.09408 (2021)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 138, + 482, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 138, + 482, + 160 + ], + "spans": [ + { + "bbox": [ + 130, + 138, + 482, + 160 + ], + "type": "text", + "content": "36. Zeng M, Yan Z, L.S.e.a.: Cascaded convolutional networks for automatic cephalometric landmark detection. 
Medical Image Analysis 68, 101904 (2021)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 160, + 482, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 160, + 482, + 194 + ], + "spans": [ + { + "bbox": [ + 130, + 160, + 482, + 194 + ], + "type": "text", + "content": "37. Zhang H, Zhang J, L.C.S.E.S.P.N.T.G.S.W.Y.M.M.: All-net: Anatomical information lesion-wise loss function integrated into neural network for multiple sclerosis lesion segmentation. Neuroimage Clin 32(102854) (2021)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 194, + 482, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 194, + 482, + 226 + ], + "spans": [ + { + "bbox": [ + 130, + 194, + 482, + 226 + ], + "type": "text", + "content": "38. Zhang K, Zhang Z, L.Z.e.a.: Joint face detection and alignment using multitask cascaded convolutional networks. IEEE signal processing letters 23(10), 1499-1503 (2016)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 226, + 482, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 226, + 482, + 248 + ], + "spans": [ + { + "bbox": [ + 130, + 226, + 482, + 248 + ], + "type": "text", + "content": "39. Zhao, T., Wu, X.: Pyramid feature attention network for saliency detection. CVPR (2019)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 248, + 482, + 292 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 248, + 482, + 292 + ], + "spans": [ + { + "bbox": [ + 130, + 248, + 482, + 292 + ], + "type": "text", + "content": "40. Zhong Z, Li J, Z.Z.e.a.: An attention-guided deep regression model for landmark detection in cephalograms. Medical Image Computing and Computer Assisted Intervention-MICCAI 2019: 22nd International Conference, Shenzhen, China p. 13-17 (October 2019)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 293, + 482, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 293, + 482, + 315 + ], + "spans": [ + { + "bbox": [ + 130, + 293, + 482, + 315 + ], + "type": "text", + "content": "41. Ziyang Ye, H.Y., Li, B.: Uncertainty-aware u-net for medical landmark detection. 
Arxiv preprint arXiv:2303.10349v1 (2023)" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 406, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 406, + 91, + 447, + 100 + ], + "type": "text", + "content": "Du-CeLR" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/ae02311f-0ba1-4342-a539-c7ea4e71402f_content_list.json b/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/ae02311f-0ba1-4342-a539-c7ea4e71402f_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..5971cb3dd2eeff77ff005764888738422fb8a16e --- /dev/null +++ b/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/ae02311f-0ba1-4342-a539-c7ea4e71402f_content_list.json @@ -0,0 +1,1920 @@ +[ + { + "type": "text", + "text": "A Closer Look at GAN Priors: Exploiting Intermediate Features for Enhanced Model Inversion Attacks", + "text_level": 1, + "bbox": [ + 256, + 140, + 750, + 205 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yixiang Qiu $^{1,2\\dagger}$ , Hao Fang $^{2\\dagger}$ , Hongyao Yu $^{1\\dagger}$ , Bin Chen $^{1,3,4\\#}$ , MeiKang Qiu $^{5}$ , and Shu-Tao Xia $^{2,4}$", + "bbox": [ + 223, + 233, + 781, + 265 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Harbin Institute of Technology, Shenzhen \n2 Tsinghua Shenzhen International Graduate School, Tsinghua University \n3 Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies \n4 Pengcheng Laboratory 5 Augusta University \nqiuyixiang@stu.hit.edu.cn, fang-h23@mails.tsinghua.edu.cn \nyuhongyao@stu.hit.edu.cn, chenbin2021@hit.edu.cn \nqiumeikang@yahoo.com, xiast@sz.tsinghua.edu.cn", + "bbox": [ + 220, + 276, + 781, + 376 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. Model Inversion (MI) attacks aim to reconstruct privacy-sensitive training data from released models by utilizing output information, raising extensive concerns about the security of Deep Neural Networks (DNNs). Recent advances in generative adversarial networks (GANs) have contributed significantly to the improved performance of MI attacks due to their powerful ability to generate realistic images with high fidelity and appropriate semantics. However, previous MI attacks have solely disclosed private information in the latent space of GAN priors, limiting their semantic extraction and transferability across multiple target models and datasets. To address this challenge, we propose a novel method, Intermediate Features enhanced Generative Model Inversion (IF-GMI), which disassembles the GAN structure and exploits features between intermediate blocks. This allows us to extend the optimization space from latent code to intermediate features with enhanced expressive capabilities. 
To prevent GAN priors from generating unrealistic images, we apply a $l_{1}$ ball constraint to the optimization process. Experiments on multiple benchmarks demonstrate that our method significantly outperforms previous approaches and achieves state-of-the-art results under various settings, especially in the out-of-distribution (OOD) scenario. Our code is available at: https://github.com/final-solution/IF-GMI", + "bbox": [ + 261, + 402, + 743, + 681 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords: Privacy $\\cdot$ Model Inversion $\\cdot$ Generative Priors", + "bbox": [ + 261, + 694, + 656, + 708 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 729, + 377, + 747 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In recent years, Deep Neural Networks (DNNs) have experienced unprecedented development and achieved tremendous success in a wide range of applications,", + "bbox": [ + 212, + 758, + 787, + 790 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This work was done while Yixiang Qiu was pre-admitted to Tsinghua University.", + "bbox": [ + 232, + 825, + 769, + 839 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$\\dagger$ Equal contribution.", + "bbox": [ + 232, + 796, + 370, + 811 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Corresponding author.", + "bbox": [ + 233, + 811, + 393, + 825 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "including face recognition [17], personalized recommendations [42], and audio recognition [7]. While DNNs bring us many practical benefits, concerns [4,10,11, 46] about privacy and security have also been raised and drawn great attention. Recent studies have demonstrated that there is a certain risk of privacy leakage for DNNs as an adversary could reveal private information from these pre-trained models. Various types of novel privacy attacks [27,33,49] have been proposed, such as membership inference attack [20,36] and gradient inversion attack [10, 46]. Among the new attack methods, Model Inversion (MI) attack [12] poses a greater threat due to its powerful capability in recovering the privacy-sensitive datasets that are collected and utilized for model training.", + "bbox": [ + 212, + 146, + 787, + 297 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "[14] proposes the first MI attack to reconstruct sensitive features of genomic data and demonstrate that linear regression models are vulnerable to such privacy attacks. Subsequent studies [13, 37, 43] have extended MI attacks to more Machine Learning (ML) models, but are still limited to models with simple structure and low-dimensional data such as grayscale images. Recent advances in the MI attack field have overcome the challenges in image data recovery by applying Generative Adversarial Networks (GANs) [16], resulting in the extension to DNNs with more complex structure and high-dimensional data such as RGB images. [51] first introduces the GANs to MI attack scenarios, serving as image priors. To better reveal privacy-sensitive information, [51] and subsequent GAN-based methods [5, 41, 47, 48] train GANs with publicly available datasets that have structural similarity with target private datasets. 
Furthermore, [38] propose to leverage the public pre-trained GAN models (e.g., StyleGAN [24]) as GAN priors, which have a stronger ability to generate high-resolution images and do not require a time-consuming training process.", + "bbox": [ + 212, + 301, + 787, + 529 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Although the aforementioned methods have achieved great progress in recovering high-quality and privacy-sensitive images, the effectiveness of GAN-based MI attacks is limited under certain scenarios. One typical challenge is the out-of-distribution (OOD) scenario, where there is a significant distributional shift between the target private dataset and the public dataset used in the training process of GAN priors. Most previous methods [5, 41, 48, 51] merely work well under scenarios with slight distributional shifts. For instance, they split the same dataset into two parts, one used as the public dataset and the other used as the private dataset. In recent years, some studies [3, 8, 35, 40, 45] have demonstrated that there is rich semantic information encoded in the latent code and intermediate features of GANs. Inspired by these works, we empirically observe that the rich semantic information encoded in the intermediate features helps to sufficiently recover high-quality private data under more rigorous settings, as shown in Figure 1. Therefore, it is imperative to explore methods for leveraging the GAN's intrinsic layered knowledge into MI attacks, mitigating the OOD issue.", + "bbox": [ + 212, + 532, + 787, + 760 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To this end, we propose a novel MI attack method, Intermediate Features enhanced Generative Model Inversion (IF-GMI), which effectively disassembles the GAN structure and leverages features between intermediate blocks. Specifically, we consider the generator of the GAN as a concatenation of multiple blocks and the vectors produced between the blocks as intermediate features. We first", + "bbox": [ + 212, + 763, + 787, + 839 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Y. 
Qiu et al.", + "bbox": [ + 271, + 114, + 357, + 128 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/4057bffa9d95fa63769b3a9fc77a3827b617b6e5f53b3229c90871dbf3c98f35.jpg", + "image_caption": [ + "0.0000" + ], + "image_footnote": [], + "bbox": [ + 225, + 146, + 281, + 186 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/6f5f6c0af33072bd26f8227755b3670ec1d5f67bfc325aaf947dd57cda4ede4a.jpg", + "image_caption": [ + "0.0000", + "Generation" + ], + "image_footnote": [], + "bbox": [ + 225, + 198, + 281, + 238 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/c4e121353fc9d4fce58b215281bb3996d31bcf1615473a40c10492f40e6770f7.jpg", + "image_caption": [ + "0.3600" + ], + "image_footnote": [], + "bbox": [ + 287, + 146, + 338, + 188 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/6f1614344c9dc466438968c5eb94d1b676bfb2ce116f4512e96ad811e0ec3676.jpg", + "image_caption": [ + "0.2200", + "PPA" + ], + "image_footnote": [], + "bbox": [ + 287, + 198, + 336, + 238 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/850be9d20fd9e7e0bf7c398c12f999b7b9cd793dde409625621992fb51588c95.jpg", + "image_caption": [ + "0.9975" + ], + "image_footnote": [], + "bbox": [ + 344, + 146, + 395, + 188 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/2536e1bf41eb0f8ec381ed92212c55e6c182395414fb17892502f5b5d14d093a.jpg", + "image_caption": [ + "0.9988", + "IF-GMI" + ], + "image_footnote": [], + "bbox": [ + 344, + 198, + 395, + 238 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/50c3d0634734bf852aff41eaa2b402646d6a5b8fcb588d72973a65fb920978ee.jpg", + "image_caption": [ + "" + ], + "image_footnote": [], + "bbox": [ + 403, + 146, + 454, + 188 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/6ed32123a3bf4d8d6b5661bb50102962aea8ee00622e289b8f0ec367e4629b9a.jpg", + "image_caption": [ + "Original" + ], + "image_footnote": [], + "bbox": [ + 401, + 198, + 454, + 238 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/406733f23327a1bc9b2c56e1adbbe0d2dd9b40270bb44ce2ce19bba8a062f521.jpg", + "image_caption": [ + "(a) Visual examples of IF-GMI to PPA", + "(b) Comparison of IF-GMI to PPA", + "Fig. 1: (a) Comparison of our proposed IF-GMI with baselines. The blue number below the images is the predicted confidence by the evaluation model. The first column shows the randomly generated images and the second column presents the reconstructed results by PPA [38], a typical GAN-based method focusing on directly optimizing the latent code of GAN model. The last two columns exhibit the results of our proposed IF-GMI and the ground truth images in the private dataset, respectively. (b) Top-1 attack accuracy of PPA and IF-GMI (ours) on four OOD scenarios." + ], + "image_footnote": [], + "bbox": [ + 472, + 148, + 782, + 267 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "optimize the latent code input to the generator and then successively optimize the intermediate features from the start layer to the end layer. To avoid unreal image generation, we utilize a $l_{1}$ ball constraint to restrict the deviation when optimizing the intermediate features. In the end, we collect the output images after each intermediate layer optimization process and select the final results with a simple strategy. We conduct comprehensive experiments to evaluate our method in multiple settings, including OOD scenarios, various target models, and different GAN priors. 
The encouraging experimental results demonstrate that the proposed method outperforms baselines on multiple metrics and achieves high attack accuracy on OOD settings. Finally, we perform extensive experiments and ablation studies to validate the effectiveness of the proposed method. Our main contributions are as follows:", + "bbox": [ + 212, + 421, + 784, + 601 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a novel GAN-based MI attack method, which disassembles the pre-trained generator and successively optimizes the latent code and intermediate features under the $l_{1}$ ball constraint.", + "- We demonstrate that our proposed achieves state-of-the-art performance in a range of scenarios, especially under the challenging OOD settings.", + "- We conduct extensive experiments to validate the effectiveness and outstanding transferability of our method." + ], + "bbox": [ + 223, + 609, + 782, + 713 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 214, + 743, + 385, + 758 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 GAN as prior knowledge", + "text_level": 1, + "bbox": [ + 214, + 771, + 465, + 787 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "GANs [15] are a class of deep neural networks that consist of two functional components, a generator and a discriminator, trained concurrently through adversarial processes to generate realistic data. The objective of a GAN is to learn the", + "bbox": [ + 212, + 794, + 782, + 839 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "A Closer Look at GAN Priors", + "bbox": [ + 529, + 114, + 730, + 126 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "distribution of the training dataset and generate more samples from the learned probability distribution [16]. Well-trained GANs are able to generate high-fidelity and diverse images, excellent representative of which are StyleGANs [24,25]. The generator of the StyleGAN consists of a mapping network and a synthesis network. The former maps latent vectors into the intermediate latent space (i.e. $\\mathcal{W}$ space), and the latter generates images through style vectors. The feature in the $\\mathcal{W}$ space is well-disentangled, which means that images sharing similar features correspond to analogous style vectors. Therefore, PPA [38] performs their attacks by searching the style vectors in $\\mathcal{W}$ space. The style vectors in the front layers tend to control high-level aspects of the generated images like pose, face shape, and general hair style, while those in the back ones have more influence on details [24], such as smaller scale facial features and eyes open/closed. Moreover, style vectors in $\\mathcal{W}$ space do not need to follow the same distribution with the training data, which means that more diverse images can be generated by controlling the vectors [24].", + "bbox": [ + 212, + 146, + 787, + 372 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Recent works [10, 32, 52] have shown the richness of intermediate features in GANs, our investigation also tries to explore the potential of leveraging intermediate latent space of different layers to enhance MI attacks. 
Our findings reveal that this approach significantly improves attack accuracy and obtains high-quality inversion results, particularly under the harder OOD scenario.", + "bbox": [ + 212, + 373, + 787, + 449 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 Model Inversion Attacks", + "text_level": 1, + "bbox": [ + 214, + 468, + 465, + 484 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Model inversion (MI) attacks aim at reconstructing the private training data from a trained model. Typically, MI attacks can be divided into the white-box scenario [51] and black-box scenario [22]. We only focus on the white-box scenario in this paper, which means that the attacker has full access to the trained model. This kind of attack is initially demonstrated through an attempt to extract genomic markers from a linear regression model, as highlighted in the earliest research by [14]. Building on this foundation, subsequent researches [13, 37, 43] have broadened the scope of MI attacks, applying them to more machine learning models like shallow networks, and simple forms of data, such as low-resolution grayscale images. However, as the scale of both the data and the models increases, the efficacy of MI attack methods diminishes dramatically.", + "bbox": [ + 212, + 492, + 787, + 657 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In response to this challenge, a novel approach known as GMI, introduced by [51], employs a GAN-based methodology to enhance the ability of MI attacks with deeper and wider DNNs. This innovative strategy leverages a GAN model trained on publicly available data to encapsulate the distributional characteristics of image data, thereby facilitating the generation of high-quality image reconstructions. The process involves the attackers first generating a set of preliminary images by inputting a batch of randomly sampled latent vectors into the GAN. These generated images are then fed into the target image classifier to obtain initial predictions. To refine the attack, the attackers iteratively optimize the input latent vectors. This optimization process aims to minimize the discrepancy between the classifier's predictions and the intended target class, as measured by the cross-entropy loss, while also reducing the discriminator loss.", + "bbox": [ + 212, + 659, + 787, + 840 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Y. Qiu et al.", + "bbox": [ + 271, + 114, + 357, + 128 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/d50aeea83fea1e663e2ca3791e109e1db489790c54f2b18b1b2da20bd0120b0c.jpg", + "image_caption": [ + "Fig. 2: Overview of our proposed IF-GMI. Firstly, the latent vectors are sampled from standard Gaussian distribution and mapped into disentangled latent codes with semantic meanings by Mapping Network. Then we perform random augmentation on these latent codes to select optimal ones denoted as $\\mathbf{w}^*$ for optimization. The Synthesis Network is disassembled into multiple blocks to search the intermediate features, which are successively updated with the identity loss calculated from the target model. Finally, the reconstructed images are generated from the last layer as results." 
+ ], + "image_footnote": [], + "bbox": [ + 223, + 150, + 779, + 300 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "With the help of the GAN, GMI seeks to achieve more precise and convincing reconstructions of complex data, thereby representing a significant advancement in the field of MI attacks.", + "bbox": [ + 212, + 438, + 784, + 482 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Lots of researches in recent years improve the attack performance on the white-box scenario based on GMI. SecretGen [48] explores the scenario when the attackers know some auxiliary information about the private data. KEDMI [5] improves the discriminator by incorporating target labels and recover the distribution of the input latent vectors for a target class. VMI [41] reformulates the MI attack from the perspective of variational inference and introduce KL-divergence as a regularization to better approximate the target distribution with a variational distribution. PPA [38] employs pre-trained StyleGAN2 to reduce the time cost of attacks and extend the attacks to high-resolution images thanks to the excellent generative ability of StyleGAN2. Moreover, they propose a set of strategies to heighten attack accuracy and robustness, including initial selection, post-selection, and data augmentation. LOMMA [31] introduces model augmentation into MI attacks to reduce overfitting of the target model. They train some surrogate models from the target model via model distillation, co-guiding the optimization process with improved loss function. PLGMI [47] proposes a top-n selection strategy, using target models to generate pseudo labels for publicly available images, thereby directing the training process for the conditional GAN.", + "bbox": [ + 212, + 484, + 787, + 741 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 Methodology", + "text_level": 1, + "bbox": [ + 215, + 763, + 380, + 780 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we begin by explaining the fundamental paradigm of MI attacks and provide a formulation for the MI problem. Subsequently, we present our main components and elaborate the detailed pipeline of the proposed IF-GMI,", + "bbox": [ + 212, + 794, + 785, + 840 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "A Closer Look at GAN Priors", + "bbox": [ + 529, + 114, + 730, + 127 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "which contributes to the improved performance under the OOD scenario. See Figure 2 for an overview of our method.", + "bbox": [ + 215, + 146, + 782, + 176 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1 Preliminaries", + "text_level": 1, + "bbox": [ + 215, + 194, + 372, + 209 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this paper, we focus on the MI attacks under white-box settings, which means all the parameters and components of target models are available to the attacker. For image classification tasks, the malicious adversary aims to reconstruct privacy-sensitive images by leveraging the output prediction confidence of the target classifier and other auxiliary priors. 
Early works [44] directly optimize pixels in randomly sampled dummy images $\mathbf{x}$ to approximate target images $\mathbf{x}^*$ given the target model $T_{\theta}$ and target label $c$, which can be formulated as follows:", + "bbox": [ + 212, + 215, + 784, + 320 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\mathbf {x}} = \\underset {\\mathbf {x}} {\\arg \\min } \\mathcal {L} \\left(T _ {\\theta} (\\mathbf {x}), c\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 416, + 333, + 784, + 354 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\hat{\mathbf{x}}$ is the reconstructed image, $\mathcal{L}(\cdot ,\cdot)$ denotes the classification loss designed for image optimization and $T_{\theta}(\mathbf{x})$ represents the output confidence. Due to the full access to the target model in white-box settings, the attacker can calculate the loss and directly perform backpropagation to update the dummy images.", + "bbox": [ + 212, + 358, + 782, + 419 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "However, the methods above are no longer functional when $\mathbf{x}$ becomes high-dimensional data with an excessively large search space. To tackle such issues, recent studies [5, 38, 47, 51] introduce GANs as image priors due to their superior capability to generate high-fidelity RGB images. They propose to train a specially designed GAN with publicly available datasets that have structural similarities with the private dataset or utilize a public pre-trained GAN before the attack. Furthermore, the optimization objective is replaced with the latent vectors $\mathbf{z}$ of the generator, which have fewer parameters to optimize. With the aforementioned techniques, the MI problem is transformed into the following formulation:", + "bbox": [ + 212, + 419, + 784, + 554 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {\\mathbf {z}} = \\underset {\\mathbf {z}} {\\arg \\min } \\mathcal {L} _ {i d} \\left(T _ {\\theta} (G (\\mathbf {z})), c\\right) + \\lambda \\mathcal {L} _ {a u x} (\\mathbf {z}), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 561, + 784, + 584 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $G$ represents the trained generator, $\mathcal{L}_{id}(\cdot, \cdot)$ denotes the identity loss calculated from the target model $T_{\theta}$ and $\mathcal{L}_{aux}(\cdot)$ is an optional auxiliary loss (e.g., the discriminator loss) with a hyperparameter $\lambda$ . By minimizing Eq. (2), the adversary updates the latent vectors $\mathbf{z}$ into the optimal results $\hat{\mathbf{z}}$ and generates the final images through $\hat{\mathbf{x}} = G(\hat{\mathbf{z}})$ .", + "bbox": [ + 212, + 589, + 782, + 665 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Intuitively, directly optimizing the input latent code of GAN priors serves as a natural method to acquire ideal reconstructed images, leading to its widespread application in all the previous works. However, recent studies [3,8,35,40] have indicated that there is fairly rich semantic information in the intermediate features of GANs beyond the input latent code. 
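For concreteness, the latent-space search of Eq. (2) can be sketched in PyTorch-style pseudocode. This is only a minimal illustration under stated assumptions, not any released implementation: `generator`, `target_model`, the latent dimensionality, and all hyperparameters are hypothetical placeholders, plain cross-entropy stands in for the identity loss $\mathcal{L}_{id}$, and the optional auxiliary term is omitted.

```python
import torch
import torch.nn.functional as F

def invert_latent(generator, target_model, target_class,
                  n_candidates=8, latent_dim=512, steps=200, lr=0.05):
    # Optimize a batch of candidate latent vectors z instead of raw pixels.
    z = torch.randn(n_candidates, latent_dim, requires_grad=True)
    labels = torch.full((n_candidates,), target_class, dtype=torch.long)
    optimizer = torch.optim.Adam([z], lr=lr)
    for _ in range(steps):
        images = generator(z)                   # x = G(z)
        logits = target_model(images)           # T_theta(G(z))
        loss = F.cross_entropy(logits, labels)  # stand-in for L_id(T_theta(G(z)), c)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return generator(z).detach()                # x_hat = G(z_hat)
```

All of the GAN-based attacks discussed so far operate only in this input latent space, and the reconstruction is read out exclusively from the optimized code $\hat{\mathbf{z}}$.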
This inspires us to surpass the limitation of merely searching the latent space and propose a novel method focusing on the intermediate feature domains, which are more close to the output.", + "bbox": [ + 212, + 666, + 784, + 771 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 Exploiting Intermediate Features for Enhanced MI Attacks", + "text_level": 1, + "bbox": [ + 215, + 787, + 750, + 803 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the following part, we delve into the internal structure of the GAN prior, attempting to explore the hierarchical layers for enhanced utilization of the rich", + "bbox": [ + 212, + 809, + 782, + 839 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Y. Qiu et al.", + "bbox": [ + 271, + 114, + 357, + 128 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "semantics learned by the generator. Following the pipeline shown in Figure 2, we will elucidate each component in detail.", + "bbox": [ + 212, + 146, + 782, + 176 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The GAN prior. Most previous GAN-based attacks [5,31,47,51] require training a specialized GAN with essential auxiliary dataset towards the specific target classifier. However, the prior knowledge of GANs trained under the above setting will be excessively aligned with the target model and the auxiliary dataset, leading to significant reduction in transferability and generalization.", + "bbox": [ + 212, + 191, + 782, + 265 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Therefore, our method relies on the pre-trained StyleGAN2 [23] instead of training a GAN from scratch. The generator of StyleGAN2 can be simply divided into two components, consisting of a mapping network $G_{map}:\\mathcal{Z}\\to \\mathcal{W}$ which maps the initial latent vectors $\\mathbf{z}\\in \\mathcal{Z}$ into the extended $\\mathcal{W}$ space [1], and a synthesis network $G_{syn}:\\mathcal{W}\\rightarrow \\mathcal{X}$ which generates images $\\mathbf{x}$ with mapped vectors $\\mathbf{w}\\in \\mathcal{W}$ . Due to the reduced feature entanglement in $\\mathcal{W}$ space that facilitates better style generation, we set $\\mathbf{w}$ as the initial optimization objective rather than the commonly used latent code $\\mathbf{z}$ in previous works. Specifically, we first randomly sample a batch of latent vectors $\\mathbf{z}$ from Gaussian distribution and then map them with $G_{map}$ to acquire $\\mathbf{w}$ , which will be iteratively updated in the first step of intermediate features optimization. Moreover, the StyleGAN2 is pre-trained without the utilization of the target model $T_{\\theta}$ or other auxiliary prior corresponding to the target dataset, ensuring the flexibility and transferability of our method when attacking different target models and datasets.", + "bbox": [ + 212, + 267, + 787, + 479 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Initial Selection. Owing to the randomness in sampling latent vectors $\\mathbf{z}$ , it is potential part of them cannot facilitate the generation of appropriate images, leading to a decrease in attack accuracy. To reduce the risk of generating misleading and low-quality images, previous studies [2, 38, 48] have explored the technique of initial selection and validated its effectiveness in obtaining robust latent vectors. 
Specifically, we first generate images with the randomly samples $\\mathbf{z}$ , apply a series of transformations $Aug(\\cdot)$ to the images, and feed them into the target classifier $T_{\\theta}$ for corresponding prediction confidence. By selecting the latent vectors with higher scores, we can significantly improve the quality of the final images to better approximate the target distribution.", + "bbox": [ + 212, + 493, + 787, + 643 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Inspired by these prior studies [2,38,48], we also include the initial selection technique in our method and apply standard image transformations, such as random cropping, resizing and flipping. Different from previous methods, we perform initial selection on the mapped vectors $\\mathbf{w}$ instead of latent vectors $\\mathbf{z}$ . The robust vectors $\\mathbf{w}$ are obtained with the following equation:", + "bbox": [ + 212, + 645, + 787, + 720 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {w} _ {\\text {i n i t}} = \\underset {\\mathbf {w}} {\\arg \\max } \\operatorname {C o n f} \\left(T _ {\\theta} \\left(A u g \\left(G _ {\\text {s y n}} (\\mathbf {w})\\right)\\right), c\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 343, + 732, + 784, + 753 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\operatorname{Conf}(\\cdot, \\cdot)$ measures the confidence score for augmented images $Aug(G_{syn}(\\mathbf{w}))$ given the specific label $c$ .", + "bbox": [ + 212, + 763, + 802, + 795 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Intermediate Features Optimization. According to the research of [24], the front blocks in the generator control the overall characteristics while the back", + "bbox": [ + 214, + 809, + 785, + 839 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "A Closer Look at GAN Priors", + "bbox": [ + 529, + 114, + 730, + 127 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 114, + 784, + 126 + ], + "page_idx": 6 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 1 Pseudocode of the core algorithm in our proposed IF-GMI" + ], + "code_body": "Input: $G_{syn}$ : a pre-trained generator; $L$ : the number of intermediate features; $T_{\\theta}$ : the target classifier; $\\mathcal{L}_{id}$ : the identity loss; $r[1 \\dots L]$ : the radius value of $l_{1}$ ball for each hierarchical features; $N$ : the number of iterations;", + "guess_lang": "txt", + "bbox": [ + 215, + 164, + 785, + 215 + ], + "page_idx": 7 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Output: Reconstructed images $\\mathbf{x}^*$" + ], + "code_body": "1: Acquire latent vectors $\\mathbf{w}_{init}$ via initial selection process \n2: $\\mathbf{w}_{(0)} \\gets \\underset{\\mathbf{w}}{\\arg \\min} \\mathcal{L}_{id}(G_{syn}(\\mathbf{w}_{init}))$ \n3: Decompose the $G_{syn}$ into $G_{L+1} \\circ G_{L} \\circ \\dots \\circ G_{2} \\circ G_{1}$ \n4: Obtain the first intermediate feature $\\mathbf{f}_{(1)}^{0} = G_{1}(\\mathbf{w}_{(0)})$ \n5: Set $\\mathbf{w}_{(1)}^{0} = \\mathbf{w}_{(0)}$ \n6: for $i \\gets 1$ to $L$ do \n7: Set $G_{remain} = G_{L+1} \\circ G_{L} \\dots \\circ G_{i+1}$ \n8: for $j \\gets 1$ to $N$ do \n9: loss = $\\mathcal{L}_{id}(G_{remain}(\\mathbf{f}_{(i)}^{j-1}, \\mathbf{w}_{(i)}^{j-1}))$ \n10: $\\mathbf{f}_{(i)}^{j} \\gets Adam(\\mathbf{f}_{(i)}^{j-1}; loss), ||\\mathbf{f}_{(i)}^{j} - \\mathbf{f}_{(i)}^{0}||_{1} \\leq r[i]$ 
\n11: $\\mathbf{w}_{(i)}^{j} \\gets Adam(\\mathbf{w}_{(i)}^{j-1}; loss), ||\\mathbf{w}_{(i)}^{j} - \\mathbf{w}_{(i)}^{0}||_{1} \\leq r[i]$ \n12: end for \n13: $\\mathbf{f}_{(i+1)}^{0} = G_{i+1}(\\mathbf{f}_{(i)}^{N}, \\mathbf{w}_{(i)}^{N}), \\mathbf{w}_{(i+1)}^{0} = \\mathbf{w}_{(i)}^{N}$ \n14: end for \n15: The final images $\\mathbf{x}^{*} = \\mathbf{f}_{(L+1)}^{0}$ \n16: return $\\mathbf{x}^{*}$", + "bbox": [ + 225, + 234, + 643, + 518 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ones have more influence on local details. Previous studies [38, 47, 51] neglect the role of the latter, which limits the attack performance. To take advantage of the individual blocks, we propose intermediate features optimization, as shown in the Algorithm 1. We first optimize the selected latent vectors $\\mathbf{w}_{init}$ to obtain the optimal ones $\\mathbf{w}_{(0)}$ before launching intermediate features optimization. Then we disassemble the pre-trained generator into $L + 1$ blocks for hierarchical layer searching, i.e.,", + "bbox": [ + 212, + 551, + 787, + 657 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nG _ {s y n} = G _ {L + 1} \\circ G _ {L} \\circ \\dots G _ {2} \\circ G _ {1}. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 382, + 659, + 785, + 676 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "And we can feed $\\mathbf{w}_{(0)}$ into block $G_{1}$ to attain the first intermediate feature $\\mathbf{f}_{(1)}^{0}$ .", + "bbox": [ + 215, + 684, + 785, + 702 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "For each intermediate block $G_{i+1}, i \\in [1, \\ldots, L]$ , the corresponding intermediate features $\\mathbf{f}_{(i+1)}^0$ are acquired with following steps. First, we generate images utilizing the remaining blocks (i.e., $\\mathbf{x}_i = G_{L+1} \\circ G_L \\dots G_{i+1}(\\mathbf{f}_{(i)}, \\mathbf{w}_{(i)})$ ) and input them into the target classifier $T_\\theta$ to compute the prediction confidence for loss function. Then, we repeat the aforementioned process to iteratively update both $\\mathbf{w}_{(i)}$ and $\\mathbf{f}_{(i)}$ . During the optimization process, we restrict the $\\mathbf{f}_{(i)}$ within the $l_1$ ball with radius $r[i]$ centered at the initial intermediate feature $\\mathbf{f}_{(i)}^0$ to avoid excessive shift that may lead to collapse image generation. Once the iteration process is completed, the optimized $\\mathbf{w}_{(i)}^N$ and $\\mathbf{f}_{(i)}^N$ are fed into the block $G_i$ to", + "bbox": [ + 212, + 702, + 787, + 843 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Y. Qiu et al.", + "bbox": [ + 271, + 114, + 357, + 128 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "obtain the next intermediate features $\\mathbf{f}_{(i + 1)}^{0}$ . Moreover, we denote the optimized $\\mathbf{w}_{(i)}^{N}$ as the initial latent vector $\\mathbf{w}_{(i + 1)}^{0}$ before the next layer optimization starts.", + "bbox": [ + 212, + 146, + 782, + 181 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Once we finish searching the last intermediate layer, we can generate the final images $\\mathbf{x}^*$ from the last intermediate feature $\\mathbf{f}_{(L)}^{N}$ , i.e., $\\mathbf{x}^* = \\mathbf{f}_{L + 1}^{0} = G_{i + 1}(\\mathbf{f}_{(L)}^{N})$ .", + "bbox": [ + 212, + 181, + 784, + 212 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The Overall Loss. 
While the cross-entropy loss $\\mathcal{L}_{CE}$ serves as the identity loss in most early works [5, 48, 51], there is a major drawback of $\\mathcal{L}_{CE}$ . Specifically, the gradient vanishing problem emerges when the prediction confidence of target label $c$ approaches the ground truth in the one-hot vector. Following the previous study [38], we rely on the Poincaré loss function to overcome this problem. Therefore, the identity loss function utilized in our method is defined as follows:", + "bbox": [ + 212, + 220, + 782, + 311 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {i d} = \\operatorname {a r c c o s h} \\left(1 + \\frac {2 \\| v _ {1} - v _ {2} \\| _ {2} ^ {2}}{(1 - \\| v _ {1} \\| _ {2} ^ {2}) (1 - \\| v _ {2} \\| _ {2} ^ {2})}\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 338, + 323, + 784, + 356 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "where $||v||_2$ is the Euclidean norm for the given vector. In our experiments, we denote $v_{1}$ as the normalized prediction confidence and $v_{2}$ as the one-hot vector for ground truth. Notably, the original number 1 in $v_{2}$ is substituted with 0.9999 to avoid division by zero.", + "bbox": [ + 212, + 359, + 784, + 419 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 214, + 439, + 375, + 455 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this section, we first illustrate the details of our experimental settings. Then, we compare our method with state-of-the-art baselines to evaluate the attack performance. Furthermore, we conduct extensive experiments on multiple target datasets and models to further validate the effectiveness of our method in various settings. Finally, the ablation study will be evaluated on the first 100 classes of the whole dataset due to cost concerns.", + "bbox": [ + 212, + 467, + 784, + 556 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.1 Experimental Setup", + "text_level": 1, + "bbox": [ + 214, + 577, + 426, + 593 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Datasets. We evaluate our method on two classification tasks, including facial image classification and dog breed classification. For the facial image classification task, we select the FaceScrub [30] and CelebFaces Attributes [28] (CelebA) as private datasets to train the target models. FaceScrub consists of facial images of actors and actresses with 530 identities in total. CelebA contains facial images of 10177 identities with coarse alignment. For FaceScrub, we utilize all the identities in the major experiment. For CelebA, we select the top 1000 identities with the most images for our experiment, consisting of over 30000 images. We use Flickr-Faces-HQ [24] (FFHQ) and MetFaces [23] as public datasets. FFHQ consists of 70000 high-quality human face images. MetFaces is an image dataset of 1336 human faces extracted from the Metropolitan Museum of Art Collection, which has a huge distributional shift with real human faces. For the dog breed classification task, we use Stanford Dogs [9] as a private dataset and Animal Faces-HQ Dogs [6] (AFHQ) as a public dataset. 
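For reference, the Poincaré identity loss of Eq. (5) above admits a short, self-contained sketch. The snippet below is an illustration only and rests on two assumptions not fixed by the equation itself: it is written against PyTorch, and it takes the normalized prediction confidence $v_1$ to be the softmax output of the target classifier, with the one-hot target $v_2$ scaled so that its single 1 becomes 0.9999 as described earlier.

```python
import torch
import torch.nn.functional as F

def poincare_identity_loss(logits, target_labels):
    # logits: (N, C) outputs of the target classifier
    # target_labels: (N,) LongTensor of ground-truth class indices
    v1 = F.softmax(logits, dim=-1)          # assumed "normalized prediction confidence"
    v2 = F.one_hot(target_labels, num_classes=logits.shape[-1]).float() * 0.9999
    sq_dist = (v1 - v2).pow(2).sum(dim=-1)  # ||v1 - v2||_2^2
    denom = (1.0 - v1.pow(2).sum(dim=-1)) * (1.0 - v2.pow(2).sum(dim=-1))
    return torch.acosh(1.0 + 2.0 * sq_dist / denom).mean()
```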
To adapt to the target model, all images in the various datasets are pre-processed to a resolution size of $224 \\times 224$ pixels in our experiment.", + "bbox": [ + 212, + 598, + 785, + 840 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "A Closer Look at GAN Priors", + "bbox": [ + 529, + 114, + 730, + 127 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Models. We trained a variety of classification models on the private datasets mentioned above, including various architectures such as ResNet-18 [18], DenseNet-169 [21], ResNet-152 [18], and ResNeSt-101 [50], as target models. Following the settings in the previous work [38], we select Inception-v3 [39] as the evaluation model. For the generative model, we employ publicly released StyleGAN2 pre-trained on the aforementioned public datasets.", + "bbox": [ + 212, + 146, + 797, + 238 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Metrics. Following PPA [38], we evaluate the performance of our attack method on various kinds of metrics as follows:", + "bbox": [ + 212, + 248, + 784, + 277 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Attack Accuracy. This metric serves as a criterion on how well the generated samples resemble the target class. We use the evaluation model trained on the same dataset with the target model to predict the labels on reconstructed samples and compute the top-1 and top-5 accuracy for target classes, denoted as $Acc@1$ and $Acc@5$ respectively. The higher the reconstructed samples achieve attack accuracy on the evaluation model, the more private information in the dataset can be considered to be exposed [51].", + "- Feature Distance. The feature is defined as the output of the model's penultimate layer. We compute the shortest feature $l_{2}$ distance between reconstructed samples and private training data for each class and calculate the average distance. The evaluated feature distances on the evaluation model and a pre-trained FaceNet [34] are denoted as $\\delta_{eval}$ and $\\delta_{face}$ , respectively.", + "- Fréchet Inception Distance (FID). FID [19] is commonly used to evaluate the generated images of GANs. It computes the distance between the feature vectors from target private data and reconstructed samples. The feature vectors are extracted by Inception-v3 pre-trained on ImageNet. The lower FID score shows higher realism and overall diversity [41].", + "- Sample Diversity. We compute Precision-Recall [26] and Density-Coverage [29] scores, whose higher values indicate greater intra-class diversity of the reconstructed samples. Our results for these four metrics are stated and analyzed in the Appendix." + ], + "bbox": [ + 225, + 282, + 787, + 598 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.2 The Number of Optimized Layers", + "text_level": 1, + "bbox": [ + 215, + 622, + 540, + 638 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "To obtain the highest attack performance, the number of intermediate features $L$ should be explored before conducting the major experiments. When $L$ takes a small value, there is a risk of underfitting as we merely optimize the intermediate features of the previous few layers to reconstruct the target images, especially in the OOD scenario. 
In contrast, when $L$ is too large, the latter layers have a greater influence on the local details [24], which may lead to overfitting to the target model in some details and produce unrealistic images. Therefore, we must balance underfitting and overfitting when choosing $L$ . We conduct a simple attack on only 10 classes for each combination of public and private datasets to select $L$ according to the results. For instance, Figure 3(a) shows the Acc@1 result for GAN prior pre-trained on FFHQ against the target DenseNet-169 trained on CelebA. The Acc@1 reaches the highest when $L = 3$ . Hence, we keep this configuration in conducting the following experiments.", + "bbox": [ + 212, + 643, + 787, + 840 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Y. Qiu et al.", + "bbox": [ + 271, + 114, + 357, + 128 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/0e44ab91fb2d2ab96d903b61bf75ae4623ffc976fc2924dcc0e7f44412b5fdb0.jpg", + "image_caption": [ + "(a) StyleGAN2" + ], + "image_footnote": [], + "bbox": [ + 228, + 162, + 493, + 268 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/302a4a29c9abef413f36f6b081d3639797110b6a84b5adb6171e8ffa8e02e483.jpg", + "image_caption": [ + "layer 0" + ], + "image_footnote": [], + "bbox": [ + 511, + 162, + 571, + 208 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/2d9297ebe571fde373116271bd7ed018985aee102c677cf495ac580a3dc1979e.jpg", + "image_caption": [ + "layer 1" + ], + "image_footnote": [], + "bbox": [ + 576, + 162, + 635, + 208 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/791e673b5a6a204cc1c216f6d0c44b563f71fb7a10be133c5519aa4304b860f0.jpg", + "image_caption": [ + "layer 2" + ], + "image_footnote": [], + "bbox": [ + 642, + 162, + 700, + 208 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/d7ad69a8642367d78b77826bad4c7e6c79ac95248b0919e61bf453b213f39c4d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 707, + 162, + 766, + 208 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/d8aa836cce8f78142345ead42a9144964cf8bb42544f3ea20d72493d9fbc1065.jpg", + "image_caption": [ + "layer 4", + "Fig. 3: (a) Comparison of Acc@1 metric under various settings of $L$ (i.e., the number of intermediate features). (b) Visual results generated from different end layers. We define $L = 0$ as a special case that our method degenerates into merely optimizing the latent vectors $\\mathbf{w}$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 513, + 220, + 571, + 267 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/5418e5f60f52d07697ee9aa2264db7a86841f632302cdd65b352984bd8945d1b.jpg", + "image_caption": [ + "ayer 5", + "(b) visual samples for each layer" + ], + "image_footnote": [], + "bbox": [ + 576, + 220, + 635, + 267 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/7cb97fa038eb3ab9754ad0420bdc7c3ce2fe3756f0ffaea5bfd83bbef8a6579b.jpg", + "image_caption": [ + "layer 6" + ], + "image_footnote": [], + "bbox": [ + 643, + 220, + 700, + 267 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/59a4ddcf6813cc4f0e2110506f036935f69707a298f8d942bddd9ea2338d00a1.jpg", + "image_caption": [ + "layer 3", + "layer 7" + ], + "image_footnote": [], + "bbox": [ + 707, + 220, + 766, + 267 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/c792262ddfd26a0427609a7c5d099687bc69718d510db7f3eacf970bdb35d75e.jpg", + "table_caption": [ + "Table 1: Comparison of our method with state-of-the-art methods against ResNet-18 trained on FaceScrub." + ], + "table_footnote": [], + "table_body": "
Public Dataset | Method | ↑ Acc@1 | ↑ Acc@5 | ↓ δface | ↓ δeval | ↓ FID
FFHQ | GMI [51] | 0.131 | 0.339 | 1.260 | 149.530 | 77.800
FFHQ | KEDMI [5] | 0.127 | 0.317 | 1.155 | 186.409 | 144.195
FFHQ | PPA [38] | 0.962 | 0.996 | 0.707 | 117.834 | 41.688
FFHQ | LOMMA+GMI [31] | 0.828 | 0.945 | 0.784 | 126.178 | 55.840
FFHQ | LOMMA+KEDMI [31] | 0.549 | 0.814 | 0.916 | 217.991 | 114.045
FFHQ | PLGMI [47] | 0.758 | 0.928 | 0.676 | 214.978 | 154.497
FFHQ | IF-GMI(ours) | 0.979 | 0.996 | 0.667 | 112.915 | 40.581
MetFaces | GMI [51] | 0.038 | 0.136 | 1.361 | 161.036 | 114.648
MetFaces | KEDMI [5] | 0.003 | 0.017 | 1.651 | 212.952 | 347.468
MetFaces | PPA [38] | 0.628 | 0.854 | 1.035 | 146.749 | 62.518
MetFaces | LOMMA+GMI [31] | 0.160 | 0.361 | 1.220 | 156.297 | 101.600
MetFaces | LOMMA+KEDMI [31] | 0.002 | 0.020 | 1.623 | 214.883 | 333.572
MetFaces | PLGMI [47] | 0.438 | 0.731 | 0.796 | 205.222 | 245.208
MetFaces | IF-GMI(ours) | 0.949 | 0.992 | 0.838 | 120.354 | 68.107
", + "bbox": [ + 220, + 422, + 787, + 630 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.3 Comparison with Previous State-of-the-art Attacks", + "text_level": 1, + "bbox": [ + 214, + 659, + 683, + 674 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We compare our method with state-of-the-art MI attack methods, including GMI [51], KEDMI [5], PPA [38], LOMMA [31] and PLGMI [47]. Note that LOMMA [31] is a plug-and-play technique designed to augment existing attack methods. We use their original setup where LOMMA is integrated with GMI and KEDMI as our baselines.", + "bbox": [ + 212, + 686, + 784, + 762 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The GAN structures employed by GMI, KEDMI, and PLGMI are inherently limited to generating images at a resolution of $64 \\times 64$ pixels. To ensure a fair comparison, we adopt the same operation used in PPA [38], which modifies the architecture of the generators and discriminators to enable the generation of images at an enhanced resolution of $256 \\times 256$ pixels, i.e., adding two ex", + "bbox": [ + 212, + 763, + 785, + 839 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "A Closer Look at GAN Priors", + "bbox": [ + 529, + 114, + 730, + 127 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 114, + 782, + 126 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/b90b989e4c6f839c952787458555202005ff6fa865d5a92ea072a65f8ba80d9e.jpg", + "image_caption": [ + "Private", + "Fig. 4: Visual comparison of reconstructed images from different methods against the ResNet-18 trained on FaceScrub. The first column shows ground truth images of the target class in the private dataset." + ], + "image_footnote": [], + "bbox": [ + 243, + 148, + 292, + 325 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/2035d4fdac018c526da493640c6bac4170088e367c6f7c56434a501d34ac616d.jpg", + "image_caption": [ + "GMI" + ], + "image_footnote": [], + "bbox": [ + 325, + 150, + 372, + 325 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/74cd5d890531dacfba963b3c14eff81b6e7399c1315c833bb44ad6a1291aec2b.jpg", + "image_caption": [ + "KEDMI" + ], + "image_footnote": [], + "bbox": [ + 387, + 150, + 436, + 325 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/4f1ded3517cfc59ebe50dee073b78bbbcb12b8c7d4cc16706939dd58e6436c20.jpg", + "image_caption": [ + "PPA" + ], + "image_footnote": [], + "bbox": [ + 450, + 150, + 500, + 325 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/936d7b5c460b1573aa457c056b2499497cea2e27cfb21f6e8966ee25a32f6516.jpg", + "image_caption": [ + "LOMMA +GMI" + ], + "image_footnote": [], + "bbox": [ + 514, + 150, + 563, + 325 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/bb49ef5912d38e70d6e03a89365c65fc228bf200738bd8599253bdc0c1c40527.jpg", + "image_caption": [ + "LOMMA +KEDMI" + ], + "image_footnote": [], + "bbox": [ + 578, + 150, + 627, + 325 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/db40d1cc3c1b0b9be0828b3adbfbc759f852a73df17cad5d1afce224d188bb42.jpg", + "image_caption": [ + "PLGMI" + ], + "image_footnote": [], + "bbox": [ + 643, + 150, + 692, + 325 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/4a579351b4e119779b0bd6a66b0bb9ea3a2e8f940b196bc0c4dc1279aae51b6c.jpg", + "image_caption": [ + "ours" + ], + "image_footnote": [], + "bbox": [ + 705, + 150, + 754, + 325 + ], + "page_idx": 11 + }, + 
{ + "type": "text", + "text": "tra upsampling layers for the generator and two downsampling layers for the discriminator respectively.", + "bbox": [ + 212, + 433, + 784, + 463 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We provide quantitative results against ResNet-18 [18] trained on the Face-Scrub dataset in Table 1. We can observe that our method achieves significant improvements over previous methods. Especially when the generator is trained on MetFaces, IF-GMI remarkably improves the Acc@1 by $15.1\\%$ and the Acc@5 is nearly to $100\\%$ . Moreover, our method generally achieves a lower feature distance than baselines between reconstructed samples and private data. For instance, we reduce the distance by more than $10\\%$ compared to the PPA on the MetFaces dataset. Notably, the MetFaces dataset is composed of artworks and thus has a larger distributional shift with real human faces compared with the FFHQ dataset. We note that this severely reduces the reconstruction performance of previous attack methods, while our proposed method still exhibits outstanding performance, highlighting the excellent generalization ability of our approach. Visualization results of the recovered images using generators trained on FFHQ are shown in Figure 4. Compared with previous methods, our reconstructed images have higher fidelity and realism, demonstrating the superiority of exploiting GAN's intermediate features.", + "bbox": [ + 212, + 465, + 787, + 705 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.4 Comparison under different target datasets and models", + "text_level": 1, + "bbox": [ + 214, + 734, + 718, + 750 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "To validate the effectiveness of the proposed method, we conducted extensive experiments on various datasets using different target models with different architectures. We chose the PPA method as our baseline for comparison due to its comprehensive performance in both accuracy and fidelity. Additional experimental results are in the Appendix.", + "bbox": [ + 212, + 763, + 787, + 840 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Y. Qiu et al.", + "bbox": [ + 271, + 114, + 357, + 128 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/884b06e52a1cfe2701a2e8f698962928efb6878351541f0ff55419fae06492c4.jpg", + "table_caption": [ + "Table 2: Comparison results against ResNet-152 trained on CelebA." + ], + "table_footnote": [], + "table_body": "
Public Dataset | Method | ↑ Acc@1 | ↑ Acc@5 | ↓ δface | ↓ δeval | ↓ FID
FFHQ | PPA | 0.806 | 0.946 | 0.736 | 312.580 | 40.430
FFHQ | IF-GMI(ours) | 0.912 | 0.982 | 0.678 | 314.392 | 30.685
MetFaces | PPA | 0.396 | 0.643 | 1.063 | 387.810 | 74.030
MetFaces | IF-GMI(ours) | 0.784 | 0.929 | 0.835 | 340.894 | 74.504
", + "bbox": [ + 220, + 170, + 785, + 260 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/d033d4173505cd8f9e044c6f4de600ee912f78efbc12627b1952648722ddc7df.jpg", + "table_caption": [ + "Table 3: Comparison results against different target models trained on FaceScrub with the public dataset being MetFaces." + ], + "table_footnote": [], + "table_body": "
Target Model | Method | ↑ Acc@1 | ↑ Acc@5 | ↓ δface | ↓ δeval | ↓ FID
ResNet-152 | PPA | 0.731 | 0.920 | 0.966 | 139.380 | 68.540
ResNet-152 | IF-GMI(ours) | 0.904 | 0.984 | 0.882 | 138.752 | 69.937
ResNeSt-101 | PPA | 0.750 | 0.927 | 0.979 | 137.170 | 88.660
ResNeSt-101 | IF-GMI(ours) | 0.922 | 0.983 | 0.884 | 132.609 | 76.195
DenseNet-169 | PPA | 0.798 | 0.948 | 0.938 | 129.440 | 77.520
DenseNet-169 | IF-GMI(ours) | 0.933 | 0.987 | 0.851 | 125.050 | 82.123
", + "bbox": [ + 220, + 311, + 785, + 435 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "As shown in Table 2, our proposed IF-GMI maintains superiority in most metrics against the ResNet-152 trained on the CelebA. Our method achieves a remarkable increase of $10.6\\%$ in Acc@1 and significantly reduces the FID value using the StyleGAN2 trained on FFHQ. When utilizing the MetFaces StyleGAN2, our method still achieves much better results than the baseline despite a larger distributional shift, including a $38.8\\%$ increase in Acc@1 and competitive feature distance. In addition to ResNet-18, we evaluate the performance of the proposed method on more target models trained on FaceScrub, including ResNet-152, ResNeSt-101, and DenseNet-169. Benefiting from the fully utilized generative prior, our method achieves $13\\% \\sim 17\\%$ improvement in Acc@1 metrics than the baselines and also achieves better results in most of the other metrics, as illustrated in Table 3.", + "bbox": [ + 212, + 462, + 784, + 642 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The results presented above demonstrate that our method maintains outstanding attack performance in a variety of settings, exhibiting excellent generalizability and transferability. We also provide additional experimental results on more datasets and architectures in the Appendix.", + "bbox": [ + 212, + 643, + 784, + 704 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.5 Ablation Studies", + "text_level": 1, + "bbox": [ + 215, + 724, + 401, + 739 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To estimate the contributions from each component in our method, we conduct ablation studies on the ResNet-152 trained on the CelebA dataset using the StyleGAN2 trained on FFHQ. The results are presented in Table 4. More ablation studies are listed in the Appendix.", + "bbox": [ + 212, + 750, + 784, + 809 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Intermediate Features Optimization. We merely remove the intermediate features optimization from our pipeline while keeping the remaining param-", + "bbox": [ + 212, + 809, + 785, + 840 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "A Closer Look at GAN Priors", + "bbox": [ + 529, + 114, + 730, + 127 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/79b7704ad9143b850a5f85c4444f82c3a581f71a57b9a4f350e3089297b086a3.jpg", + "table_caption": [ + "Table 4: Ablation study performed on ResNet-152 trained on CelebA dataset with FFHQ as the public dataset. IF-GMI- $i$ removes the intermediate feature optimization and only searches the latent space. IF-GMI- $l$ removes the $l_{1}$ ball constraint compared to IF-GMI." + ], + "table_footnote": [], + "table_body": "
<table><tr><td>Method</td><td>↑ Acc@1</td><td>↑ Acc@5</td><td>↓ δface</td><td>↓ δeval</td><td>↓ FID</td></tr>
<tr><td>IF-GMI-i</td><td>0.803</td><td>0.928</td><td>0.732</td><td>314.275</td><td>43.576</td></tr>
<tr><td>IF-GMI-l</td><td>0.945</td><td>0.992</td><td>0.678</td><td>315.278</td><td>37.528</td></tr>
<tr><td>IF-GMI</td><td>0.947</td><td>0.993</td><td>0.677</td><td>315.032</td><td>37.461</td></tr></table>
", + "bbox": [ + 277, + 212, + 730, + 291 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "eters unchanged. As shown in the first row of Table 4, it leads to degradation up to $14\\%$ in Acc@1 and much worse FID without this technique, demonstrating the superiority of utilizing the hierarchical features of intermediate layers.", + "bbox": [ + 212, + 316, + 782, + 359 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "$l_{1}$ Ball Constraint. To avoid unreal image generation, we introduce the $l_{1}$ ball constraint into the intermediate features optimization. By observing the results shown in the second row of Table 4, the $l_{1}$ ball is beneficial in improving the performance in all metrics. Thus, we demonstrate the necessity of restricting the intermediate features within the $l_{1}$ ball constraint.", + "bbox": [ + 212, + 361, + 787, + 436 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 215, + 458, + 359, + 474 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We proposed IF-GMI, a novel model inversion attack that performs effective attack in the OOD scenario. Surpassing the limitation of treating the generator as a black-box, we studied the structure and decomposed the generator into hierarchical layers, extending the optimization space from latent code to intermediate features to generate stable and high-quality images. Moreover, to avoid generating low-fidelity images, we applied a $l_{1}$ ball constraint to the optimization process. Through our extensive experiments, we demonstrated that the proposed IF-GMI achieves the state-of-the-art attack accuracy while generating samples with high fidelity and diversity.", + "bbox": [ + 212, + 487, + 787, + 622 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Our exploration of enhanced utilization of intermediate features in the GAN prior contributes to advances in MI attack field, paving the way to more practical employment for MI attacks. We hope this paper can raise concerns about privacy leakage risk of released pre-trained models and facilitate more response to the threat of MI attacks.", + "bbox": [ + 212, + 623, + 787, + 698 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 215, + 719, + 393, + 736 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This work is supported in part by the National Natural Science Foundation of China under grant 62171248, 62301189, Guangdong Basic and Applied Basic Research Foundation under grant 2021A1515110066, the PCNL KEY project (PCL2021A07), and Shenzhen Science and Technology Program under Grant JCYJ20220818101012025, RCBS20221008093124061, GXWD20220811172936001.", + "bbox": [ + 212, + 748, + 794, + 825 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Y. Qiu et al.", + "bbox": [ + 271, + 114, + 357, + 128 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 217, + 143, + 321, + 159 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Abdal, R., Qin, Y., Wonka, P.: Image2stylegan: How to embed images into the stylegan latent space? In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 4432-4441 (2019)", + "2. 
An, S., Tao, G., Xu, Q., Liu, Y., Shen, G., Yao, Y., Xu, J., Zhang, X.: Mirror: Model inversion for deep learning network with high fidelity. In: NDSS (2022)", + "3. Bau, D., Zhu, J.Y., Strobelt, H., Zhou, B., Tenenbaum, J.B., Freeman, W.T., Torralba, A.: Gan dissection: Visualizing and understanding generative adversarial networks. arXiv preprint arXiv:1811.10597 (2018)", + "4. Chen, B., Feng, Y., Dai, T., Bai, J., Jiang, Y., Xia, S.T., Wang, X.: Adversarial examples generation for deep product quantization networks on image retrieval. IEEE Transactions on Pattern Analysis and Machine Intelligence 45(2), 1388-1404 (2022)", + "5. Chen, S., Kahla, M., Jia, R., Qi, G.J.: Knowledge-enriched distributional model inversion attacks. In: ICCV (2021)", + "6. Choi, Y., Uh, Y., Yoo, J., Ha, J.W.: Stargan v2: Diverse image synthesis for multiple domains. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8188-8197 (2020)", + "7. Conneau, A., Baevski, A., Collobert, R., Mohamed, A., Auli, M.: Unsupervised cross-lingual representation learning for speech recognition. arXiv preprint arXiv:2006.13979 (2020)", + "8. Daras, G., Dean, J., Jalal, A., Dimakis, A.G.: Intermediate layer optimization for inverse problems using deep generative models. arXiv preprint arXiv:2102.07364 (2021)", + "9. Dataset, E.: Novel datasets for fine-grained image categorization. In: First Workshop on Fine Grained Visual Categorization, CVPR. Citeseer (2011)", + "10. Fang, H., Chen, B., Wang, X., Wang, Z., Xia, S.T.: Gifd: A generative gradient inversion method with feature domain optimization. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4967-4976 (2023)", + "11. Fang, H., Kong, J., Yu, W., Chen, B., Li, J., Xia, S., Xu, K.: One perturbation is enough: On generating universal adversarial perturbations against vision-language pre-training models. arXiv preprint arXiv:2406.05491 (2024)", + "12. Fang, H., Qiu, Y., Yu, H., Yu, W., Kong, J., Chong, B., Chen, B., Wang, X., Xia, S.T.: Privacy leakage on dnns: A survey of model inversion attacks and defenses. arXiv preprint arXiv:2402.04013 (2024)", + "13. Fredrikson, M., Jha, S., Ristenpart, T.: Model inversion attacks that exploit confidence information and basic countermeasures. In: CCS. pp. 1322-1333 (2015)", + "14. Fredrikson, M., Lantz, E., Jha, S., Lin, S., Page, D., Ristenpart, T.: Privacy in pharmacogenetics: An end-to-end case study of personalized warfarin dosing. In: USENIX Security. pp. 17-32 (2014)", + "15. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial nets. Advances in neural information processing systems 27 (2014)", + "16. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial networks. Communications of the ACM 63(11), 139-144 (2020)", + "17. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)" + ], + "bbox": [ + 225, + 172, + 785, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "A Closer Look at GAN Priors", + "bbox": [ + 529, + 114, + 730, + 127 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "18. 
He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)", + "19. Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems 30 (2017)", + "20. Hu, H., Salcic, Z., Sun, L., Dobbie, G., Yu, P.S., Zhang, X.: Membership inference attacks on machine learning: A survey. ACM Computing Surveys (CSUR) 54(11s), 1-37 (2022)", + "21. Huang, G., Liu, Z., Van Der Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 4700-4708 (2017)", + "22. Kahla, M., Chen, S., Just, H.A., Jia, R.: Label-only model inversion attacks via boundary repulsion. In: CVPR (2022)", + "23. Karras, T., Aittala, M., Hellsten, J., Laine, S., Lehtinen, J., Aila, T.: Training generative adversarial networks with limited data. Advances in neural information processing systems 33, 12104-12114 (2020)", + "24. Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 4401-4410 (2019)", + "25. Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., Aila, T.: Analyzing and improving the image quality of stylegan. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8110-8119 (2020)", + "26. Kynkäänniemi, T., Karras, T., Laine, S., Lehtinen, J., Aila, T.: Improved precision and recall metric for assessing generative models. Advances in Neural Information Processing Systems 32 (2019)", + "27. Li, C., Qiu, M.: Reinforcement learning for cyber-physical systems: with cybersecurity case studies. Chapman and Hall/CRC (2019)", + "28. Liu, Z., Luo, P., Wang, X., Tang, X.: Deep learning face attributes in the wild. In: Proceedings of the IEEE international conference on computer vision. pp. 3730-3738 (2015)", + "29. Naeem, M.F., Oh, S.J., Uh, Y., Choi, Y., Yoo, J.: Reliable fidelity and diversity metrics for generative models. In: International Conference on Machine Learning. pp. 7176-7185. PMLR (2020)", + "30. Ng, H.W., Winkler, S.: A data-driven approach to cleaning large face datasets. In: 2014 IEEE international conference on image processing (ICIP). pp. 343-347. IEEE (2014)", + "31. Nguyen, N.B., Chandrasegaran, K., Abdollahzadeh, M., Cheung, N.M.: Rethinking model inversion attacks against deep neural networks. In: CVPR. pp. 16384-16393 (2023)", + "32. Park, J.Y., Smedemark-Margulies, N., Daniels, M., Yu, R., van de Meent, J.W., Hand, P.: Generator surgery for compressed sensing. In: NeurIPS 2020 Workshop on Deep Learning and Inverse Problems (2020), https://openreview.net/forum?id=s2EucjZ6d2s", + "33. Qiu, H., Dong, T., Zhang, T., Lu, J., Memmi, G., Qiu, M.: Adversarial attacks against network intrusion detection in IoT systems. IEEE Internet of Things Journal 8(13), 10327-10335 (2020)", + "34. Schroff, F., Kalenichenko, D., Philbin, J.: Facenet: A unified embedding for face recognition and clustering. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 
815-823 (2015)" + ], + "bbox": [ + 215, + 146, + 784, + 839 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Y. Qiu et al.", + "bbox": [ + 271, + 114, + 357, + 128 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "35. Shen, Y., Gu, J., Tang, X., Zhou, B.: Interpreting the latent space of gans for semantic face editing. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 9243-9252 (2020)", + "36. Shokri, R., Stronati, M., Song, C., Shmatikov, V.: Membership inference attacks against machine learning models. In: 2017 IEEE symposium on security and privacy (SP). pp. 3-18. IEEE (2017)", + "37. Song, C., Ristenpart, T., Shmatikov, V.: Machine learning models that remember too much. In: CCS. pp. 587-601 (2017)", + "38. Struppek, L., Hintersdorf, D., Correia, A.D.A., Adler, A., Kersting, K.: Plug & play attacks: Towards robust and flexible model inversion attacks. In: ICML (2022)", + "39. Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., Wojna, Z.: Rethinking the inception architecture for computer vision. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2818-2826 (2016)", + "40. Tewari, A., Elgharib, M., Bernard, F., Seidel, H.P., Pérez, P., Zollhöfer, M., Theobalt, C.: Pie: Portrait image embedding for semantic control. ACM Transactions on Graphics (TOG) 39(6), 1-14 (2020)", + "41. Wang, K.C., Fu, Y., Li, K., Khisti, A., Zemel, R., Makhzani, A.: Variational model inversion attacks. In: NeurIPS (2021)", + "42. Wu, C., Yan, M.: Session-aware information embedding for e-commerce product recommendation. In: Proceedings of the 2017 ACM on conference on information and knowledge management. pp. 2379-2382 (2017)", + "43. Yang, Z., Zhang, J., Chang, E.C., Liang, Z.: Neural network inversion in adversarial setting via background knowledge alignment. In: CCS (2019)", + "44. Yin, H., Molchanov, P., Alvarez, J.M., Li, Z., Mallya, A., Hoiem, D., Jha, N.K., Kautz, J.: Dreaming to distill: Data-free knowledge transfer via deepinversion. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8715-8724 (2020)", + "45. Yu, W., Chen, B., Zhang, Q., Xia, S.T.: Editable-deepsc: Cross-modal editable semantic communication systems. arXiv preprint arXiv:2310.10347 (2023)", + "46. Yu, W., Fang, H., Chen, B., Sui, X., Chen, C., Wu, H., Xia, S.T., Xu, K.: Gi-nas: Boosting gradient inversion attacks through adaptive neural architecture search. arXiv preprint arXiv:2405.20725 (2024)", + "47. Yuan, X., Chen, K., Zhang, J., Zhang, W., Yu, N., Zhang, Y.: Pseudo label-guided model inversion attack via conditional generative adversarial network. In: AAAI (2023)", + "48. Yuan, Z., Wu, F., Long, Y., Xiao, C., Li, B.: Secretgen: Privacy recovery on pretrained models via distribution discrimination. In: ECCV (2022)", + "49. Zeng, Y., Pan, M., Just, H.A., Lyu, L., Qiu, M., Jia, R.: Narcissus: A practical clean-label backdoor attack with limited information. In: Proceedings of the 2023 ACM SIGSAC Conference on Computer and Communications Security. pp. 771-785 (2023)", + "50. Zhang, H., Wu, C., Zhang, Z., Zhu, Y., Lin, H., Zhang, Z., Sun, Y., He, T., Mueller, J., Manmatha, R., et al.: Resnest: Split-attention networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 2736-2746 (2022)", + "51. 
Zhang, Y., Jia, R., Pei, H., Wang, W., Li, B., Song, D.: The secret revealer: Generative model-inversion attacks against deep neural networks. In: CVPR (2020)", + "52. Zhong, X., Fang, H., Chen, B., Gu, X., Dai, T., Qiu, M., Xia, S.T.: Hierarchical features matter: A deep exploration of gan priors for improved dataset distillation. arXiv preprint arXiv:2406.05704 (2024)" + ], + "bbox": [ + 212, + 146, + 787, + 830 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "A Closer Look at GAN Priors", + "bbox": [ + 529, + 114, + 730, + 126 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 16 + } +] \ No newline at end of file diff --git a/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/ae02311f-0ba1-4342-a539-c7ea4e71402f_model.json b/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/ae02311f-0ba1-4342-a539-c7ea4e71402f_model.json new file mode 100644 index 0000000000000000000000000000000000000000..4bdbfe0b42cf9c79c5eed5f4a7b623a6546b1182 --- /dev/null +++ b/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/ae02311f-0ba1-4342-a539-c7ea4e71402f_model.json @@ -0,0 +1,2830 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.257, + 0.141, + 0.75, + 0.207 + ], + "angle": 0, + "content": "A Closer Look at GAN Priors: Exploiting Intermediate Features for Enhanced Model Inversion Attacks" + }, + { + "type": "text", + "bbox": [ + 0.224, + 0.234, + 0.782, + 0.266 + ], + "angle": 0, + "content": "Yixiang Qiu\\(^{1,2\\dagger}\\), Hao Fang\\(^{2\\dagger}\\), Hongyao Yu\\(^{1\\dagger}\\), Bin Chen\\(^{1,3,4\\#}\\), MeiKang Qiu\\(^{5}\\), and Shu-Tao Xia\\(^{2,4}\\)" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.277, + 0.782, + 0.377 + ], + "angle": 0, + "content": "1 Harbin Institute of Technology, Shenzhen \n2 Tsinghua Shenzhen International Graduate School, Tsinghua University \n3 Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies \n4 Pengcheng Laboratory 5 Augusta University \nqiuyixiang@stu.hit.edu.cn, fang-h23@mails.tsinghua.edu.cn \nyuhongyao@stu.hit.edu.cn, chenbin2021@hit.edu.cn \nqiumeikang@yahoo.com, xiast@sz.tsinghua.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.404, + 0.744, + 0.683 + ], + "angle": 0, + "content": "Abstract. Model Inversion (MI) attacks aim to reconstruct privacy-sensitive training data from released models by utilizing output information, raising extensive concerns about the security of Deep Neural Networks (DNNs). Recent advances in generative adversarial networks (GANs) have contributed significantly to the improved performance of MI attacks due to their powerful ability to generate realistic images with high fidelity and appropriate semantics. However, previous MI attacks have solely disclosed private information in the latent space of GAN priors, limiting their semantic extraction and transferability across multiple target models and datasets. To address this challenge, we propose a novel method, Intermediate Features enhanced Generative Model Inversion (IF-GMI), which disassembles the GAN structure and exploits features between intermediate blocks. This allows us to extend the optimization space from latent code to intermediate features with enhanced expressive capabilities. 
To prevent GAN priors from generating unrealistic images, we apply an \\( l_{1} \\) ball constraint to the optimization process. Experiments on multiple benchmarks demonstrate that our method significantly outperforms previous approaches and achieves state-of-the-art results under various settings, especially in the out-of-distribution (OOD) scenario. Our code is available at: https://github.com/final-solution/IF-GMI" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.695, + 0.658, + 0.709 + ], + "angle": 0, + "content": "Keywords: Privacy \\(\\cdot\\) Model Inversion \\(\\cdot\\) Generative Priors" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.731, + 0.378, + 0.748 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.759, + 0.788, + 0.791 + ], + "angle": 0, + "content": "In recent years, Deep Neural Networks (DNNs) have experienced unprecedented development and achieved tremendous success in a wide range of applications," + }, + { + "type": "page_footnote", + "bbox": [ + 0.233, + 0.797, + 0.371, + 0.812 + ], + "angle": 0, + "content": "\\(\\dagger\\) Equal contribution." + }, + { + "type": "page_footnote", + "bbox": [ + 0.234, + 0.812, + 0.395, + 0.826 + ], + "angle": 0, + "content": "\\(\\#\\) Corresponding author." + }, + { + "type": "list", + "bbox": [ + 0.233, + 0.797, + 0.395, + 0.826 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.233, + 0.826, + 0.771, + 0.84 + ], + "angle": 0, + "content": "This work was done while Yixiang Qiu was pre-admitted to Tsinghua University." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.358, + 0.129 + ], + "angle": 0, + "content": "Y. Qiu et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.298 + ], + "angle": 0, + "content": "including face recognition [17], personalized recommendations [42], and audio recognition [7]. While DNNs bring us many practical benefits, concerns [4,10,11, 46] about privacy and security have also been raised and have drawn great attention. Recent studies have demonstrated that there is a certain risk of privacy leakage for DNNs, as an adversary could reveal private information from these pre-trained models. Various types of novel privacy attacks [27,33,49] have been proposed, such as the membership inference attack [20,36] and the gradient inversion attack [10, 46]. Among the new attack methods, the Model Inversion (MI) attack [12] poses a greater threat due to its powerful capability in recovering the privacy-sensitive datasets that are collected and utilized for model training." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.303, + 0.788, + 0.53 + ], + "angle": 0, + "content": "[14] proposes the first MI attack to reconstruct sensitive features of genomic data and demonstrates that linear regression models are vulnerable to such privacy attacks. Subsequent studies [13, 37, 43] have extended MI attacks to more Machine Learning (ML) models, but are still limited to models with simple structure and low-dimensional data such as grayscale images. Recent advances in the MI attack field have overcome the challenges in image data recovery by applying Generative Adversarial Networks (GANs) [16], resulting in the extension to DNNs with more complex structure and high-dimensional data such as RGB images. [51] first introduces GANs into MI attack scenarios, serving as image priors. 
To better reveal privacy-sensitive information, [51] and subsequent GAN-based methods [5, 41, 47, 48] train GANs with publicly available datasets that have structural similarity with the target private datasets. Furthermore, [38] proposes to leverage public pre-trained GAN models (e.g., StyleGAN [24]) as GAN priors, which have a stronger ability to generate high-resolution images and do not require a time-consuming training process." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.534, + 0.788, + 0.761 + ], + "angle": 0, + "content": "Although the aforementioned methods have achieved great progress in recovering high-quality and privacy-sensitive images, the effectiveness of GAN-based MI attacks is limited under certain scenarios. One typical challenge is the out-of-distribution (OOD) scenario, where there is a significant distributional shift between the target private dataset and the public dataset used in the training process of GAN priors. Most previous methods [5, 41, 48, 51] only work well under scenarios with slight distributional shifts. For instance, they split the same dataset into two parts, one used as the public dataset and the other used as the private dataset. In recent years, some studies [3, 8, 35, 40, 45] have demonstrated that there is rich semantic information encoded in the latent code and intermediate features of GANs. Inspired by these works, we empirically observe that the rich semantic information encoded in the intermediate features helps to sufficiently recover high-quality private data under more rigorous settings, as shown in Figure 1. Therefore, it is imperative to explore methods for incorporating the GAN's intrinsic layered knowledge into MI attacks, mitigating the OOD issue." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.788, + 0.84 + ], + "angle": 0, + "content": "To this end, we propose a novel MI attack method, Intermediate Features enhanced Generative Model Inversion (IF-GMI), which effectively disassembles the GAN structure and leverages features between intermediate blocks. Specifically, we consider the generator of the GAN as a concatenation of multiple blocks and the vectors produced between the blocks as intermediate features. 
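To make this block-level view concrete, a minimal PyTorch-style sketch could wrap a pre-trained synthesis network as a chain of blocks whose in-between activations are exposed. This is our illustration, not code from the paper; the `blocks` list and method names are hypothetical:

```python
import torch.nn as nn

class BlockwiseGenerator(nn.Module):
    """Hypothetical wrapper viewing a generator as G_{L+1} o ... o G_1."""

    def __init__(self, blocks):
        super().__init__()
        self.blocks = nn.ModuleList(blocks)

    def features_after(self, w, k):
        # Intermediate feature produced by the first k blocks.
        f = w
        for block in self.blocks[:k]:
            f = block(f)
        return f

    def render_from(self, f, k):
        # Finish generation from an intermediate feature after block k.
        for block in self.blocks[k:]:
            f = block(f)
        return f
```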
We first" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.531, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "A Closer Look at GAN Priors" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "image", + "bbox": [ + 0.227, + 0.147, + 0.282, + 0.188 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.238, + 0.19, + 0.27, + 0.197 + ], + "angle": 0, + "content": "0.0000" + }, + { + "type": "image", + "bbox": [ + 0.227, + 0.199, + 0.282, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.235, + 0.24, + 0.276, + 0.25 + ], + "angle": 0, + "content": "0.0000" + }, + { + "type": "image_caption", + "bbox": [ + 0.229, + 0.255, + 0.281, + 0.265 + ], + "angle": 0, + "content": "Generation" + }, + { + "type": "image", + "bbox": [ + 0.288, + 0.147, + 0.339, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.298, + 0.19, + 0.328, + 0.197 + ], + "angle": 0, + "content": "0.3600" + }, + { + "type": "image", + "bbox": [ + 0.289, + 0.199, + 0.338, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.301, + 0.24, + 0.33, + 0.248 + ], + "angle": 0, + "content": "0.2200" + }, + { + "type": "image_caption", + "bbox": [ + 0.306, + 0.252, + 0.327, + 0.261 + ], + "angle": 0, + "content": "PPA" + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.147, + 0.397, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.359, + 0.19, + 0.388, + 0.197 + ], + "angle": 0, + "content": "0.9975" + }, + { + "type": "image", + "bbox": [ + 0.346, + 0.199, + 0.397, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.359, + 0.24, + 0.389, + 0.248 + ], + "angle": 0, + "content": "0.9988" + }, + { + "type": "image_caption", + "bbox": [ + 0.355, + 0.252, + 0.392, + 0.26 + ], + "angle": 0, + "content": "IF-GMI" + }, + { + "type": "image", + "bbox": [ + 0.404, + 0.147, + 0.455, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.411, + 0.19, + 0.441, + 0.197 + ], + "angle": 0, + "content": "" + }, + { + "type": "image", + "bbox": [ + 0.402, + 0.199, + 0.455, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.411, + 0.251, + 0.45, + 0.26 + ], + "angle": 0, + "content": "Original" + }, + { + "type": "image_caption", + "bbox": [ + 0.243, + 0.268, + 0.444, + 0.28 + ], + "angle": 0, + "content": "(a) Visual examples of IF-GMI to PPA" + }, + { + "type": "image", + "bbox": [ + 0.473, + 0.149, + 0.783, + 0.268 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.541, + 0.269, + 0.72, + 0.281 + ], + "angle": 0, + "content": "(b) Comparison of IF-GMI to PPA" + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.297, + 0.785, + 0.394 + ], + "angle": 0, + "content": "Fig. 1: (a) Comparison of our proposed IF-GMI with baselines. The blue number below the images is the predicted confidence by the evaluation model. The first column shows the randomly generated images and the second column presents the reconstructed results by PPA [38], a typical GAN-based method focusing on directly optimizing the latent code of GAN model. The last two columns exhibit the results of our proposed IF-GMI and the ground truth images in the private dataset, respectively. 
(b) Top-1 attack accuracy of PPA and IF-GMI (ours) on four OOD scenarios." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.422, + 0.785, + 0.602 + ], + "angle": 0, + "content": "optimize the latent code input to the generator and then successively optimize the intermediate features from the start layer to the end layer. To avoid unreal image generation, we utilize an \\( l_{1} \\) ball constraint to restrict the deviation when optimizing the intermediate features. In the end, we collect the output images after each intermediate layer optimization process and select the final results with a simple strategy. We conduct comprehensive experiments to evaluate our method in multiple settings, including OOD scenarios, various target models, and different GAN priors. The encouraging experimental results demonstrate that the proposed method outperforms baselines on multiple metrics and achieves high attack accuracy in OOD settings. Finally, we perform extensive experiments and ablation studies to validate the effectiveness of the proposed method. Our main contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.611, + 0.784, + 0.654 + ], + "angle": 0, + "content": "- We propose a novel GAN-based MI attack method, which disassembles the pre-trained generator and successively optimizes the latent code and intermediate features under the \\( l_{1} \\) ball constraint." + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.656, + 0.784, + 0.685 + ], + "angle": 0, + "content": "- We demonstrate that our proposed method achieves state-of-the-art performance in a range of scenarios, especially under the challenging OOD settings." + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.686, + 0.784, + 0.714 + ], + "angle": 0, + "content": "- We conduct extensive experiments to validate the effectiveness and outstanding transferability of our method." + }, + { + "type": "list", + "bbox": [ + 0.225, + 0.611, + 0.784, + 0.714 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.744, + 0.387, + 0.759 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.772, + 0.467, + 0.788 + ], + "angle": 0, + "content": "2.1 GAN as prior knowledge" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.784, + 0.84 + ], + "angle": 0, + "content": "GANs [15] are a class of deep neural networks that consist of two functional components, a generator and a discriminator, trained concurrently through adversarial processes to generate realistic data. The objective of a GAN is to learn the" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.358, + 0.129 + ], + "angle": 0, + "content": "Y. Qiu et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.373 + ], + "angle": 0, + "content": "distribution of the training dataset and generate more samples from the learned probability distribution [16]. Well-trained GANs are able to generate high-fidelity and diverse images, excellent representatives of which are the StyleGANs [24,25]. The generator of the StyleGAN consists of a mapping network and a synthesis network. The former maps latent vectors into the intermediate latent space (i.e., \\(\\mathcal{W}\\) space), and the latter generates images through style vectors. 
The feature in the \\(\\mathcal{W}\\) space is well-disentangled, which means that images sharing similar features correspond to analogous style vectors. Therefore, PPA [38] performs its attack by searching the style vectors in \\(\\mathcal{W}\\) space. The style vectors in the front layers tend to control high-level aspects of the generated images like pose, face shape, and general hair style, while those in the back ones have more influence on details [24], such as smaller-scale facial features and whether the eyes are open or closed. Moreover, style vectors in \\(\\mathcal{W}\\) space do not need to follow the same distribution as the training data, which means that more diverse images can be generated by controlling the vectors [24]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.374, + 0.788, + 0.45 + ], + "angle": 0, + "content": "Recent works [10, 32, 52] have shown the richness of intermediate features in GANs; our investigation therefore explores the potential of leveraging the intermediate latent spaces of different layers to enhance MI attacks. Our findings reveal that this approach significantly improves attack accuracy and obtains high-quality inversion results, particularly under the harder OOD scenario." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.469, + 0.466, + 0.485 + ], + "angle": 0, + "content": "2.2 Model Inversion Attacks" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.493, + 0.788, + 0.658 + ], + "angle": 0, + "content": "Model inversion (MI) attacks aim at reconstructing the private training data from a trained model. Typically, MI attacks can be divided into the white-box scenario [51] and the black-box scenario [22]. We only focus on the white-box scenario in this paper, which means that the attacker has full access to the trained model. This kind of attack was initially demonstrated through an attempt to extract genomic markers from a linear regression model, as highlighted in the earliest research by [14]. Building on this foundation, subsequent studies [13, 37, 43] have broadened the scope of MI attacks, applying them to more machine learning models like shallow networks, and simple forms of data, such as low-resolution grayscale images. However, as the scale of both the data and the models increases, the efficacy of MI attack methods diminishes dramatically." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.66, + 0.788, + 0.841 + ], + "angle": 0, + "content": "In response to this challenge, a novel approach known as GMI, introduced by [51], employs a GAN-based methodology to enhance the ability of MI attacks with deeper and wider DNNs. This innovative strategy leverages a GAN model trained on publicly available data to encapsulate the distributional characteristics of image data, thereby facilitating the generation of high-quality image reconstructions. The process involves the attackers first generating a set of preliminary images by inputting a batch of randomly sampled latent vectors into the GAN. These generated images are then fed into the target image classifier to obtain initial predictions. To refine the attack, the attackers iteratively optimize the input latent vectors. This optimization process aims to minimize the discrepancy between the classifier's predictions and the intended target class, as measured by the cross-entropy loss, while also reducing the discriminator loss."
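The GMI-style loop described above can be sketched as follows. This is an illustrative reconstruction rather than GMI's released code; `generator`, `discriminator`, `target_model`, and the weighting `lam` are placeholder names and values:

```python
import torch
import torch.nn.functional as F

def gmi_style_attack(generator, discriminator, target_model, target_class,
                     z_dim=100, num_candidates=64, steps=1500, lr=0.02, lam=100.0):
    # Randomly sampled latent vectors are the only optimization variables.
    z = torch.randn(num_candidates, z_dim, requires_grad=True)
    labels = torch.full((num_candidates,), target_class, dtype=torch.long)
    opt = torch.optim.Adam([z], lr=lr)
    for _ in range(steps):
        opt.zero_grad()
        images = generator(z)
        # Identity term: push classifier predictions toward the target class.
        id_loss = F.cross_entropy(target_model(images), labels)
        # Prior term: keep images in the region the discriminator deems real.
        prior_loss = -discriminator(images).mean()
        (lam * id_loss + prior_loss).backward()
        opt.step()
    return generator(z.detach())
```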
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.531, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "A Closer Look at GAN Priors" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "image", + "bbox": [ + 0.224, + 0.151, + 0.78, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.313, + 0.788, + 0.41 + ], + "angle": 0, + "content": "Fig. 2: Overview of our proposed IF-GMI. Firstly, the latent vectors are sampled from a standard Gaussian distribution and mapped into disentangled latent codes with semantic meanings by the Mapping Network. Then we perform random augmentation on these latent codes to select optimal ones denoted as \\(\\mathbf{w}^*\\) for optimization. The Synthesis Network is disassembled into multiple blocks to search the intermediate features, which are successively updated with the identity loss calculated from the target model. Finally, the reconstructed images are generated from the last layer as results." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.439, + 0.785, + 0.483 + ], + "angle": 0, + "content": "With the help of the GAN, GMI seeks to achieve more precise and convincing reconstructions of complex data, thereby representing a significant advancement in the field of MI attacks." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.485, + 0.789, + 0.742 + ], + "angle": 0, + "content": "Many studies in recent years have improved the attack performance in the white-box scenario based on GMI. SecretGen [48] explores the scenario in which the attackers know some auxiliary information about the private data. KEDMI [5] improves the discriminator by incorporating target labels and recovers the distribution of the input latent vectors for a target class. VMI [41] reformulates the MI attack from the perspective of variational inference and introduces KL-divergence as a regularization term to better approximate the target distribution with a variational distribution. PPA [38] employs a pre-trained StyleGAN2 to reduce the time cost of attacks and extends the attacks to high-resolution images thanks to the excellent generative ability of StyleGAN2. Moreover, they propose a set of strategies to heighten attack accuracy and robustness, including initial selection, post-selection, and data augmentation. LOMMA [31] introduces model augmentation into MI attacks to reduce overfitting of the target model. They train some surrogate models from the target model via model distillation, co-guiding the optimization process with an improved loss function. PLGMI [47] proposes a top-n selection strategy, using target models to generate pseudo labels for publicly available images, thereby directing the training process for the conditional GAN." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.764, + 0.381, + 0.781 + ], + "angle": 0, + "content": "3 Methodology" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.787, + 0.842 + ], + "angle": 0, + "content": "In this section, we begin by explaining the fundamental paradigm of MI attacks and provide a formulation for the MI problem. Subsequently, we present our main components and elaborate on the detailed pipeline of the proposed IF-GMI," + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.358, + 0.129 + ], + "angle": 0, + "content": "Y. Qiu et al."
+ }, + { + "type": "text", + "bbox": [ + 0.216, + 0.147, + 0.784, + 0.177 + ], + "angle": 0, + "content": "which contributes to the improved performance under the OOD scenario. See Figure 2 for an overview of our method." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.195, + 0.373, + 0.21 + ], + "angle": 0, + "content": "3.1 Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.216, + 0.785, + 0.321 + ], + "angle": 0, + "content": "In this paper, we focus on MI attacks under white-box settings, which means all the parameters and components of target models are available to the attacker. For image classification tasks, the malicious adversary aims to reconstruct privacy-sensitive images by leveraging the output prediction confidence of the target classifier and other auxiliary priors. Early works [44] directly optimize pixels in randomly sampled dummy images \\(\\mathbf{x}\\) to approximate target images \\(\\mathbf{x}^*\\) given the target model \\(T_{\\theta}\\) and target label \\(c\\), which can be formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.417, + 0.334, + 0.785, + 0.356 + ], + "angle": 0, + "content": "\\[\n\\hat{\\mathbf{x}} = \\underset{\\mathbf{x}}{\\arg\\min}\\, \\mathcal{L}\\left(T_{\\theta}(\\mathbf{x}), c\\right), \\tag{1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.359, + 0.784, + 0.42 + ], + "angle": 0, + "content": "where \\(\\hat{\\mathbf{x}}\\) is the reconstructed image, \\(\\mathcal{L}(\\cdot ,\\cdot)\\) denotes the classification loss designed for image optimization and \\(T_{\\theta}(\\mathbf{x})\\) represents the output confidence. Due to the full access to the target model in white-box settings, the attacker can calculate the loss and directly perform backpropagation to update the dummy images." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.42, + 0.785, + 0.555 + ], + "angle": 0, + "content": "However, the methods above are no longer functional when \\(\\mathbf{x}\\) turns into high-dimensional data, which has an excessive search space. To tackle such issues, recent studies [5, 38, 47, 51] introduce GANs as image priors due to their superior capability to generate high-fidelity RGB images. They propose to train a specially designed GAN with publicly available datasets that have structural similarities with the private dataset or utilize a public pre-trained GAN before the attack. Furthermore, the optimization objective is replaced with the latent vectors \\(\\mathbf{z}\\) of the generator, which has fewer parameters to optimize. With the aforementioned techniques, the MI problem is transformed into the following formulation:" + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.562, + 0.785, + 0.585 + ], + "angle": 0, + "content": "\\[\n\\hat{\\mathbf{z}} = \\underset{\\mathbf{z}}{\\arg\\min}\\, \\mathcal{L}_{id}\\left(T_{\\theta}(G(\\mathbf{z})), c\\right) + \\lambda \\mathcal{L}_{aux}(\\mathbf{z}), \\tag{2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.59, + 0.784, + 0.666 + ], + "angle": 0, + "content": "where \\( G \\) represents the trained generator, \\( \\mathcal{L}_{id}(\\cdot, \\cdot) \\) denotes the identity loss calculated from the target model \\( T_{\\theta} \\) and \\( \\mathcal{L}_{aux}(\\cdot) \\) is an optional auxiliary loss (e.g., the discriminator loss) with a hyperparameter \\( \\lambda \\). 
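For contrast with the GAN-based objective of Eq. (2), the direct pixel-space search of Eq. (1) admits a very short sketch; the shapes and hyperparameters below are assumptions for illustration, not values from the cited works:

```python
import torch
import torch.nn.functional as F

def pixel_space_inversion(target_model, target_class,
                          shape=(1, 3, 224, 224), steps=2000, lr=0.1):
    # The dummy image itself is the optimization variable (Eq. (1)).
    x = torch.randn(shape, requires_grad=True)
    label = torch.tensor([target_class])
    opt = torch.optim.Adam([x], lr=lr)
    for _ in range(steps):
        opt.zero_grad()
        F.cross_entropy(target_model(x), label).backward()
        opt.step()
    return x.detach()
```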
By minimizing Eq. (2), the adversary updates the latent vectors \\( \\mathbf{z} \\) into the optimal results \\( \\hat{\\mathbf{z}} \\) and generates the final images through \\( \\hat{\\mathbf{x}} = G(\\hat{\\mathbf{z}}) \\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.667, + 0.785, + 0.772 + ], + "angle": 0, + "content": "Intuitively, directly optimizing the input latent code of GAN priors serves as a natural method to acquire ideal reconstructed images, leading to its widespread application in all the previous works. However, recent studies [3,8,35,40] have indicated that there is fairly rich semantic information in the intermediate features of GANs beyond the input latent code. This inspires us to surpass the limitation of merely searching the latent space and propose a novel method focusing on the intermediate feature domains, which are closer to the output." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.789, + 0.75, + 0.804 + ], + "angle": 0, + "content": "3.2 Exploiting Intermediate Features for Enhanced MI Attacks" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.784, + 0.84 + ], + "angle": 0, + "content": "In the following part, we delve into the internal structure of the GAN prior, attempting to explore the hierarchical layers for enhanced utilization of the rich" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.53, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "A Closer Look at GAN Priors" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.178 + ], + "angle": 0, + "content": "semantics learned by the generator. Following the pipeline shown in Figure 2, we will elucidate each component in detail." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.192, + 0.784, + 0.266 + ], + "angle": 0, + "content": "The GAN prior. Most previous GAN-based attacks [5,31,47,51] require training a specialized GAN with an auxiliary dataset tailored to the specific target classifier. However, the prior knowledge of GANs trained under the above setting will be excessively aligned with the target model and the auxiliary dataset, leading to a significant reduction in transferability and generalization." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.268, + 0.789, + 0.48 + ], + "angle": 0, + "content": "Therefore, our method relies on the pre-trained StyleGAN2 [23] instead of training a GAN from scratch. The generator of StyleGAN2 can be simply divided into two components, consisting of a mapping network \\(G_{map}:\\mathcal{Z}\\to \\mathcal{W}\\) which maps the initial latent vectors \\(\\mathbf{z}\\in \\mathcal{Z}\\) into the extended \\(\\mathcal{W}\\) space [1], and a synthesis network \\(G_{syn}:\\mathcal{W}\\rightarrow \\mathcal{X}\\) which generates images \\(\\mathbf{x}\\) with mapped vectors \\(\\mathbf{w}\\in \\mathcal{W}\\). Due to the reduced feature entanglement in \\(\\mathcal{W}\\) space that facilitates better style generation, we set \\(\\mathbf{w}\\) as the initial optimization objective rather than the commonly used latent code \\(\\mathbf{z}\\) in previous works. Specifically, we first randomly sample a batch of latent vectors \\(\\mathbf{z}\\) from a Gaussian distribution and then map them with \\(G_{map}\\) to acquire \\(\\mathbf{w}\\), which will be iteratively updated in the first step of intermediate features optimization. 
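As a concrete reference for this sampling-and-mapping step, a minimal sketch might look as follows; `mapping_network` and `z_dim` stand in for a StyleGAN2-style prior and are not names from the paper:

```python
import torch

def sample_candidate_ws(mapping_network, num_candidates=200, z_dim=512):
    # Draw latent vectors from a standard Gaussian and map them into W
    # space; the mapped vectors become the initial optimization variables.
    z = torch.randn(num_candidates, z_dim)
    with torch.no_grad():
        w = mapping_network(z)
    return w.clone().requires_grad_(True)
```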
Moreover, the StyleGAN2 is pre-trained without the utilization of the target model \\(T_{\\theta}\\) or other auxiliary prior corresponding to the target dataset, ensuring the flexibility and transferability of our method when attacking different target models and datasets." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.494, + 0.789, + 0.644 + ], + "angle": 0, + "content": "Initial Selection. Owing to the randomness in sampling latent vectors \\(\\mathbf{z}\\), it is possible that part of them cannot facilitate the generation of appropriate images, leading to a decrease in attack accuracy. To reduce the risk of generating misleading and low-quality images, previous studies [2, 38, 48] have explored the technique of initial selection and validated its effectiveness in obtaining robust latent vectors. Specifically, we first generate images with the randomly sampled \\(\\mathbf{z}\\), apply a series of transformations \\(Aug(\\cdot)\\) to the images, and feed them into the target classifier \\(T_{\\theta}\\) to obtain the corresponding prediction confidence. By selecting the latent vectors with higher scores, we can significantly improve the quality of the final images to better approximate the target distribution." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.646, + 0.789, + 0.721 + ], + "angle": 0, + "content": "Inspired by these prior studies [2,38,48], we also include the initial selection technique in our method and apply standard image transformations, such as random cropping, resizing, and flipping. Different from previous methods, we perform initial selection on the mapped vectors \\(\\mathbf{w}\\) instead of the latent vectors \\(\\mathbf{z}\\). The robust vectors \\(\\mathbf{w}\\) are obtained with the following equation:" + }, + { + "type": "equation", + "bbox": [ + 0.344, + 0.733, + 0.785, + 0.755 + ], + "angle": 0, + "content": "\\[\n\\mathbf{w}_{init} = \\underset{\\mathbf{w}}{\\arg\\max}\\, \\operatorname{Conf}\\left(T_{\\theta}\\left(Aug\\left(G_{syn}(\\mathbf{w})\\right)\\right), c\\right), \\tag{3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.803, + 0.796 + ], + "angle": 0, + "content": "where \\(\\operatorname{Conf}(\\cdot, \\cdot)\\) measures the confidence score for augmented images \\(Aug(G_{syn}(\\mathbf{w}))\\) given the specific label \\(c\\)." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.81, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Intermediate Features Optimization. According to the research of [24], the front blocks in the generator control the overall characteristics while the back" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.358, + 0.129 + ], + "angle": 0, + "content": "Y. Qiu et al."
+ }, + { + "type": "code_caption", + "bbox": [ + 0.218, + 0.147, + 0.744, + 0.163 + ], + "angle": 0, + "content": "Algorithm 1 Pseudocode of the core algorithm in our proposed IF-GMI" + }, + { + "type": "code", + "bbox": [ + 0.217, + 0.165, + 0.787, + 0.216 + ], + "angle": 0, + "content": "Input: \\( G_{syn} \\): a pre-trained generator; \\( L \\): the number of intermediate features; \\( T_{\\theta} \\): the target classifier; \\( \\mathcal{L}_{id} \\): the identity loss; \\( r[1 \\dots L] \\): the radius of the \\( l_{1} \\) ball for each hierarchical feature; \\( N \\): the number of iterations;" + }, + { + "type": "code_caption", + "bbox": [ + 0.217, + 0.218, + 0.477, + 0.233 + ], + "angle": 0, + "content": "Output: Reconstructed images \\(\\mathbf{x}^*\\)" + }, + { + "type": "algorithm", + "bbox": [ + 0.226, + 0.236, + 0.644, + 0.519 + ], + "angle": 0, + "content": "1: Acquire latent vectors \\(\\mathbf{w}_{init}\\) via initial selection process \n2: \\(\\mathbf{w}_{(0)} \\gets \\underset{\\mathbf{w}}{\\arg \\min} \\mathcal{L}_{id}(T_{\\theta}(G_{syn}(\\mathbf{w})))\\), initialized with \\(\\mathbf{w}_{init}\\) \n3: Decompose the \\(G_{syn}\\) into \\(G_{L+1} \\circ G_{L} \\circ \\dots \\circ G_{2} \\circ G_{1}\\) \n4: Obtain the first intermediate feature \\(\\mathbf{f}_{(1)}^{0} = G_{1}(\\mathbf{w}_{(0)})\\) \n5: Set \\(\\mathbf{w}_{(1)}^{0} = \\mathbf{w}_{(0)}\\) \n6: for \\(i \\gets 1\\) to \\(L\\) do \n7: Set \\(G_{remain} = G_{L+1} \\circ G_{L} \\circ \\dots \\circ G_{i+1}\\) \n8: for \\(j \\gets 1\\) to \\(N\\) do \n9: loss = \\(\\mathcal{L}_{id}(T_{\\theta}(G_{remain}(\\mathbf{f}_{(i)}^{j-1}, \\mathbf{w}_{(i)}^{j-1})))\\) \n10: \\(\\mathbf{f}_{(i)}^{j} \\gets Adam(\\mathbf{f}_{(i)}^{j-1}; loss), ||\\mathbf{f}_{(i)}^{j} - \\mathbf{f}_{(i)}^{0}||_{1} \\leq r[i]\\) \n11: \\(\\mathbf{w}_{(i)}^{j} \\gets Adam(\\mathbf{w}_{(i)}^{j-1}; loss), ||\\mathbf{w}_{(i)}^{j} - \\mathbf{w}_{(i)}^{0}||_{1} \\leq r[i]\\) \n12: end for \n13: \\(\\mathbf{f}_{(i+1)}^{0} = G_{i+1}(\\mathbf{f}_{(i)}^{N}, \\mathbf{w}_{(i)}^{N}), \\mathbf{w}_{(i+1)}^{0} = \\mathbf{w}_{(i)}^{N}\\) \n14: end for \n15: The final images \\(\\mathbf{x}^{*} = \\mathbf{f}_{(L+1)}^{0}\\) \n16: return \\(\\mathbf{x}^{*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.552, + 0.788, + 0.658 + ], + "angle": 0, + "content": "ones have more influence on local details. Previous studies [38, 47, 51] neglect the role of the latter, which limits the attack performance. To take advantage of the individual blocks, we propose intermediate features optimization, as shown in Algorithm 1. We first optimize the selected latent vectors \\(\\mathbf{w}_{init}\\) to obtain the optimal ones \\(\\mathbf{w}_{(0)}\\) before launching intermediate features optimization. Then we disassemble the pre-trained generator into \\(L + 1\\) blocks for hierarchical layer searching, i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.383, + 0.66, + 0.786, + 0.677 + ], + "angle": 0, + "content": "\\[\nG_{syn} = G_{L+1} \\circ G_{L} \\circ \\dots \\circ G_{2} \\circ G_{1}. \\tag{4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.685, + 0.786, + 0.703 + ], + "angle": 0, + "content": "We can then feed \\(\\mathbf{w}_{(0)}\\) into block \\(G_{1}\\) to attain the first intermediate feature \\(\\mathbf{f}_{(1)}^{0}\\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.703, + 0.789, + 0.844 + ], + "angle": 0, + "content": "For each intermediate block \\( G_{i+1}, i \\in [1, \\ldots, L] \\), the corresponding intermediate features \\( \\mathbf{f}_{(i+1)}^0 \\) are acquired with the following steps, sketched in code below. 
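The following sketch is our own illustration of the loop in Algorithm 1, not the released implementation. It assumes `blocks[0]` maps \( \mathbf{w} \) to the first feature while every later block maps a (feature, \( \mathbf{w} \)) pair onward, `id_loss` is a closure that already wraps the target model and label, and the \( l_{1} \) projection is a simple approximate rescaling:

```python
import torch

def project_l1(x, center, radius):
    # Approximate projection onto the l1 ball of the given radius around
    # `center`: rescale the offset whenever the constraint is violated.
    # (An exact l1 projection would need a sorting-based routine.)
    offset = x - center
    norm = offset.abs().sum(dim=tuple(range(1, x.dim())), keepdim=True)
    scale = (radius / (norm + 1e-12)).clamp(max=1.0)
    return center + offset * scale

def if_gmi_core(blocks, id_loss, w0, radii, n_iters=70, lr=0.005):
    # blocks = [G_1, ..., G_{L+1}]; radii[i-1] plays the role of r[i].
    f = blocks[0](w0).detach()          # first intermediate feature f_(1)^0
    w = w0.detach()
    for i in range(1, len(blocks)):     # one round per intermediate layer
        f_start, w_start = f.clone(), w.clone()
        f.requires_grad_(True)
        w.requires_grad_(True)
        opt = torch.optim.Adam([f, w], lr=lr)
        for _ in range(n_iters):
            x = f
            for block in blocks[i:]:    # run the remaining blocks G_remain
                x = block(x, w)
            opt.zero_grad()
            id_loss(x).backward()       # identity loss via the target model
            opt.step()
            with torch.no_grad():       # l1 ball constraint (lines 10-11)
                f.copy_(project_l1(f, f_start, radii[i - 1]))
                w.copy_(project_l1(w, w_start, radii[i - 1]))
        f = blocks[i](f, w).detach()    # advance one block (line 13)
        w = w.detach()
    return f                            # reconstructed images x*
```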
First, we generate images utilizing the remaining blocks (i.e., \\( \\mathbf{x}_i = G_{L+1} \\circ G_{L} \\circ \\dots \\circ G_{i+1}(\\mathbf{f}_{(i)}, \\mathbf{w}_{(i)}) \\)) and input them into the target classifier \\( T_\\theta \\) to compute the prediction confidence for the loss function. Then, we repeat the aforementioned process to iteratively update both \\( \\mathbf{w}_{(i)} \\) and \\( \\mathbf{f}_{(i)} \\). During the optimization process, we restrict the \\( \\mathbf{f}_{(i)} \\) within the \\( l_1 \\) ball with radius \\( r[i] \\) centered at the initial intermediate feature \\( \\mathbf{f}_{(i)}^0 \\) to avoid an excessive shift that may lead to collapsed image generation. Once the iteration process is completed, the optimized \\( \\mathbf{w}_{(i)}^N \\) and \\( \\mathbf{f}_{(i)}^N \\) are fed into the block \\( G_{i+1} \\) to" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.53, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "A Closer Look at GAN Priors" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.182 + ], + "angle": 0, + "content": "obtain the next intermediate features \\(\\mathbf{f}_{(i + 1)}^{0}\\). Moreover, we denote the optimized \\(\\mathbf{w}_{(i)}^{N}\\) as the initial latent vector \\(\\mathbf{w}_{(i + 1)}^{0}\\) before the next layer optimization starts." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.182, + 0.785, + 0.213 + ], + "angle": 0, + "content": "Once we finish searching the last intermediate layer, we can generate the final images \\(\\mathbf{x}^*\\) from the last intermediate feature \\(\\mathbf{f}_{(L)}^{N}\\), i.e., \\(\\mathbf{x}^* = \\mathbf{f}_{(L + 1)}^{0} = G_{L + 1}(\\mathbf{f}_{(L)}^{N})\\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.222, + 0.784, + 0.312 + ], + "angle": 0, + "content": "The Overall Loss. While the cross-entropy loss \\(\\mathcal{L}_{CE}\\) serves as the identity loss in most early works [5, 48, 51], it has a major drawback. Specifically, the gradient vanishing problem emerges when the prediction confidence of the target label \\(c\\) approaches the ground truth in the one-hot vector. Following the previous study [38], we rely on the Poincaré loss function to overcome this problem. Therefore, the identity loss function utilized in our method is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.339, + 0.324, + 0.785, + 0.357 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{id} = \\operatorname{arccosh}\\left(1 + \\frac{2 \\| v_{1} - v_{2} \\|_{2}^{2}}{(1 - \\| v_{1} \\|_{2}^{2})(1 - \\| v_{2} \\|_{2}^{2})}\\right), \\tag{5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.36, + 0.785, + 0.42 + ], + "angle": 0, + "content": "where \\( ||v||_2 \\) is the Euclidean norm for the given vector. In our experiments, we denote \\( v_{1} \\) as the normalized prediction confidence and \\( v_{2} \\) as the one-hot vector for the ground truth. Notably, the original number 1 in \\( v_{2} \\) is substituted with 0.9999 to avoid division by zero." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.44, + 0.376, + 0.457 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.468, + 0.785, + 0.557 + ], + "angle": 0, + "content": "In this section, we first illustrate the details of our experimental settings. 
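As a reference for the identity loss used throughout the following experiments, Eq. (5) can be sketched as below; the \( l_{1} \) normalization of the prediction vector is an assumption consistent with the public PPA implementation, and the 0.9999 substitution follows the text above:

```python
import torch

def poincare_id_loss(logits, target):
    # v1: prediction vector normalized to unit l1 norm (assumption).
    v1 = logits / logits.norm(p=1, dim=-1, keepdim=True)
    # v2: one-hot target whose 1 is replaced by 0.9999 to avoid division by zero.
    v2 = torch.eye(logits.shape[-1], device=logits.device)[target]
    v2 = v2.clamp(max=0.9999)
    num = 2.0 * (v1 - v2).pow(2).sum(dim=-1)
    den = (1.0 - v1.pow(2).sum(dim=-1)) * (1.0 - v2.pow(2).sum(dim=-1))
    return torch.acosh(1.0 + num / den).mean()
```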
Then, we compare our method with state-of-the-art baselines to evaluate the attack performance. Furthermore, we conduct extensive experiments on multiple target datasets and models to further validate the effectiveness of our method in various settings. Finally, due to cost concerns, the ablation studies are evaluated on the first 100 classes of the whole dataset." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.578, + 0.427, + 0.594 + ], + "angle": 0, + "content": "4.1 Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.599, + 0.787, + 0.841 + ], + "angle": 0, + "content": "Datasets. We evaluate our method on two classification tasks, including facial image classification and dog breed classification. For the facial image classification task, we select FaceScrub [30] and CelebFaces Attributes [28] (CelebA) as private datasets to train the target models. FaceScrub consists of facial images of actors and actresses with 530 identities in total. CelebA contains facial images of 10177 identities with coarse alignment. For FaceScrub, we utilize all the identities in the major experiment. For CelebA, we select the top 1000 identities with the most images for our experiment, consisting of over 30000 images. We use Flickr-Faces-HQ [24] (FFHQ) and MetFaces [23] as public datasets. FFHQ consists of 70000 high-quality human face images. MetFaces is an image dataset of 1336 human faces extracted from the Metropolitan Museum of Art Collection, which has a huge distributional shift from real human faces. For the dog breed classification task, we use Stanford Dogs [9] as a private dataset and Animal Faces-HQ Dogs [6] (AFHQ) as a public dataset. To adapt to the target model, all images in the various datasets are pre-processed to a resolution of \\(224 \\times 224\\) pixels in our experiment." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.358, + 0.129 + ], + "angle": 0, + "content": "Y. Qiu et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.798, + 0.239 + ], + "angle": 0, + "content": "Models. We trained a variety of classification models on the private datasets mentioned above, including various architectures such as ResNet-18 [18], DenseNet-169 [21], ResNet-152 [18], and ResNeSt-101 [50], as target models. Following the settings in the previous work [38], we select Inception-v3 [39] as the evaluation model. For the generative model, we employ publicly released StyleGAN2 pre-trained on the aforementioned public datasets." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.249, + 0.785, + 0.279 + ], + "angle": 0, + "content": "Metrics. Following PPA [38], we evaluate the performance of our attack method on various kinds of metrics as follows:" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.284, + 0.788, + 0.389 + ], + "angle": 0, + "content": "- Attack Accuracy. This metric serves as a criterion of how well the generated samples resemble the target class. We use the evaluation model trained on the same dataset as the target model to predict the labels on reconstructed samples and compute the top-1 and top-5 accuracy for target classes, denoted as \\( Acc@1 \\) and \\( Acc@5 \\), respectively. The higher the attack accuracy the reconstructed samples achieve on the evaluation model, the more private information in the dataset can be considered exposed [51]."
+ }, + { + "type": "text", + "bbox": [ + 0.226, + 0.389, + 0.788, + 0.464 + ], + "angle": 0, + "content": "- Feature Distance. The feature is defined as the output of the model's penultimate layer. We compute the shortest feature \\( l_{2} \\) distance between reconstructed samples and private training data for each class and calculate the average distance. The evaluated feature distances on the evaluation model and a pre-trained FaceNet [34] are denoted as \\( \\delta_{eval} \\) and \\( \\delta_{face} \\), respectively." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.464, + 0.788, + 0.538 + ], + "angle": 0, + "content": "- Fréchet Inception Distance (FID). FID [19] is commonly used to evaluate the generated images of GANs. It computes the distance between the feature vectors from target private data and reconstructed samples. The feature vectors are extracted by Inception-v3 pre-trained on ImageNet. The lower FID score shows higher realism and overall diversity [41]." + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.538, + 0.788, + 0.599 + ], + "angle": 0, + "content": "- Sample Diversity. We compute Precision-Recall [26] and Density-Coverage [29] scores, whose higher values indicate greater intra-class diversity of the reconstructed samples. Our results for these four metrics are stated and analyzed in the Appendix." + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.284, + 0.788, + 0.599 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.623, + 0.542, + 0.639 + ], + "angle": 0, + "content": "4.2 The Number of Optimized Layers" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.644, + 0.788, + 0.841 + ], + "angle": 0, + "content": "To obtain the highest attack performance, the number of intermediate features \\( L \\) should be explored before conducting the major experiments. When \\( L \\) takes a small value, there is a risk of underfitting as we merely optimize the intermediate features of the previous few layers to reconstruct the target images, especially in the OOD scenario. In contrast, when \\( L \\) is too large, the latter layers have a greater influence on the local details [24], which may lead to overfitting to the target model in some details and produce unrealistic images. Therefore, we must balance underfitting and overfitting when choosing \\( L \\). We conduct a simple attack on only 10 classes for each combination of public and private datasets to select \\( L \\) according to the results. For instance, Figure 3(a) shows the Acc@1 result for GAN prior pre-trained on FFHQ against the target DenseNet-169 trained on CelebA. The Acc@1 reaches the highest when \\( L = 3 \\). Hence, we keep this configuration in conducting the following experiments." 
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.53, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "A Closer Look at GAN Priors" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "image", + "bbox": [ + 0.229, + 0.164, + 0.495, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.333, + 0.282, + 0.396, + 0.293 + ], + "angle": 0, + "content": "(a) StyleGAN2" + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.164, + 0.572, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.53, + 0.212, + 0.557, + 0.22 + ], + "angle": 0, + "content": "layer 0" + }, + { + "type": "image", + "bbox": [ + 0.578, + 0.164, + 0.636, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.595, + 0.212, + 0.621, + 0.22 + ], + "angle": 0, + "content": "layer 1" + }, + { + "type": "image", + "bbox": [ + 0.643, + 0.164, + 0.701, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.66, + 0.212, + 0.686, + 0.22 + ], + "angle": 0, + "content": "layer 2" + }, + { + "type": "image", + "bbox": [ + 0.709, + 0.164, + 0.767, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.725, + 0.212, + 0.751, + 0.22 + ], + "angle": 0, + "content": "layer 3" + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.222, + 0.572, + 0.268 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.531, + 0.271, + 0.558, + 0.278 + ], + "angle": 0, + "content": "layer 4" + }, + { + "type": "image", + "bbox": [ + 0.578, + 0.222, + 0.636, + 0.268 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.6, + 0.271, + 0.628, + 0.278 + ], + "angle": 0, + "content": "ayer 5" + }, + { + "type": "image", + "bbox": [ + 0.644, + 0.222, + 0.702, + 0.268 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.66, + 0.27, + 0.687, + 0.278 + ], + "angle": 0, + "content": "layer 6" + }, + { + "type": "image", + "bbox": [ + 0.709, + 0.222, + 0.767, + 0.268 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.727, + 0.27, + 0.751, + 0.278 + ], + "angle": 0, + "content": "layer 7" + }, + { + "type": "image_caption", + "bbox": [ + 0.576, + 0.282, + 0.715, + 0.293 + ], + "angle": 0, + "content": "(b) visual samples for each layer" + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.313, + 0.785, + 0.368 + ], + "angle": 0, + "content": "Fig. 3: (a) Comparison of Acc@1 metric under various settings of \\( L \\) (i.e., the number of intermediate features). (b) Visual results generated from different end layers. We define \\( L = 0 \\) as a special case that our method degenerates into merely optimizing the latent vectors \\( \\mathbf{w} \\)." + }, + { + "type": "table_caption", + "bbox": [ + 0.215, + 0.384, + 0.785, + 0.411 + ], + "angle": 0, + "content": "Table 1: Comparison of our method with state-of-the-art methods against ResNet-18 trained on FaceScrub." + }, + { + "type": "table", + "bbox": [ + 0.222, + 0.424, + 0.789, + 0.631 + ], + "angle": 0, + "content": "
| Public Dataset | Method | ↑ Acc@1 | ↑ Acc@5 | ↓ δface | ↓ δeval | ↓ FID |
|---|---|---|---|---|---|---|
| FFHQ | GMI [51] | 0.131 | 0.339 | 1.260 | 149.530 | 77.800 |
| | KEDMI [5] | 0.127 | 0.317 | 1.155 | 186.409 | 144.195 |
| | PPA [38] | 0.962 | 0.996 | 0.707 | 117.834 | 41.688 |
| | LOMMA+GMI [31] | 0.828 | 0.945 | 0.784 | 126.178 | 55.840 |
| | LOMMA+KEDMI [31] | 0.549 | 0.814 | 0.916 | 217.991 | 114.045 |
| | PLGMI [47] | 0.758 | 0.928 | 0.676 | 214.978 | 154.497 |
| | IF-GMI (ours) | 0.979 | 0.996 | 0.667 | 112.915 | 40.581 |
| MetFaces | GMI [51] | 0.038 | 0.136 | 1.361 | 161.036 | 114.648 |
| | KEDMI [5] | 0.003 | 0.017 | 1.651 | 212.952 | 347.468 |
| | PPA [38] | 0.628 | 0.854 | 1.035 | 146.749 | 62.518 |
| | LOMMA+GMI [31] | 0.160 | 0.361 | 1.220 | 156.297 | 101.600 |
| | LOMMA+KEDMI [31] | 0.002 | 0.020 | 1.623 | 214.883 | 333.572 |
| | PLGMI [47] | 0.438 | 0.731 | 0.796 | 205.222 | 245.208 |
| | IF-GMI (ours) | 0.949 | 0.992 | 0.838 | 120.354 | 68.107 |
" + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.66, + 0.684, + 0.675 + ], + "angle": 0, + "content": "4.3 Comparison with Previous State-of-the-art Attacks" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.688, + 0.785, + 0.763 + ], + "angle": 0, + "content": "We compare our method with state-of-the-art MI attack methods, including GMI [51], KEDMI [5], PPA [38], LOMMA [31] and PLGMI [47]. Note that LOMMA [31] is a plug-and-play technique designed to augment existing attack methods. We use their original setup where LOMMA is integrated with GMI and KEDMI as our baselines." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.787, + 0.84 + ], + "angle": 0, + "content": "The GAN structures employed by GMI, KEDMI, and PLGMI are inherently limited to generating images at a resolution of \\(64 \\times 64\\) pixels. To ensure a fair comparison, we adopt the same operation used in PPA [38], which modifies the architecture of the generators and discriminators to enable the generation of images at an enhanced resolution of \\(256 \\times 256\\) pixels, i.e., adding two ex" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.358, + 0.129 + ], + "angle": 0, + "content": "Y. Qiu et al." + }, + { + "type": "image", + "bbox": [ + 0.244, + 0.15, + 0.293, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.254, + 0.327, + 0.285, + 0.335 + ], + "angle": 0, + "content": "Private" + }, + { + "type": "image", + "bbox": [ + 0.326, + 0.151, + 0.374, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.342, + 0.327, + 0.361, + 0.335 + ], + "angle": 0, + "content": "GMI" + }, + { + "type": "image", + "bbox": [ + 0.388, + 0.151, + 0.437, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.399, + 0.327, + 0.428, + 0.335 + ], + "angle": 0, + "content": "KEDMI" + }, + { + "type": "image", + "bbox": [ + 0.452, + 0.151, + 0.501, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.468, + 0.327, + 0.484, + 0.335 + ], + "angle": 0, + "content": "PPA" + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.151, + 0.565, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.526, + 0.327, + 0.558, + 0.343 + ], + "angle": 0, + "content": "LOMMA +GMI" + }, + { + "type": "image", + "bbox": [ + 0.579, + 0.151, + 0.629, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.587, + 0.327, + 0.622, + 0.343 + ], + "angle": 0, + "content": "LOMMA +KEDMI" + }, + { + "type": "image", + "bbox": [ + 0.645, + 0.151, + 0.693, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.653, + 0.327, + 0.682, + 0.335 + ], + "angle": 0, + "content": "PLGMI" + }, + { + "type": "image", + "bbox": [ + 0.706, + 0.151, + 0.755, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.721, + 0.328, + 0.741, + 0.335 + ], + "angle": 0, + "content": "ours" + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.36, + 0.788, + 0.403 + ], + "angle": 0, + "content": "Fig. 4: Visual comparison of reconstructed images from different methods against the ResNet-18 trained on FaceScrub. 
The first column shows ground truth images of the target class in the private dataset." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.434, + 0.785, + 0.464 + ], + "angle": 0, + "content": "tra upsampling layers for the generator and two downsampling layers for the discriminator respectively." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.466, + 0.788, + 0.707 + ], + "angle": 0, + "content": "We provide quantitative results against ResNet-18 [18] trained on the Face-Scrub dataset in Table 1. We can observe that our method achieves significant improvements over previous methods. Especially when the generator is trained on MetFaces, IF-GMI remarkably improves the Acc@1 by \\(15.1\\%\\) and the Acc@5 is nearly to \\(100\\%\\). Moreover, our method generally achieves a lower feature distance than baselines between reconstructed samples and private data. For instance, we reduce the distance by more than \\(10\\%\\) compared to the PPA on the MetFaces dataset. Notably, the MetFaces dataset is composed of artworks and thus has a larger distributional shift with real human faces compared with the FFHQ dataset. We note that this severely reduces the reconstruction performance of previous attack methods, while our proposed method still exhibits outstanding performance, highlighting the excellent generalization ability of our approach. Visualization results of the recovered images using generators trained on FFHQ are shown in Figure 4. Compared with previous methods, our reconstructed images have higher fidelity and realism, demonstrating the superiority of exploiting GAN's intermediate features." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.735, + 0.719, + 0.75 + ], + "angle": 0, + "content": "4.4 Comparison under different target datasets and models" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.788, + 0.841 + ], + "angle": 0, + "content": "To validate the effectiveness of the proposed method, we conducted extensive experiments on various datasets using different target models with different architectures. We chose the PPA method as our baseline for comparison due to its comprehensive performance in both accuracy and fidelity. Additional experimental results are in the Appendix." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.531, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "A Closer Look at GAN Priors" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "table_caption", + "bbox": [ + 0.271, + 0.145, + 0.731, + 0.16 + ], + "angle": 0, + "content": "Table 2: Comparison results against ResNet-152 trained on CelebA." + }, + { + "type": "table", + "bbox": [ + 0.222, + 0.171, + 0.787, + 0.261 + ], + "angle": 0, + "content": "
| Public Dataset | Method | ↑ Acc@1 | ↑ Acc@5 | ↓ δface | ↓ δeval | ↓ FID |
|---|---|---|---|---|---|---|
| FFHQ | PPA | 0.806 | 0.946 | 0.736 | 312.580 | 40.430 |
| | IF-GMI (ours) | 0.912 | 0.982 | 0.678 | 314.392 | 30.685 |
| MetFaces | PPA | 0.396 | 0.643 | 1.063 | 387.810 | 74.030 |
| | IF-GMI (ours) | 0.784 | 0.929 | 0.835 | 340.894 | 74.504 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.272, + 0.785, + 0.3 + ], + "angle": 0, + "content": "Table 3: Comparison results against different target models trained on FaceScrub with the public dataset being MetFaces." + }, + { + "type": "table", + "bbox": [ + 0.222, + 0.313, + 0.787, + 0.436 + ], + "angle": 0, + "content": "
| Target Model | Method | ↑ Acc@1 | ↑ Acc@5 | ↓ δface | ↓ δeval | ↓ FID |
|---|---|---|---|---|---|---|
| ResNet-152 | PPA | 0.731 | 0.920 | 0.966 | 139.380 | 68.540 |
| | IF-GMI (ours) | 0.904 | 0.984 | 0.882 | 138.752 | 69.937 |
| ResNeSt-101 | PPA | 0.750 | 0.927 | 0.979 | 137.170 | 88.660 |
| | IF-GMI (ours) | 0.922 | 0.983 | 0.884 | 132.609 | 76.195 |
| DenseNet-169 | PPA | 0.798 | 0.948 | 0.938 | 129.440 | 77.520 |
| | IF-GMI (ours) | 0.933 | 0.987 | 0.851 | 125.050 | 82.123 |
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.463, + 0.785, + 0.643 + ], + "angle": 0, + "content": "As shown in Table 2, our proposed IF-GMI maintains superiority in most metrics against the ResNet-152 trained on the CelebA. Our method achieves a remarkable increase of \\(10.6\\%\\) in Acc@1 and significantly reduces the FID value using the StyleGAN2 trained on FFHQ. When utilizing the MetFaces StyleGAN2, our method still achieves much better results than the baseline despite a larger distributional shift, including a \\(38.8\\%\\) increase in Acc@1 and competitive feature distance. In addition to ResNet-18, we evaluate the performance of the proposed method on more target models trained on FaceScrub, including ResNet-152, ResNeSt-101, and DenseNet-169. Benefiting from the fully utilized generative prior, our method achieves \\(13\\% \\sim 17\\%\\) improvement in Acc@1 metrics than the baselines and also achieves better results in most of the other metrics, as illustrated in Table 3." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.645, + 0.785, + 0.705 + ], + "angle": 0, + "content": "The results presented above demonstrate that our method maintains outstanding attack performance in a variety of settings, exhibiting excellent generalizability and transferability. We also provide additional experimental results on more datasets and architectures in the Appendix." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.726, + 0.403, + 0.74 + ], + "angle": 0, + "content": "4.5 Ablation Studies" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.785, + 0.81 + ], + "angle": 0, + "content": "To estimate the contributions from each component in our method, we conduct ablation studies on the ResNet-152 trained on the CelebA dataset using the StyleGAN2 trained on FFHQ. The results are presented in Table 4. More ablation studies are listed in the Appendix." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.81, + 0.787, + 0.842 + ], + "angle": 0, + "content": "Intermediate Features Optimization. We merely remove the intermediate features optimization from our pipeline while keeping the remaining param-" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.358, + 0.129 + ], + "angle": 0, + "content": "Y. Qiu et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.145, + 0.788, + 0.2 + ], + "angle": 0, + "content": "Table 4: Ablation study performed on ResNet-152 trained on CelebA dataset with FFHQ as the public dataset. IF-GMI-\\(i\\) removes the intermediate feature optimization and only searches the latent space. IF-GMI-\\(l\\) removes the \\(l_{1}\\) ball constraint compared to IF-GMI." + }, + { + "type": "table", + "bbox": [ + 0.279, + 0.213, + 0.731, + 0.292 + ], + "angle": 0, + "content": "
| Method | ↑ Acc@1 | ↑ Acc@5 | ↓ δface | ↓ δeval | ↓ FID |
|---|---|---|---|---|---|
| IF-GMI-i | 0.803 | 0.928 | 0.732 | 314.275 | 43.576 |
| IF-GMI-l | 0.945 | 0.992 | 0.678 | 315.278 | 37.528 |
| IF-GMI | 0.947 | 0.993 | 0.677 | 315.032 | 37.461 |
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.317, + 0.784, + 0.361 + ], + "angle": 0, + "content": "eters unchanged. As shown in the first row of Table 4, it leads to degradation up to \\(14\\%\\) in Acc@1 and much worse FID without this technique, demonstrating the superiority of utilizing the hierarchical features of intermediate layers." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.362, + 0.788, + 0.437 + ], + "angle": 0, + "content": "\\(l_{1}\\) Ball Constraint. To avoid unreal image generation, we introduce the \\(l_{1}\\) ball constraint into the intermediate features optimization. By observing the results shown in the second row of Table 4, the \\(l_{1}\\) ball is beneficial in improving the performance in all metrics. Thus, we demonstrate the necessity of restricting the intermediate features within the \\(l_{1}\\) ball constraint." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.459, + 0.36, + 0.475 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.488, + 0.788, + 0.623 + ], + "angle": 0, + "content": "We proposed IF-GMI, a novel model inversion attack that performs effective attack in the OOD scenario. Surpassing the limitation of treating the generator as a black-box, we studied the structure and decomposed the generator into hierarchical layers, extending the optimization space from latent code to intermediate features to generate stable and high-quality images. Moreover, to avoid generating low-fidelity images, we applied a \\( l_{1} \\) ball constraint to the optimization process. Through our extensive experiments, we demonstrated that the proposed IF-GMI achieves the state-of-the-art attack accuracy while generating samples with high fidelity and diversity." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.624, + 0.788, + 0.699 + ], + "angle": 0, + "content": "Our exploration of enhanced utilization of intermediate features in the GAN prior contributes to advances in MI attack field, paving the way to more practical employment for MI attacks. We hope this paper can raise concerns about privacy leakage risk of released pre-trained models and facilitate more response to the threat of MI attacks." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.72, + 0.395, + 0.737 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.749, + 0.795, + 0.826 + ], + "angle": 0, + "content": "This work is supported in part by the National Natural Science Foundation of China under grant 62171248, 62301189, Guangdong Basic and Applied Basic Research Foundation under grant 2021A1515110066, the PCNL KEY project (PCL2021A07), and Shenzhen Science and Technology Program under Grant JCYJ20220818101012025, RCBS20221008093124061, GXWD20220811172936001." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.531, + 0.115, + 0.732, + 0.128 + ], + "angle": 0, + "content": "A Closer Look at GAN Priors" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.145, + 0.323, + 0.16 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.173, + 0.786, + 0.214 + ], + "angle": 0, + "content": "1. Abdal, R., Qin, Y., Wonka, P.: Image2stylegan: How to embed images into the stylegan latent space? In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 
4432-4441 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.215, + 0.786, + 0.242 + ], + "angle": 0, + "content": "2. An, S., Tao, G., Xu, Q., Liu, Y., Shen, G., Yao, Y., Xu, J., Zhang, X.: Mirror: Model inversion for deep learning network with high fidelity. In: NDSS (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.242, + 0.786, + 0.282 + ], + "angle": 0, + "content": "3. Bau, D., Zhu, J.Y., Strobelt, H., Zhou, B., Tenenbaum, J.B., Freeman, W.T., Torralba, A.: Gan dissection: Visualizing and understanding generative adversarial networks. arXiv preprint arXiv:1811.10597 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.283, + 0.786, + 0.336 + ], + "angle": 0, + "content": "4. Chen, B., Feng, Y., Dai, T., Bai, J., Jiang, Y., Xia, S.T., Wang, X.: Adversarial examples generation for deep product quantization networks on image retrieval. IEEE Transactions on Pattern Analysis and Machine Intelligence 45(2), 1388-1404 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.337, + 0.786, + 0.364 + ], + "angle": 0, + "content": "5. Chen, S., Kahla, M., Jia, R., Qi, G.J.: Knowledge-enriched distributional model inversion attacks. In: ICCV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.365, + 0.786, + 0.404 + ], + "angle": 0, + "content": "6. Choi, Y., Uh, Y., Yoo, J., Ha, J.W.: Stargan v2: Diverse image synthesis for multiple domains. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8188-8197 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.405, + 0.786, + 0.445 + ], + "angle": 0, + "content": "7. Conneau, A., Baevski, A., Collobert, R., Mohamed, A., Auli, M.: Unsupervised cross-lingual representation learning for speech recognition. arXiv preprint arXiv:2006.13979 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.446, + 0.786, + 0.486 + ], + "angle": 0, + "content": "8. Daras, G., Dean, J., Jalal, A., Dimakis, A.G.: Intermediate layer optimization for inverse problems using deep generative models. arXiv preprint arXiv:2102.07364 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.487, + 0.786, + 0.527 + ], + "angle": 0, + "content": "9. Dataset, E.: Novel datasets for fine-grained image categorization. In: First Workshop on Fine Grained Visual Categorization, CVPR. Citeseer. Citeseer. Citeseer (2011)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.528, + 0.786, + 0.568 + ], + "angle": 0, + "content": "0. Fang, H., Chen, B., Wang, X., Wang, Z., Xia, S.T.: Gidf: A generative gradient inversion method with feature domain optimization. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4967-4976 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.568, + 0.786, + 0.609 + ], + "angle": 0, + "content": "1. Fang, H., Kong, J., Yu, W., Chen, B., Li, J., Xia, S., Xu, K.: One perturbation is enough: On generating universal adversarial perturbations against vision-language pre-training models. arXiv preprint arXiv:2406.05491 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.61, + 0.786, + 0.65 + ], + "angle": 0, + "content": "2. Fang, H., Qiu, Y., Yu, H., Yu, W., Kong, J., Chong, B., Chen, B., Wang, X., Xia, S.T.: Privacy leakage on dnns: A survey of model inversion attacks and defenses. arXiv preprint arXiv:2402.04013 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.651, + 0.786, + 0.676 + ], + "angle": 0, + "content": "3. 
Fredrikson, M., Jha, S., Ristenpart, T.: Model inversion attacks that exploit confidence information and basic countermeasures. In: CCS. pp. 1322-1333 (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.677, + 0.786, + 0.717 + ], + "angle": 0, + "content": "4. Fredrikson, M., Lantz, E., Jha, S., Lin, S., Page, D., Ristenpart, T.: Privacy in pharmacogenetics: An {End-to-End} case study of personalized warfarin dosing. In: USENIX Security. pp. 17-32 (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.718, + 0.786, + 0.759 + ], + "angle": 0, + "content": "5. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial nets. Advances in neural information processing systems 27 (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.759, + 0.786, + 0.799 + ], + "angle": 0, + "content": "6. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial networks. Communications of the ACM 63(11), 139–144 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.8, + 0.786, + 0.84 + ], + "angle": 0, + "content": "7. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.173, + 0.786, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.358, + 0.129 + ], + "angle": 0, + "content": "Y. Qiu et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.147, + 0.785, + 0.189 + ], + "angle": 0, + "content": "18. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.19, + 0.785, + 0.232 + ], + "angle": 0, + "content": "19. Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems 30 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.232, + 0.785, + 0.272 + ], + "angle": 0, + "content": "20. Hu, H., Salcic, Z., Sun, L., Dobbie, G., Yu, P.S., Zhang, X.: Membership inference attacks on machine learning: A survey. ACM Computing Surveys (CSUR) 54(11s), 1-37 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.273, + 0.785, + 0.314 + ], + "angle": 0, + "content": "21. Huang, G., Liu, Z., Van Der Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 4700-4708 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.315, + 0.785, + 0.342 + ], + "angle": 0, + "content": "22. Kahla, M., Chen, S., Just, H.A., Jia, R.: Label-only model inversion attacks via boundary repulsion. In: CVPR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.343, + 0.785, + 0.383 + ], + "angle": 0, + "content": "23. Karras, T., Aittala, M., Hellsten, J., Laine, S., Lehtinen, J., Aila, T.: Training generative adversarial networks with limited data. 
Advances in neural information processing systems 33, 12104-12114 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.384, + 0.785, + 0.424 + ], + "angle": 0, + "content": "24. Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 4401-4410 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.425, + 0.785, + 0.466 + ], + "angle": 0, + "content": "25. Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., Aila, T.: Analyzing and improving the image quality of stylegan. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8110-8119 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.467, + 0.785, + 0.508 + ], + "angle": 0, + "content": "26. Kynkänniemi, T., Karras, T., Laine, S., Lehtinen, J., Aila, T.: Improved precision and recall metric for assessing generative models. Advances in Neural Information Processing Systems 32 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.509, + 0.785, + 0.536 + ], + "angle": 0, + "content": "27. Li, C., Qiu, M.: Reinforcement learning for cyber-physical systems: with cybersecurity case studies. Chapman and Hall/CRC (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.537, + 0.785, + 0.577 + ], + "angle": 0, + "content": "28. Liu, Z., Luo, P., Wang, X., Tang, X.: Deep learning face attributes in the wild. In: Proceedings of the IEEE international conference on computer vision. pp. 3730-3738 (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.578, + 0.785, + 0.619 + ], + "angle": 0, + "content": "29. Naeem, M.F., Oh, S.J., Uh, Y., Choi, Y., Yoo, J.: Reliable fidelity and diversity metrics for generative models. In: International Conference on Machine Learning. pp. 7176-7185. PMLR (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.62, + 0.785, + 0.66 + ], + "angle": 0, + "content": "30. Ng, H.W., Winkler, S.: A data-driven approach to cleaning large face datasets. In: 2014 IEEE international conference on image processing (ICIP). pp. 343-347. IEEE (2014)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.661, + 0.785, + 0.702 + ], + "angle": 0, + "content": "31. Nguyen, N.B., Chandrasegaran, K., Abdollahzadeh, M., Cheung, N.M.: Rethinking model inversion attacks against deep neural networks. In: CVPR. pp. 16384-16393 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.702, + 0.785, + 0.757 + ], + "angle": 0, + "content": "32. Park, J.Y., Smedemark-Margulies, N., Daniels, M., Yu, R., van de Meent, J.W., HAnd, P.: Generator surgery for compressed sensing. In: NeurIPS 2020 Workshop on Deep Learning and Inverse Problems (2020), https://openreview.net/forum?id=s2EucjZ6d2s" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.758, + 0.785, + 0.799 + ], + "angle": 0, + "content": "33. Qiu, H., Dong, T., Zhang, T., Lu, J., Memmi, G., Qiu, M.: Adversarial attacks against network intrusion detection in IoT systems. IEEE Internet of Things Journal 8(13), 10327-10335 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.799, + 0.785, + 0.84 + ], + "angle": 0, + "content": "34. Schroff, F., Kalenichenko, D., Philbin, J.: Facenet: A unified embedding for face recognition and clustering. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 
815-823 (2015)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.531, + 0.115, + 0.732, + 0.127 + ], + "angle": 0, + "content": "A Closer Look at GAN Priors" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.189 + ], + "angle": 0, + "content": "35. Shen, Y., Gu, J., Tang, X., Zhou, B.: Interpreting the latent space of gans for semantic face editing. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 9243-9252 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.189, + 0.788, + 0.23 + ], + "angle": 0, + "content": "36. Shokri, R., Stronati, M., Song, C., Shmatikov, V.: Membership inference attacks against machine learning models. In: 2017 IEEE symposium on security and privacy (SP). pp. 3-18. IEEE (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.23, + 0.788, + 0.257 + ], + "angle": 0, + "content": "37. Song, C., Ristenpart, T., Shmatikov, V.: Machine learning models that remember too much. In: CCS. pp. 587-601 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.257, + 0.788, + 0.284 + ], + "angle": 0, + "content": "38. Struppek, L., Hintersdorf, D., Correira, A.D.A., Adler, A., Kersting, K.: Plug & play attacks: Towards robust and flexible model inversion attacks. In: ICML (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.284, + 0.788, + 0.325 + ], + "angle": 0, + "content": "39. Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., Wojna, Z.: Rethinking the inception architecture for computer vision. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2818-2826 (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.325, + 0.788, + 0.367 + ], + "angle": 0, + "content": "40. Tewari, A., Elgharib, M., Bernard, F., Seidel, H.P., Pérez, P., Zollhöfer, M., Theobalt, C.: Pie: Portrait image embedding for semantic control. ACM Transactions on Graphics (TOG) 39(6), 1-14 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.367, + 0.788, + 0.394 + ], + "angle": 0, + "content": "41. Wang, K.C., Fu, Y., Li, K., Khisti, A., Zemel, R., Makhzani, A.: Variational model inversion attacks. In: NeurIPS (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.394, + 0.788, + 0.435 + ], + "angle": 0, + "content": "42. Wu, C., Yan, M.: Session-aware information embedding for e-commerce product recommendation. In: Proceedings of the 2017 ACM on conference on information and knowledge management. pp. 2379-2382 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.435, + 0.788, + 0.462 + ], + "angle": 0, + "content": "43. Yang, Z., Zhang, J., Chang, E.C., Liang, Z.: Neural network inversion in adversarial setting via background knowledge alignment. In: CCS (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.462, + 0.788, + 0.516 + ], + "angle": 0, + "content": "44. Yin, H., Molchanov, P., Alvarez, J.M., Li, Z., Mallya, A., Hoiem, D., Jha, N.K., Kautz, J.: Dreaming to distill: Data-free knowledge transfer via deepinversion. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8715-8724 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.516, + 0.788, + 0.543 + ], + "angle": 0, + "content": "45. 
Yu, W., Chen, B., Zhang, Q., Xia, S.T.: Editable-deepsc: Cross-modal editable semantic communication systems. arXiv preprint arXiv:2310.10347 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.543, + 0.788, + 0.584 + ], + "angle": 0, + "content": "46. Yu, W., Fang, H., Chen, B., Sui, X., Chen, C., Wu, H., Xia, S.T., Xu, K.: Gi-nas: Boosting gradient inversion attacks through adaptive neural architecture search. arXiv preprint arXiv:2405.20725 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.584, + 0.788, + 0.625 + ], + "angle": 0, + "content": "47. Yuan, X., Chen, K., Zhang, J., Zhang, W., Yu, N., Zhang, Y.: Pseudo label-guided model inversion attack via conditional generative adversarial network. In: AAAI (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.625, + 0.788, + 0.652 + ], + "angle": 0, + "content": "48. Yuan, Z., Wu, F., Long, Y., Xiao, C., Li, B.: Secretgen: Privacy recovery on pretrained models via distribution discrimination. In: ECCV (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.652, + 0.788, + 0.707 + ], + "angle": 0, + "content": "49. Zeng, Y., Pan, M., Just, H.A., Lyu, L., Qiu, M., Jia, R.: Narcissus: A practical clean-label backdoor attack with limited information. In: Proceedings of the 2023 ACM SIGSAC Conference on Computer and Communications Security. pp. 771-785 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.707, + 0.788, + 0.762 + ], + "angle": 0, + "content": "50. Zhang, H., Wu, C., Zhang, Z., Zhu, Y., Lin, H., Zhang, Z., Sun, Y., He, T., Mueller, J., Manmatha, R., et al.: Resnest: Split-attention networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 2736-2746 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.762, + 0.788, + 0.789 + ], + "angle": 0, + "content": "51. Zhang, Y., Jia, R., Pei, H., Wang, W., Li, B., Song, D.: The secret revealer: Generative model-inversion attacks against deep neural networks. In: CVPR (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.789, + 0.788, + 0.831 + ], + "angle": 0, + "content": "52. Zhong, X., Fang, H., Chen, B., Gu, X., Dai, T., Qiu, M., Xia, S.T.: Hierarchical features matter: A deep exploration of gan priors for improved dataset distillation. 
arXiv preprint arXiv:2406.05704 (2024)" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.831 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/ae02311f-0ba1-4342-a539-c7ea4e71402f_origin.pdf b/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/ae02311f-0ba1-4342-a539-c7ea4e71402f_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..90daa5f0549797e0f40ece26a95badd9dddaf266 --- /dev/null +++ b/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/ae02311f-0ba1-4342-a539-c7ea4e71402f_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35f1067ada74c4ea68f1590c435060b6bb49d08f474ca2dd29870af9c543db30 +size 2228671 diff --git a/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/full.md b/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/full.md new file mode 100644 index 0000000000000000000000000000000000000000..493d50225a92f324a1a14e427c82c2708f04127d --- /dev/null +++ b/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/full.md @@ -0,0 +1,368 @@ +# A Closer Look at GAN Priors: Exploiting Intermediate Features for Enhanced Model Inversion Attacks + +Yixiang Qiu $^{1,2\dagger}$ , Hao Fang $^{2\dagger}$ , Hongyao Yu $^{1\dagger}$ , Bin Chen $^{1,3,4\#}$ , MeiKang Qiu $^{5}$ , and Shu-Tao Xia $^{2,4}$ + +1 Harbin Institute of Technology, Shenzhen +2 Tsinghua Shenzhen International Graduate School, Tsinghua University +3 Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies +4 Pengcheng Laboratory 5 Augusta University +qiuyixiang@stu.hit.edu.cn, fang-h23@mails.tsinghua.edu.cn +yuhongyao@stu.hit.edu.cn, chenbin2021@hit.edu.cn +qiumeikang@yahoo.com, xiast@sz.tsinghua.edu.cn + +Abstract. Model Inversion (MI) attacks aim to reconstruct privacy-sensitive training data from released models by utilizing output information, raising extensive concerns about the security of Deep Neural Networks (DNNs). Recent advances in generative adversarial networks (GANs) have contributed significantly to the improved performance of MI attacks due to their powerful ability to generate realistic images with high fidelity and appropriate semantics. However, previous MI attacks have solely disclosed private information in the latent space of GAN priors, limiting their semantic extraction and transferability across multiple target models and datasets. To address this challenge, we propose a novel method, Intermediate Features enhanced Generative Model Inversion (IF-GMI), which disassembles the GAN structure and exploits features between intermediate blocks. This allows us to extend the optimization space from latent code to intermediate features with enhanced expressive capabilities. To prevent GAN priors from generating unrealistic images, we apply a $l_{1}$ ball constraint to the optimization process. Experiments on multiple benchmarks demonstrate that our method significantly outperforms previous approaches and achieves state-of-the-art results under various settings, especially in the out-of-distribution (OOD) scenario. 
Our code is available at: https://github.com/final-solution/IF-GMI + +Keywords: Privacy $\cdot$ Model Inversion $\cdot$ Generative Priors + +# 1 Introduction + +In recent years, Deep Neural Networks (DNNs) have experienced unprecedented development and achieved tremendous success in a wide range of applications, + +This work was done while Yixiang Qiu was pre-admitted to Tsinghua University. + +including face recognition [17], personalized recommendations [42], and audio recognition [7]. While DNNs bring us many practical benefits, concerns [4,10,11, 46] about privacy and security have also been raised and drawn great attention. Recent studies have demonstrated that there is a certain risk of privacy leakage for DNNs as an adversary could reveal private information from these pre-trained models. Various types of novel privacy attacks [27,33,49] have been proposed, such as membership inference attack [20,36] and gradient inversion attack [10, 46]. Among the new attack methods, Model Inversion (MI) attack [12] poses a greater threat due to its powerful capability in recovering the privacy-sensitive datasets that are collected and utilized for model training. + +[14] proposes the first MI attack to reconstruct sensitive features of genomic data and demonstrate that linear regression models are vulnerable to such privacy attacks. Subsequent studies [13, 37, 43] have extended MI attacks to more Machine Learning (ML) models, but are still limited to models with simple structure and low-dimensional data such as grayscale images. Recent advances in the MI attack field have overcome the challenges in image data recovery by applying Generative Adversarial Networks (GANs) [16], resulting in the extension to DNNs with more complex structure and high-dimensional data such as RGB images. [51] first introduces the GANs to MI attack scenarios, serving as image priors. To better reveal privacy-sensitive information, [51] and subsequent GAN-based methods [5, 41, 47, 48] train GANs with publicly available datasets that have structural similarity with target private datasets. Furthermore, [38] propose to leverage the public pre-trained GAN models (e.g., StyleGAN [24]) as GAN priors, which have a stronger ability to generate high-resolution images and do not require a time-consuming training process. + +Although the aforementioned methods have achieved great progress in recovering high-quality and privacy-sensitive images, the effectiveness of GAN-based MI attacks is limited under certain scenarios. One typical challenge is the out-of-distribution (OOD) scenario, where there is a significant distributional shift between the target private dataset and the public dataset used in the training process of GAN priors. Most previous methods [5, 41, 48, 51] merely work well under scenarios with slight distributional shifts. For instance, they split the same dataset into two parts, one used as the public dataset and the other used as the private dataset. In recent years, some studies [3, 8, 35, 40, 45] have demonstrated that there is rich semantic information encoded in the latent code and intermediate features of GANs. Inspired by these works, we empirically observe that the rich semantic information encoded in the intermediate features helps to sufficiently recover high-quality private data under more rigorous settings, as shown in Figure 1. Therefore, it is imperative to explore methods for leveraging the GAN's intrinsic layered knowledge into MI attacks, mitigating the OOD issue. 
+ +To this end, we propose a novel MI attack method, Intermediate Features enhanced Generative Model Inversion (IF-GMI), which effectively disassembles the GAN structure and leverages features between intermediate blocks. Specifically, we consider the generator of the GAN as a concatenation of multiple blocks and the vectors produced between the blocks as intermediate features. We first + +![](images/4057bffa9d95fa63769b3a9fc77a3827b617b6e5f53b3229c90871dbf3c98f35.jpg) +0.0000 + +![](images/6f5f6c0af33072bd26f8227755b3670ec1d5f67bfc325aaf947dd57cda4ede4a.jpg) +0.0000 +Generation + +![](images/c4e121353fc9d4fce58b215281bb3996d31bcf1615473a40c10492f40e6770f7.jpg) +0.3600 + +![](images/6f1614344c9dc466438968c5eb94d1b676bfb2ce116f4512e96ad811e0ec3676.jpg) +0.2200 +PPA + +![](images/850be9d20fd9e7e0bf7c398c12f999b7b9cd793dde409625621992fb51588c95.jpg) +0.9975 + +![](images/2536e1bf41eb0f8ec381ed92212c55e6c182395414fb17892502f5b5d14d093a.jpg) +0.9988 +IF-GMI + +![](images/50c3d0634734bf852aff41eaa2b402646d6a5b8fcb588d72973a65fb920978ee.jpg) + +![](images/6ed32123a3bf4d8d6b5661bb50102962aea8ee00622e289b8f0ec367e4629b9a.jpg) +Original + +![](images/406733f23327a1bc9b2c56e1adbbe0d2dd9b40270bb44ce2ce19bba8a062f521.jpg) +(a) Visual examples of IF-GMI to PPA +(b) Comparison of IF-GMI to PPA +Fig. 1: (a) Comparison of our proposed IF-GMI with baselines. The blue number below the images is the predicted confidence by the evaluation model. The first column shows the randomly generated images and the second column presents the reconstructed results by PPA [38], a typical GAN-based method focusing on directly optimizing the latent code of GAN model. The last two columns exhibit the results of our proposed IF-GMI and the ground truth images in the private dataset, respectively. (b) Top-1 attack accuracy of PPA and IF-GMI (ours) on four OOD scenarios. + +optimize the latent code input to the generator and then successively optimize the intermediate features from the start layer to the end layer. To avoid unreal image generation, we utilize a $l_{1}$ ball constraint to restrict the deviation when optimizing the intermediate features. In the end, we collect the output images after each intermediate layer optimization process and select the final results with a simple strategy. We conduct comprehensive experiments to evaluate our method in multiple settings, including OOD scenarios, various target models, and different GAN priors. The encouraging experimental results demonstrate that the proposed method outperforms baselines on multiple metrics and achieves high attack accuracy on OOD settings. Finally, we perform extensive experiments and ablation studies to validate the effectiveness of the proposed method. Our main contributions are as follows: + +- We propose a novel GAN-based MI attack method, which disassembles the pre-trained generator and successively optimizes the latent code and intermediate features under the $l_{1}$ ball constraint. +- We demonstrate that our proposed achieves state-of-the-art performance in a range of scenarios, especially under the challenging OOD settings. +- We conduct extensive experiments to validate the effectiveness and outstanding transferability of our method. + +# 2 Related Work + +# 2.1 GAN as prior knowledge + +GANs [15] are a class of deep neural networks that consist of two functional components, a generator and a discriminator, trained concurrently through adversarial processes to generate realistic data. 
The objective of a GAN is to learn the distribution of the training dataset and generate more samples from the learned probability distribution [16]. Well-trained GANs are able to generate high-fidelity and diverse images, of which StyleGANs [24, 25] are excellent representatives. The generator of the StyleGAN consists of a mapping network and a synthesis network. The former maps latent vectors into the intermediate latent space (i.e., $\mathcal{W}$ space), and the latter generates images through style vectors. The feature in the $\mathcal{W}$ space is well-disentangled, which means that images sharing similar features correspond to analogous style vectors. Therefore, PPA [38] performs its attack by searching the style vectors in $\mathcal{W}$ space. The style vectors in the front layers tend to control high-level aspects of the generated images such as pose, face shape, and general hair style, while those in the back ones have more influence on details [24], such as smaller-scale facial features and whether the eyes are open or closed. Moreover, style vectors in $\mathcal{W}$ space do not need to follow the same distribution as the training data, which means that more diverse images can be generated by controlling the vectors [24].

Recent works [10, 32, 52] have shown the richness of the intermediate features in GANs; our investigation likewise explores the potential of leveraging the intermediate latent spaces of different layers to enhance MI attacks. Our findings reveal that this approach significantly improves attack accuracy and yields high-quality inversion results, particularly under the harder OOD scenario.

# 2.2 Model Inversion Attacks

Model inversion (MI) attacks aim at reconstructing the private training data from a trained model. Typically, MI attacks can be divided into the white-box scenario [51] and the black-box scenario [22]. We only focus on the white-box scenario in this paper, which means that the attacker has full access to the trained model. This kind of attack was initially demonstrated through an attempt to extract genomic markers from a linear regression model, as highlighted in the earliest research by [14]. Building on this foundation, subsequent studies [13, 37, 43] have broadened the scope of MI attacks, applying them to more machine learning models, such as shallow networks, and simple forms of data, such as low-resolution grayscale images. However, as the scale of both the data and the models increases, the efficacy of these MI attack methods diminishes dramatically.

In response to this challenge, a novel approach known as GMI, introduced by [51], employs a GAN-based methodology to enhance the ability of MI attacks against deeper and wider DNNs. This strategy leverages a GAN model trained on publicly available data to encapsulate the distributional characteristics of image data, thereby facilitating the generation of high-quality image reconstructions. The attackers first generate a set of preliminary images by inputting a batch of randomly sampled latent vectors into the GAN. These generated images are then fed into the target image classifier to obtain initial predictions. To refine the attack, the attackers iteratively optimize the input latent vectors. This optimization process aims to minimize the discrepancy between the classifier's predictions and the intended target class, as measured by the cross-entropy loss, while also reducing the discriminator loss.
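In code, this GMI-style search reduces to a short optimization loop. The following is a minimal, illustrative PyTorch sketch rather than the original GMI implementation: the handles `G` (generator), `D` (discriminator), and `T` (target classifier), the latent dimension, and the loss weighting `lam` are all placeholder assumptions.

```python
import torch
import torch.nn.functional as F

def gmi_latent_attack(G, D, T, target_class, latent_dim=100,
                      batch_size=8, steps=1500, lr=0.02, lam=100.0):
    """GMI-style inversion sketch: optimize latent vectors z so that G(z)
    is classified as target_class while D still judges G(z) realistic."""
    z = torch.randn(batch_size, latent_dim, requires_grad=True)
    labels = torch.full((batch_size,), target_class, dtype=torch.long)
    opt = torch.optim.Adam([z], lr=lr)

    for _ in range(steps):
        opt.zero_grad()
        x = G(z)                                  # preliminary images
        id_loss = F.cross_entropy(T(x), labels)   # pull predictions toward the target class
        prior_loss = -D(x).mean()                 # keep images realistic for the discriminator
        loss = prior_loss + lam * id_loss         # relative weighting is illustrative
        loss.backward()
        opt.step()
    return G(z).detach()                          # reconstructed samples
```

Later GAN-based attacks largely retain this loop and differ in what is optimized and how the two loss terms are defined.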
![](images/d50aeea83fea1e663e2ca3791e109e1db489790c54f2b18b1b2da20bd0120b0c.jpg)
Fig. 2: Overview of our proposed IF-GMI. Firstly, the latent vectors are sampled from a standard Gaussian distribution and mapped into disentangled latent codes with semantic meanings by the Mapping Network. Then we perform random augmentation on these latent codes to select the optimal ones, denoted as $\mathbf{w}^*$, for optimization. The Synthesis Network is disassembled into multiple blocks to search the intermediate features, which are successively updated with the identity loss calculated from the target model. Finally, the reconstructed images are generated from the last layer as results.

With the help of the GAN, GMI seeks to achieve more precise and convincing reconstructions of complex data, thereby representing a significant advancement in the field of MI attacks.

Many studies in recent years have improved the attack performance in the white-box scenario based on GMI. SecretGen [48] explores the scenario where the attackers know some auxiliary information about the private data. KEDMI [5] improves the discriminator by incorporating target labels and recovers the distribution of the input latent vectors for a target class. VMI [41] reformulates the MI attack from the perspective of variational inference and introduces the KL-divergence as a regularization to better approximate the target distribution with a variational distribution. PPA [38] employs a pre-trained StyleGAN2 to reduce the time cost of attacks and extends the attacks to high-resolution images thanks to the excellent generative ability of StyleGAN2. Moreover, they propose a set of strategies to heighten attack accuracy and robustness, including initial selection, post-selection, and data augmentation. LOMMA [31] introduces model augmentation into MI attacks to reduce overfitting to the target model. They train surrogate models from the target model via model distillation, co-guiding the optimization process with an improved loss function. PLGMI [47] proposes a top-n selection strategy, using target models to generate pseudo labels for publicly available images, thereby directing the training process of the conditional GAN.

# 3 Methodology

In this section, we begin by explaining the fundamental paradigm of MI attacks and provide a formulation for the MI problem. Subsequently, we present our main components and elaborate the detailed pipeline of the proposed IF-GMI, which contributes to the improved performance under the OOD scenario. See Figure 2 for an overview of our method.

# 3.1 Preliminaries

In this paper, we focus on MI attacks under white-box settings, which means all the parameters and components of target models are available to the attacker. For image classification tasks, the malicious adversary aims to reconstruct privacy-sensitive images by leveraging the output prediction confidence of the target classifier and other auxiliary priors. Early works [44] directly optimize the pixels in randomly sampled dummy images $\mathbf{x}$ to approximate target images $\mathbf{x}^*$ given the target model $T_{\theta}$ and target label $c$, which can be formulated as follows:

$$
\hat{\mathbf{x}} = \underset{\mathbf{x}}{\arg\min} \, \mathcal{L}\left(T_{\theta}(\mathbf{x}), c\right), \tag{1}
$$

where $\hat{\mathbf{x}}$ is the reconstructed image, $\mathcal{L}(\cdot,\cdot)$ denotes the classification loss designed for image optimization, and $T_{\theta}(\mathbf{x})$ represents the output confidence.
Due to the full access to the target model in white-box settings, the attacker can calculate the loss and directly perform backpropagation to update the dummy images.

However, the methods above are no longer functional when $\mathbf{x}$ becomes high-dimensional data with an excessive search space. To tackle this issue, recent studies [5, 38, 47, 51] introduce GANs as image priors due to their superior capability to generate high-fidelity RGB images. They propose to train a specially designed GAN on publicly available datasets that have structural similarities with the private dataset, or to utilize a public pre-trained GAN before the attack. Furthermore, the optimization objective is replaced with the latent vectors $\mathbf{z}$ of the generator, which have fewer parameters to optimize. With the aforementioned techniques, the MI problem is transformed into the following formulation:

$$
\hat{\mathbf{z}} = \underset{\mathbf{z}}{\arg\min} \, \mathcal{L}_{id}\left(T_{\theta}(G(\mathbf{z})), c\right) + \lambda \mathcal{L}_{aux}(\mathbf{z}), \tag{2}
$$

where $G$ represents the trained generator, $\mathcal{L}_{id}(\cdot, \cdot)$ denotes the identity loss calculated from the target model $T_{\theta}$, and $\mathcal{L}_{aux}(\cdot)$ is an optional auxiliary loss (e.g., the discriminator loss) with a weighting hyperparameter $\lambda$. By minimizing Eq. 2, the adversary updates the latent vectors $\mathbf{z}$ into the optimal results $\hat{\mathbf{z}}$ and generates the final images through $\hat{\mathbf{x}} = G(\hat{\mathbf{z}})$.

Intuitively, directly optimizing the input latent code of GAN priors is a natural way to acquire ideal reconstructed images, which has led to its widespread application in previous works. However, recent studies [3, 8, 35, 40] have indicated that there is fairly rich semantic information in the intermediate features of GANs beyond the input latent code. This inspires us to surpass the limitation of merely searching the latent space and propose a novel method focusing on the intermediate feature domains, which are closer to the output.

# 3.2 Exploiting Intermediate Features for Enhanced MI Attacks

In the following part, we delve into the internal structure of the GAN prior, attempting to explore its hierarchical layers for enhanced utilization of the rich semantics learned by the generator. Following the pipeline shown in Figure 2, we elucidate each component in detail.

The GAN prior. Most previous GAN-based attacks [5, 31, 47, 51] require training a specialized GAN with an auxiliary dataset tailored to the specific target classifier. However, the prior knowledge of GANs trained under this setting becomes excessively aligned with the target model and the auxiliary dataset, leading to a significant reduction in transferability and generalization.

Therefore, our method relies on a pre-trained StyleGAN2 [23] instead of training a GAN from scratch. The generator of StyleGAN2 can be divided into two components: a mapping network $G_{map}:\mathcal{Z}\to \mathcal{W}$, which maps the initial latent vectors $\mathbf{z}\in \mathcal{Z}$ into the extended $\mathcal{W}$ space [1], and a synthesis network $G_{syn}:\mathcal{W}\rightarrow \mathcal{X}$, which generates images $\mathbf{x}$ from the mapped vectors $\mathbf{w}\in \mathcal{W}$.
Because the $\mathcal{W}$ space exhibits reduced feature entanglement, which facilitates better style generation, we take $\mathbf{w}$ as the initial optimization objective rather than the latent code $\mathbf{z}$ commonly used in previous works. Specifically, we first randomly sample a batch of latent vectors $\mathbf{z}$ from a Gaussian distribution and map them with $G_{map}$ to acquire $\mathbf{w}$, which is iteratively updated in the first step of intermediate features optimization. Moreover, the StyleGAN2 is pre-trained without using the target model $T_{\theta}$ or any other auxiliary prior corresponding to the target dataset, ensuring the flexibility and transferability of our method when attacking different target models and datasets.

Initial Selection. Owing to the randomness in sampling the latent vectors $\mathbf{z}$, some of them may fail to produce appropriate images, leading to a decrease in attack accuracy. To reduce the risk of generating misleading and low-quality images, previous studies [2, 38, 48] have explored the technique of initial selection and validated its effectiveness in obtaining robust latent vectors. Specifically, we first generate images from the randomly sampled $\mathbf{z}$, apply a series of transformations $Aug(\cdot)$ to the images, and feed them into the target classifier $T_{\theta}$ to obtain the corresponding prediction confidence. By selecting the latent vectors with higher scores, we can significantly improve the quality of the final images and better approximate the target distribution.

Inspired by these prior studies [2, 38, 48], we also include the initial selection technique in our method and apply standard image transformations such as random cropping, resizing, and flipping. Different from previous methods, we perform initial selection on the mapped vectors $\mathbf{w}$ instead of the latent vectors $\mathbf{z}$. The robust vectors $\mathbf{w}$ are obtained with the following equation:

$$
\mathbf{w}_{init} = \underset{\mathbf{w}}{\arg\max}\, \operatorname{Conf}\left(T_{\theta}\left(Aug\left(G_{syn}(\mathbf{w})\right)\right), c\right), \tag{3}
$$

where $\operatorname{Conf}(\cdot, \cdot)$ measures the confidence score of the augmented images $Aug(G_{syn}(\mathbf{w}))$ for the specific label $c$.
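The selection step of Eq. (3) can be sketched as follows; `augment` stands for the random crop/resize/flip pipeline, and the candidate counts and batch size are illustrative, not the paper's settings.

```python
import torch

@torch.no_grad()
def initial_selection(g_map, g_syn, target_model, c, augment,
                      n_candidates=2000, n_select=50, z_dim=512, device="cpu"):
    """Sketch of Eq. (3): keep the mapped vectors w whose augmented
    generations receive the highest confidence for class c."""
    z = torch.randn(n_candidates, z_dim, device=device)
    w = g_map(z)                                   # candidate style vectors
    scores = []
    for wi in w.split(32):                         # batch to bound memory use
        imgs = augment(g_syn(wi))                  # random crop/resize/flip
        probs = target_model(imgs).softmax(dim=1)  # prediction confidence
        scores.append(probs[:, c])
    scores = torch.cat(scores)
    top = scores.topk(n_select).indices            # most robust candidates
    return w[top]                                  # w_init
```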
Intermediate Features Optimization. According to [24], the front blocks of the generator control the overall characteristics of an image while the back ones have more influence on local details. Previous studies [38, 47, 51] neglect the role of the latter, which limits the attack performance. To take advantage of the individual blocks, we propose intermediate features optimization, as shown in Algorithm 1. We first optimize the selected latent vectors $\mathbf{w}_{init}$ to obtain the optimal ones $\mathbf{w}_{(0)}$ before launching intermediate features optimization. Then we disassemble the pre-trained generator into $L + 1$ blocks for hierarchical layer searching, i.e.,

$$
G_{syn} = G_{L+1} \circ G_{L} \circ \dots \circ G_{2} \circ G_{1}. \tag{4}
$$

We then feed $\mathbf{w}_{(0)}$ into block $G_{1}$ to attain the first intermediate feature $\mathbf{f}_{(1)}^{0}$.

Algorithm 1 Pseudocode of the core algorithm in our proposed IF-GMI
```txt
Input: $G_{syn}$: a pre-trained generator; $L$: the number of intermediate features; $T_{\theta}$: the target classifier; $\mathcal{L}_{id}$: the identity loss; $r[1 \dots L]$: the radius of the $l_{1}$ ball for each hierarchical feature; $N$: the number of iterations
Output: Reconstructed images $\mathbf{x}^*$
 1: Acquire latent vectors $\mathbf{w}_{init}$ via the initial selection process
 2: $\mathbf{w}_{(0)} \gets \underset{\mathbf{w}}{\arg\min}\, \mathcal{L}_{id}(G_{syn}(\mathbf{w}))$, initialized with $\mathbf{w}_{init}$
 3: Decompose $G_{syn}$ into $G_{L+1} \circ G_{L} \circ \dots \circ G_{2} \circ G_{1}$
 4: Obtain the first intermediate feature $\mathbf{f}_{(1)}^{0} = G_{1}(\mathbf{w}_{(0)})$
 5: Set $\mathbf{w}_{(1)}^{0} = \mathbf{w}_{(0)}$
 6: for $i \gets 1$ to $L$ do
 7:   Set $G_{remain} = G_{L+1} \circ G_{L} \circ \dots \circ G_{i+1}$
 8:   for $j \gets 1$ to $N$ do
 9:     loss $= \mathcal{L}_{id}(G_{remain}(\mathbf{f}_{(i)}^{j-1}, \mathbf{w}_{(i)}^{j-1}))$
10:     $\mathbf{f}_{(i)}^{j} \gets Adam(\mathbf{f}_{(i)}^{j-1}; loss)$, s.t. $||\mathbf{f}_{(i)}^{j} - \mathbf{f}_{(i)}^{0}||_{1} \leq r[i]$
11:     $\mathbf{w}_{(i)}^{j} \gets Adam(\mathbf{w}_{(i)}^{j-1}; loss)$, s.t. $||\mathbf{w}_{(i)}^{j} - \mathbf{w}_{(i)}^{0}||_{1} \leq r[i]$
12:   end for
13:   $\mathbf{f}_{(i+1)}^{0} = G_{i+1}(\mathbf{f}_{(i)}^{N}, \mathbf{w}_{(i)}^{N})$, $\mathbf{w}_{(i+1)}^{0} = \mathbf{w}_{(i)}^{N}$
14: end for
15: The final images $\mathbf{x}^{*} = \mathbf{f}_{(L+1)}^{0}$
16: return $\mathbf{x}^{*}$
```

For each intermediate block $G_{i+1}$, $i \in [1, \ldots, L]$, the corresponding intermediate feature $\mathbf{f}_{(i+1)}^0$ is acquired with the following steps. First, we generate images using the remaining blocks (i.e., $\mathbf{x}_i = G_{L+1} \circ G_L \circ \dots \circ G_{i+1}(\mathbf{f}_{(i)}, \mathbf{w}_{(i)})$) and input them into the target classifier $T_\theta$ to compute the prediction confidence for the loss function. Then, we repeat this process to iteratively update both $\mathbf{w}_{(i)}$ and $\mathbf{f}_{(i)}$. During the optimization, we restrict $\mathbf{f}_{(i)}$ within the $l_1$ ball of radius $r[i]$ centered at the initial intermediate feature $\mathbf{f}_{(i)}^0$ to avoid excessive shifts that may lead to collapsed image generation. Once the iterations are completed, the optimized $\mathbf{w}_{(i)}^N$ and $\mathbf{f}_{(i)}^N$ are fed into the block $G_{i+1}$ to obtain the next intermediate feature $\mathbf{f}_{(i + 1)}^{0}$. Moreover, we take the optimized $\mathbf{w}_{(i)}^{N}$ as the initial latent vector $\mathbf{w}_{(i + 1)}^{0}$ before the next layer optimization starts.
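A minimal sketch of the inner loop (lines 7-13 of Algorithm 1) is given below. We assume the synthesis network has already been split into a Python list `blocks`, each element taking the current feature map and the style vector $\mathbf{w}$; `loss_fn` closes over the target model and computes $\mathcal{L}_{id}$ on generated images. The radial rescaling is a simple surrogate for an exact $l_1$ projection, and all names are illustrative.

```python
import torch

def rescale_into_l1_ball(x, center, radius):
    """Pull x back inside the l1 ball of the given radius around center,
    treating the whole tensor as one flat vector (radial rescaling,
    not an exact Euclidean projection)."""
    delta = x - center
    norm = delta.abs().sum()
    if norm > radius:
        delta = delta * (radius / norm)
    return center + delta

def optimize_block(blocks, i, f0, w0, loss_fn, radius, steps=70, lr=0.005):
    """Sketch of Algorithm 1, lines 7-13: optimize the i-th intermediate
    feature f and the style vector w under an l1 ball constraint, then
    push the result through the next block."""
    f = f0.clone().requires_grad_(True)
    w = w0.clone().requires_grad_(True)
    optimizer = torch.optim.Adam([f, w], lr=lr)
    for _ in range(steps):
        optimizer.zero_grad()
        x = f
        for block in blocks[i + 1:]:       # G_remain = G_{L+1} o ... o G_{i+1}
            x = block(x, w)
        loss_fn(x).backward()              # identity loss from the target model
        optimizer.step()
        with torch.no_grad():              # keep both within their l1 balls
            f.copy_(rescale_into_l1_ball(f, f0, radius))
            w.copy_(rescale_into_l1_ball(w, w0, radius))
    with torch.no_grad():
        f_next = blocks[i + 1](f, w)       # f^0_{(i+1)} for the next stage
    return f_next, w.detach()
```

Calling `optimize_block` for each block index in turn and feeding the returned feature into the next call reproduces the outer loop of Algorithm 1.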
Once we finish searching the last intermediate layer, we generate the final images $\mathbf{x}^*$ from the last intermediate feature $\mathbf{f}_{(L)}^{N}$, i.e., $\mathbf{x}^* = \mathbf{f}_{(L+1)}^{0} = G_{L+1}(\mathbf{f}_{(L)}^{N}, \mathbf{w}_{(L)}^{N})$.

The Overall Loss. While the cross-entropy loss $\mathcal{L}_{CE}$ serves as the identity loss in most early works [5, 48, 51], it has a major drawback: the gradient vanishes as the prediction confidence of the target label $c$ approaches the ground truth in the one-hot vector. Following the previous study [38], we rely on the Poincaré loss function to overcome this problem. The identity loss utilized in our method is therefore defined as follows:

$$
\mathcal{L}_{id} = \operatorname{arccosh}\left(1 + \frac{2\|v_{1} - v_{2}\|_{2}^{2}}{(1 - \|v_{1}\|_{2}^{2})(1 - \|v_{2}\|_{2}^{2})}\right), \tag{5}
$$

where $\|v\|_2$ is the Euclidean norm of the given vector. In our experiments, $v_{1}$ is the normalized prediction confidence and $v_{2}$ is the one-hot vector of the ground truth. Notably, the entry 1 in $v_{2}$ is substituted with 0.9999 to avoid division by zero.
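A direct transcription of Eq. (5) might look as follows; the $l_1$ normalization of $v_1$ and the epsilon guard are our assumptions about how the "normalized prediction confidence" is computed, not a verbatim copy of the original implementation.

```python
import torch

def poincare_loss(logits, target, eps=1e-6):
    """Sketch of Eq. (5). v1: prediction confidence normalized to lie
    inside the unit ball (l1 normalization assumed here); v2: one-hot
    target with 1 replaced by 0.9999 to avoid division by zero."""
    v1 = logits / (logits.norm(p=1, dim=-1, keepdim=True) + eps)
    v2 = torch.zeros_like(v1)
    v2[torch.arange(v1.shape[0]), target] = 0.9999
    num = 2 * (v1 - v2).pow(2).sum(dim=-1)
    den = (1 - v1.pow(2).sum(dim=-1)) * (1 - v2.pow(2).sum(dim=-1))
    return torch.arccosh(1 + num / den).mean()
```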
# 4 Experiments

In this section, we first illustrate the details of our experimental settings. Then, we compare our method with state-of-the-art baselines to evaluate the attack performance. Furthermore, we conduct extensive experiments on multiple target datasets and models to further validate the effectiveness of our method in various settings. Finally, due to computational cost, the ablation studies are evaluated on the first 100 classes of the whole dataset.

# 4.1 Experimental Setup

Datasets. We evaluate our method on two classification tasks: facial image classification and dog breed classification. For the facial image classification task, we select FaceScrub [30] and CelebFaces Attributes (CelebA) [28] as private datasets to train the target models. FaceScrub consists of facial images of actors and actresses with 530 identities in total. CelebA contains facial images of 10,177 identities with coarse alignment. For FaceScrub, we utilize all the identities in the major experiment. For CelebA, we select the 1,000 identities with the most images, comprising over 30,000 images. We use Flickr-Faces-HQ (FFHQ) [24] and MetFaces [23] as public datasets. FFHQ consists of 70,000 high-quality human face images. MetFaces is a dataset of 1,336 human faces extracted from the Metropolitan Museum of Art Collection, which exhibits a large distributional shift from real human faces. For the dog breed classification task, we use Stanford Dogs [9] as the private dataset and Animal Faces-HQ Dogs (AFHQ) [6] as the public dataset. To adapt to the target model, all images in the various datasets are pre-processed to a resolution of $224 \times 224$ pixels in our experiments.

Models. We train classification models with various architectures on the private datasets mentioned above, including ResNet-18 [18], DenseNet-169 [21], ResNet-152 [18], and ResNeSt-101 [50], as target models. Following the settings of previous work [38], we select Inception-v3 [39] as the evaluation model. For the generative model, we employ publicly released StyleGAN2 models pre-trained on the aforementioned public datasets.

Metrics. Following PPA [38], we evaluate the performance of our attack method with the following metrics:

- Attack Accuracy. This metric measures how well the generated samples resemble the target class. We use the evaluation model, trained on the same dataset as the target model, to predict labels for the reconstructed samples and compute the top-1 and top-5 accuracy for the target classes, denoted as $Acc@1$ and $Acc@5$ respectively. The higher the attack accuracy on the evaluation model, the more private information in the dataset can be considered exposed [51].
- Feature Distance. The feature is defined as the output of the model's penultimate layer. We compute the shortest $l_{2}$ feature distance between the reconstructed samples and the private training data for each class and average it over classes (see the sketch after this list). The feature distances evaluated on the evaluation model and on a pre-trained FaceNet [34] are denoted as $\delta_{eval}$ and $\delta_{face}$, respectively.
- Fréchet Inception Distance (FID). FID [19] is commonly used to evaluate images generated by GANs. It computes the distance between feature vectors of the target private data and the reconstructed samples, extracted by an Inception-v3 pre-trained on ImageNet. A lower FID score indicates higher realism and overall diversity [41].
- Sample Diversity. We compute Precision-Recall [26] and Density-Coverage [29] scores, whose higher values indicate greater intra-class diversity of the reconstructed samples. Results for these four metrics are reported and analyzed in the Appendix.
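As an illustration of the feature-distance metric, the sketch below computes the per-class distance, assuming `features` returns the penultimate-layer activations of the evaluation model (the helper name is ours).

```python
import torch

@torch.no_grad()
def class_feature_distance(features, reconstructed, private_images):
    """For one class: shortest l2 distance in feature space between each
    reconstructed sample and the class's private training images."""
    f_rec = features(reconstructed)        # (n_rec, d) penultimate features
    f_priv = features(private_images)      # (n_priv, d)
    dists = torch.cdist(f_rec, f_priv)     # pairwise l2 distances
    return dists.min(dim=1).values.mean()  # average shortest distance
```

Averaging this value over all target classes yields $\delta_{eval}$, or $\delta_{face}$ when FaceNet supplies the features.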
# 4.2 The Number of Optimized Layers

To obtain the highest attack performance, the number of intermediate features $L$ should be explored before conducting the major experiments. When $L$ is small, there is a risk of underfitting, as we only optimize the intermediate features of the first few layers to reconstruct the target images, especially in the OOD scenario. In contrast, when $L$ is too large, the latter layers, which have a greater influence on local details [24], may overfit to the target model in those details and produce unrealistic images. Therefore, we must balance underfitting and overfitting when choosing $L$. We conduct a simple attack on only 10 classes for each combination of public and private datasets and select $L$ according to the results. For instance, Figure 3(a) shows the Acc@1 results for the GAN prior pre-trained on FFHQ against the target DenseNet-169 trained on CelebA; Acc@1 peaks at $L = 3$. Hence, we keep this configuration in the following experiments.

![](images/0e44ab91fb2d2ab96d903b61bf75ae4623ffc976fc2924dcc0e7f44412b5fdb0.jpg)
Fig. 3: (a) Comparison of the Acc@1 metric under various settings of $L$ (i.e., the number of intermediate features); we define $L = 0$ as the special case in which our method degenerates into merely optimizing the latent vectors $\mathbf{w}$. (b) Visual samples generated from different end layers, layer 0 through layer 7.

Table 1: Comparison of our method with state-of-the-art methods against ResNet-18 trained on FaceScrub.
| Public Dataset | Method | ↑ Acc@1 | ↑ Acc@5 | ↓ $\delta_{face}$ | ↓ $\delta_{eval}$ | ↓ FID |
| --- | --- | --- | --- | --- | --- | --- |
| FFHQ | GMI [51] | 0.131 | 0.339 | 1.260 | 149.530 | 77.800 |
| | KEDMI [5] | 0.127 | 0.317 | 1.155 | 186.409 | 144.195 |
| | PPA [38] | 0.962 | 0.996 | 0.707 | 117.834 | 41.688 |
| | LOMMA+GMI [31] | 0.828 | 0.945 | 0.784 | 126.178 | 55.840 |
| | LOMMA+KEDMI [31] | 0.549 | 0.814 | 0.916 | 217.991 | 114.045 |
| | PLGMI [47] | 0.758 | 0.928 | 0.676 | 214.978 | 154.497 |
| | IF-GMI (ours) | 0.979 | 0.996 | 0.667 | 112.915 | 40.581 |
| MetFaces | GMI [51] | 0.038 | 0.136 | 1.361 | 161.036 | 114.648 |
| | KEDMI [5] | 0.003 | 0.017 | 1.651 | 212.952 | 347.468 |
| | PPA [38] | 0.628 | 0.854 | 1.035 | 146.749 | 62.518 |
| | LOMMA+GMI [31] | 0.160 | 0.361 | 1.220 | 156.297 | 101.600 |
| | LOMMA+KEDMI [31] | 0.002 | 0.020 | 1.623 | 214.883 | 333.572 |
| | PLGMI [47] | 0.438 | 0.731 | 0.796 | 205.222 | 245.208 |
| | IF-GMI (ours) | 0.949 | 0.992 | 0.838 | 120.354 | 68.107 |
# 4.3 Comparison with Previous State-of-the-art Attacks

We compare our method with state-of-the-art MI attack methods, including GMI [51], KEDMI [5], PPA [38], LOMMA [31] and PLGMI [47]. Note that LOMMA [31] is a plug-and-play technique designed to augment existing attack methods; we use its original setup, in which LOMMA is integrated with GMI and KEDMI, as our baselines.

The GAN structures employed by GMI, KEDMI, and PLGMI are inherently limited to generating images at a resolution of $64 \times 64$ pixels. To ensure a fair comparison, we adopt the same operation used in PPA [38], which modifies the architecture of the generators and discriminators to enable generation at an enhanced resolution of $256 \times 256$ pixels, i.e., adding two extra upsampling layers to the generator and two downsampling layers to the discriminator, respectively.

![](images/b90b989e4c6f839c952787458555202005ff6fa865d5a92ea072a65f8ba80d9e.jpg)
Private

![](images/2035d4fdac018c526da493640c6bac4170088e367c6f7c56434a501d34ac616d.jpg)
GMI

![](images/74cd5d890531dacfba963b3c14eff81b6e7399c1315c833bb44ad6a1291aec2b.jpg)
KEDMI

![](images/4f1ded3517cfc59ebe50dee073b78bbbcb12b8c7d4cc16706939dd58e6436c20.jpg)
PPA

![](images/936d7b5c460b1573aa457c056b2499497cea2e27cfb21f6e8966ee25a32f6516.jpg)
LOMMA+GMI

![](images/bb49ef5912d38e70d6e03a89365c65fc228bf200738bd8599253bdc0c1c40527.jpg)
LOMMA+KEDMI

![](images/db40d1cc3c1b0b9be0828b3adbfbc759f852a73df17cad5d1afce224d188bb42.jpg)
PLGMI

![](images/4a579351b4e119779b0bd6a66b0bb9ea3a2e8f940b196bc0c4dc1279aae51b6c.jpg)
ours

Fig. 4: Visual comparison of reconstructed images from different methods against the ResNet-18 trained on FaceScrub. The first column shows ground truth images of the target class in the private dataset.

We provide quantitative results against ResNet-18 [18] trained on the FaceScrub dataset in Table 1. Our method achieves significant improvements over previous methods. Especially when the generator is trained on MetFaces, IF-GMI remarkably improves Acc@1 by $15.1\%$ and pushes Acc@5 to nearly $100\%$. Moreover, our method generally achieves a lower feature distance between reconstructed samples and private data than the baselines; for instance, we reduce the distance by more than $10\%$ compared to PPA on the MetFaces dataset. Notably, the MetFaces dataset is composed of artworks and thus has a larger distributional shift from real human faces than the FFHQ dataset. This severely degrades the reconstruction performance of previous attack methods, while our proposed method still performs strongly, highlighting its excellent generalization ability. Visualization results of the recovered images using generators trained on FFHQ are shown in Figure 4. Compared with previous methods, our reconstructed images have higher fidelity and realism, demonstrating the superiority of exploiting the GAN's intermediate features.

# 4.4 Comparison under Different Target Datasets and Models

To validate the effectiveness of the proposed method, we conduct extensive experiments on various datasets using target models with different architectures. We choose PPA as our baseline for comparison due to its comprehensive performance in both accuracy and fidelity. Additional experimental results are in the Appendix.

Table 2: Comparison results against ResNet-152 trained on CelebA.
| Public Dataset | Method | ↑ Acc@1 | ↑ Acc@5 | ↓ $\delta_{face}$ | ↓ $\delta_{eval}$ | ↓ FID |
| --- | --- | --- | --- | --- | --- | --- |
| FFHQ | PPA | 0.806 | 0.946 | 0.736 | 312.580 | 40.430 |
| | IF-GMI (ours) | 0.912 | 0.982 | 0.678 | 314.392 | 30.685 |
| MetFaces | PPA | 0.396 | 0.643 | 1.063 | 387.810 | 74.030 |
| | IF-GMI (ours) | 0.784 | 0.929 | 0.835 | 340.894 | 74.504 |
Table 3: Comparison results against different target models trained on FaceScrub with MetFaces as the public dataset.
| Target Model | Method | ↑ Acc@1 | ↑ Acc@5 | ↓ $\delta_{face}$ | ↓ $\delta_{eval}$ | ↓ FID |
| --- | --- | --- | --- | --- | --- | --- |
| ResNet-152 | PPA | 0.731 | 0.920 | 0.966 | 139.380 | 68.540 |
| | IF-GMI (ours) | 0.904 | 0.984 | 0.882 | 138.752 | 69.937 |
| ResNeSt-101 | PPA | 0.750 | 0.927 | 0.979 | 137.170 | 88.660 |
| | IF-GMI (ours) | 0.922 | 0.983 | 0.884 | 132.609 | 76.195 |
| DenseNet-169 | PPA | 0.798 | 0.948 | 0.938 | 129.440 | 77.520 |
| | IF-GMI (ours) | 0.933 | 0.987 | 0.851 | 125.050 | 82.123 |
As shown in Table 2, our proposed IF-GMI maintains superiority in most metrics against ResNet-152 trained on CelebA. Our method achieves a remarkable increase of $10.6\%$ in Acc@1 and significantly reduces the FID value when using the StyleGAN2 trained on FFHQ. When utilizing the MetFaces StyleGAN2, our method still achieves much better results than the baseline despite the larger distributional shift, including a $38.8\%$ increase in Acc@1 and a competitive feature distance. In addition to ResNet-18, we evaluate the proposed method on more target models trained on FaceScrub, including ResNet-152, ResNeSt-101, and DenseNet-169. Benefiting from the fully utilized generative prior, our method achieves a $13\% \sim 17\%$ improvement in Acc@1 over the baseline and also achieves better results on most of the other metrics, as illustrated in Table 3.

The results presented above demonstrate that our method maintains outstanding attack performance in a variety of settings, exhibiting excellent generalizability and transferability. We also provide additional experimental results on more datasets and architectures in the Appendix.

# 4.5 Ablation Studies

To estimate the contribution of each component in our method, we conduct ablation studies on ResNet-152 trained on the CelebA dataset, using the StyleGAN2 trained on FFHQ. The results are presented in Table 4. More ablation studies are listed in the Appendix.

Intermediate Features Optimization. We remove only the intermediate features optimization from our pipeline while keeping the remaining parameters unchanged.

Table 4: Ablation study performed on ResNet-152 trained on CelebA with FFHQ as the public dataset. IF-GMI-$i$ removes the intermediate feature optimization and only searches the latent space. IF-GMI-$l$ removes the $l_{1}$ ball constraint from IF-GMI.
| Method | ↑ Acc@1 | ↑ Acc@5 | ↓ $\delta_{face}$ | ↓ $\delta_{eval}$ | ↓ FID |
| --- | --- | --- | --- | --- | --- |
| IF-GMI-$i$ | 0.803 | 0.928 | 0.732 | 314.275 | 43.576 |
| IF-GMI-$l$ | 0.945 | 0.992 | 0.678 | 315.278 | 37.528 |
| IF-GMI | 0.947 | 0.993 | 0.677 | 315.032 | 37.461 |
As shown in the first row of Table 4, removing this technique degrades Acc@1 by up to $14\%$ and yields a much worse FID, demonstrating the benefit of utilizing the hierarchical features of the intermediate layers.

$l_{1}$ Ball Constraint. To avoid unrealistic image generation, we introduce the $l_{1}$ ball constraint into the intermediate features optimization. The results in the second row of Table 4 show that the $l_{1}$ ball constraint improves performance on all metrics, demonstrating the necessity of restricting the intermediate features within the $l_{1}$ ball.

# 5 Conclusion

We proposed IF-GMI, a novel model inversion attack that remains effective in the OOD scenario. Surpassing the limitation of treating the generator as a black box, we studied its structure and decomposed the generator into hierarchical layers, extending the optimization space from the latent code to intermediate features to generate stable and high-quality images. Moreover, to avoid generating low-fidelity images, we applied an $l_{1}$ ball constraint to the optimization process. Through extensive experiments, we demonstrated that the proposed IF-GMI achieves state-of-the-art attack accuracy while generating samples with high fidelity and diversity.

Our exploration of the enhanced utilization of intermediate features in the GAN prior contributes to advances in the MI attack field, paving the way for more practical deployment of MI attacks. We hope this paper raises concerns about the privacy leakage risk of released pre-trained models and encourages further responses to the threat of MI attacks.

# Acknowledgments

This work is supported in part by the National Natural Science Foundation of China under grants 62171248 and 62301189, the Guangdong Basic and Applied Basic Research Foundation under grant 2021A1515110066, the PCNL KEY project (PCL2021A07), and the Shenzhen Science and Technology Program under grants JCYJ20220818101012025, RCBS20221008093124061, and GXWD20220811172936001.

# References

1. Abdal, R., Qin, Y., Wonka, P.: Image2stylegan: How to embed images into the stylegan latent space? In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 4432-4441 (2019)
2. An, S., Tao, G., Xu, Q., Liu, Y., Shen, G., Yao, Y., Xu, J., Zhang, X.: Mirror: Model inversion for deep learning network with high fidelity. In: NDSS (2022)
3. Bau, D., Zhu, J.Y., Strobelt, H., Zhou, B., Tenenbaum, J.B., Freeman, W.T., Torralba, A.: Gan dissection: Visualizing and understanding generative adversarial networks. arXiv preprint arXiv:1811.10597 (2018)
4. Chen, B., Feng, Y., Dai, T., Bai, J., Jiang, Y., Xia, S.T., Wang, X.: Adversarial examples generation for deep product quantization networks on image retrieval. IEEE Transactions on Pattern Analysis and Machine Intelligence 45(2), 1388-1404 (2022)
5. Chen, S., Kahla, M., Jia, R., Qi, G.J.: Knowledge-enriched distributional model inversion attacks. In: ICCV (2021)
6. Choi, Y., Uh, Y., Yoo, J., Ha, J.W.: Stargan v2: Diverse image synthesis for multiple domains. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8188-8197 (2020)
7. Conneau, A., Baevski, A., Collobert, R., Mohamed, A., Auli, M.: Unsupervised cross-lingual representation learning for speech recognition. arXiv preprint arXiv:2006.13979 (2020)
8. Daras, G., Dean, J., Jalal, A., Dimakis, A.G.: Intermediate layer optimization for inverse problems using deep generative models.
arXiv preprint arXiv:2102.07364 (2021)
9. Khosla, A., Jayadevaprakash, N., Yao, B., Fei-Fei, L.: Novel dataset for fine-grained image categorization: Stanford dogs. In: First Workshop on Fine-Grained Visual Categorization, CVPR (2011)
10. Fang, H., Chen, B., Wang, X., Wang, Z., Xia, S.T.: Gifd: A generative gradient inversion method with feature domain optimization. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4967-4976 (2023)
11. Fang, H., Kong, J., Yu, W., Chen, B., Li, J., Xia, S., Xu, K.: One perturbation is enough: On generating universal adversarial perturbations against vision-language pre-training models. arXiv preprint arXiv:2406.05491 (2024)
12. Fang, H., Qiu, Y., Yu, H., Yu, W., Kong, J., Chong, B., Chen, B., Wang, X., Xia, S.T.: Privacy leakage on dnns: A survey of model inversion attacks and defenses. arXiv preprint arXiv:2402.04013 (2024)
13. Fredrikson, M., Jha, S., Ristenpart, T.: Model inversion attacks that exploit confidence information and basic countermeasures. In: CCS. pp. 1322-1333 (2015)
14. Fredrikson, M., Lantz, E., Jha, S., Lin, S., Page, D., Ristenpart, T.: Privacy in pharmacogenetics: An end-to-end case study of personalized warfarin dosing. In: USENIX Security. pp. 17-32 (2014)
15. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial nets. Advances in neural information processing systems 27 (2014)
16. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial networks. Communications of the ACM 63(11), 139-144 (2020)
17. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)
18. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)
19. Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems 30 (2017)
20. Hu, H., Salcic, Z., Sun, L., Dobbie, G., Yu, P.S., Zhang, X.: Membership inference attacks on machine learning: A survey. ACM Computing Surveys (CSUR) 54(11s), 1-37 (2022)
21. Huang, G., Liu, Z., Van Der Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 4700-4708 (2017)
22. Kahla, M., Chen, S., Just, H.A., Jia, R.: Label-only model inversion attacks via boundary repulsion. In: CVPR (2022)
23. Karras, T., Aittala, M., Hellsten, J., Laine, S., Lehtinen, J., Aila, T.: Training generative adversarial networks with limited data. Advances in neural information processing systems 33, 12104-12114 (2020)
24. Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 4401-4410 (2019)
25. Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., Aila, T.: Analyzing and improving the image quality of stylegan. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8110-8119 (2020)
26. Kynkäänniemi, T., Karras, T., Laine, S., Lehtinen, J., Aila, T.: Improved precision and recall metric for assessing generative models. Advances in Neural Information Processing Systems 32 (2019)
27. Li, C., Qiu, M.: Reinforcement learning for cyber-physical systems: with cybersecurity case studies. Chapman and Hall/CRC (2019)
28. Liu, Z., Luo, P., Wang, X., Tang, X.: Deep learning face attributes in the wild. In: Proceedings of the IEEE international conference on computer vision. pp. 3730-3738 (2015)
29. Naeem, M.F., Oh, S.J., Uh, Y., Choi, Y., Yoo, J.: Reliable fidelity and diversity metrics for generative models. In: International Conference on Machine Learning. pp. 7176-7185. PMLR (2020)
30. Ng, H.W., Winkler, S.: A data-driven approach to cleaning large face datasets. In: 2014 IEEE international conference on image processing (ICIP). pp. 343-347. IEEE (2014)
31. Nguyen, N.B., Chandrasegaran, K., Abdollahzadeh, M., Cheung, N.M.: Rethinking model inversion attacks against deep neural networks. In: CVPR. pp. 16384-16393 (2023)
32. Park, J.Y., Smedemark-Margulies, N., Daniels, M., Yu, R., van de Meent, J.W., Hand, P.: Generator surgery for compressed sensing. In: NeurIPS 2020 Workshop on Deep Learning and Inverse Problems (2020), https://openreview.net/forum?id=s2EucjZ6d2s
33. Qiu, H., Dong, T., Zhang, T., Lu, J., Memmi, G., Qiu, M.: Adversarial attacks against network intrusion detection in IoT systems. IEEE Internet of Things Journal 8(13), 10327-10335 (2020)
34. Schroff, F., Kalenichenko, D., Philbin, J.: Facenet: A unified embedding for face recognition and clustering. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 815-823 (2015)
35. Shen, Y., Gu, J., Tang, X., Zhou, B.: Interpreting the latent space of gans for semantic face editing. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 9243-9252 (2020)
36. Shokri, R., Stronati, M., Song, C., Shmatikov, V.: Membership inference attacks against machine learning models. In: 2017 IEEE symposium on security and privacy (SP). pp. 3-18. IEEE (2017)
37. Song, C., Ristenpart, T., Shmatikov, V.: Machine learning models that remember too much. In: CCS. pp. 587-601 (2017)
38. Struppek, L., Hintersdorf, D., Correia, A.D.A., Adler, A., Kersting, K.: Plug & play attacks: Towards robust and flexible model inversion attacks. In: ICML (2022)
39. Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., Wojna, Z.: Rethinking the inception architecture for computer vision. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2818-2826 (2016)
40. Tewari, A., Elgharib, M., Bernard, F., Seidel, H.P., Pérez, P., Zollhöfer, M., Theobalt, C.: Pie: Portrait image embedding for semantic control. ACM Transactions on Graphics (TOG) 39(6), 1-14 (2020)
41. Wang, K.C., Fu, Y., Li, K., Khisti, A., Zemel, R., Makhzani, A.: Variational model inversion attacks. In: NeurIPS (2021)
42. Wu, C., Yan, M.: Session-aware information embedding for e-commerce product recommendation. In: Proceedings of the 2017 ACM on conference on information and knowledge management. pp. 2379-2382 (2017)
43. Yang, Z., Zhang, J., Chang, E.C., Liang, Z.: Neural network inversion in adversarial setting via background knowledge alignment. In: CCS (2019)
44. Yin, H., Molchanov, P., Alvarez, J.M., Li, Z., Mallya, A., Hoiem, D., Jha, N.K., Kautz, J.: Dreaming to distill: Data-free knowledge transfer via deepinversion.
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8715-8724 (2020) +45. Yu, W., Chen, B., Zhang, Q., Xia, S.T.: Editable-deepsc: Cross-modal editable semantic communication systems. arXiv preprint arXiv:2310.10347 (2023) +46. Yu, W., Fang, H., Chen, B., Sui, X., Chen, C., Wu, H., Xia, S.T., Xu, K.: Gi-nas: Boosting gradient inversion attacks through adaptive neural architecture search. arXiv preprint arXiv:2405.20725 (2024) +47. Yuan, X., Chen, K., Zhang, J., Zhang, W., Yu, N., Zhang, Y.: Pseudo label-guided model inversion attack via conditional generative adversarial network. In: AAAI (2023) +48. Yuan, Z., Wu, F., Long, Y., Xiao, C., Li, B.: Secretgen: Privacy recovery on pretrained models via distribution discrimination. In: ECCV (2022) +49. Zeng, Y., Pan, M., Just, H.A., Lyu, L., Qiu, M., Jia, R.: Narcissus: A practical clean-label backdoor attack with limited information. In: Proceedings of the 2023 ACM SIGSAC Conference on Computer and Communications Security. pp. 771-785 (2023) +50. Zhang, H., Wu, C., Zhang, Z., Zhu, Y., Lin, H., Zhang, Z., Sun, Y., He, T., Mueller, J., Manmatha, R., et al.: Resnest: Split-attention networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 2736-2746 (2022) +51. Zhang, Y., Jia, R., Pei, H., Wang, W., Li, B., Song, D.: The secret revealer: Generative model-inversion attacks against deep neural networks. In: CVPR (2020) +52. Zhong, X., Fang, H., Chen, B., Gu, X., Dai, T., Qiu, M., Xia, S.T.: Hierarchical features matter: A deep exploration of gan priors for improved dataset distillation. arXiv preprint arXiv:2406.05704 (2024) \ No newline at end of file diff --git a/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/images.zip b/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..467fbb0f9e1c86ed7e99544f9b38a7a8352aadbc --- /dev/null +++ b/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40eeb0362d92328943b2cbbe374c8df2663c8ded5b7eadbf90dfebad1ef16d33 +size 389028 diff --git a/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/layout.json b/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..908d379a7e87e7e786ea4f703ea92d4ff8099824 --- /dev/null +++ b/2024/A Closer Look at GAN Priors_ Exploiting Intermediate Features for Enhanced Model Inversion Attacks/layout.json @@ -0,0 +1,11725 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 157, + 111, + 459, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 111, + 459, + 163 + ], + "spans": [ + { + "bbox": [ + 157, + 111, + 459, + 163 + ], + "type": "text", + "content": "A Closer Look at GAN Priors: Exploiting Intermediate Features for Enhanced Model Inversion Attacks" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 137, + 185, + 478, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 185, + 478, + 210 + ], + "spans": [ + { + "bbox": [ + 137, + 185, + 478, + 210 + ], + "type": "text", + "content": "Yixiang Qiu" + }, + { + "bbox": [ + 137, + 185, + 
478, + 210 + ], + "type": "inline_equation", + "content": "^{1,2\\dagger}" + }, + { + "bbox": [ + 137, + 185, + 478, + 210 + ], + "type": "text", + "content": ", Hao Fang" + }, + { + "bbox": [ + 137, + 185, + 478, + 210 + ], + "type": "inline_equation", + "content": "^{2\\dagger}" + }, + { + "bbox": [ + 137, + 185, + 478, + 210 + ], + "type": "text", + "content": ", Hongyao Yu" + }, + { + "bbox": [ + 137, + 185, + 478, + 210 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + }, + { + "bbox": [ + 137, + 185, + 478, + 210 + ], + "type": "text", + "content": ", Bin Chen" + }, + { + "bbox": [ + 137, + 185, + 478, + 210 + ], + "type": "inline_equation", + "content": "^{1,3,4\\#}" + }, + { + "bbox": [ + 137, + 185, + 478, + 210 + ], + "type": "text", + "content": ", MeiKang Qiu" + }, + { + "bbox": [ + 137, + 185, + 478, + 210 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 137, + 185, + 478, + 210 + ], + "type": "text", + "content": ", and Shu-Tao Xia" + }, + { + "bbox": [ + 137, + 185, + 478, + 210 + ], + "type": "inline_equation", + "content": "^{2,4}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 135, + 219, + 478, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 219, + 478, + 298 + ], + "spans": [ + { + "bbox": [ + 135, + 219, + 478, + 298 + ], + "type": "text", + "content": "1 Harbin Institute of Technology, Shenzhen \n2 Tsinghua Shenzhen International Graduate School, Tsinghua University \n3 Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies \n4 Pengcheng Laboratory 5 Augusta University \nqiuyixiang@stu.hit.edu.cn, fang-h23@mails.tsinghua.edu.cn \nyuhongyao@stu.hit.edu.cn, chenbin2021@hit.edu.cn \nqiumeikang@yahoo.com, xiast@sz.tsinghua.edu.cn" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 160, + 319, + 455, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 319, + 455, + 540 + ], + "spans": [ + { + "bbox": [ + 160, + 319, + 455, + 540 + ], + "type": "text", + "content": "Abstract. Model Inversion (MI) attacks aim to reconstruct privacy-sensitive training data from released models by utilizing output information, raising extensive concerns about the security of Deep Neural Networks (DNNs). Recent advances in generative adversarial networks (GANs) have contributed significantly to the improved performance of MI attacks due to their powerful ability to generate realistic images with high fidelity and appropriate semantics. However, previous MI attacks have solely disclosed private information in the latent space of GAN priors, limiting their semantic extraction and transferability across multiple target models and datasets. To address this challenge, we propose a novel method, Intermediate Features enhanced Generative Model Inversion (IF-GMI), which disassembles the GAN structure and exploits features between intermediate blocks. This allows us to extend the optimization space from latent code to intermediate features with enhanced expressive capabilities. To prevent GAN priors from generating unrealistic images, we apply a " + }, + { + "bbox": [ + 160, + 319, + 455, + 540 + ], + "type": "inline_equation", + "content": "l_{1}" + }, + { + "bbox": [ + 160, + 319, + 455, + 540 + ], + "type": "text", + "content": " ball constraint to the optimization process. 
Experiments on multiple benchmarks demonstrate that our method significantly outperforms previous approaches and achieves state-of-the-art results under various settings, especially in the out-of-distribution (OOD) scenario. Our code is available at: https://github.com/final-solution/IF-GMI" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 160, + 550, + 402, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 550, + 402, + 561 + ], + "spans": [ + { + "bbox": [ + 160, + 550, + 402, + 561 + ], + "type": "text", + "content": "Keywords: Privacy " + }, + { + "bbox": [ + 160, + 550, + 402, + 561 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 550, + 402, + 561 + ], + "type": "text", + "content": " Model Inversion " + }, + { + "bbox": [ + 160, + 550, + 402, + 561 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 160, + 550, + 402, + 561 + ], + "type": "text", + "content": " Generative Priors" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 578, + 231, + 592 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 578, + 231, + 592 + ], + "spans": [ + { + "bbox": [ + 132, + 578, + 231, + 592 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 601, + 482, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 601, + 482, + 626 + ], + "spans": [ + { + "bbox": [ + 130, + 601, + 482, + 626 + ], + "type": "text", + "content": "In recent years, Deep Neural Networks (DNNs) have experienced unprecedented development and achieved tremendous success in a wide range of applications," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 142, + 654, + 471, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 654, + 471, + 665 + ], + "spans": [ + { + "bbox": [ + 142, + 654, + 471, + 665 + ], + "type": "text", + "content": "This work was done while Yixiang Qiu was pre-admitted to Tsinghua University." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 142, + 631, + 227, + 643 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 631, + 227, + 643 + ], + "spans": [ + { + "bbox": [ + 142, + 631, + 227, + 643 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 142, + 631, + 227, + 643 + ], + "type": "text", + "content": " Equal contribution." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 143, + 643, + 241, + 654 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 643, + 241, + 654 + ], + "spans": [ + { + "bbox": [ + 143, + 643, + 241, + 654 + ], + "type": "text", + "content": "Corresponding author." + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 236 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 236 + ], + "type": "text", + "content": "including face recognition [17], personalized recommendations [42], and audio recognition [7]. While DNNs bring us many practical benefits, concerns [4,10,11, 46] about privacy and security have also been raised and drawn great attention. Recent studies have demonstrated that there is a certain risk of privacy leakage for DNNs as an adversary could reveal private information from these pre-trained models. 
Various types of novel privacy attacks [27,33,49] have been proposed, such as membership inference attack [20,36] and gradient inversion attack [10, 46]. Among the new attack methods, Model Inversion (MI) attack [12] poses a greater threat due to its powerful capability in recovering the privacy-sensitive datasets that are collected and utilized for model training." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 239, + 482, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 239, + 482, + 419 + ], + "spans": [ + { + "bbox": [ + 130, + 239, + 482, + 419 + ], + "type": "text", + "content": "[14] proposes the first MI attack to reconstruct sensitive features of genomic data and demonstrate that linear regression models are vulnerable to such privacy attacks. Subsequent studies [13, 37, 43] have extended MI attacks to more Machine Learning (ML) models, but are still limited to models with simple structure and low-dimensional data such as grayscale images. Recent advances in the MI attack field have overcome the challenges in image data recovery by applying Generative Adversarial Networks (GANs) [16], resulting in the extension to DNNs with more complex structure and high-dimensional data such as RGB images. [51] first introduces the GANs to MI attack scenarios, serving as image priors. To better reveal privacy-sensitive information, [51] and subsequent GAN-based methods [5, 41, 47, 48] train GANs with publicly available datasets that have structural similarity with target private datasets. Furthermore, [38] propose to leverage the public pre-trained GAN models (e.g., StyleGAN [24]) as GAN priors, which have a stronger ability to generate high-resolution images and do not require a time-consuming training process." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 422, + 482, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 422, + 482, + 602 + ], + "spans": [ + { + "bbox": [ + 130, + 422, + 482, + 602 + ], + "type": "text", + "content": "Although the aforementioned methods have achieved great progress in recovering high-quality and privacy-sensitive images, the effectiveness of GAN-based MI attacks is limited under certain scenarios. One typical challenge is the out-of-distribution (OOD) scenario, where there is a significant distributional shift between the target private dataset and the public dataset used in the training process of GAN priors. Most previous methods [5, 41, 48, 51] merely work well under scenarios with slight distributional shifts. For instance, they split the same dataset into two parts, one used as the public dataset and the other used as the private dataset. In recent years, some studies [3, 8, 35, 40, 45] have demonstrated that there is rich semantic information encoded in the latent code and intermediate features of GANs. Inspired by these works, we empirically observe that the rich semantic information encoded in the intermediate features helps to sufficiently recover high-quality private data under more rigorous settings, as shown in Figure 1. Therefore, it is imperative to explore methods for leveraging the GAN's intrinsic layered knowledge into MI attacks, mitigating the OOD issue." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 605, + 482, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 482, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 482, + 665 + ], + "type": "text", + "content": "To this end, we propose a novel MI attack method, Intermediate Features enhanced Generative Model Inversion (IF-GMI), which effectively disassembles the GAN structure and leverages features between intermediate blocks. Specifically, we consider the generator of the GAN as a concatenation of multiple blocks and the vectors produced between the blocks as intermediate features. We first" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "text", + "content": "Y. Qiu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 138, + 116, + 172, + 148 + ], + "blocks": [ + { + "bbox": [ + 138, + 116, + 172, + 148 + ], + "lines": [ + { + "bbox": [ + 138, + 116, + 172, + 148 + ], + "spans": [ + { + "bbox": [ + 138, + 116, + 172, + 148 + ], + "type": "image", + "image_path": "4057bffa9d95fa63769b3a9fc77a3827b617b6e5f53b3229c90871dbf3c98f35.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 145, + 150, + 165, + 156 + ], + "lines": [ + { + "bbox": [ + 145, + 150, + 165, + 156 + ], + "spans": [ + { + "bbox": [ + 145, + 150, + 165, + 156 + ], + "type": "text", + "content": "0.0000" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 138, + 157, + 172, + 189 + ], + "blocks": [ + { + "bbox": [ + 138, + 157, + 172, + 189 + ], + "lines": [ + { + "bbox": [ + 138, + 157, + 172, + 189 + ], + "spans": [ + { + "bbox": [ + 138, + 157, + 172, + 189 + ], + "type": "image", + "image_path": "6f5f6c0af33072bd26f8227755b3670ec1d5f67bfc325aaf947dd57cda4ede4a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 143, + 190, + 168, + 198 + ], + "lines": [ + { + "bbox": [ + 143, + 190, + 168, + 198 + ], + "spans": [ + { + "bbox": [ + 143, + 190, + 168, + 198 + ], + "type": "text", + "content": "0.0000" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 140, + 201, + 171, + 209 + ], + "lines": [ + { + "bbox": [ + 140, + 201, + 171, + 209 + ], + "spans": [ + { + "bbox": [ + 140, + 201, + 171, + 209 + ], + "type": "text", + "content": "Generation" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 176, + 116, + 207, + 149 + ], + "blocks": [ + { + "bbox": [ + 176, + 116, + 207, + 149 + ], + "lines": [ + { + "bbox": [ + 176, + 116, + 207, + 149 + ], + "spans": [ + { + "bbox": [ + 176, + 116, + 207, + 149 + ], + "type": "image", + "image_path": "c4e121353fc9d4fce58b215281bb3996d31bcf1615473a40c10492f40e6770f7.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + 
}, + { + "bbox": [ + 182, + 150, + 200, + 156 + ], + "lines": [ + { + "bbox": [ + 182, + 150, + 200, + 156 + ], + "spans": [ + { + "bbox": [ + 182, + 150, + 200, + 156 + ], + "type": "text", + "content": "0.3600" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 176, + 157, + 206, + 189 + ], + "blocks": [ + { + "bbox": [ + 176, + 157, + 206, + 189 + ], + "lines": [ + { + "bbox": [ + 176, + 157, + 206, + 189 + ], + "spans": [ + { + "bbox": [ + 176, + 157, + 206, + 189 + ], + "type": "image", + "image_path": "6f1614344c9dc466438968c5eb94d1b676bfb2ce116f4512e96ad811e0ec3676.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 184, + 190, + 201, + 196 + ], + "lines": [ + { + "bbox": [ + 184, + 190, + 201, + 196 + ], + "spans": [ + { + "bbox": [ + 184, + 190, + 201, + 196 + ], + "type": "text", + "content": "0.2200" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 187, + 199, + 200, + 206 + ], + "lines": [ + { + "bbox": [ + 187, + 199, + 200, + 206 + ], + "spans": [ + { + "bbox": [ + 187, + 199, + 200, + 206 + ], + "type": "text", + "content": "PPA" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 211, + 116, + 242, + 149 + ], + "blocks": [ + { + "bbox": [ + 211, + 116, + 242, + 149 + ], + "lines": [ + { + "bbox": [ + 211, + 116, + 242, + 149 + ], + "spans": [ + { + "bbox": [ + 211, + 116, + 242, + 149 + ], + "type": "image", + "image_path": "850be9d20fd9e7e0bf7c398c12f999b7b9cd793dde409625621992fb51588c95.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 219, + 150, + 237, + 156 + ], + "lines": [ + { + "bbox": [ + 219, + 150, + 237, + 156 + ], + "spans": [ + { + "bbox": [ + 219, + 150, + 237, + 156 + ], + "type": "text", + "content": "0.9975" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 211, + 157, + 242, + 189 + ], + "blocks": [ + { + "bbox": [ + 211, + 157, + 242, + 189 + ], + "lines": [ + { + "bbox": [ + 211, + 157, + 242, + 189 + ], + "spans": [ + { + "bbox": [ + 211, + 157, + 242, + 189 + ], + "type": "image", + "image_path": "2536e1bf41eb0f8ec381ed92212c55e6c182395414fb17892502f5b5d14d093a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 219, + 190, + 238, + 196 + ], + "lines": [ + { + "bbox": [ + 219, + 190, + 238, + 196 + ], + "spans": [ + { + "bbox": [ + 219, + 190, + 238, + 196 + ], + "type": "text", + "content": "0.9988" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 217, + 199, + 239, + 205 + ], + "lines": [ + { + "bbox": [ + 217, + 199, + 239, + 205 + ], + "spans": [ + { + "bbox": [ + 217, + 199, + 239, + 205 + ], + "type": "text", + "content": "IF-GMI" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 247, + 116, + 278, + 149 + ], + "blocks": [ + { + "bbox": [ + 247, + 116, + 278, + 149 + ], + "lines": [ + { + "bbox": [ + 247, + 116, + 278, + 149 + ], + "spans": [ + { + "bbox": [ + 247, + 116, + 278, + 149 + ], + "type": "image", + "image_path": "50c3d0634734bf852aff41eaa2b402646d6a5b8fcb588d72973a65fb920978ee.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 
251, + 150, + 269, + 156 + ], + "lines": [ + { + "bbox": [ + 251, + 150, + 269, + 156 + ], + "spans": [ + { + "bbox": [ + 251, + 150, + 269, + 156 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 246, + 157, + 278, + 189 + ], + "blocks": [ + { + "bbox": [ + 246, + 157, + 278, + 189 + ], + "lines": [ + { + "bbox": [ + 246, + 157, + 278, + 189 + ], + "spans": [ + { + "bbox": [ + 246, + 157, + 278, + 189 + ], + "type": "image", + "image_path": "6ed32123a3bf4d8d6b5661bb50102962aea8ee00622e289b8f0ec367e4629b9a.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 251, + 198, + 275, + 205 + ], + "lines": [ + { + "bbox": [ + 251, + 198, + 275, + 205 + ], + "spans": [ + { + "bbox": [ + 251, + 198, + 275, + 205 + ], + "type": "text", + "content": "Original" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 289, + 118, + 479, + 212 + ], + "blocks": [ + { + "bbox": [ + 148, + 212, + 271, + 221 + ], + "lines": [ + { + "bbox": [ + 148, + 212, + 271, + 221 + ], + "spans": [ + { + "bbox": [ + 148, + 212, + 271, + 221 + ], + "type": "text", + "content": "(a) Visual examples of IF-GMI to PPA" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 289, + 118, + 479, + 212 + ], + "lines": [ + { + "bbox": [ + 289, + 118, + 479, + 212 + ], + "spans": [ + { + "bbox": [ + 289, + 118, + 479, + 212 + ], + "type": "image", + "image_path": "406733f23327a1bc9b2c56e1adbbe0d2dd9b40270bb44ce2ce19bba8a062f521.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 331, + 213, + 440, + 222 + ], + "lines": [ + { + "bbox": [ + 331, + 213, + 440, + 222 + ], + "spans": [ + { + "bbox": [ + 331, + 213, + 440, + 222 + ], + "type": "text", + "content": "(b) Comparison of IF-GMI to PPA" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 131, + 235, + 480, + 312 + ], + "lines": [ + { + "bbox": [ + 131, + 235, + 480, + 312 + ], + "spans": [ + { + "bbox": [ + 131, + 235, + 480, + 312 + ], + "type": "text", + "content": "Fig. 1: (a) Comparison of our proposed IF-GMI with baselines. The blue number below the images is the predicted confidence by the evaluation model. The first column shows the randomly generated images and the second column presents the reconstructed results by PPA [38], a typical GAN-based method focusing on directly optimizing the latent code of GAN model. The last two columns exhibit the results of our proposed IF-GMI and the ground truth images in the private dataset, respectively. (b) Top-1 attack accuracy of PPA and IF-GMI (ours) on four OOD scenarios." + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "bbox": [ + 130, + 334, + 480, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 334, + 480, + 476 + ], + "spans": [ + { + "bbox": [ + 130, + 334, + 480, + 476 + ], + "type": "text", + "content": "optimize the latent code input to the generator and then successively optimize the intermediate features from the start layer to the end layer. 
To avoid unreal image generation, we utilize a " + }, + { + "bbox": [ + 130, + 334, + 480, + 476 + ], + "type": "inline_equation", + "content": "l_{1}" + }, + { + "bbox": [ + 130, + 334, + 480, + 476 + ], + "type": "text", + "content": " ball constraint to restrict the deviation when optimizing the intermediate features. In the end, we collect the output images after each intermediate layer optimization process and select the final results with a simple strategy. We conduct comprehensive experiments to evaluate our method in multiple settings, including OOD scenarios, various target models, and different GAN priors. The encouraging experimental results demonstrate that the proposed method outperforms baselines on multiple metrics and achieves high attack accuracy on OOD settings. Finally, we perform extensive experiments and ablation studies to validate the effectiveness of the proposed method. Our main contributions are as follows:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 137, + 483, + 479, + 565 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 138, + 483, + 479, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 483, + 479, + 517 + ], + "spans": [ + { + "bbox": [ + 138, + 483, + 479, + 517 + ], + "type": "text", + "content": "- We propose a novel GAN-based MI attack method, which disassembles the pre-trained generator and successively optimizes the latent code and intermediate features under the " + }, + { + "bbox": [ + 138, + 483, + 479, + 517 + ], + "type": "inline_equation", + "content": "l_{1}" + }, + { + "bbox": [ + 138, + 483, + 479, + 517 + ], + "type": "text", + "content": " ball constraint." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 137, + 519, + 479, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 519, + 479, + 542 + ], + "spans": [ + { + "bbox": [ + 137, + 519, + 479, + 542 + ], + "type": "text", + "content": "- We demonstrate that our proposed achieves state-of-the-art performance in a range of scenarios, especially under the challenging OOD settings." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 137, + 543, + 479, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 543, + 479, + 565 + ], + "spans": [ + { + "bbox": [ + 137, + 543, + 479, + 565 + ], + "type": "text", + "content": "- We conduct extensive experiments to validate the effectiveness and outstanding transferability of our method." 
+ } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 131, + 589, + 236, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 589, + 236, + 601 + ], + "spans": [ + { + "bbox": [ + 131, + 589, + 236, + 601 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 131, + 611, + 285, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 611, + 285, + 624 + ], + "spans": [ + { + "bbox": [ + 131, + 611, + 285, + 624 + ], + "type": "text", + "content": "2.1 GAN as prior knowledge" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 130, + 629, + 479, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 479, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 479, + 665 + ], + "type": "text", + "content": "GANs [15] are a class of deep neural networks that consist of two functional components, a generator and a discriminator, trained concurrently through adversarial processes to generate realistic data. The objective of a GAN is to learn the" + } + ] + } + ], + "index": 32 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 324, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 324, + 91, + 447, + 100 + ], + "type": "text", + "content": "A Closer Look at GAN Priors" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 295 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 295 + ], + "type": "text", + "content": "distribution of the training dataset and generate more samples from the learned probability distribution [16]. Well-trained GANs are able to generate high-fidelity and diverse images, excellent representative of which are StyleGANs [24,25]. The generator of the StyleGAN consists of a mapping network and a synthesis network. The former maps latent vectors into the intermediate latent space (i.e. " + }, + { + "bbox": [ + 130, + 116, + 482, + 295 + ], + "type": "inline_equation", + "content": "\\mathcal{W}" + }, + { + "bbox": [ + 130, + 116, + 482, + 295 + ], + "type": "text", + "content": " space), and the latter generates images through style vectors. The feature in the " + }, + { + "bbox": [ + 130, + 116, + 482, + 295 + ], + "type": "inline_equation", + "content": "\\mathcal{W}" + }, + { + "bbox": [ + 130, + 116, + 482, + 295 + ], + "type": "text", + "content": " space is well-disentangled, which means that images sharing similar features correspond to analogous style vectors. Therefore, PPA [38] performs their attacks by searching the style vectors in " + }, + { + "bbox": [ + 130, + 116, + 482, + 295 + ], + "type": "inline_equation", + "content": "\\mathcal{W}" + }, + { + "bbox": [ + 130, + 116, + 482, + 295 + ], + "type": "text", + "content": " space. 
The style vectors in the front layers tend to control high-level aspects of the generated images like pose, face shape, and general hair style, while those in the back ones have more influence on details [24], such as smaller scale facial features and eyes open/closed. Moreover, style vectors in " + }, + { + "bbox": [ + 130, + 116, + 482, + 295 + ], + "type": "inline_equation", + "content": "\\mathcal{W}" + }, + { + "bbox": [ + 130, + 116, + 482, + 295 + ], + "type": "text", + "content": " space do not need to follow the same distribution with the training data, which means that more diverse images can be generated by controlling the vectors [24]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 296, + 482, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 296, + 482, + 356 + ], + "spans": [ + { + "bbox": [ + 130, + 296, + 482, + 356 + ], + "type": "text", + "content": "Recent works [10, 32, 52] have shown the richness of intermediate features in GANs, our investigation also tries to explore the potential of leveraging intermediate latent space of different layers to enhance MI attacks. Our findings reveal that this approach significantly improves attack accuracy and obtains high-quality inversion results, particularly under the harder OOD scenario." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 131, + 371, + 285, + 384 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 371, + 285, + 384 + ], + "spans": [ + { + "bbox": [ + 131, + 371, + 285, + 384 + ], + "type": "text", + "content": "2.2 Model Inversion Attacks" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 390, + 482, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 390, + 482, + 521 + ], + "spans": [ + { + "bbox": [ + 130, + 390, + 482, + 521 + ], + "type": "text", + "content": "Model inversion (MI) attacks aim at reconstructing the private training data from a trained model. Typically, MI attacks can be divided into the white-box scenario [51] and black-box scenario [22]. We only focus on the white-box scenario in this paper, which means that the attacker has full access to the trained model. This kind of attack is initially demonstrated through an attempt to extract genomic markers from a linear regression model, as highlighted in the earliest research by [14]. Building on this foundation, subsequent researches [13, 37, 43] have broadened the scope of MI attacks, applying them to more machine learning models like shallow networks, and simple forms of data, such as low-resolution grayscale images. However, as the scale of both the data and the models increases, the efficacy of MI attack methods diminishes dramatically." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 522, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 522, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 522, + 482, + 666 + ], + "type": "text", + "content": "In response to this challenge, a novel approach known as GMI, introduced by [51], employs a GAN-based methodology to enhance the ability of MI attacks with deeper and wider DNNs. This innovative strategy leverages a GAN model trained on publicly available data to encapsulate the distributional characteristics of image data, thereby facilitating the generation of high-quality image reconstructions. 
The process involves the attackers first generating a set of preliminary images by inputting a batch of randomly sampled latent vectors into the GAN. These generated images are then fed into the target image classifier to obtain initial predictions. To refine the attack, the attackers iteratively optimize the input latent vectors. This optimization process aims to minimize the discrepancy between the classifier's predictions and the intended target class, as measured by the cross-entropy loss, while also reducing the discriminator loss." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "text", + "content": "Y. Qiu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 137, + 119, + 477, + 238 + ], + "blocks": [ + { + "bbox": [ + 137, + 119, + 477, + 238 + ], + "lines": [ + { + "bbox": [ + 137, + 119, + 477, + 238 + ], + "spans": [ + { + "bbox": [ + 137, + 119, + 477, + 238 + ], + "type": "image", + "image_path": "d50aeea83fea1e663e2ca3791e109e1db489790c54f2b18b1b2da20bd0120b0c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 247, + 482, + 324 + ], + "lines": [ + { + "bbox": [ + 130, + 247, + 482, + 324 + ], + "spans": [ + { + "bbox": [ + 130, + 247, + 482, + 324 + ], + "type": "text", + "content": "Fig. 2: Overview of our proposed IF-GMI. Firstly, the latent vectors are sampled from standard Gaussian distribution and mapped into disentangled latent codes with semantic meanings by Mapping Network. Then we perform random augmentation on these latent codes to select optimal ones denoted as " + }, + { + "bbox": [ + 130, + 247, + 482, + 324 + ], + "type": "inline_equation", + "content": "\\mathbf{w}^*" + }, + { + "bbox": [ + 130, + 247, + 482, + 324 + ], + "type": "text", + "content": " for optimization. The Synthesis Network is disassembled into multiple blocks to search the intermediate features, which are successively updated with the identity loss calculated from the target model. Finally, the reconstructed images are generated from the last layer as results." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 347, + 480, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 347, + 480, + 382 + ], + "spans": [ + { + "bbox": [ + 130, + 347, + 480, + 382 + ], + "type": "text", + "content": "With the help of the GAN, GMI seeks to achieve more precise and convincing reconstructions of complex data, thereby representing a significant advancement in the field of MI attacks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 384, + 482, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 384, + 482, + 587 + ], + "spans": [ + { + "bbox": [ + 130, + 384, + 482, + 587 + ], + "type": "text", + "content": "Lots of researches in recent years improve the attack performance on the white-box scenario based on GMI. 
SecretGen [48] explores the scenario when the attackers know some auxiliary information about the private data. KEDMI [5] improves the discriminator by incorporating target labels and recover the distribution of the input latent vectors for a target class. VMI [41] reformulates the MI attack from the perspective of variational inference and introduce KL-divergence as a regularization to better approximate the target distribution with a variational distribution. PPA [38] employs pre-trained StyleGAN2 to reduce the time cost of attacks and extend the attacks to high-resolution images thanks to the excellent generative ability of StyleGAN2. Moreover, they propose a set of strategies to heighten attack accuracy and robustness, including initial selection, post-selection, and data augmentation. LOMMA [31] introduces model augmentation into MI attacks to reduce overfitting of the target model. They train some surrogate models from the target model via model distillation, co-guiding the optimization process with improved loss function. PLGMI [47] proposes a top-n selection strategy, using target models to generate pseudo labels for publicly available images, thereby directing the training process for the conditional GAN." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 605, + 233, + 618 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 605, + 233, + 618 + ], + "spans": [ + { + "bbox": [ + 132, + 605, + 233, + 618 + ], + "type": "text", + "content": "3 Methodology" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 629, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 481, + 666 + ], + "type": "text", + "content": "In this section, we begin by explaining the fundamental paradigm of MI attacks and provide a formulation for the MI problem. Subsequently, we present our main components and elaborate the detailed pipeline of the proposed IF-GMI," + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "type": "text", + "content": "A Closer Look at GAN Priors" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 479, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 479, + 140 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 479, + 140 + ], + "type": "text", + "content": "which contributes to the improved performance under the OOD scenario. See Figure 2 for an overview of our method." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 154, + 228, + 166 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 154, + 228, + 166 + ], + "spans": [ + { + "bbox": [ + 132, + 154, + 228, + 166 + ], + "type": "text", + "content": "3.1 Preliminaries" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 171, + 480, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 171, + 480, + 254 + ], + "spans": [ + { + "bbox": [ + 130, + 171, + 480, + 254 + ], + "type": "text", + "content": "In this paper, we focus on the MI attacks under white-box settings, which means all the parameters and components of target models are available to the attacker. For image classification tasks, the malicious adversary aims to reconstruct privacy-sensitive images by leveraging the output prediction confidence of the target classifier and other auxiliary priors. Early works [44] directly optimize pixels in randomly sampled dummy images " + }, + { + "bbox": [ + 130, + 171, + 480, + 254 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 171, + 480, + 254 + ], + "type": "text", + "content": " to approximate target images " + }, + { + "bbox": [ + 130, + 171, + 480, + 254 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^*" + }, + { + "bbox": [ + 130, + 171, + 480, + 254 + ], + "type": "text", + "content": " given the target model " + }, + { + "bbox": [ + 130, + 171, + 480, + 254 + ], + "type": "inline_equation", + "content": "T_{\\theta}" + }, + { + "bbox": [ + 130, + 171, + 480, + 254 + ], + "type": "text", + "content": " and target label " + }, + { + "bbox": [ + 130, + 171, + 480, + 254 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 130, + 171, + 480, + 254 + ], + "type": "text", + "content": ", which can be formulated as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 255, + 264, + 480, + 281 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 264, + 480, + 281 + ], + "spans": [ + { + "bbox": [ + 255, + 264, + 480, + 281 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {x}} = \\underset {\\mathbf {x}} {\\arg \\min } \\mathcal {L} \\left(T _ {\\theta} (\\mathbf {x}), c\\right), \\tag {1}", + "image_path": "416c866100c22ef3457c1721e2305403e5475b6d3785de7a95c94cb8b873d40a.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 284, + 479, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 284, + 479, + 332 + ], + "spans": [ + { + "bbox": [ + 130, + 284, + 479, + 332 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 284, + 479, + 332 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}}" + }, + { + "bbox": [ + 130, + 284, + 479, + 332 + ], + "type": "text", + "content": " is the reconstructed image, " + }, + { + "bbox": [ + 130, + 284, + 479, + 332 + ], + "type": "inline_equation", + "content": "\\mathcal{L}(\\cdot ,\\cdot)" + }, + { + "bbox": [ + 130, + 284, + 479, + 332 + ], + "type": "text", + "content": " denotes the classification loss designed for image optimization and " + }, + { + "bbox": [ + 130, + 284, + 479, + 332 + ], + "type": "inline_equation", + "content": "T_{\\theta}(\\mathbf{x})" + }, + { + "bbox": [ + 130, + 284, + 479, + 332 + ], + "type": "text", + "content": " represent the output confidence. 
Due to the full access to the target model in white-box settings, the attacker can calculate loss and directly perform backpropagation to update dummy images." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 332, + 480, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 332, + 480, + 439 + ], + "spans": [ + { + "bbox": [ + 130, + 332, + 480, + 439 + ], + "type": "text", + "content": "However, the methods above are no longer functional when " + }, + { + "bbox": [ + 130, + 332, + 480, + 439 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 332, + 480, + 439 + ], + "type": "text", + "content": " turns into high-dimensional data which has excessive search space. To tackle such issues, recent studies [5, 38, 47, 51] introduce GANs as image priors due to their superior capability to generate high-fidelity RGB images. They propose to train a specially designed GAN with publicly available datasets that have structural similarities with the private dataset or utilize a public pre-trained GAN before the attack. Furthermore, the optimization objective is replaced with the latent vectors " + }, + { + "bbox": [ + 130, + 332, + 480, + 439 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 130, + 332, + 480, + 439 + ], + "type": "text", + "content": " of the generator, which has fewer parameters to optimize. With the aforementioned techniques, the MI problem is transformed into the following formulation:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 220, + 445, + 480, + 463 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 445, + 480, + 463 + ], + "spans": [ + { + "bbox": [ + 220, + 445, + 480, + 463 + ], + "type": "interline_equation", + "content": "\\hat {\\mathbf {z}} = \\underset {\\mathbf {z}} {\\arg \\min } \\mathcal {L} _ {i d} \\left(T _ {\\theta} (G (\\mathbf {z}), c) + \\lambda \\mathcal {L} _ {a u x} (\\mathbf {z}) \\right. 
\\tag {2}", + "image_path": "69747c94fcdd35eb14fb5864f6ef4a59efb67bfc1e6e3f146bc67eebe083eca0.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "spans": [ + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "text", + "content": " represents the trained generator, " + }, + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{id}(\\cdot, \\cdot)" + }, + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "text", + "content": " denotes the identity loss calculated from the target model " + }, + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "inline_equation", + "content": "T_{\\theta}" + }, + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{aux}(\\cdot)" + }, + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "text", + "content": " is an optional auxiliary loss (e.g., the discriminator loss) with a hyperparameter " + }, + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "text", + "content": ". By minimizing the Eq.2, the adversary updates the latent vectors " + }, + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "text", + "content": " into the optimal results " + }, + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{z}}" + }, + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "text", + "content": " and generate final images through " + }, + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{x}} = G(\\hat{\\mathbf{z}})" + }, + { + "bbox": [ + 130, + 467, + 479, + 527 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 528, + 480, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 528, + 480, + 611 + ], + "spans": [ + { + "bbox": [ + 130, + 528, + 480, + 611 + ], + "type": "text", + "content": "Intuitively, directly optimizing the input latent code of GAN priors serves as a natural method to acquire ideal reconstructed images, leading to its widespread application in all the previous works. However, recent studies [3,8,35,40] have indicated that there is fairly rich semantic information in the intermediate features of GANs except for the input latent code. This inspires us to surpass the limitation of merely searching the latent space and propose a novel method focusing on the intermediate feature domains, which are more close to the output." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 624, + 459, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 624, + 459, + 636 + ], + "spans": [ + { + "bbox": [ + 132, + 624, + 459, + 636 + ], + "type": "text", + "content": "3.2 Exploiting Intermediate Features for Enhanced MI Attacks" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 641, + 479, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 479, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 479, + 665 + ], + "type": "text", + "content": "In the following part, we delve into the internal structure of the GAN prior, attempting to explore the hierarchical layers for enhanced utilization of the rich" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "text", + "content": "Y. Qiu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 140 + ], + "type": "text", + "content": "semantics learned by the generator. Following the pipeline shown in Figure 2, we will elucidate each component in detail." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 152, + 479, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 152, + 479, + 210 + ], + "spans": [ + { + "bbox": [ + 130, + 152, + 479, + 210 + ], + "type": "text", + "content": "The GAN prior. Most previous GAN-based attacks [5,31,47,51] require training a specialized GAN with essential auxiliary dataset towards the specific target classifier. However, the prior knowledge of GANs trained under the above setting will be excessively aligned with the target model and the auxiliary dataset, leading to significant reduction in transferability and generalization." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "spans": [ + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "text", + "content": "Therefore, our method relies on the pre-trained StyleGAN2 [23] instead of training a GAN from scratch. 
The generator of StyleGAN2 can be simply divided into two components, consisting of a mapping network " + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "inline_equation", + "content": "G_{map}:\\mathcal{Z}\\to \\mathcal{W}" + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "text", + "content": " which maps the initial latent vectors " + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "inline_equation", + "content": "\\mathbf{z}\\in \\mathcal{Z}" + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "text", + "content": " into the extended " + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "inline_equation", + "content": "\\mathcal{W}" + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "text", + "content": " space [1], and a synthesis network " + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "inline_equation", + "content": "G_{syn}:\\mathcal{W}\\rightarrow \\mathcal{X}" + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "text", + "content": " which generates images " + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "text", + "content": " with mapped vectors " + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "inline_equation", + "content": "\\mathbf{w}\\in \\mathcal{W}" + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "text", + "content": ". Due to the reduced feature entanglement in " + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "inline_equation", + "content": "\\mathcal{W}" + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "text", + "content": " space that facilitates better style generation, we set " + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "inline_equation", + "content": "\\mathbf{w}" + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "text", + "content": " as the initial optimization objective rather than the commonly used latent code " + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "text", + "content": " in previous works. Specifically, we first randomly sample a batch of latent vectors " + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "text", + "content": " from Gaussian distribution and then map them with " + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "inline_equation", + "content": "G_{map}" + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "text", + "content": " to acquire " + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "inline_equation", + "content": "\\mathbf{w}" + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "text", + "content": ", which will be iteratively updated in the first step of intermediate features optimization. 
Moreover, the StyleGAN2 is pre-trained without the utilization of the target model " + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "inline_equation", + "content": "T_{\\theta}" + }, + { + "bbox": [ + 130, + 212, + 482, + 380 + ], + "type": "text", + "content": " or other auxiliary prior corresponding to the target dataset, ensuring the flexibility and transferability of our method when attacking different target models and datasets." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 391, + 482, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 391, + 482, + 510 + ], + "spans": [ + { + "bbox": [ + 130, + 391, + 482, + 510 + ], + "type": "text", + "content": "Initial Selection. Owing to the randomness in sampling latent vectors " + }, + { + "bbox": [ + 130, + 391, + 482, + 510 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 130, + 391, + 482, + 510 + ], + "type": "text", + "content": ", it is potential part of them cannot facilitate the generation of appropriate images, leading to a decrease in attack accuracy. To reduce the risk of generating misleading and low-quality images, previous studies [2, 38, 48] have explored the technique of initial selection and validated its effectiveness in obtaining robust latent vectors. Specifically, we first generate images with the randomly samples " + }, + { + "bbox": [ + 130, + 391, + 482, + 510 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 130, + 391, + 482, + 510 + ], + "type": "text", + "content": ", apply a series of transformations " + }, + { + "bbox": [ + 130, + 391, + 482, + 510 + ], + "type": "inline_equation", + "content": "Aug(\\cdot)" + }, + { + "bbox": [ + 130, + 391, + 482, + 510 + ], + "type": "text", + "content": " to the images, and feed them into the target classifier " + }, + { + "bbox": [ + 130, + 391, + 482, + 510 + ], + "type": "inline_equation", + "content": "T_{\\theta}" + }, + { + "bbox": [ + 130, + 391, + 482, + 510 + ], + "type": "text", + "content": " for corresponding prediction confidence. By selecting the latent vectors with higher scores, we can significantly improve the quality of the final images to better approximate the target distribution." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 511, + 482, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 511, + 482, + 571 + ], + "spans": [ + { + "bbox": [ + 130, + 511, + 482, + 571 + ], + "type": "text", + "content": "Inspired by these prior studies [2,38,48], we also include the initial selection technique in our method and apply standard image transformations, such as random cropping, resizing and flipping. Different from previous methods, we perform initial selection on the mapped vectors " + }, + { + "bbox": [ + 130, + 511, + 482, + 571 + ], + "type": "inline_equation", + "content": "\\mathbf{w}" + }, + { + "bbox": [ + 130, + 511, + 482, + 571 + ], + "type": "text", + "content": " instead of latent vectors " + }, + { + "bbox": [ + 130, + 511, + 482, + 571 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 130, + 511, + 482, + 571 + ], + "type": "text", + "content": ". 
The robust vectors " + }, + { + "bbox": [ + 130, + 511, + 482, + 571 + ], + "type": "inline_equation", + "content": "\\mathbf{w}" + }, + { + "bbox": [ + 130, + 511, + 482, + 571 + ], + "type": "text", + "content": " are obtained with the following equation:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 210, + 580, + 480, + 597 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 210, + 580, + 480, + 597 + ], + "spans": [ + { + "bbox": [ + 210, + 580, + 480, + 597 + ], + "type": "interline_equation", + "content": "\\mathbf {w} _ {\\text {i n i t}} = \\underset {\\mathbf {w}} {\\arg \\max } \\operatorname {C o n f} \\left(T _ {\\theta} \\left(A u g \\left(G _ {\\text {s y n}} (\\mathbf {w})\\right)\\right), c\\right), \\tag {3}", + "image_path": "19927758833a3269e7701cf48d9d1521a47c138a5a6506580bfc134a5d253293.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 605, + 491, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 491, + 630 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 491, + 630 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 605, + 491, + 630 + ], + "type": "inline_equation", + "content": "\\operatorname{Conf}(\\cdot, \\cdot)" + }, + { + "bbox": [ + 130, + 605, + 491, + 630 + ], + "type": "text", + "content": " measures the confidence score for augmented images " + }, + { + "bbox": [ + 130, + 605, + 491, + 630 + ], + "type": "inline_equation", + "content": "Aug(G_{syn}(\\mathbf{w}))" + }, + { + "bbox": [ + 130, + 605, + 491, + 630 + ], + "type": "text", + "content": " given the specific label " + }, + { + "bbox": [ + 130, + 605, + 491, + 630 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 130, + 605, + 491, + 630 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "type": "text", + "content": "Intermediate Features Optimization. 
According to the research of [24], the front blocks in the generator control the overall characteristics while the back" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "type": "text", + "content": "A Closer Look at GAN Priors" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 480, + 100 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 132, + 130, + 481, + 171 + ], + "blocks": [ + { + "bbox": [ + 133, + 116, + 455, + 129 + ], + "lines": [ + { + "bbox": [ + 133, + 116, + 455, + 129 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 455, + 129 + ], + "type": "text", + "content": "Algorithm 1 Pseudocode of the core algorithm in our proposed IF-GMI" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 132, + 130, + 481, + 171 + ], + "lines": [ + { + "bbox": [ + 132, + 130, + 481, + 171 + ], + "spans": [ + { + "bbox": [ + 132, + 130, + 481, + 171 + ], + "type": "text", + "content": "Input: " + }, + { + "bbox": [ + 132, + 130, + 481, + 171 + ], + "type": "inline_equation", + "content": "G_{syn}" + }, + { + "bbox": [ + 132, + 130, + 481, + 171 + ], + "type": "text", + "content": ": a pre-trained generator; " + }, + { + "bbox": [ + 132, + 130, + 481, + 171 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 132, + 130, + 481, + 171 + ], + "type": "text", + "content": ": the number of intermediate features; " + }, + { + "bbox": [ + 132, + 130, + 481, + 171 + ], + "type": "inline_equation", + "content": "T_{\\theta}" + }, + { + "bbox": [ + 132, + 130, + 481, + 171 + ], + "type": "text", + "content": ": the target classifier; " + }, + { + "bbox": [ + 132, + 130, + 481, + 171 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{id}" + }, + { + "bbox": [ + 132, + 130, + 481, + 171 + ], + "type": "text", + "content": ": the identity loss; " + }, + { + "bbox": [ + 132, + 130, + 481, + 171 + ], + "type": "inline_equation", + "content": "r[1 \\dots L]" + }, + { + "bbox": [ + 132, + 130, + 481, + 171 + ], + "type": "text", + "content": ": the radius value of " + }, + { + "bbox": [ + 132, + 130, + 481, + 171 + ], + "type": "inline_equation", + "content": "l_{1}" + }, + { + "bbox": [ + 132, + 130, + 481, + 171 + ], + "type": "text", + "content": " ball for each hierarchical features; " + }, + { + "bbox": [ + 132, + 130, + 481, + 171 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 132, + 130, + 481, + 171 + ], + "type": "text", + "content": ": the number of iterations;" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 138, + 186, + 394, + 411 + ], + "blocks": [ + { + "bbox": [ + 132, + 172, + 291, + 184 + ], + "lines": [ + { + "bbox": [ + 132, + 172, + 291, + 184 + ], + "spans": [ + { + "bbox": [ + 132, + 172, + 291, + 184 + ], + "type": "text", + "content": "Output: Reconstructed images " + }, + { + "bbox": [ + 132, + 172, + 291, + 184 + ], + "type": 
"inline_equation", + "content": "\\mathbf{x}^*" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "lines": [ + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "spans": [ + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "text", + "content": "1: Acquire latent vectors " + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_{init}" + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "text", + "content": " via initial selection process \n2: " + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_{(0)} \\gets \\underset{\\mathbf{w}}{\\arg \\min} \\mathcal{L}_{id}(G_{syn}(\\mathbf{w}_{init}))" + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "text", + "content": " \n3: Decompose the " + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "inline_equation", + "content": "G_{syn}" + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "inline_equation", + "content": "G_{L+1} \\circ G_{L} \\circ \\dots \\circ G_{2} \\circ G_{1}" + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "text", + "content": " \n4: Obtain the first intermediate feature " + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{(1)}^{0} = G_{1}(\\mathbf{w}_{(0)})" + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "text", + "content": " \n5: Set " + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_{(1)}^{0} = \\mathbf{w}_{(0)}" + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "text", + "content": " \n6: for " + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "inline_equation", + "content": "i \\gets 1" + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "text", + "content": " do \n7: Set " + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "inline_equation", + "content": "G_{remain} = G_{L+1} \\circ G_{L} \\dots \\circ G_{i+1}" + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "text", + "content": " \n8: for " + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "inline_equation", + "content": "j \\gets 1" + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "text", + "content": " do \n9: loss = " + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{id}(G_{remain}(\\mathbf{f}_{(i)}^{j-1}, \\mathbf{w}_{(i)}^{j-1}))" + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "text", + "content": " \n10: " + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{(i)}^{j} \\gets Adam(\\mathbf{f}_{(i)}^{j-1}; loss), ||\\mathbf{f}_{(i)}^{j} - \\mathbf{f}_{(i)}^{0}||_{1} \\leq r[i]" + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "text", + "content": " \n11: " + }, + { + "bbox": [ + 138, + 186, + 394, 
+ 411 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_{(i)}^{j} \\gets Adam(\\mathbf{w}_{(i)}^{j-1}; loss), ||\\mathbf{w}_{(i)}^{j} - \\mathbf{w}_{(i)}^{0}||_{1} \\leq r[i]" + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "text", + "content": " \n12: end for \n13: " + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{(i+1)}^{0} = G_{i+1}(\\mathbf{f}_{(i)}^{N}, \\mathbf{w}_{(i)}^{N}), \\mathbf{w}_{(i+1)}^{0} = \\mathbf{w}_{(i)}^{N}" + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "text", + "content": " \n14: end for \n15: The final images " + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^{*} = \\mathbf{f}_{(L+1)}^{0}" + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "text", + "content": " \n16: return " + }, + { + "bbox": [ + 138, + 186, + 394, + 411 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^{*}" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "algorithm" + }, + { + "bbox": [ + 130, + 437, + 482, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 437, + 482, + 521 + ], + "spans": [ + { + "bbox": [ + 130, + 437, + 482, + 521 + ], + "type": "text", + "content": "ones have more influence on local details. Previous studies [38, 47, 51] neglect the role of the latter, which limits the attack performance. To take advantage of the individual blocks, we propose intermediate features optimization, as shown in the Algorithm 1. We first optimize the selected latent vectors " + }, + { + "bbox": [ + 130, + 437, + 482, + 521 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_{init}" + }, + { + "bbox": [ + 130, + 437, + 482, + 521 + ], + "type": "text", + "content": " to obtain the optimal ones " + }, + { + "bbox": [ + 130, + 437, + 482, + 521 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_{(0)}" + }, + { + "bbox": [ + 130, + 437, + 482, + 521 + ], + "type": "text", + "content": " before launching intermediate features optimization. Then we disassemble the pre-trained generator into " + }, + { + "bbox": [ + 130, + 437, + 482, + 521 + ], + "type": "inline_equation", + "content": "L + 1" + }, + { + "bbox": [ + 130, + 437, + 482, + 521 + ], + "type": "text", + "content": " blocks for hierarchical layer searching, i.e.," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 234, + 522, + 481, + 536 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 234, + 522, + 481, + 536 + ], + "spans": [ + { + "bbox": [ + 234, + 522, + 481, + 536 + ], + "type": "interline_equation", + "content": "G _ {s y n} = G _ {L + 1} \\circ G _ {L} \\circ \\dots G _ {2} \\circ G _ {1}. 
\\tag {4}", + "image_path": "dcc9232ede48ceba1177bc85b1a7d9d0f0941c88fd89c1d1caa887cc35e495a8.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 542, + 481, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 542, + 481, + 556 + ], + "spans": [ + { + "bbox": [ + 132, + 542, + 481, + 556 + ], + "type": "text", + "content": "And we can feed " + }, + { + "bbox": [ + 132, + 542, + 481, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_{(0)}" + }, + { + "bbox": [ + 132, + 542, + 481, + 556 + ], + "type": "text", + "content": " into block " + }, + { + "bbox": [ + 132, + 542, + 481, + 556 + ], + "type": "inline_equation", + "content": "G_{1}" + }, + { + "bbox": [ + 132, + 542, + 481, + 556 + ], + "type": "text", + "content": " to attain the first intermediate feature " + }, + { + "bbox": [ + 132, + 542, + 481, + 556 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{(1)}^{0}" + }, + { + "bbox": [ + 132, + 542, + 481, + 556 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "spans": [ + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "text", + "content": "For each intermediate block " + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "inline_equation", + "content": "G_{i+1}, i \\in [1, \\ldots, L]" + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "text", + "content": ", the corresponding intermediate features " + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{(i+1)}^0" + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "text", + "content": " are acquired with following steps. First, we generate images utilizing the remaining blocks (i.e., " + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i = G_{L+1} \\circ G_L \\dots G_{i+1}(\\mathbf{f}_{(i)}, \\mathbf{w}_{(i)})" + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "text", + "content": ") and input them into the target classifier " + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "inline_equation", + "content": "T_\\theta" + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "text", + "content": " to compute the prediction confidence for loss function. Then, we repeat the aforementioned process to iteratively update both " + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_{(i)}" + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{(i)}" + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "text", + "content": ". 
During the optimization process, we restrict the " + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{(i)}" + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "text", + "content": " within the " + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "inline_equation", + "content": "l_1" + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "text", + "content": " ball with radius " + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "inline_equation", + "content": "r[i]" + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "text", + "content": " centered at the initial intermediate feature " + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{(i)}^0" + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "text", + "content": " to avoid excessive shift that may lead to collapse image generation. Once the iteration process is completed, the optimized " + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_{(i)}^N" + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{(i)}^N" + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "text", + "content": " are fed into the block " + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "inline_equation", + "content": "G_i" + }, + { + "bbox": [ + 130, + 556, + 482, + 668 + ], + "type": "text", + "content": " to" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "text", + "content": "Y. Qiu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 144 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 144 + ], + "type": "text", + "content": "obtain the next intermediate features " + }, + { + "bbox": [ + 130, + 116, + 479, + 144 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{(i + 1)}^{0}" + }, + { + "bbox": [ + 130, + 116, + 479, + 144 + ], + "type": "text", + "content": ". Moreover, we denote the optimized " + }, + { + "bbox": [ + 130, + 116, + 479, + 144 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_{(i)}^{N}" + }, + { + "bbox": [ + 130, + 116, + 479, + 144 + ], + "type": "text", + "content": " as the initial latent vector " + }, + { + "bbox": [ + 130, + 116, + 479, + 144 + ], + "type": "inline_equation", + "content": "\\mathbf{w}_{(i + 1)}^{0}" + }, + { + "bbox": [ + 130, + 116, + 479, + 144 + ], + "type": "text", + "content": " before the next layer optimization starts." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 144, + 480, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 144, + 480, + 168 + ], + "spans": [ + { + "bbox": [ + 130, + 144, + 480, + 168 + ], + "type": "text", + "content": "Once we finish searching the last intermediate layer, we can generate the final images " + }, + { + "bbox": [ + 130, + 144, + 480, + 168 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^*" + }, + { + "bbox": [ + 130, + 144, + 480, + 168 + ], + "type": "text", + "content": " from the last intermediate feature " + }, + { + "bbox": [ + 130, + 144, + 480, + 168 + ], + "type": "inline_equation", + "content": "\\mathbf{f}_{(L)}^{N}" + }, + { + "bbox": [ + 130, + 144, + 480, + 168 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 130, + 144, + 480, + 168 + ], + "type": "inline_equation", + "content": "\\mathbf{x}^* = \\mathbf{f}_{L + 1}^{0} = G_{i + 1}(\\mathbf{f}_{(L)}^{N})" + }, + { + "bbox": [ + 130, + 144, + 480, + 168 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 175, + 479, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 175, + 479, + 247 + ], + "spans": [ + { + "bbox": [ + 130, + 175, + 479, + 247 + ], + "type": "text", + "content": "The Overall Loss. While the cross-entropy loss " + }, + { + "bbox": [ + 130, + 175, + 479, + 247 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{CE}" + }, + { + "bbox": [ + 130, + 175, + 479, + 247 + ], + "type": "text", + "content": " serves as the identity loss in most early works [5, 48, 51], there is a major drawback of " + }, + { + "bbox": [ + 130, + 175, + 479, + 247 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{CE}" + }, + { + "bbox": [ + 130, + 175, + 479, + 247 + ], + "type": "text", + "content": ". Specifically, the gradient vanishing problem emerges when the prediction confidence of target label " + }, + { + "bbox": [ + 130, + 175, + 479, + 247 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 130, + 175, + 479, + 247 + ], + "type": "text", + "content": " approaches the ground truth in the one-hot vector. Following the previous study [38], we rely on the Poincaré loss function to overcome this problem. Therefore, the identity loss function utilized in our method is defined as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 207, + 256, + 480, + 282 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 256, + 480, + 282 + ], + "spans": [ + { + "bbox": [ + 207, + 256, + 480, + 282 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {i d} = \\operatorname {a r c c o s h} \\left(1 + \\frac {2 \\| v _ {1} - v _ {2} \\| _ {2} ^ {2}}{(1 - \\| v _ {1} \\| _ {2} ^ {2}) (1 - \\| v _ {2} \\| _ {2} ^ {2})}\\right), \\tag {5}", + "image_path": "447c4210ba021e891f55343db1d998d993820f24c9037c4377b05ee24dc8e7b0.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 285, + 480, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 285, + 480, + 332 + ], + "spans": [ + { + "bbox": [ + 130, + 285, + 480, + 332 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 285, + 480, + 332 + ], + "type": "inline_equation", + "content": "||v||_2" + }, + { + "bbox": [ + 130, + 285, + 480, + 332 + ], + "type": "text", + "content": " is the Euclidean norm for the given vector. 
In our experiments, we denote " + }, + { + "bbox": [ + 130, + 285, + 480, + 332 + ], + "type": "inline_equation", + "content": "v_{1}" + }, + { + "bbox": [ + 130, + 285, + 480, + 332 + ], + "type": "text", + "content": " as the normalized prediction confidence and " + }, + { + "bbox": [ + 130, + 285, + 480, + 332 + ], + "type": "inline_equation", + "content": "v_{2}" + }, + { + "bbox": [ + 130, + 285, + 480, + 332 + ], + "type": "text", + "content": " as the one-hot vector for ground truth. Notably, the original number 1 in " + }, + { + "bbox": [ + 130, + 285, + 480, + 332 + ], + "type": "inline_equation", + "content": "v_{2}" + }, + { + "bbox": [ + 130, + 285, + 480, + 332 + ], + "type": "text", + "content": " is substituted with 0.9999 to avoid division by zero." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 131, + 348, + 230, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 348, + 230, + 361 + ], + "spans": [ + { + "bbox": [ + 131, + 348, + 230, + 361 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 370, + 480, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 370, + 480, + 441 + ], + "spans": [ + { + "bbox": [ + 130, + 370, + 480, + 441 + ], + "type": "text", + "content": "In this section, we first illustrate the details of our experimental settings. Then, we compare our method with state-of-the-art baselines to evaluate the attack performance. Furthermore, we conduct extensive experiments on multiple target datasets and models to further validate the effectiveness of our method in various settings. Finally, the ablation study will be evaluated on the first 100 classes of the whole dataset due to cost concerns." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 131, + 457, + 261, + 470 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 457, + 261, + 470 + ], + "spans": [ + { + "bbox": [ + 131, + 457, + 261, + 470 + ], + "type": "text", + "content": "4.1 Experimental Setup" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 474, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 474, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 474, + 481, + 666 + ], + "type": "text", + "content": "Datasets. We evaluate our method on two classification tasks, including facial image classification and dog breed classification. For the facial image classification task, we select the FaceScrub [30] and CelebFaces Attributes [28] (CelebA) as private datasets to train the target models. FaceScrub consists of facial images of actors and actresses with 530 identities in total. CelebA contains facial images of 10177 identities with coarse alignment. For FaceScrub, we utilize all the identities in the major experiment. For CelebA, we select the top 1000 identities with the most images for our experiment, consisting of over 30000 images. We use Flickr-Faces-HQ [24] (FFHQ) and MetFaces [23] as public datasets. FFHQ consists of 70000 high-quality human face images. MetFaces is an image dataset of 1336 human faces extracted from the Metropolitan Museum of Art Collection, which has a huge distributional shift with real human faces. For the dog breed classification task, we use Stanford Dogs [9] as a private dataset and Animal Faces-HQ Dogs [6] (AFHQ) as a public dataset. 
To adapt to the target model, all images in the various datasets are pre-processed to a resolution size of " + }, + { + "bbox": [ + 130, + 474, + 481, + 666 + ], + "type": "inline_equation", + "content": "224 \\times 224" + }, + { + "bbox": [ + 130, + 474, + 481, + 666 + ], + "type": "text", + "content": " pixels in our experiment." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "type": "text", + "content": "A Closer Look at GAN Priors" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 92, + 480, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 488, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 488, + 189 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 488, + 189 + ], + "type": "text", + "content": "Models. We trained a variety of classification models on the private datasets mentioned above, including various architectures such as ResNet-18 [18], DenseNet-169 [21], ResNet-152 [18], and ResNeSt-101 [50], as target models. Following the settings in the previous work [38], we select Inception-v3 [39] as the evaluation model. For the generative model, we employ publicly released StyleGAN2 pre-trained on the aforementioned public datasets." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 197, + 480, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 197, + 480, + 220 + ], + "spans": [ + { + "bbox": [ + 130, + 197, + 480, + 220 + ], + "type": "text", + "content": "Metrics. Following PPA [38], we evaluate the performance of our attack method on various kinds of metrics as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 224, + 482, + 474 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 138, + 224, + 482, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 224, + 482, + 308 + ], + "spans": [ + { + "bbox": [ + 138, + 224, + 482, + 308 + ], + "type": "text", + "content": "- Attack Accuracy. This metric serves as a criterion on how well the generated samples resemble the target class. We use the evaluation model trained on the same dataset with the target model to predict the labels on reconstructed samples and compute the top-1 and top-5 accuracy for target classes, denoted as " + }, + { + "bbox": [ + 138, + 224, + 482, + 308 + ], + "type": "inline_equation", + "content": "Acc@1" + }, + { + "bbox": [ + 138, + 224, + 482, + 308 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 138, + 224, + 482, + 308 + ], + "type": "inline_equation", + "content": "Acc@5" + }, + { + "bbox": [ + 138, + 224, + 482, + 308 + ], + "type": "text", + "content": " respectively. The higher the reconstructed samples achieve attack accuracy on the evaluation model, the more private information in the dataset can be considered to be exposed [51]." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 308, + 482, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 308, + 482, + 367 + ], + "spans": [ + { + "bbox": [ + 138, + 308, + 482, + 367 + ], + "type": "text", + "content": "- Feature Distance. The feature is defined as the output of the model's penultimate layer. We compute the shortest feature " + }, + { + "bbox": [ + 138, + 308, + 482, + 367 + ], + "type": "inline_equation", + "content": "l_{2}" + }, + { + "bbox": [ + 138, + 308, + 482, + 367 + ], + "type": "text", + "content": " distance between reconstructed samples and private training data for each class and calculate the average distance. The evaluated feature distances on the evaluation model and a pre-trained FaceNet [34] are denoted as " + }, + { + "bbox": [ + 138, + 308, + 482, + 367 + ], + "type": "inline_equation", + "content": "\\delta_{eval}" + }, + { + "bbox": [ + 138, + 308, + 482, + 367 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 138, + 308, + 482, + 367 + ], + "type": "inline_equation", + "content": "\\delta_{face}" + }, + { + "bbox": [ + 138, + 308, + 482, + 367 + ], + "type": "text", + "content": ", respectively." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 367, + 482, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 367, + 482, + 426 + ], + "spans": [ + { + "bbox": [ + 138, + 367, + 482, + 426 + ], + "type": "text", + "content": "- Fréchet Inception Distance (FID). FID [19] is commonly used to evaluate the generated images of GANs. It computes the distance between the feature vectors from target private data and reconstructed samples. The feature vectors are extracted by Inception-v3 pre-trained on ImageNet. The lower FID score shows higher realism and overall diversity [41]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 426, + 482, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 426, + 482, + 474 + ], + "spans": [ + { + "bbox": [ + 138, + 426, + 482, + 474 + ], + "type": "text", + "content": "- Sample Diversity. We compute Precision-Recall [26] and Density-Coverage [29] scores, whose higher values indicate greater intra-class diversity of the reconstructed samples. Our results for these four metrics are stated and analyzed in the Appendix." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 493, + 331, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 493, + 331, + 506 + ], + "spans": [ + { + "bbox": [ + 132, + 493, + 331, + 506 + ], + "type": "text", + "content": "4.2 The Number of Optimized Layers" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": "To obtain the highest attack performance, the number of intermediate features " + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": " should be explored before conducting the major experiments. 
When " + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": " takes a small value, there is a risk of underfitting as we merely optimize the intermediate features of the previous few layers to reconstruct the target images, especially in the OOD scenario. In contrast, when " + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": " is too large, the latter layers have a greater influence on the local details [24], which may lead to overfitting to the target model in some details and produce unrealistic images. Therefore, we must balance underfitting and overfitting when choosing " + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": ". We conduct a simple attack on only 10 classes for each combination of public and private datasets to select " + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": " according to the results. For instance, Figure 3(a) shows the Acc@1 result for GAN prior pre-trained on FFHQ against the target DenseNet-169 trained on CelebA. The Acc@1 reaches the highest when " + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "inline_equation", + "content": "L = 3" + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": ". Hence, we keep this configuration in conducting the following experiments." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "text", + "content": "Y. Qiu et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 140, + 129, + 302, + 213 + ], + "blocks": [ + { + "bbox": [ + 140, + 129, + 302, + 213 + ], + "lines": [ + { + "bbox": [ + 140, + 129, + 302, + 213 + ], + "spans": [ + { + "bbox": [ + 140, + 129, + 302, + 213 + ], + "type": "image", + "image_path": "0e44ab91fb2d2ab96d903b61bf75ae4623ffc976fc2924dcc0e7f44412b5fdb0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 203, + 223, + 242, + 232 + ], + "lines": [ + { + "bbox": [ + 203, + 223, + 242, + 232 + ], + "spans": [ + { + "bbox": [ + 203, + 223, + 242, + 232 + ], + "type": "text", + "content": "(a) StyleGAN2" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 313, + 129, + 350, + 165 + ], + "blocks": [ + { + "bbox": [ + 313, + 129, + 350, + 165 + ], + "lines": [ + { + "bbox": [ + 313, + 129, + 350, + 165 + ], + "spans": [ + { + "bbox": [ + 313, + 129, + 350, + 165 + ], + "type": "image", + "image_path": "302a4a29c9abef413f36f6b081d3639797110b6a84b5adb6171e8ffa8e02e483.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 324, + 167, + 340, + 174 + ], + "lines": [ + { + "bbox": [ + 324, + 167, + 340, + 174 + ], + "spans": [ + { + "bbox": [ + 324, + 167, + 340, + 174 + ], + "type": "text", + "content": "layer 0" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 353, + 129, + 389, + 165 + ], + "blocks": [ + { + "bbox": [ + 353, + 129, + 389, + 165 + ], + "lines": [ + { + "bbox": [ + 353, + 129, + 389, + 165 + ], + "spans": [ + { + "bbox": [ + 353, + 129, + 389, + 165 + ], + "type": "image", + "image_path": "2d9297ebe571fde373116271bd7ed018985aee102c677cf495ac580a3dc1979e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 364, + 167, + 380, + 174 + ], + "lines": [ + { + "bbox": [ + 364, + 167, + 380, + 174 + ], + "spans": [ + { + "bbox": [ + 364, + 167, + 380, + 174 + ], + "type": "text", + "content": "layer 1" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 393, + 129, + 429, + 165 + ], + "blocks": [ + { + "bbox": [ + 393, + 129, + 429, + 165 + ], + "lines": [ + { + "bbox": [ + 393, + 129, + 429, + 165 + ], + "spans": [ + { + "bbox": [ + 393, + 129, + 429, + 165 + ], + "type": "image", + "image_path": "791e673b5a6a204cc1c216f6d0c44b563f71fb7a10be133c5519aa4304b860f0.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 403, + 167, + 419, + 174 + ], + "lines": [ + { + "bbox": [ + 403, + 167, + 419, + 174 + ], + "spans": [ + { + "bbox": [ + 403, + 167, + 419, + 174 + ], + "type": "text", + "content": "layer 2" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 433, + 129, + 469, + 165 + ], + "blocks": [ + { + "bbox": [ + 433, + 129, + 469, + 165 + ], + "lines": [ + { + "bbox": [ + 433, + 129, + 469, + 165 + ], + "spans": [ + { + "bbox": [ + 433, + 129, + 469, + 165 + ], + "type": "image", + "image_path": "d7ad69a8642367d78b77826bad4c7e6c79ac95248b0919e61bf453b213f39c4d.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": 
"image", + "bbox": [ + 314, + 175, + 350, + 212 + ], + "blocks": [ + { + "bbox": [ + 314, + 175, + 350, + 212 + ], + "lines": [ + { + "bbox": [ + 314, + 175, + 350, + 212 + ], + "spans": [ + { + "bbox": [ + 314, + 175, + 350, + 212 + ], + "type": "image", + "image_path": "d8aa836cce8f78142345ead42a9144964cf8bb42544f3ea20d72493d9fbc1065.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 324, + 214, + 341, + 220 + ], + "lines": [ + { + "bbox": [ + 324, + 214, + 341, + 220 + ], + "spans": [ + { + "bbox": [ + 324, + 214, + 341, + 220 + ], + "type": "text", + "content": "layer 4" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 130, + 247, + 480, + 291 + ], + "lines": [ + { + "bbox": [ + 130, + 247, + 480, + 291 + ], + "spans": [ + { + "bbox": [ + 130, + 247, + 480, + 291 + ], + "type": "text", + "content": "Fig. 3: (a) Comparison of Acc@1 metric under various settings of " + }, + { + "bbox": [ + 130, + 247, + 480, + 291 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 130, + 247, + 480, + 291 + ], + "type": "text", + "content": " (i.e., the number of intermediate features). (b) Visual results generated from different end layers. We define " + }, + { + "bbox": [ + 130, + 247, + 480, + 291 + ], + "type": "inline_equation", + "content": "L = 0" + }, + { + "bbox": [ + 130, + 247, + 480, + 291 + ], + "type": "text", + "content": " as a special case that our method degenerates into merely optimizing the latent vectors " + }, + { + "bbox": [ + 130, + 247, + 480, + 291 + ], + "type": "inline_equation", + "content": "\\mathbf{w}" + }, + { + "bbox": [ + 130, + 247, + 480, + 291 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 353, + 175, + 389, + 212 + ], + "blocks": [ + { + "bbox": [ + 353, + 175, + 389, + 212 + ], + "lines": [ + { + "bbox": [ + 353, + 175, + 389, + 212 + ], + "spans": [ + { + "bbox": [ + 353, + 175, + 389, + 212 + ], + "type": "image", + "image_path": "5418e5f60f52d07697ee9aa2264db7a86841f632302cdd65b352984bd8945d1b.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 367, + 214, + 384, + 220 + ], + "lines": [ + { + "bbox": [ + 367, + 214, + 384, + 220 + ], + "spans": [ + { + "bbox": [ + 367, + 214, + 384, + 220 + ], + "type": "text", + "content": "ayer 5" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 352, + 223, + 437, + 232 + ], + "lines": [ + { + "bbox": [ + 352, + 223, + 437, + 232 + ], + "spans": [ + { + "bbox": [ + 352, + 223, + 437, + 232 + ], + "type": "text", + "content": "(b) visual samples for each layer" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 394, + 175, + 429, + 212 + ], + "blocks": [ + { + "bbox": [ + 394, + 175, + 429, + 212 + ], + "lines": [ + { + "bbox": [ + 394, + 175, + 429, + 212 + ], + "spans": [ + { + "bbox": [ + 394, + 175, + 429, + 212 + ], + "type": "image", + "image_path": "7cb97fa038eb3ab9754ad0420bdc7c3ce2fe3756f0ffaea5bfd83bbef8a6579b.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 403, + 213, + 420, + 220 + ], + "lines": [ + { + "bbox": [ + 403, + 213, + 420, + 220 + ], + "spans": [ + { + "bbox": [ + 403, + 213, + 420, + 220 + ], + "type": "text", + "content": "layer 6" + } + ] + 
} + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 433, + 175, + 469, + 212 + ], + "blocks": [ + { + "bbox": [ + 443, + 167, + 459, + 174 + ], + "lines": [ + { + "bbox": [ + 443, + 167, + 459, + 174 + ], + "spans": [ + { + "bbox": [ + 443, + 167, + 459, + 174 + ], + "type": "text", + "content": "layer 3" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 433, + 175, + 469, + 212 + ], + "lines": [ + { + "bbox": [ + 433, + 175, + 469, + 212 + ], + "spans": [ + { + "bbox": [ + 433, + 175, + 469, + 212 + ], + "type": "image", + "image_path": "59a4ddcf6813cc4f0e2110506f036935f69707a298f8d942bddd9ea2338d00a1.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 444, + 213, + 459, + 220 + ], + "lines": [ + { + "bbox": [ + 444, + 213, + 459, + 220 + ], + "spans": [ + { + "bbox": [ + 444, + 213, + 459, + 220 + ], + "type": "text", + "content": "layer 7" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "table", + "bbox": [ + 135, + 335, + 482, + 499 + ], + "blocks": [ + { + "bbox": [ + 131, + 304, + 480, + 325 + ], + "lines": [ + { + "bbox": [ + 131, + 304, + 480, + 325 + ], + "spans": [ + { + "bbox": [ + 131, + 304, + 480, + 325 + ], + "type": "text", + "content": "Table 1: Comparison of our method with state-of-the-art methods against ResNet-18 trained on FaceScrub." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 335, + 482, + 499 + ], + "lines": [ + { + "bbox": [ + 135, + 335, + 482, + 499 + ], + "spans": [ + { + "bbox": [ + 135, + 335, + 482, + 499 + ], + "type": "table", + "html": "
<table><tr><td>Public Dataset</td><td>Method</td><td>↑ Acc@1</td><td>↑ Acc@5</td><td>↓ δface</td><td>↓ δeval</td><td>↓ FID</td></tr>
<tr><td rowspan="7">FFHQ</td><td>GMI [51]</td><td>0.131</td><td>0.339</td><td>1.260</td><td>149.530</td><td>77.800</td></tr>
<tr><td>KEDMI [5]</td><td>0.127</td><td>0.317</td><td>1.155</td><td>186.409</td><td>144.195</td></tr>
<tr><td>PPA [38]</td><td>0.962</td><td>0.996</td><td>0.707</td><td>117.834</td><td>41.688</td></tr>
<tr><td>LOMMA+GMI [31]</td><td>0.828</td><td>0.945</td><td>0.784</td><td>126.178</td><td>55.840</td></tr>
<tr><td>LOMMA+KEDMI [31]</td><td>0.549</td><td>0.814</td><td>0.916</td><td>217.991</td><td>114.045</td></tr>
<tr><td>PLGMI [47]</td><td>0.758</td><td>0.928</td><td>0.676</td><td>214.978</td><td>154.497</td></tr>
<tr><td>IF-GMI(ours)</td><td>0.979</td><td>0.996</td><td>0.667</td><td>112.915</td><td>40.581</td></tr>
<tr><td rowspan="7">MetFaces</td><td>GMI [51]</td><td>0.038</td><td>0.136</td><td>1.361</td><td>161.036</td><td>114.648</td></tr>
<tr><td>KEDMI [5]</td><td>0.003</td><td>0.017</td><td>1.651</td><td>212.952</td><td>347.468</td></tr>
<tr><td>PPA [38]</td><td>0.628</td><td>0.854</td><td>1.035</td><td>146.749</td><td>62.518</td></tr>
<tr><td>LOMMA+GMI [31]</td><td>0.160</td><td>0.361</td><td>1.220</td><td>156.297</td><td>101.600</td></tr>
<tr><td>LOMMA+KEDMI [31]</td><td>0.002</td><td>0.020</td><td>1.623</td><td>214.883</td><td>333.572</td></tr>
<tr><td>PLGMI [47]</td><td>0.438</td><td>0.731</td><td>0.796</td><td>205.222</td><td>245.208</td></tr>
<tr><td>IF-GMI(ours)</td><td>0.949</td><td>0.992</td><td>0.838</td><td>120.354</td><td>68.107</td></tr></table>
", + "image_path": "c792262ddfd26a0427609a7c5d099687bc69718d510db7f3eacf970bdb35d75e.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "table_body" + } + ], + "index": 23 + }, + { + "bbox": [ + 131, + 522, + 418, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 522, + 418, + 534 + ], + "spans": [ + { + "bbox": [ + 131, + 522, + 418, + 534 + ], + "type": "text", + "content": "4.3 Comparison with Previous State-of-the-art Attacks" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 130, + 544, + 480, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 544, + 480, + 604 + ], + "spans": [ + { + "bbox": [ + 130, + 544, + 480, + 604 + ], + "type": "text", + "content": "We compare our method with state-of-the-art MI attack methods, including GMI [51], KEDMI [5], PPA [38], LOMMA [31] and PLGMI [47]. Note that LOMMA [31] is a plug-and-play technique designed to augment existing attack methods. We use their original setup where LOMMA is integrated with GMI and KEDMI as our baselines." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 130, + 605, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 481, + 665 + ], + "type": "text", + "content": "The GAN structures employed by GMI, KEDMI, and PLGMI are inherently limited to generating images at a resolution of " + }, + { + "bbox": [ + 130, + 605, + 481, + 665 + ], + "type": "inline_equation", + "content": "64 \\times 64" + }, + { + "bbox": [ + 130, + 605, + 481, + 665 + ], + "type": "text", + "content": " pixels. To ensure a fair comparison, we adopt the same operation used in PPA [38], which modifies the architecture of the generators and discriminators to enable the generation of images at an enhanced resolution of " + }, + { + "bbox": [ + 130, + 605, + 481, + 665 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 130, + 605, + 481, + 665 + ], + "type": "text", + "content": " pixels, i.e., adding two ex" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "type": "text", + "content": "A Closer Look at GAN Priors" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 479, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 149, + 118, + 179, + 258 + ], + "blocks": [ + { + "bbox": [ + 149, + 118, + 179, + 258 + ], + "lines": [ + { + "bbox": [ + 149, + 118, + 179, + 258 + ], + "spans": [ + { + "bbox": [ + 149, + 118, + 179, + 258 + ], + "type": "image", + "image_path": "b90b989e4c6f839c952787458555202005ff6fa865d5a92ea072a65f8ba80d9e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 155, + 258, + 174, + 265 + ], + "lines": [ + { + "bbox": [ + 155, + 258, + 174, + 265 + ], + "spans": [ + { + "bbox": [ + 155, + 258, + 174, + 265 + ], + "type": "text", + "content": "Private" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 
131, + 285, + 482, + 319 + ], + "lines": [ + { + "bbox": [ + 131, + 285, + 482, + 319 + ], + "spans": [ + { + "bbox": [ + 131, + 285, + 482, + 319 + ], + "type": "text", + "content": "Fig. 4: Visual comparison of reconstructed images from different methods against the ResNet-18 trained on FaceScrub. The first column shows ground truth images of the target class in the private dataset." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 199, + 119, + 228, + 258 + ], + "blocks": [ + { + "bbox": [ + 199, + 119, + 228, + 258 + ], + "lines": [ + { + "bbox": [ + 199, + 119, + 228, + 258 + ], + "spans": [ + { + "bbox": [ + 199, + 119, + 228, + 258 + ], + "type": "image", + "image_path": "2035d4fdac018c526da493640c6bac4170088e367c6f7c56434a501d34ac616d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 209, + 258, + 220, + 265 + ], + "lines": [ + { + "bbox": [ + 209, + 258, + 220, + 265 + ], + "spans": [ + { + "bbox": [ + 209, + 258, + 220, + 265 + ], + "type": "text", + "content": "GMI" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 237, + 119, + 267, + 258 + ], + "blocks": [ + { + "bbox": [ + 237, + 119, + 267, + 258 + ], + "lines": [ + { + "bbox": [ + 237, + 119, + 267, + 258 + ], + "spans": [ + { + "bbox": [ + 237, + 119, + 267, + 258 + ], + "type": "image", + "image_path": "74cd5d890531dacfba963b3c14eff81b6e7399c1315c833bb44ad6a1291aec2b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 244, + 258, + 261, + 265 + ], + "lines": [ + { + "bbox": [ + 244, + 258, + 261, + 265 + ], + "spans": [ + { + "bbox": [ + 244, + 258, + 261, + 265 + ], + "type": "text", + "content": "KEDMI" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 276, + 119, + 306, + 258 + ], + "blocks": [ + { + "bbox": [ + 276, + 119, + 306, + 258 + ], + "lines": [ + { + "bbox": [ + 276, + 119, + 306, + 258 + ], + "spans": [ + { + "bbox": [ + 276, + 119, + 306, + 258 + ], + "type": "image", + "image_path": "4f1ded3517cfc59ebe50dee073b78bbbcb12b8c7d4cc16706939dd58e6436c20.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 286, + 258, + 296, + 265 + ], + "lines": [ + { + "bbox": [ + 286, + 258, + 296, + 265 + ], + "spans": [ + { + "bbox": [ + 286, + 258, + 296, + 265 + ], + "type": "text", + "content": "PPA" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 315, + 119, + 345, + 258 + ], + "blocks": [ + { + "bbox": [ + 315, + 119, + 345, + 258 + ], + "lines": [ + { + "bbox": [ + 315, + 119, + 345, + 258 + ], + "spans": [ + { + "bbox": [ + 315, + 119, + 345, + 258 + ], + "type": "image", + "image_path": "936d7b5c460b1573aa457c056b2499497cea2e27cfb21f6e8966ee25a32f6516.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 321, + 258, + 341, + 271 + ], + "lines": [ + { + "bbox": [ + 321, + 258, + 341, + 271 + ], + "spans": [ + { + "bbox": [ + 321, + 258, + 341, + 271 + ], + "type": "text", + "content": "LOMMA +GMI" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 354, + 119, + 384, + 258 + ], + "blocks": [ + { + "bbox": [ + 354, + 119, + 
384, + 258 + ], + "lines": [ + { + "bbox": [ + 354, + 119, + 384, + 258 + ], + "spans": [ + { + "bbox": [ + 354, + 119, + 384, + 258 + ], + "type": "image", + "image_path": "bb49ef5912d38e70d6e03a89365c65fc228bf200738bd8599253bdc0c1c40527.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 359, + 258, + 380, + 271 + ], + "lines": [ + { + "bbox": [ + 359, + 258, + 380, + 271 + ], + "spans": [ + { + "bbox": [ + 359, + 258, + 380, + 271 + ], + "type": "text", + "content": "LOMMA +KEDMI" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 394, + 119, + 424, + 258 + ], + "blocks": [ + { + "bbox": [ + 394, + 119, + 424, + 258 + ], + "lines": [ + { + "bbox": [ + 394, + 119, + 424, + 258 + ], + "spans": [ + { + "bbox": [ + 394, + 119, + 424, + 258 + ], + "type": "image", + "image_path": "db40d1cc3c1b0b9be0828b3adbfbc759f852a73df17cad5d1afce224d188bb42.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 399, + 258, + 417, + 265 + ], + "lines": [ + { + "bbox": [ + 399, + 258, + 417, + 265 + ], + "spans": [ + { + "bbox": [ + 399, + 258, + 417, + 265 + ], + "type": "text", + "content": "PLGMI" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 432, + 119, + 462, + 258 + ], + "blocks": [ + { + "bbox": [ + 432, + 119, + 462, + 258 + ], + "lines": [ + { + "bbox": [ + 432, + 119, + 462, + 258 + ], + "spans": [ + { + "bbox": [ + 432, + 119, + 462, + 258 + ], + "type": "image", + "image_path": "4a579351b4e119779b0bd6a66b0bb9ea3a2e8f940b196bc0c4dc1279aae51b6c.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 441, + 259, + 453, + 265 + ], + "lines": [ + { + "bbox": [ + 441, + 259, + 453, + 265 + ], + "spans": [ + { + "bbox": [ + 441, + 259, + 453, + 265 + ], + "type": "text", + "content": "ours" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "bbox": [ + 130, + 343, + 480, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 343, + 480, + 367 + ], + "spans": [ + { + "bbox": [ + 130, + 343, + 480, + 367 + ], + "type": "text", + "content": "tra upsampling layers for the generator and two downsampling layers for the discriminator respectively." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 130, + 369, + 482, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 369, + 482, + 559 + ], + "spans": [ + { + "bbox": [ + 130, + 369, + 482, + 559 + ], + "type": "text", + "content": "We provide quantitative results against ResNet-18 [18] trained on the Face-Scrub dataset in Table 1. We can observe that our method achieves significant improvements over previous methods. Especially when the generator is trained on MetFaces, IF-GMI remarkably improves the Acc@1 by " + }, + { + "bbox": [ + 130, + 369, + 482, + 559 + ], + "type": "inline_equation", + "content": "15.1\\%" + }, + { + "bbox": [ + 130, + 369, + 482, + 559 + ], + "type": "text", + "content": " and the Acc@5 is nearly to " + }, + { + "bbox": [ + 130, + 369, + 482, + 559 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 130, + 369, + 482, + 559 + ], + "type": "text", + "content": ". Moreover, our method generally achieves a lower feature distance than baselines between reconstructed samples and private data. 
For instance, we reduce the distance by more than " + }, + { + "bbox": [ + 130, + 369, + 482, + 559 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 130, + 369, + 482, + 559 + ], + "type": "text", + "content": " compared to the PPA on the MetFaces dataset. Notably, the MetFaces dataset is composed of artworks and thus has a larger distributional shift with real human faces compared with the FFHQ dataset. We note that this severely reduces the reconstruction performance of previous attack methods, while our proposed method still exhibits outstanding performance, highlighting the excellent generalization ability of our approach. Visualization results of the recovered images using generators trained on FFHQ are shown in Figure 4. Compared with previous methods, our reconstructed images have higher fidelity and realism, demonstrating the superiority of exploiting GAN's intermediate features." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 131, + 582, + 440, + 594 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 582, + 440, + 594 + ], + "spans": [ + { + "bbox": [ + 131, + 582, + 440, + 594 + ], + "type": "text", + "content": "4.4 Comparison under different target datasets and models" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 482, + 666 + ], + "type": "text", + "content": "To validate the effectiveness of the proposed method, we conducted extensive experiments on various datasets using different target models with different architectures. We chose the PPA method as our baseline for comparison due to its comprehensive performance in both accuracy and fidelity. Additional experimental results are in the Appendix." + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "text", + "content": "Y. Qiu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 135, + 135, + 481, + 206 + ], + "blocks": [ + { + "bbox": [ + 165, + 114, + 447, + 126 + ], + "lines": [ + { + "bbox": [ + 165, + 114, + 447, + 126 + ], + "spans": [ + { + "bbox": [ + 165, + 114, + 447, + 126 + ], + "type": "text", + "content": "Table 2: Comparison results against ResNet-152 trained on CelebA." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 135, + 481, + 206 + ], + "lines": [ + { + "bbox": [ + 135, + 135, + 481, + 206 + ], + "spans": [ + { + "bbox": [ + 135, + 135, + 481, + 206 + ], + "type": "table", + "html": "
<table><tr><td>Public Dataset</td><td>Method</td><td>↑ Acc@1</td><td>↑ Acc@5</td><td>↓ δface</td><td>↓ δeval</td><td>↓ FID</td></tr>
<tr><td rowspan="2">FFHQ</td><td>PPA</td><td>0.806</td><td>0.946</td><td>0.736</td><td>312.580</td><td>40.430</td></tr>
<tr><td>IF-GMI(ours)</td><td>0.912</td><td>0.982</td><td>0.678</td><td>314.392</td><td>30.685</td></tr>
<tr><td rowspan="2">MetFaces</td><td>PPA</td><td>0.396</td><td>0.643</td><td>1.063</td><td>387.810</td><td>74.030</td></tr>
<tr><td>IF-GMI(ours)</td><td>0.784</td><td>0.929</td><td>0.835</td><td>340.894</td><td>74.504</td></tr></table>
", + "image_path": "884b06e52a1cfe2701a2e8f698962928efb6878351541f0ff55419fae06492c4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 135, + 247, + 481, + 345 + ], + "blocks": [ + { + "bbox": [ + 132, + 215, + 480, + 237 + ], + "lines": [ + { + "bbox": [ + 132, + 215, + 480, + 237 + ], + "spans": [ + { + "bbox": [ + 132, + 215, + 480, + 237 + ], + "type": "text", + "content": "Table 3: Comparison results against different target models trained on FaceScrub with the public dataset being MetFaces." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 247, + 481, + 345 + ], + "lines": [ + { + "bbox": [ + 135, + 247, + 481, + 345 + ], + "spans": [ + { + "bbox": [ + 135, + 247, + 481, + 345 + ], + "type": "table", + "html": "
<table><tr><td>Target Model</td><td>Method</td><td>↑ Acc@1</td><td>↑ Acc@5</td><td>↓ δface</td><td>↓ δeval</td><td>↓ FID</td></tr>
<tr><td rowspan="2">ResNet-152</td><td>PPA</td><td>0.731</td><td>0.920</td><td>0.966</td><td>139.380</td><td>68.540</td></tr>
<tr><td>IF-GMI(ours)</td><td>0.904</td><td>0.984</td><td>0.882</td><td>138.752</td><td>69.937</td></tr>
<tr><td rowspan="2">ResNeSt-101</td><td>PPA</td><td>0.750</td><td>0.927</td><td>0.979</td><td>137.170</td><td>88.660</td></tr>
<tr><td>IF-GMI(ours)</td><td>0.922</td><td>0.983</td><td>0.884</td><td>132.609</td><td>76.195</td></tr>
<tr><td rowspan="2">DenseNet-169</td><td>PPA</td><td>0.798</td><td>0.948</td><td>0.938</td><td>129.440</td><td>77.520</td></tr>
<tr><td>IF-GMI(ours)</td><td>0.933</td><td>0.987</td><td>0.851</td><td>125.050</td><td>82.123</td></tr></table>
", + "image_path": "d033d4173505cd8f9e044c6f4de600ee912f78efbc12627b1952648722ddc7df.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 366, + 480, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 366, + 480, + 509 + ], + "spans": [ + { + "bbox": [ + 130, + 366, + 480, + 509 + ], + "type": "text", + "content": "As shown in Table 2, our proposed IF-GMI maintains superiority in most metrics against the ResNet-152 trained on the CelebA. Our method achieves a remarkable increase of " + }, + { + "bbox": [ + 130, + 366, + 480, + 509 + ], + "type": "inline_equation", + "content": "10.6\\%" + }, + { + "bbox": [ + 130, + 366, + 480, + 509 + ], + "type": "text", + "content": " in Acc@1 and significantly reduces the FID value using the StyleGAN2 trained on FFHQ. When utilizing the MetFaces StyleGAN2, our method still achieves much better results than the baseline despite a larger distributional shift, including a " + }, + { + "bbox": [ + 130, + 366, + 480, + 509 + ], + "type": "inline_equation", + "content": "38.8\\%" + }, + { + "bbox": [ + 130, + 366, + 480, + 509 + ], + "type": "text", + "content": " increase in Acc@1 and competitive feature distance. In addition to ResNet-18, we evaluate the performance of the proposed method on more target models trained on FaceScrub, including ResNet-152, ResNeSt-101, and DenseNet-169. Benefiting from the fully utilized generative prior, our method achieves " + }, + { + "bbox": [ + 130, + 366, + 480, + 509 + ], + "type": "inline_equation", + "content": "13\\% \\sim 17\\%" + }, + { + "bbox": [ + 130, + 366, + 480, + 509 + ], + "type": "text", + "content": " improvement in Acc@1 metrics than the baselines and also achieves better results in most of the other metrics, as illustrated in Table 3." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 510, + 480, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 480, + 558 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 480, + 558 + ], + "type": "text", + "content": "The results presented above demonstrate that our method maintains outstanding attack performance in a variety of settings, exhibiting excellent generalizability and transferability. We also provide additional experimental results on more datasets and architectures in the Appendix." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 574, + 246, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 574, + 246, + 586 + ], + "spans": [ + { + "bbox": [ + 132, + 574, + 246, + 586 + ], + "type": "text", + "content": "4.5 Ablation Studies" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 594, + 480, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 480, + 641 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 480, + 641 + ], + "type": "text", + "content": "To estimate the contributions from each component in our method, we conduct ablation studies on the ResNet-152 trained on the CelebA dataset using the StyleGAN2 trained on FFHQ. The results are presented in Table 4. More ablation studies are listed in the Appendix." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 641, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 641, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 641, + 481, + 666 + ], + "type": "text", + "content": "Intermediate Features Optimization. 
We merely remove the intermediate features optimization from our pipeline while keeping the remaining param-" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "type": "text", + "content": "A Closer Look at GAN Priors" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 170, + 168, + 447, + 231 + ], + "blocks": [ + { + "bbox": [ + 130, + 114, + 482, + 158 + ], + "lines": [ + { + "bbox": [ + 130, + 114, + 482, + 158 + ], + "spans": [ + { + "bbox": [ + 130, + 114, + 482, + 158 + ], + "type": "text", + "content": "Table 4: Ablation study performed on ResNet-152 trained on CelebA dataset with FFHQ as the public dataset. IF-GMI-" + }, + { + "bbox": [ + 130, + 114, + 482, + 158 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 130, + 114, + 482, + 158 + ], + "type": "text", + "content": " removes the intermediate feature optimization and only searches the latent space. IF-GMI-" + }, + { + "bbox": [ + 130, + 114, + 482, + 158 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 130, + 114, + 482, + 158 + ], + "type": "text", + "content": " removes the " + }, + { + "bbox": [ + 130, + 114, + 482, + 158 + ], + "type": "inline_equation", + "content": "l_{1}" + }, + { + "bbox": [ + 130, + 114, + 482, + 158 + ], + "type": "text", + "content": " ball constraint compared to IF-GMI." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 170, + 168, + 447, + 231 + ], + "lines": [ + { + "bbox": [ + 170, + 168, + 447, + 231 + ], + "spans": [ + { + "bbox": [ + 170, + 168, + 447, + 231 + ], + "type": "table", + "html": "
<table><tr><td>Method</td><td>↑ Acc@1</td><td>↑ Acc@5</td><td>↓ δface</td><td>↓ δeval</td><td>↓ FID</td></tr>
<tr><td>IF-GMI-i</td><td>0.803</td><td>0.928</td><td>0.732</td><td>314.275</td><td>43.576</td></tr>
<tr><td>IF-GMI-l</td><td>0.945</td><td>0.992</td><td>0.678</td><td>315.278</td><td>37.528</td></tr>
<tr><td>IF-GMI</td><td>0.947</td><td>0.993</td><td>0.677</td><td>315.032</td><td>37.461</td></tr></table>
", + "image_path": "79b7704ad9143b850a5f85c4444f82c3a581f71a57b9a4f350e3089297b086a3.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 251, + 479, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 251, + 479, + 285 + ], + "spans": [ + { + "bbox": [ + 130, + 251, + 479, + 285 + ], + "type": "text", + "content": "eters unchanged. As shown in the first row of Table 4, it leads to degradation up to " + }, + { + "bbox": [ + 130, + 251, + 479, + 285 + ], + "type": "inline_equation", + "content": "14\\%" + }, + { + "bbox": [ + 130, + 251, + 479, + 285 + ], + "type": "text", + "content": " in Acc@1 and much worse FID without this technique, demonstrating the superiority of utilizing the hierarchical features of intermediate layers." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 286, + 482, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 286, + 482, + 346 + ], + "spans": [ + { + "bbox": [ + 130, + 286, + 482, + 346 + ], + "type": "inline_equation", + "content": "l_{1}" + }, + { + "bbox": [ + 130, + 286, + 482, + 346 + ], + "type": "text", + "content": " Ball Constraint. To avoid unreal image generation, we introduce the " + }, + { + "bbox": [ + 130, + 286, + 482, + 346 + ], + "type": "inline_equation", + "content": "l_{1}" + }, + { + "bbox": [ + 130, + 286, + 482, + 346 + ], + "type": "text", + "content": " ball constraint into the intermediate features optimization. By observing the results shown in the second row of Table 4, the " + }, + { + "bbox": [ + 130, + 286, + 482, + 346 + ], + "type": "inline_equation", + "content": "l_{1}" + }, + { + "bbox": [ + 130, + 286, + 482, + 346 + ], + "type": "text", + "content": " ball is beneficial in improving the performance in all metrics. Thus, we demonstrate the necessity of restricting the intermediate features within the " + }, + { + "bbox": [ + 130, + 286, + 482, + 346 + ], + "type": "inline_equation", + "content": "l_{1}" + }, + { + "bbox": [ + 130, + 286, + 482, + 346 + ], + "type": "text", + "content": " ball constraint." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 363, + 220, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 363, + 220, + 376 + ], + "spans": [ + { + "bbox": [ + 132, + 363, + 220, + 376 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 386, + 482, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 386, + 482, + 493 + ], + "spans": [ + { + "bbox": [ + 130, + 386, + 482, + 493 + ], + "type": "text", + "content": "We proposed IF-GMI, a novel model inversion attack that performs effective attack in the OOD scenario. Surpassing the limitation of treating the generator as a black-box, we studied the structure and decomposed the generator into hierarchical layers, extending the optimization space from latent code to intermediate features to generate stable and high-quality images. Moreover, to avoid generating low-fidelity images, we applied a " + }, + { + "bbox": [ + 130, + 386, + 482, + 493 + ], + "type": "inline_equation", + "content": "l_{1}" + }, + { + "bbox": [ + 130, + 386, + 482, + 493 + ], + "type": "text", + "content": " ball constraint to the optimization process. Through our extensive experiments, we demonstrated that the proposed IF-GMI achieves the state-of-the-art attack accuracy while generating samples with high fidelity and diversity." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 494, + 482, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 494, + 482, + 553 + ], + "spans": [ + { + "bbox": [ + 130, + 494, + 482, + 553 + ], + "type": "text", + "content": "Our exploration of enhanced utilization of intermediate features in the GAN prior contributes to advances in MI attack field, paving the way to more practical employment for MI attacks. We hope this paper can raise concerns about privacy leakage risk of released pre-trained models and facilitate more response to the threat of MI attacks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 570, + 241, + 583 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 570, + 241, + 583 + ], + "spans": [ + { + "bbox": [ + 132, + 570, + 241, + 583 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 593, + 486, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 593, + 486, + 654 + ], + "spans": [ + { + "bbox": [ + 130, + 593, + 486, + 654 + ], + "type": "text", + "content": "This work is supported in part by the National Natural Science Foundation of China under grant 62171248, 62301189, Guangdong Basic and Applied Basic Research Foundation under grant 2021A1515110066, the PCNL KEY project (PCL2021A07), and Shenzhen Science and Technology Program under Grant JCYJ20220818101012025, RCBS20221008093124061, GXWD20220811172936001." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "text", + "content": "Y. Qiu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 114, + 197, + 126 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 114, + 197, + 126 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 197, + 126 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 137, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 138, + 137, + 481, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 137, + 481, + 169 + ], + "spans": [ + { + "bbox": [ + 138, + 137, + 481, + 169 + ], + "type": "text", + "content": "1. Abdal, R., Qin, Y., Wonka, P.: Image2stylegan: How to embed images into the stylegan latent space? In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 4432-4441 (2019)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 170, + 481, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 170, + 481, + 191 + ], + "spans": [ + { + "bbox": [ + 138, + 170, + 481, + 191 + ], + "type": "text", + "content": "2. An, S., Tao, G., Xu, Q., Liu, Y., Shen, G., Yao, Y., Xu, J., Zhang, X.: Mirror: Model inversion for deep learning network with high fidelity. 
In: NDSS (2022)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 191, + 481, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 191, + 481, + 223 + ], + "spans": [ + { + "bbox": [ + 138, + 191, + 481, + 223 + ], + "type": "text", + "content": "3. Bau, D., Zhu, J.Y., Strobelt, H., Zhou, B., Tenenbaum, J.B., Freeman, W.T., Torralba, A.: Gan dissection: Visualizing and understanding generative adversarial networks. arXiv preprint arXiv:1811.10597 (2018)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 224, + 481, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 224, + 481, + 266 + ], + "spans": [ + { + "bbox": [ + 138, + 224, + 481, + 266 + ], + "type": "text", + "content": "4. Chen, B., Feng, Y., Dai, T., Bai, J., Jiang, Y., Xia, S.T., Wang, X.: Adversarial examples generation for deep product quantization networks on image retrieval. IEEE Transactions on Pattern Analysis and Machine Intelligence 45(2), 1388-1404 (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 266, + 481, + 288 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 266, + 481, + 288 + ], + "spans": [ + { + "bbox": [ + 138, + 266, + 481, + 288 + ], + "type": "text", + "content": "5. Chen, S., Kahla, M., Jia, R., Qi, G.J.: Knowledge-enriched distributional model inversion attacks. In: ICCV (2021)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 289, + 481, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 289, + 481, + 319 + ], + "spans": [ + { + "bbox": [ + 138, + 289, + 481, + 319 + ], + "type": "text", + "content": "6. Choi, Y., Uh, Y., Yoo, J., Ha, J.W.: Stargan v2: Diverse image synthesis for multiple domains. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8188-8197 (2020)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 320, + 481, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 320, + 481, + 352 + ], + "spans": [ + { + "bbox": [ + 138, + 320, + 481, + 352 + ], + "type": "text", + "content": "7. Conneau, A., Baevski, A., Collobert, R., Mohamed, A., Auli, M.: Unsupervised cross-lingual representation learning for speech recognition. arXiv preprint arXiv:2006.13979 (2020)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 353, + 481, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 353, + 481, + 384 + ], + "spans": [ + { + "bbox": [ + 138, + 353, + 481, + 384 + ], + "type": "text", + "content": "8. Daras, G., Dean, J., Jalal, A., Dimakis, A.G.: Intermediate layer optimization for inverse problems using deep generative models. arXiv preprint arXiv:2102.07364 (2021)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 385, + 481, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 385, + 481, + 417 + ], + "spans": [ + { + "bbox": [ + 138, + 385, + 481, + 417 + ], + "type": "text", + "content": "9. Dataset, E.: Novel datasets for fine-grained image categorization. In: First Workshop on Fine Grained Visual Categorization, CVPR. Citeseer. Citeseer. Citeseer (2011)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 418, + 481, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 418, + 481, + 449 + ], + "spans": [ + { + "bbox": [ + 138, + 418, + 481, + 449 + ], + "type": "text", + "content": "0. 
Fang, H., Chen, B., Wang, X., Wang, Z., Xia, S.T.: Gidf: A generative gradient inversion method with feature domain optimization. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 4967-4976 (2023)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 449, + 481, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 449, + 481, + 482 + ], + "spans": [ + { + "bbox": [ + 138, + 449, + 481, + 482 + ], + "type": "text", + "content": "1. Fang, H., Kong, J., Yu, W., Chen, B., Li, J., Xia, S., Xu, K.: One perturbation is enough: On generating universal adversarial perturbations against vision-language pre-training models. arXiv preprint arXiv:2406.05491 (2024)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 483, + 481, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 483, + 481, + 514 + ], + "spans": [ + { + "bbox": [ + 138, + 483, + 481, + 514 + ], + "type": "text", + "content": "2. Fang, H., Qiu, Y., Yu, H., Yu, W., Kong, J., Chong, B., Chen, B., Wang, X., Xia, S.T.: Privacy leakage on dnns: A survey of model inversion attacks and defenses. arXiv preprint arXiv:2402.04013 (2024)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 515, + 481, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 515, + 481, + 535 + ], + "spans": [ + { + "bbox": [ + 138, + 515, + 481, + 535 + ], + "type": "text", + "content": "3. Fredrikson, M., Jha, S., Ristenpart, T.: Model inversion attacks that exploit confidence information and basic countermeasures. In: CCS. pp. 1322-1333 (2015)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 536, + 481, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 536, + 481, + 567 + ], + "spans": [ + { + "bbox": [ + 138, + 536, + 481, + 567 + ], + "type": "text", + "content": "4. Fredrikson, M., Lantz, E., Jha, S., Lin, S., Page, D., Ristenpart, T.: Privacy in pharmacogenetics: An {End-to-End} case study of personalized warfarin dosing. In: USENIX Security. pp. 17-32 (2014)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 568, + 481, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 568, + 481, + 601 + ], + "spans": [ + { + "bbox": [ + 138, + 568, + 481, + 601 + ], + "type": "text", + "content": "5. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial nets. Advances in neural information processing systems 27 (2014)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 138, + 601, + 481, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 601, + 481, + 632 + ], + "spans": [ + { + "bbox": [ + 138, + 601, + 481, + 632 + ], + "type": "text", + "content": "6. Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial networks. Communications of the ACM 63(11), 139–144 (2020)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 138, + 633, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 633, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 633, + 481, + 665 + ], + "type": "text", + "content": "7. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 
770-778 (2016)" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "spans": [ + { + "bbox": [ + 324, + 91, + 447, + 101 + ], + "type": "text", + "content": "A Closer Look at GAN Priors" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 133, + 116, + 480, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 116, + 480, + 149 + ], + "spans": [ + { + "bbox": [ + 133, + 116, + 480, + 149 + ], + "type": "text", + "content": "18. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 150, + 480, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 150, + 480, + 183 + ], + "spans": [ + { + "bbox": [ + 133, + 150, + 480, + 183 + ], + "type": "text", + "content": "19. Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems 30 (2017)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 183, + 480, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 183, + 480, + 215 + ], + "spans": [ + { + "bbox": [ + 132, + 183, + 480, + 215 + ], + "type": "text", + "content": "20. Hu, H., Salcic, Z., Sun, L., Dobbie, G., Yu, P.S., Zhang, X.: Membership inference attacks on machine learning: A survey. ACM Computing Surveys (CSUR) 54(11s), 1-37 (2022)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 216, + 480, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 216, + 480, + 248 + ], + "spans": [ + { + "bbox": [ + 132, + 216, + 480, + 248 + ], + "type": "text", + "content": "21. Huang, G., Liu, Z., Van Der Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 4700-4708 (2017)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 249, + 480, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 249, + 480, + 270 + ], + "spans": [ + { + "bbox": [ + 132, + 249, + 480, + 270 + ], + "type": "text", + "content": "22. Kahla, M., Chen, S., Just, H.A., Jia, R.: Label-only model inversion attacks via boundary repulsion. In: CVPR (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 271, + 480, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 271, + 480, + 303 + ], + "spans": [ + { + "bbox": [ + 132, + 271, + 480, + 303 + ], + "type": "text", + "content": "23. Karras, T., Aittala, M., Hellsten, J., Laine, S., Lehtinen, J., Aila, T.: Training generative adversarial networks with limited data. 
Advances in neural information processing systems 33, 12104-12114 (2020)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 304, + 480, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 304, + 480, + 335 + ], + "spans": [ + { + "bbox": [ + 132, + 304, + 480, + 335 + ], + "type": "text", + "content": "24. Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 4401-4410 (2019)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 336, + 480, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 336, + 480, + 369 + ], + "spans": [ + { + "bbox": [ + 132, + 336, + 480, + 369 + ], + "type": "text", + "content": "25. Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., Aila, T.: Analyzing and improving the image quality of stylegan. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8110-8119 (2020)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 369, + 480, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 369, + 480, + 402 + ], + "spans": [ + { + "bbox": [ + 132, + 369, + 480, + 402 + ], + "type": "text", + "content": "26. Kynkänniemi, T., Karras, T., Laine, S., Lehtinen, J., Aila, T.: Improved precision and recall metric for assessing generative models. Advances in Neural Information Processing Systems 32 (2019)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 403, + 480, + 424 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 403, + 480, + 424 + ], + "spans": [ + { + "bbox": [ + 132, + 403, + 480, + 424 + ], + "type": "text", + "content": "27. Li, C., Qiu, M.: Reinforcement learning for cyber-physical systems: with cybersecurity case studies. Chapman and Hall/CRC (2019)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 425, + 480, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 425, + 480, + 456 + ], + "spans": [ + { + "bbox": [ + 132, + 425, + 480, + 456 + ], + "type": "text", + "content": "28. Liu, Z., Luo, P., Wang, X., Tang, X.: Deep learning face attributes in the wild. In: Proceedings of the IEEE international conference on computer vision. pp. 3730-3738 (2015)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 457, + 480, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 457, + 480, + 490 + ], + "spans": [ + { + "bbox": [ + 132, + 457, + 480, + 490 + ], + "type": "text", + "content": "29. Naeem, M.F., Oh, S.J., Uh, Y., Choi, Y., Yoo, J.: Reliable fidelity and diversity metrics for generative models. In: International Conference on Machine Learning. pp. 7176-7185. PMLR (2020)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 491, + 480, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 491, + 480, + 522 + ], + "spans": [ + { + "bbox": [ + 132, + 491, + 480, + 522 + ], + "type": "text", + "content": "30. Ng, H.W., Winkler, S.: A data-driven approach to cleaning large face datasets. In: 2014 IEEE international conference on image processing (ICIP). pp. 343-347. 
IEEE (2014)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 523, + 480, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 523, + 480, + 555 + ], + "spans": [ + { + "bbox": [ + 132, + 523, + 480, + 555 + ], + "type": "text", + "content": "31. Nguyen, N.B., Chandrasegaran, K., Abdollahzadeh, M., Cheung, N.M.: Rethinking model inversion attacks against deep neural networks. In: CVPR. pp. 16384-16393 (2023)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 555, + 480, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 555, + 480, + 599 + ], + "spans": [ + { + "bbox": [ + 132, + 555, + 480, + 599 + ], + "type": "text", + "content": "32. Park, J.Y., Smedemark-Margulies, N., Daniels, M., Yu, R., van de Meent, J.W., HAnd, P.: Generator surgery for compressed sensing. In: NeurIPS 2020 Workshop on Deep Learning and Inverse Problems (2020), https://openreview.net/forum?id=s2EucjZ6d2s" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 600, + 480, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 600, + 480, + 632 + ], + "spans": [ + { + "bbox": [ + 132, + 600, + 480, + 632 + ], + "type": "text", + "content": "33. Qiu, H., Dong, T., Zhang, T., Lu, J., Memmi, G., Qiu, M.: Adversarial attacks against network intrusion detection in IoT systems. IEEE Internet of Things Journal 8(13), 10327-10335 (2020)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 632, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 632, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 632, + 480, + 665 + ], + "type": "text", + "content": "34. Schroff, F., Kalenichenko, D., Philbin, J.: Facenet: A unified embedding for face recognition and clustering. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 815-823 (2015)" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 219, + 102 + ], + "type": "text", + "content": "Y. Qiu et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 658 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "type": "text", + "content": "35. Shen, Y., Gu, J., Tang, X., Zhou, B.: Interpreting the latent space of gans for semantic face editing. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 9243-9252 (2020)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 149, + 482, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 149, + 482, + 182 + ], + "spans": [ + { + "bbox": [ + 130, + 149, + 482, + 182 + ], + "type": "text", + "content": "36. 
Shokri, R., Stronati, M., Song, C., Shmatikov, V.: Membership inference attacks against machine learning models. In: 2017 IEEE symposium on security and privacy (SP). pp. 3-18. IEEE (2017)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 182, + 482, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 182, + 482, + 203 + ], + "spans": [ + { + "bbox": [ + 130, + 182, + 482, + 203 + ], + "type": "text", + "content": "37. Song, C., Ristenpart, T., Shmatikov, V.: Machine learning models that remember too much. In: CCS. pp. 587-601 (2017)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 203, + 482, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 203, + 482, + 224 + ], + "spans": [ + { + "bbox": [ + 130, + 203, + 482, + 224 + ], + "type": "text", + "content": "38. Struppek, L., Hintersdorf, D., Correira, A.D.A., Adler, A., Kersting, K.: Plug & play attacks: Towards robust and flexible model inversion attacks. In: ICML (2022)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 224, + 482, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 224, + 482, + 257 + ], + "spans": [ + { + "bbox": [ + 130, + 224, + 482, + 257 + ], + "type": "text", + "content": "39. Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., Wojna, Z.: Rethinking the inception architecture for computer vision. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2818-2826 (2016)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 257, + 482, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 257, + 482, + 290 + ], + "spans": [ + { + "bbox": [ + 130, + 257, + 482, + 290 + ], + "type": "text", + "content": "40. Tewari, A., Elgharib, M., Bernard, F., Seidel, H.P., Pérez, P., Zollhöfer, M., Theobalt, C.: Pie: Portrait image embedding for semantic control. ACM Transactions on Graphics (TOG) 39(6), 1-14 (2020)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 290, + 482, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 290, + 482, + 312 + ], + "spans": [ + { + "bbox": [ + 130, + 290, + 482, + 312 + ], + "type": "text", + "content": "41. Wang, K.C., Fu, Y., Li, K., Khisti, A., Zemel, R., Makhzani, A.: Variational model inversion attacks. In: NeurIPS (2021)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 312, + 482, + 344 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 312, + 482, + 344 + ], + "spans": [ + { + "bbox": [ + 130, + 312, + 482, + 344 + ], + "type": "text", + "content": "42. Wu, C., Yan, M.: Session-aware information embedding for e-commerce product recommendation. In: Proceedings of the 2017 ACM on conference on information and knowledge management. pp. 2379-2382 (2017)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 344, + 482, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 344, + 482, + 365 + ], + "spans": [ + { + "bbox": [ + 130, + 344, + 482, + 365 + ], + "type": "text", + "content": "43. Yang, Z., Zhang, J., Chang, E.C., Liang, Z.: Neural network inversion in adversarial setting via background knowledge alignment. 
In: CCS (2019)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 365, + 482, + 408 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 365, + 482, + 408 + ], + "spans": [ + { + "bbox": [ + 130, + 365, + 482, + 408 + ], + "type": "text", + "content": "44. Yin, H., Molchanov, P., Alvarez, J.M., Li, Z., Mallya, A., Hoiem, D., Jha, N.K., Kautz, J.: Dreaming to distill: Data-free knowledge transfer via deepinversion. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8715-8724 (2020)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 408, + 482, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 408, + 482, + 430 + ], + "spans": [ + { + "bbox": [ + 130, + 408, + 482, + 430 + ], + "type": "text", + "content": "45. Yu, W., Chen, B., Zhang, Q., Xia, S.T.: Editable-deepsc: Cross-modal editable semantic communication systems. arXiv preprint arXiv:2310.10347 (2023)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 130, + 430, + 482, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 430, + 482, + 462 + ], + "spans": [ + { + "bbox": [ + 130, + 430, + 482, + 462 + ], + "type": "text", + "content": "46. Yu, W., Fang, H., Chen, B., Sui, X., Chen, C., Wu, H., Xia, S.T., Xu, K.: Gi-nas: Boosting gradient inversion attacks through adaptive neural architecture search. arXiv preprint arXiv:2405.20725 (2024)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 130, + 462, + 482, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 462, + 482, + 495 + ], + "spans": [ + { + "bbox": [ + 130, + 462, + 482, + 495 + ], + "type": "text", + "content": "47. Yuan, X., Chen, K., Zhang, J., Zhang, W., Yu, N., Zhang, Y.: Pseudo label-guided model inversion attack via conditional generative adversarial network. In: AAAI (2023)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 130, + 495, + 482, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 495, + 482, + 516 + ], + "spans": [ + { + "bbox": [ + 130, + 495, + 482, + 516 + ], + "type": "text", + "content": "48. Yuan, Z., Wu, F., Long, Y., Xiao, C., Li, B.: Secretgen: Privacy recovery on pretrained models via distribution discrimination. In: ECCV (2022)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 130, + 516, + 482, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 516, + 482, + 559 + ], + "spans": [ + { + "bbox": [ + 130, + 516, + 482, + 559 + ], + "type": "text", + "content": "49. Zeng, Y., Pan, M., Just, H.A., Lyu, L., Qiu, M., Jia, R.: Narcissus: A practical clean-label backdoor attack with limited information. In: Proceedings of the 2023 ACM SIGSAC Conference on Computer and Communications Security. pp. 771-785 (2023)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 130, + 559, + 482, + 603 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 559, + 482, + 603 + ], + "spans": [ + { + "bbox": [ + 130, + 559, + 482, + 603 + ], + "type": "text", + "content": "50. Zhang, H., Wu, C., Zhang, Z., Zhu, Y., Lin, H., Zhang, Z., Sun, Y., He, T., Mueller, J., Manmatha, R., et al.: Resnest: Split-attention networks. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 
2736-2746 (2022)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 130, + 603, + 482, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 603, + 482, + 624 + ], + "spans": [ + { + "bbox": [ + 130, + 603, + 482, + 624 + ], + "type": "text", + "content": "51. Zhang, Y., Jia, R., Pei, H., Wang, W., Li, B., Song, D.: The secret revealer: Generative model-inversion attacks against deep neural networks. In: CVPR (2020)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 130, + 624, + 482, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 624, + 482, + 658 + ], + "spans": [ + { + "bbox": [ + 130, + 624, + 482, + 658 + ], + "type": "text", + "content": "52. Zhong, X., Fang, H., Chen, B., Gu, X., Dai, T., Qiu, M., Xia, S.T.: Hierarchical features matter: A deep exploration of gan priors for improved dataset distillation. arXiv preprint arXiv:2406.05704 (2024)" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 324, + 91, + 447, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 91, + 447, + 100 + ], + "spans": [ + { + "bbox": [ + 324, + 91, + 447, + 100 + ], + "type": "text", + "content": "A Closer Look at GAN Priors" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/7472726a-f5ca-4354-bd16-63b6aa3c1be0_content_list.json b/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/7472726a-f5ca-4354-bd16-63b6aa3c1be0_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..f73fbdbc886aef89551bd17b82f90ffffb00256d --- /dev/null +++ b/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/7472726a-f5ca-4354-bd16-63b6aa3c1be0_content_list.json @@ -0,0 +1,1733 @@ +[ + { + "type": "text", + "text": "A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis", + "text_level": 1, + "bbox": [ + 250, + 140, + 754, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kai Katsumata, Duc Minh Vo, and Hideki Nakayama", + "bbox": [ + 308, + 212, + 694, + 227 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The University of Tokyo, Japan {katsumata, vm Duc, nakayama}@nlab.ci.i.u-tokyo.ac.jp", + "bbox": [ + 279, + 239, + 723, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. 3D Gaussian Splatting (3DGS) has shown remarkable success in synthesizing novel views given multiple views of a static scene. Yet, 3DGS faces challenges when applied to dynamic scenes because 3D Gaussian parameters need to be updated per timestep, requiring a large amount of memory and at least a dozen observations per timestep. To address these limitations, we present a compact dynamic 3D Gaussian representation that models positions and rotations as functions of time with a few parameter approximations while keeping other properties of 3DGS including scale, color, and opacity invariant. 
Our method can dramatically reduce memory usage and relax a strict multi-view assumption. In our experiments on monocular and multi-view scenarios, we show that our method not only matches state-of-the-art methods, often linked with slower rendering speeds, in terms of high rendering quality, but also significantly surpasses them by achieving a rendering speed of 118 frames per second at a resolution of $1,352 \\times 1,014$ on a single GPU.", + "bbox": [ + 261, + 303, + 743, + 497 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 217, + 521, + 356, + 537 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The landscape of novel view synthesis of scenes captured through multiple images/videos has undergone a revolutionary transformation, owing principally to major breakthroughs in neural radiance field (NeRF) approaches [6,41,57]. Although they achieve remarkable visual quality, particularly in dynamic scenes [4,21,31,34,45], NeRFs inevitably confront hurdles in terms of high-speed training and rendering [41,43,44,48]. This limitation is attributed to their reliance on multi-layer perceptrons (MLPs). Recently, 3D Gaussian Splatting (3DGS) [26] introduced a differentiable 3D Gaussian representation and point-based rasterization, signaling a departure from neural network reliance. 3DGS has emerged as a promising solution that not only accelerates training and rendering processes but also delivers high-quality rendered scenes, rivaling the levels set by NeRF [41] on static scenes.", + "bbox": [ + 212, + 553, + 792, + 718 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Nonetheless, in the realm of dynamic scene synthesis, 3DGS faces challenges related to memory usage and the need for many observations [38]. In particular, a significant number of 3D Gaussian parameters must be stored per timestep, resulting in a non-negligible increase in memory usage and the need for numerous observations per timestep. This poses challenges in monocular or few-view setups, as their strict multiview assumption demands advanced facilities or expertise, limiting flexibility in capturing setups. Exploring 3DGS without multi-view assumption enables dynamic view synthesis with a simple and easy camera setup, which is the primary goal of this study.", + "bbox": [ + 212, + 719, + 789, + 840 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/000059aa1fa1476365bb3f5609dfd948998b9607287e045b8a334c8e42f20b20.jpg", + "image_caption": [ + "Fig. 1: We show examples of novel view synthesis on the MUTANT scene in the D-NeRF dataset, visual quality (PSNR), rendering speed (FPS), and memory used to store optimized parameters. Our method yields reconstruction fidelity competitive with SoTAs with real-time rendering, achieving $100 \\times$ faster rendering speed than V4D and reasonable memory size. Non-obvious differences in quality are highlighted. **Bold typeface number** indicates the best result among the methods with the competitive rendering quality (excepting for 3DGS), and the **underline** one does the second best." + ], + "image_footnote": [], + "bbox": [ + 217, + 143, + 787, + 268 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To achieve memory-efficient real-time dynamic view synthesis from monocular and multi-view videos, we present a compact dynamic 3D Gaussian representation, containing time-invariant and time-varying parameters to capture dynamic motion effectively. 
Similarly to [26, 38], we use scaling factors in the covariance matrix, opacity, and color as time-invariant parameters. As modeling the change in positions over time is important to represent dynamic scenes [43-45], we express each 3D Gaussian position as a function of time to model the temporal change in the position. We also represent 3D Gaussian rotation as a time-varying parameter because the rotation of the objects in the world can be typically changed. Inspired by the studies that model motion as periodic [2, 71], we fit the position using the Fourier approximation. We fit the rotation using the linear approximation. The time-varying parameters make our representation dynamic, meaning that a 3D Gaussian moves and rotates over time. Moreover, as we use a function with a few parameters to represent the position, the small degree of freedom contributes to the smoothness of reconstructed scenes, enhancing the robustness against unseen views. Crucially, the memory consumption of our representation is solely determined by the number of 3D Gaussians and the number of the approximation function parameters, remaining independent of input length. Beyond optimizing Gaussian representations through image-level reconstruction, we further enhance temporal consistency by supervising the Gaussian with optical flow obtained from input videos. This ensures high-quality reconstruction and facilitates the generalization of the representation.", + "bbox": [ + 212, + 378, + 787, + 680 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our experiments on dynamic datasets (D-NeRF [45], DyNeRF [31], and HyperNeRF [44]) demonstrate the effectiveness of optimizing our dynamic 3D Gaussian from both monocular and multi-view videos, showing that our proposed method achieves rendering quality that rivals that of previous NeRFs [17, 18, 20]. In addition to faithful rendering quality, the proposed method achieves rendering speeds similar to a fast radiance field method [26] while avoiding large memory increases caused by a dynamic extension (see Fig. 1). Finally, we show an editing application enabled by the explicit property of 3D Gaussian representations. In summary, our contributions are as follow:", + "bbox": [ + 212, + 681, + 787, + 801 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- We present a compact dynamic 3D Gaussian representation with time-varying Gaussian parameters equipped with basis functions for representing dynamic scenes.", + "bbox": [ + 225, + 809, + 790, + 840 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "K. 
Katsumata et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- As a 3D Gaussian representation is defined over all the timesteps, the 3D Gaussian parameters can be optimized with the frames at all the timesteps, enabling dynamic scene reconstruction from monocular or few-view videos.", + "bbox": [ + 225, + 146, + 785, + 190 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- Our dynamic 3D Gaussian representation facilitates real-time high-quality dynamic scene rendering of high-resolution images of $1,352 \\times 1,014$ with a frame rate of 118 FPS using a single GPU.", + "bbox": [ + 225, + 191, + 785, + 236 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 215, + 260, + 366, + 276 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We briefly overview radiance fields for dynamic scenes and discuss recent efficient explicit representation methods (grid-, plane-, hash-, and point-based), contextualizing our work within real-time dynamic view synthesis.", + "bbox": [ + 212, + 292, + 785, + 338 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Dynamic view synthesis", + "text_level": 1, + "bbox": [ + 215, + 359, + 423, + 375 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Applications in virtual reality and computer vision often need reconstruction of dynamic scenes. Several works extend NeRF [41] to handle dynamic scenes in multi-view or monocular setups by time-varying NeRF [21, 31, 45, 60]. The regularization techniques for temporal smoothness enable suitable scene representations from monocular videos [33]. Additional sensory information is also useful for spatio-temporal regularization. Some attempts [21, 33, 58] employ depth or flow, which are observed or predicted with external networks to reconstruct from sparse observations. Deformation-based approaches [43, 44, 54, 59], another research direction in dynamic reconstruction, combine static NeRF and deformation fields. Although tremendous efforts show high visual quality for dynamic view synthesis, the frequent querying of MLP in NeRFs results in the drawback of slow optimization and rendering [65]. Our study aims to enable real-time dynamic view synthesis with high visual quality. We aim to extend 3DGS to dynamic scene reconstruction to achieve high-speed rendering while maintaining the rendering quality from sparse training views.", + "bbox": [ + 212, + 385, + 787, + 597 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 Explicit Radiance Fields", + "text_level": 1, + "bbox": [ + 215, + 618, + 428, + 633 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recent studies [11, 19, 69] have addressed the issue in implicit models (i.e., NeRFs) by exploring explicit models, reducing optimization and rendering time. Plenoxels [19] directly optimizes 3D grid representation instead of neural networks. Generally, explicit models sacrifice visual quality for fast training time [19]. Hybrid approaches [11, 17, 18, 20, 42, 53] aim to achieve better trade-offs between training time and visual quality. Instant-NGP allows for a compact MLP by exploiting a multi-level hash grid to encode positions to feature vectors [42]. Plane-based approaches are designed principally to represent bounded scenes [3, 9, 10, 14, 18, 23]. MERF [49] employs a multiresolution representation and a fast contraction function to reconstruct unbounded scenes. 
For dynamic scenes, K-planes [18] decomposes 4D dynamic volumes into multiple feature planes and employs an MLP-based feature decoder for determining color and density. Structured representations still grapple with the trade-off between rendering speed and quality. In this study, unstructured 3D Gaussians promise large gains in rendering speed.", + "bbox": [ + 212, + 643, + 787, + 840 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "A Compact Dynamic 3D Gaussian", + "bbox": [ + 524, + 114, + 730, + 128 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 114, + 785, + 126 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3 Point-based rendering", + "text_level": 1, + "bbox": [ + 215, + 146, + 413, + 162 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Points, which naturally come from depth sensors, Structure from Motion (SfM) [51], or common Multi-View Stereo (MVS) algorithms [50, 52], offer a useful representation of fine-grained scenes and complex objects, and facilitate computationally efficient rendering. Consequently, they have been studied comprehensively in the vision and graphics community. The differentiable pipeline for point-based rendering results in points can be used for reconstructing 3D scenes [26-29]. 3DGS [26] achieves real-time rendering with high visual quality for unbounded static scenes at the expense of the generalization performance derived from NeRF's continuous neural field representation. 3DGS is replacing NeRFs as the backbone of text-to-3D models, leading to faster 3D generation [1, 12, 55, 66, 68]. Recently, Dynamic 3D Gaussians [38] employs 3DGS for dynamic scenes, which models dynamic scenes by the Gaussian position and rotation at each timestamp. The position and rotation of Gaussians at every timestamp are effective in modeling scenes from dense multi-view dynamic scenes. However, this approach presents difficulties in reconstructing monocular dynamic scenes, resulting in excessive memory consumption, particularly for extended input sequences. Specifically, the space complexity of the method for a scene with $T$ frames is $O(TN)$ , where $N$ is the number of 3D Gaussians. Our goal is to reduce memory consumption by representing time-varying position and rotation with approximation using a few parameters. The space complexity of our method is $O(LN)$ , where $L$ is the number of parameters of the approximation, and usually $L < T$ .", + "bbox": [ + 212, + 167, + 787, + 470 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Concurrent works on dynamic view synthesis includes approaches combining Gaussian Splatting with MLPs [5,22,32,35,36], approaches focusing on Gaussian representation [13, 15, 16, 64, 67], and approaches for specific targets [30, 46, 47, 72]. SpacetimeGaussian [32] focuses on dynamic view synthesis from multiview videos, unlike this study, by combining Gaussian Splatting and MLPs. [64] aims to model motion by employing a deformation field network while sacrificing rendering speed. [67] splits Gaussians in a time direction, and each Gaussian only focuses on a local temporal space. Four-dimensional (4D) Rotor Gaussian Splatting [15] models a local temporal space via temporal slicing for fast rendering. 
We aim to build a memory-efficient Gaussian representation for dynamic scenes, even for monocular scenes, while maintaining pure 3D Gaussian representation in order not to sacrifice the gift of 3D Gaussians, such as outstanding rendering speed and ease of direct editing of the scene.", + "bbox": [ + 212, + 472, + 787, + 654 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 215, + 674, + 316, + 690 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given images with timesteps and camera parameters obtained from videos, our task is to learn a 4D spatial-temporal representation of a dynamic scene that enables fast and high-quality view rendering. To achieve this, we use 3DGS in dynamic view synthesis. The original 3D Gaussian representation [26] is defined by a position (mean), a covariance matrix (decomposed into a rotation matrix and a scaling vector), a color (determined by spherical harmonics (SH) [8] coefficient), and an opacity. To represent dynamic scenes, each 3D Gaussian in our method (Fig. 2) regards the position and rotation as time-varying parameters and others as time-invariant parameters over time (Sec. 3.1). Given a set of 3D Gaussians, intrinsic and extrinsic camera parameters, and", + "bbox": [ + 212, + 704, + 787, + 840 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 116, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "K. Katsumata et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/e865dcf72c9a9d8bdfad9c6614cda3a3456003052c368cedabb7a7105d446329.jpg", + "image_caption": [ + "Fig. 2: Overview of our dynamic view synthesis framework. Our dynamic 3D Gaussian representation takes temporal modeling of 3D centers and rotations with Fourier and linear approximation, respectively. Our representation parameters are shared over all the timesteps, and observations of each timestep hint at the representation for other timesteps, enabling compact representation and reconstruction of dynamic scenes from few-view videos. In this figure, we only illustrate the time-varying parameterization of one Gaussian for the sake of simplicity." + ], + "image_footnote": [], + "bbox": [ + 250, + 143, + 751, + 247 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "a timestep, we render images with the 3DGS technique [26], which renders an image by employing Gaussians within the camera plane out of a set of 3D Gaussians (Sec. 3.2). We update the Gaussian parameters to decrease the distance between rendered and training images in image and flow spaces (Sec. 3.3). Flow reconstruction loss enhances the temporal consistency of the learned representation, resulting in plausible image reconstruction. The small degrees of freedom of our representation essentially facilitate the reconstruction of dynamic scenes from a few observations.", + "bbox": [ + 212, + 371, + 787, + 476 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 Dynamic 3D Gaussian representation", + "text_level": 1, + "bbox": [ + 214, + 498, + 517, + 513 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "One possible extension of 3DGS [38] to dynamic scenes is to model the scenes per timestep explicitly. Although that strategy allows for flexible modeling for dynamic scenes, it requires 3D Gaussian parameters per timestep, increasing the memory size proportionally to video length. 
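To make this growth concrete, here is a back-of-envelope count contrasting per-timestep storage with the coefficient-based parameterization developed below. The scene sizes are made-up illustrative values, not numbers from any experiment, and the per-Gaussian counts simply follow the Eq. (2)-(3) parameterizations:

```python
# Illustrative motion-parameter counts per scene; N, T, L are made-up values.
N, T, L = 200_000, 300, 5                 # Gaussians, frames, Fourier harmonics

per_timestep = N * T * (3 + 4)            # O(TN): 3D center + quaternion stored every frame
coefficient_based = N * (3 * (2 * L + 1) + 4 * 2)  # O(LN): Fourier center + linear quaternion

print(per_timestep / coefficient_based)   # ~51x fewer motion parameters in this example
```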
Since the representation for each time is optimized by observations with the number of cameras, the strategy lacks sufficient observations in monocular or few-view video setups, limiting its effectiveness in such scenarios.", + "bbox": [ + 212, + 522, + 787, + 611 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To design a compact dynamic 3D Gaussian representation, we express 3D Gaussian parameters using only a few parameters to achieve faithful reconstruction without a large increase in parameters. Our dynamic scene representation comprises a set of dynamic 3D Gaussians, extending the static 3D Gaussian introduced in [26]. This representation allows 3D Gaussians to move through the scene over time, using time-varying parameters (center position and rotation factors) and time-invariant parameters (scale, color, and opacity). Each dynamic Gaussian encapsulates the following parameters:", + "bbox": [ + 212, + 613, + 787, + 719 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) a 3D center at time $t$ : $[x(t),y(t),z(t)]^{\\top} \\in \\mathbb{R}^{3}$ ,", + "2) a 3D rotation at time $t$ represented by a quaternion:" + ], + "bbox": [ + 232, + 722, + 586, + 753 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n[ q _ {x} (t), q _ {y} (t), q _ {z} (t), q _ {w} (t) ] ^ {\\top} \\in \\mathbb {R} ^ {4},\n$$\n", + "text_format": "latex", + "bbox": [ + 232, + 753, + 455, + 768 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "3) a scaling factor: $[s_x, s_y, s_z]^{\\mathsf{T}} \\in \\mathbb{R}^3$ ,", + "4) SH coefficients representing color with the degrees of freedom $k$ :" + ], + "bbox": [ + 232, + 753, + 679, + 799 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nh \\in \\mathbb {R} ^ {3 \\times (k + 1) ^ {2}},\n$$\n", + "text_format": "latex", + "bbox": [ + 232, + 799, + 336, + 815 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5) an opacity: $o \\in \\mathbb{R}$ .", + "bbox": [ + 232, + 816, + 372, + 830 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "A Compact Dynamic 3D Gaussian", + "bbox": [ + 524, + 114, + 730, + 128 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 114, + 785, + 126 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Each Gaussian at time $t$ is characterized by a 3D center $\\pmb{\\mu}(t) = [x(t), y(t), z(t)]^{\\top}$ and a 3D covariance matrix $\\pmb{\\Sigma}(t)$ . The density of the 3D Gaussian at the intersection $\\pmb{x}$ with a ray is obtained as follows:", + "bbox": [ + 212, + 145, + 787, + 191 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nG _ {t} (\\boldsymbol {x}) = e ^ {- \\frac {1}{2} (\\boldsymbol {x} - \\boldsymbol {\\mu} (t)) ^ {\\top} \\boldsymbol {\\Sigma} (t) ^ {- 1} (\\boldsymbol {x} - \\boldsymbol {\\mu} (t))}. \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 196, + 785, + 217 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To constrain the covariance matrix $\\boldsymbol{\\Sigma}(t)$ such that it is a positive semi-definite matrix during optimization, the covariance matrix $\\boldsymbol{\\Sigma}(t)$ is decomposed by using a scaling matrix $\\mathbf{S} = \\mathrm{diag}(s_x, s_y, s_z)$ and a rotation matrix $\\mathbf{R}(t)$ as $\\boldsymbol{\\Sigma}(t) = \\mathbf{R}(t)\\mathbf{S}\\mathbf{S}^{\\top}\\mathbf{R}(t)^{\\top}$ . 
Here, the rotation matrix $\\mathbf{R}(t)$ is represented by quaternion $(q_x(t), q_y(t), q_z(t), q_w(t))$ . Since most parts of the dynamic scene hardly change in scale because the solid objects (e.g., humans, animals, and things) scarcely expand or shrink, we maintain the scale parameter as a constant to reduce the model size. In what follows, we formally define the 3D center and rotation.", + "bbox": [ + 212, + 222, + 787, + 343 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Since motion in dynamic scenes is primarily described by changing the position of points like scene or optical flow [37, 61], we model the 3D center with an expressive approximation. We approximate the 3D position $x(t), y(t), z(t)$ using Fourier approximation. At time $t$ , it is represented by", + "bbox": [ + 212, + 343, + 787, + 404 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} x (t) = w _ {x, 0} + \\sum_ {i = 1} ^ {L} w _ {x, 2 i - 1} \\sin (2 i \\pi t) + w _ {x, 2 i} \\cos (2 i \\pi t), \\\\ y (t) = w _ {y, 0} + \\sum_ {i = 1} ^ {L} w _ {y, 2 i - 1} \\sin (2 i \\pi t) + w _ {y, 2 i} \\cos (2 i \\pi t), \\tag {2} \\\\ z (t) = w _ {z, 0} + \\sum_ {i = 1} ^ {L} w _ {z, 2 i - 1} \\sin (2 i \\pi t) + w _ {z, 2 i} \\cos (2 i \\pi t), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 305, + 409, + 785, + 472 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where, $w_{\\cdot,0}, \\ldots, w_{\\cdot,2L}$ are the intercept and coefficients of the position, and $L$ is the number of terms (harmonics). We remark that a polynomial approximation is inadequate due to underfitting with a small number of bases and overfitting with higher-order polynomials. For these reasons, we choose the Fourier approximation.", + "bbox": [ + 212, + 477, + 787, + 537 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3DGS uses anisotropic 3D Gaussians, resulting in the need for dynamic modeling of Gaussian rotations. We approximate the 3D rotation (quaternion) over time using a linear approximation because a unit quaternion can be approximated locally as linear when considering its tangent plane. At time $t$ , it is defined as", + "bbox": [ + 212, + 537, + 787, + 598 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} q _ {x} (t) = w _ {q x, 0} + w _ {q x, 1} t, \\quad q _ {y} (t) = w _ {q y, 0} + w _ {q y, 1} t, \\\\ q _ {z} (t) = w _ {q z, 0} + w _ {q z, 1} t, \\quad q _ {w} (t) = w _ {q w, 0} + w _ {q w, 1} t, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 325, + 603, + 674, + 638 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $w_{\\cdot,0}$ and $w_{\\cdot,1}$ are intercepts and coefficients of the rotation, respectively. We project the quaternion $q_{\\cdot}(t)$ onto the unit quaternion by normalizing it: $q_{\\cdot}(t) / \\|q_{\\cdot}(t)\\|$ , to ensure that the quaternion at time $t$ is a unit quaternion.", + "bbox": [ + 212, + 643, + 787, + 689 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For each Gaussian, the preceding definitions yield $3L + 8 + 3 + 3(k + 1)^2 + 1$ parameters with respect to the 3D center, 3D rotation, scale, color, and opacity. Notably, the parameter count for each Gaussian is defined merely by the number of approximation terms and spherical harmonic degrees of freedom, with no regard to time length. Compared to methods that store parameters for each timestep, our approach saves on memory usage. 
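As a concrete reading of Eqs. (2) and (3) and of the covariance assembly $\Sigma(t) = \mathbf{R}(t)\mathbf{S}\mathbf{S}^{\top}\mathbf{R}(t)^{\top}$, the following minimal NumPy sketch evaluates one dynamic Gaussian at time $t$. It is our illustration under the stated equations, not the authors' implementation, and all function names are ours:

```python
import numpy as np

def coord_at(w, t):
    """Eq. (2): Fourier approximation of one coordinate; w = [w_0, ..., w_2L]."""
    L = (len(w) - 1) // 2
    harmonics = sum(w[2*i - 1] * np.sin(2*i*np.pi*t) + w[2*i] * np.cos(2*i*np.pi*t)
                    for i in range(1, L + 1))
    return w[0] + harmonics

def quat_at(w0, w1, t):
    """Eq. (3): linear quaternion, re-projected onto the unit sphere."""
    q = np.asarray(w0) + np.asarray(w1) * t      # (4,) intercepts and (4,) slopes
    return q / np.linalg.norm(q)

def covariance_at(q, s):
    """Sigma(t) = R(t) S S^T R(t)^T with a time-invariant scale s = (sx, sy, sz)."""
    x, y, z, w = q                                # unit quaternion (qx, qy, qz, qw)
    R = np.array([                                # rotation matrix of the unit quaternion
        [1 - 2*(y*y + z*z), 2*(x*y - w*z),     2*(x*z + w*y)],
        [2*(x*y + w*z),     1 - 2*(x*x + z*z), 2*(y*z - w*x)],
        [2*(x*z - w*y),     2*(y*z + w*x),     1 - 2*(x*x + y*y)],
    ])
    S = np.diag(s)
    return R @ S @ S.T @ R.T
```

With these helpers, the center is simply $\mu(t)$ = `np.array([coord_at(w_x, t), coord_at(w_y, t), coord_at(w_z, t)])`, and the density of Eq. (1) follows directly from $\mu(t)$ and $\Sigma(t)$.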
Memory consumption in our dynamic scene representation is determined by two hyperparameters (i.e., $L$ and $k$ ) and the number of Gaussians used. Furthermore, because the representation is a continuous function of time, it inhibits discontinuous movement through time. This characteristic improves robustness in novel view synthesis settings.", + "bbox": [ + 212, + 689, + 787, + 840 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "K. Katsumata et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 Rendering via 3D Gaussian Splatting", + "text_level": 1, + "bbox": [ + 215, + 146, + 516, + 162 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Rendering with 3D Gaussians applies splatting techniques [26] to the Gaussians within the camera plane. Zwicker et al. [73] introduced the projection of the 3D covariance matrix to the 2D one. The 3D covariance matrix $\\pmb{\\Sigma}$ is projected into a 2D one $\\pmb{\\Sigma}'$ given a viewing transformation $\\mathbf{W}$ as $\\pmb{\\Sigma}'(t) = \\mathbf{J}\\mathbf{W}\\pmb{\\Sigma}(t)\\mathbf{W}^{\\top}\\mathbf{J}^{\\top}$ , where $\\mathbf{J}$ is the Jacobian of the affine approximation of the projective transformation at the Gaussian center $\\pmb{\\mu}(t)$ :", + "bbox": [ + 214, + 172, + 787, + 248 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {J} = \\left[ \\begin{array}{c c c} \\frac {1}{v _ {z}} & 0 & - \\frac {v _ {x}}{v _ {z} ^ {2}} \\\\ 0 & \\frac {1}{v _ {z}} & - \\frac {v _ {y}}{v _ {z} ^ {2}} \\\\ 0 & 0 & 0 \\end{array} \\right], \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 431, + 260, + 785, + 310 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $[v_x, v_y, v_z]^{\\top} = \\mathbf{W}\\boldsymbol{\\mu}(t)$ is the camera coordinate of the Gaussian center $\\boldsymbol{\\mu}(t)$ obtained by the viewing transformation, which projects the points from world space to camera space.", + "bbox": [ + 214, + 323, + 784, + 369 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Similar to NeRF-style volumetric rendering, point-based rendering computes the color $C$ of a pixel by evaluating the blending of $N$ ordered points that overlap the pixel: $C = \\sum_{i=1}^{N} c_i \\alpha_i \\prod_{j=1}^{i-1} (1 - \\alpha_j)$ , where $c_i$ represents the color of a Gaussian evaluated by SH coefficients, and $\\alpha_i$ represents the density that is calculated from a 2D Gaussian with the 2D covariance $\\pmb{\\Sigma}'$ and 2D center $\\pmb{\\mu}'$ at time $t$ and the optimized opacity $o$ .", + "bbox": [ + 214, + 369, + 785, + 446 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3 Optimization of the dynamic 3D Gaussian representation", + "text_level": 1, + "bbox": [ + 215, + 468, + 653, + 484 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We optimize the Gaussian parameters, i.e., the intercepts and coefficients of position and rotation $w$ , a scaling factor $s_x, s_y, s_z$ , SH coefficients $h$ , and an opacity $o$ , by iterating rendering and comparing the rendered images with the training frames of the captured videos. 
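Before turning to the losses, the projection and compositing of Sec. 3.2 are small enough to sketch in code. The snippet below follows Eq. (4) and the blending sum above; it assumes $\mathbf{W}$ is given as a $4 \times 4$ rigid world-to-camera matrix (the text writes $\mathbf{W}\boldsymbol{\mu}$ abstractly), and it is an illustration rather than the tile-based 3DGS rasterizer:

```python
import numpy as np

def project_covariance(Sigma, mu, W):
    """Sigma' = J W Sigma W^T J^T, with J the Jacobian of Eq. (4)."""
    v = W[:3, :3] @ mu + W[:3, 3]          # camera-space center [vx, vy, vz]
    vx, vy, vz = v
    J = np.array([
        [1.0 / vz, 0.0, -vx / vz**2],
        [0.0, 1.0 / vz, -vy / vz**2],
        [0.0, 0.0, 0.0],
    ])
    Sigma_cam = W[:3, :3] @ Sigma @ W[:3, :3].T
    return (J @ Sigma_cam @ J.T)[:2, :2]   # only the 2x2 image-plane block matters

def composite(colors, alphas):
    """Front-to-back alpha blending: C = sum_i c_i a_i prod_{j<i} (1 - a_j)."""
    C, T = np.zeros(3), 1.0
    for c, a in zip(colors, alphas):       # Gaussians sorted near-to-far
        C += T * a * np.asarray(c, dtype=float)
        T *= 1.0 - a
    return C
```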
To compare the rendered and training views, the loss function combines the L1 loss with the structural similarity (SSIM) [63] loss $\\mathcal{L}_{\\mathrm{D-SSIM}}$ :", + "bbox": [ + 214, + 494, + 785, + 571 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {recon}} = (1 - \\lambda) | \\hat {I} - I | + \\lambda \\mathcal {L} _ {\\mathrm {D-SSIM}}, \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 370, + 580, + 785, + 598 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\hat{I}$ and $I$ are the rendered and target images, respectively. The loss function moves and rotates the anisotropic Gaussians and changes their color and opacity so that each Gaussian covers a homogeneous area. Since the loss only adjusts incorrectly positioned Gaussians, handling over- or under-representation of the scene requires a mechanism that creates new Gaussians where they are needed and destroys superfluous ones. We therefore follow the densify-and-prune techniques of 3DGS to produce a compact and precise representation of the scene. We monitor the gradients of each Gaussian and densify Gaussians by splitting a Gaussian with a large gradient and a large scale into two small Gaussians, and by cloning a Gaussian with a large gradient and a small scale. Moreover, we remove transparent Gaussians with an opacity below a threshold of 0.005.", + "bbox": [ + 214, + 612, + 787, + 777 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Following [26], we initialize the set of Gaussians from the sparse SfM [51] points for real scenes, and randomly from a uniform distribution for synthetic scenes owing to the absence of such a prior. We adopt a two-stage optimization strategy consisting of static and dynamic stages. Deeming the", + "bbox": [ + 214, + 779, + 787, + 840 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "A Compact Dynamic 3D Gaussian", + "bbox": [ + 524, + 114, + 730, + 128 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 114, + 785, + 126 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "frames in the captured datasets as static scenes, we optimize a static representation in the static stage to learn a prior over the Gaussians. In other words, in the static stage we optimize the parameters that are consistent over time (i.e., scale, SH coefficients, and opacity) together with the intercepts of the center and rotation $(w_{x,0}, w_{y,0}, w_{z,0}, w_{qx,0}, w_{qy,0}, w_{qz,0}, w_{qw,0})$ . After the static stage, we optimize all the parameters of the set of Gaussians in the dynamic stage to reconstruct the dynamic regions.", + "bbox": [ + 212, + 146, + 787, + 236 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Another challenge in dynamic scene reconstruction is the ambiguity caused by the limited number of captured views at each timestep. Since a dynamic scene contains temporal changes, such as moving objects and changing shapes, sharing scene information across frames with different timesteps is difficult. To overcome this ambiguity, we employ flow information. Similar to our 3D Gaussians, scene flow [39, 40, 62] is defined by the position of a point in 3D space and its motion. These 3D points originate from different mechanisms than those of the 3D Gaussians, making matching in 3D space difficult. 
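Before the flow term is added below, the image-space objective of Eq. (5) can be stated compactly. The sketch treats $\mathcal{L}_{\mathrm{D-SSIM}}$ as $1 - \mathrm{SSIM}$, a common convention we assume here rather than take from the text, and uses scikit-image's SSIM for illustration; training would need a differentiable version:

```python
import numpy as np
from skimage.metrics import structural_similarity

def recon_loss(I_hat, I, lam=0.2):
    """Eq. (5) with lambda = 0.2 as in Sec. 4.2; images are float arrays in [0, 1]."""
    l1 = np.abs(I_hat - I).mean()
    # Assumed convention: L_D-SSIM = 1 - SSIM(rendered, target).
    d_ssim = 1.0 - structural_similarity(I_hat, I, channel_axis=-1, data_range=1.0)
    return (1.0 - lam) * l1 + lam * d_ssim
```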
Since optical flow defined on the image plane can be directly matched with a 3D Gaussian and is readily computed from monocular inputs, we supervise the flows of the Gaussians with the ground truth optical flows of the input frames. We use RAFT [56] to obtain ground truth flow for training views: forward flow $f_{\\mathrm{fwd}}$ and backward flow $f_{\\mathrm{bwd}}$ between two adjacent frames. The flow loss $\\mathcal{L}_{\\mathrm{flow}}$ takes the L1 loss between the ground truth flows and the optical flow of the Gaussians in both flow directions. The flow loss gives our method spatial-temporal consistency without any additional computation cost in rendering. We combine the flow loss $\\mathcal{L}_{\\mathrm{flow}}$ with the reconstruction loss that compares the rendered and training views:", + "bbox": [ + 212, + 237, + 789, + 479 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {\\mathrm {recon}} + \\lambda_ {\\mathrm {flow}} \\mathcal {L} _ {\\mathrm {flow}} (\\hat {F}, F), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 486, + 785, + 502 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $F = \\{f_{\\mathrm{fwd}}, f_{\\mathrm{bwd}}\\}$ and $\\hat{F}$ are the ground truth flow and the flow of the Gaussians, respectively, and $\\lambda_{\\mathrm{flow}}$ is a balancing hyperparameter for the flow term. Instead of applying an optical flow algorithm to the renderings, we create pseudo optical flow directly from the Gaussian representation. Scene motion is represented solely by the 3D mean coefficients $w_{x,i}, w_{y,i}, w_{z,i}$ with $i \\geq 1$ . Scene flow in 3D space can be computed by", + "bbox": [ + 212, + 512, + 787, + 589 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {f} _ {\\mathrm {fwd}} ^ {x} = x (t + \\Delta t) - x (t), \\quad \\hat {f} _ {\\mathrm {bwd}} ^ {x} = x (t) - x (t - \\Delta t),\n$$\n", + "text_format": "latex", + "bbox": [ + 320, + 598, + 683, + 614 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {f} _ {\\mathrm {fwd}} ^ {y} = y (t + \\Delta t) - y (t), \\quad \\hat {f} _ {\\mathrm {bwd}} ^ {y} = y (t) - y (t - \\Delta t), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 323, + 619, + 784, + 637 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {f} _ {\\mathrm {fwd}} ^ {z} = z (t + \\Delta t) - z (t), \\quad \\hat {f} _ {\\mathrm {bwd}} ^ {z} = z (t) - z (t - \\Delta t),\n$$\n", + "text_format": "latex", + "bbox": [ + 323, + 641, + 683, + 657 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\Delta t$ is the difference between the timesteps of the two image frames. The scene flow is projected onto the 2D camera plane using", + "bbox": [ + 212, + 665, + 787, + 696 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {f} _ {\\{\\mathrm {fwd}, \\mathrm {bwd} \\}} ^ {xyz} = \\mathbf {J} \\left[ \\hat {f} _ {\\{\\mathrm {fwd}, \\mathrm {bwd} \\}} ^ {x}, \\hat {f} _ {\\{\\mathrm {fwd}, \\mathrm {bwd} \\}} ^ {y}, \\hat {f} _ {\\{\\mathrm {fwd}, \\mathrm {bwd} \\}} ^ {z} \\right] ^ {\\top}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 326, + 702, + 785, + 722 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\mathbf{J}$ is the Jacobian of the affine approximation of the projective transformation at the Gaussian center $\\mu$ (Eq. (4)). 
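Under the same illustrative conventions as the earlier sketches (reusing `coord_at` from the Eq. (2) snippet, with $\mathbf{J}$ the Jacobian of Eq. (4)), the per-Gaussian flow of Eqs. (7) and (8) can be written as follows; the per-pixel optical flow of Eq. (9), given next, then composites these per-Gaussian flows exactly like color:

```python
import numpy as np

def gaussian_flow_2d(w_xyz, t, dt, J):
    """Eqs. (7)-(8): finite differences of the center, pushed through J.

    w_xyz: (3, 2L+1) Fourier coefficients of one Gaussian (see coord_at above);
    J: the 3x3 Jacobian of Eq. (4), evaluated at the projected center."""
    mu = lambda s: np.array([coord_at(w, s) for w in w_xyz])
    f_fwd = mu(t + dt) - mu(t)               # Eq. (7), forward scene flow
    f_bwd = mu(t) - mu(t - dt)               # Eq. (7), backward scene flow
    return (J @ f_fwd)[:2], (J @ f_bwd)[:2]  # Eq. (8), image-plane components
```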
Regarding scene flows on the camera plane as RGB colors, point-based rendering can compute an optical flow of a pixel through $\\alpha$ -blending:", + "bbox": [ + 212, + 729, + 787, + 773 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {f} _ {\\mathrm {f w d}} = \\sum_ {i = 1} ^ {N} \\hat {f} _ {\\mathrm {f w d}, i} ^ {\\mathrm {x y z}} \\alpha_ {i} \\prod_ {j = 1} ^ {i - 1} (1 - \\alpha_ {j}). \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 390, + 795, + 785, + 835 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 227, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "K. Katsumata et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4c650dafc6ec69e20921646eb9c6418afeadefc0584eb17191737df572c413dc.jpg", + "image_caption": [ + "Fig. 3: Qualitative comparison on D-NeRF [45]. We highlight the differences by zoom view. Our method achieves competitive visual quality with strong baselines. While our method successfully reconstructs intricate details like hands, it causes a blurred sphere shape." + ], + "image_footnote": [], + "bbox": [ + 218, + 141, + 784, + 575 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The backward flow is calculated in the same way. The optical flow $\\hat{F}$ consists of the forward flows $\\hat{f}_{\\mathrm{fwd}}$ and backward flows $\\hat{f}_{\\mathrm{bwd}}$ for all pixels. We exclude the flow loss for the D-NeRF dataset because the teleport of the cameras between adjacent frames causes difficulties in calculating ground truth flows.", + "bbox": [ + 212, + 657, + 787, + 720 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4 Experiment", + "text_level": 1, + "bbox": [ + 215, + 744, + 349, + 763 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.1 Evaluation data", + "text_level": 1, + "bbox": [ + 215, + 781, + 369, + 794 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We evaluate our compact dynamic Gaussian representation using dynamic scene datasets: a synthetic one D-NeRF [45] and two real ones, i.e., DyNeRF [31] and HyperNeRF [44].", + "bbox": [ + 215, + 809, + 794, + 840 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "A Compact Dynamic 3D Gaussian", + "bbox": [ + 524, + 114, + 730, + 128 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 114, + 785, + 126 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/dd5399393ebdb39bfe1aa531f5e1d7d2eb3da185eb83c736c001caf11a1e7572.jpg", + "table_caption": [ + "Table 1: Quantitative results on the D-NeRF dataset [45]. Our method performs competitively against NeRF approaches in terms of visual quality and achieves the fastest rendering speed among the highest-performing methods. Results except the FPS of [17, 18, 20] are adopted from the original papers. The best and second best scores among competing methods are highlighted." + ], + "table_footnote": [], + "table_body": "
Method | PSNR↑ | MS-SSIM↑ | LPIPS↓ | FPS↑ | Train Time↓ | Mem↓
TiNeuVox-S [17] | 30.75 | 0.96 | 0.07 | 0.3 | 28 mins | 8MB
TiNeuVox-B [17] | 32.67 | 0.97 | 0.04 | 0.13 | 28 mins | 48MB
K-Planes [18] | 31.61 | 0.97 | - | 0.54 | 52 mins | ~497MB
V4D [20] | 33.72 | 0.98 | 0.02 | 1.47 | 6.9 hrs | 1.2GB
3DGS [26] | 20.51 | 0.89 | 0.07 | 170 | 6 mins | ~50MB
D-3DGS | 17.22 | 0.81 | 0.13 | 173 | 15 mins | ~913MB
Ours | 32.19 | 0.97 | 0.04 | 150 | 8 mins | ~159MB
", + "bbox": [ + 305, + 212, + 702, + 320 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/9d7be7ed78f5c4919310db7d60d24d5f352961dcc136cbaee4d77b28a0c5ceb3.jpg", + "table_caption": [ + "Table 2: Quantitative results on the DyNeRF datasets [31]. Results excepting FPS of [18, 20] are adopted from the original papers. The best and second best scores among competing methods (excepting 3DGS) are highlighted. While our method matches NeRFs in terms of rendering quality, our method matches 3DGS in terms of rendering speed. Besides, our method is 20 times more compact than Dynamic3DGaussians." + ], + "table_footnote": [], + "table_body": "
Method | PSNR↑ | MS-SSIM↑ | LPIPS↓ | FPS↑ | Train Time↓ | Mem↓
K-Planes [18] | 31.63 | 0.964 | - | 0.31 | 1.8 hrs | ~309MB
V4D | 28.96 | 0.937 | 0.17 | 0.11 | 4 hrs | 1.2GB
3DGS [26] | 20.94 | 0.800 | 0.29 | 109 | 20 mins | ~198MB
D-3DGS | 24.36 | 0.834 | 0.25 | 119 | 51 mins | ~2.3GB
Dynamic3DGaussians [38] | 27.79 | 0.869 | 0.23 | 51 | 2.1 hrs | ~6.6GB
Ours | 30.46 | 0.955 | 0.15 | 118 | 1 hr | ~338MB
", + "bbox": [ + 292, + 415, + 715, + 507 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "D-NeRF dataset [45]. This dataset comprises eight videos of varying lengths, ranging from 50 to 200 frames per video. The camera setup is designed to mimic a monocular camera setting by teleporting between adjacent timesteps. The test views are from novel camera positions. We train and render at the resolution of $800 \\times 800$ .", + "bbox": [ + 212, + 535, + 787, + 594 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "DyNeRF dataset [31]. The multi-camera dataset includes six 10-second videos captured at 30 FPS using 15-20 synchronized fixed cameras. For evaluation, a central camera is used, while training utilizes frames from the other cameras. The training and rendering resolution is set at $1,352 \\times 1,014$ .", + "bbox": [ + 212, + 595, + 787, + 655 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "HyperNeRF dataset [44]. This dataset encompasses videos ranging from 8 to $15\\mathrm{~s}$ , captured at 15 FPS using two Pixel 3 phones. The training and rendering processes are conducted at a resolution of $540 \\times 960$ .", + "bbox": [ + 212, + 656, + 787, + 700 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.2 Implementation details", + "text_level": 1, + "bbox": [ + 214, + 724, + 419, + 739 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We adhere to the experimental setup in the 3DGS paper [26]. The number of approximation terms of the Gaussian centers $L$ is set to 2 for the D-NeRF dataset. For the other datasets, $L$ is set to 5 from preliminary experiments. Our two-stage optimization process begins with an initial fitting of parameters, excluding the coefficients for Gaussian center and rotation. This initial stage spans 3,000 iterations and utilizes all training views in a static setting. Subsequently, we engage in a dynamic stage, adjusting all Gaussian", + "bbox": [ + 212, + 750, + 787, + 842 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "K. Katsumata et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/019dcf24817e81245bfa51ac7c218b9ce5a82f30621c0ee413ee714fe766d0a3.jpg", + "table_caption": [ + "Table 3: Quantitative results on the HyperNeRF dataset [44]. Our method demonstrates competitive performance in rendering quality across all scenes, surpassing the compared methods in rendering speed. Furthermore, our method is not inferior to the compared methods in training time and memory size." + ], + "table_footnote": [ + "Train time of HyperNeRF [44] is estimated from their paper's descriptions. Originally reported as 8 hours on 4 TPU v4s [25], the TPU v4 is slightly faster than the A100 GPU, and the A100 GPU is at least 1.5 times faster than the A6000 GPU." + ], + "table_body": "
Method | FPS↑ | Train Time↓ | Mem↓ | BROOM PSNR↑/SSIM↑ | 3D PRINTER PSNR↑/SSIM↑ | CHICKEN PSNR↑/SSIM↑ | PEEL BANANA PSNR↑/SSIM↑ | Mean PSNR↑/SSIM↑
HyperNeRF [44] | 0.36 | 48 hrs† | 15MB | 19.3/0.591 | 20.0/0.821 | 26.9/0.948 | 23.3/0.896 | 22.2/0.811
TiNeuVox-B [17] | 0.14 | 30 mins | 48MB | 21.5/0.686 | 22.8/0.841 | 28.3/0.947 | 24.4/0.873 | 24.3/0.837
V4D [20] | 0.15 | 7 hrs | 1.2GB | 22.1/0.669 | 23.2/0.835 | 28.4/0.929 | 25.2/0.873 | 24.7/0.827
Ours | 188 | 1 hr | ~720MB | 22.1/0.789 | 25.5/0.919 | 28.3/0.934 | 26.6/0.920 | 25.6/0.890
", + "bbox": [ + 217, + 212, + 785, + 297 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/8646b14d601e95ac912fc9ca3a0b37c477b69db65020e2bb5588c4ff8ef2e31b.jpg", + "image_caption": [ + "Fig. 4: Qualitative comparison on the DyNeRF dataset [31]. The differences are zoomed in." + ], + "image_footnote": [], + "bbox": [ + 228, + 323, + 776, + 470 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "parameters in 27,000 iterations. The entire optimization process encompasses 30,000 iterations. Following [26], $\\lambda$ is set to 0.2. We set the flow loss weight $\\lambda_{\\mathrm{flow}}$ to 1,000 and acquire ground truth flow through the RAFT pretrained on the Sintel dataset [7]. All experiments are conducted on a single RTX A6000 GPU.", + "bbox": [ + 214, + 526, + 787, + 588 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.3 Evaluation setup", + "text_level": 1, + "bbox": [ + 215, + 611, + 375, + 627 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Compared methods. We benchmark our method against the following baseline methods: TiNeuVox [17], K-Planes [18], V4D [20], HyperNeRF [44], 3D Gaussian Splatting (3DGS) [26], Dynamic3DGaussians [38], and a D-3DGS baseline. D-3DGS is the dynamic extension of 3DGS, which stores both position and rotation for each timestep.", + "bbox": [ + 212, + 637, + 787, + 698 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Evaluation metrics. We assess the methods using various metrics, including PSNR [24], SSIM [63], LPIPS [70], FPS, Training time, and memory used to store optimized parameters. Memory consumption includes the 3D Gaussian parameters, voxel/plane representation, and neural network parameters.", + "bbox": [ + 214, + 699, + 789, + 758 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.4 Experimental results", + "text_level": 1, + "bbox": [ + 215, + 782, + 403, + 797 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Quantitative results. The quantitative results on the D-NeRF dataset are detailed in Tab. 1. Our method demonstrates a performance comparable to TiNeuVox and K-Planes", + "bbox": [ + 214, + 809, + 785, + 839 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "A Compact Dynamic 3D Gaussian", + "bbox": [ + 524, + 114, + 730, + 128 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 769, + 114, + 784, + 126 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/2d1346f0d33ed12f3ce5ba01ba407a5ba140e4062cbebc5cb565decdb9f99f69.jpg", + "image_caption": [ + "Fig. 5: Qualitative comparison on HyperNeRF [44]. Our method offers sharp results." + ], + "image_footnote": [], + "bbox": [ + 225, + 142, + 776, + 282 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Table 4: Per-scene quantitative comparison on D-NeRF scenes of different $L$ , which stands for the number of harmonic terms in the Fourier approximation, and other design choices. The highest mean score is achieved with $L = 2$ , but increasing the complexity $L$ (the number of coefficients) improves visual quality in some scenes (JUMPING JACKS and T-REX). The spline approximations bring marginal improvements in some scenes but slower rendering. The time-varying scale (the last row) also provides minor gains in some cases and increases the memory size. The setting reported in Fig. 
3 is highlighted with a gray background.", + "bbox": [ + 215, + 327, + 787, + 426 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/df3d03184746249722433f929dd3d8ed6119f13587c3874e0281b6c07578f790.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
STAND UP | JACKS | BALLS | LEGO | WARRIOR | HOOK | T-REX | MUTANT | Mean
PSNR SSIM | PSNR SSIM | PSNR SSIM | PSNR SSIM | PSNR SSIM | PSNR SSIM | PSNR SSIM | PSNR SSIM | PSNR SSIM
L = 1 | 40.21 | 0.994 | 27.22 | 0.952 | 30.27 | 0.972 | 24.26 | 0.940 | 32.42
L = 2 | 39.10 | 0.993 | 30.95 | 0.980 | 33.29 | 0.984 | 23.15 | 0.922 | 34.15
L = 3 | 38.09 | 0.990 | 32.78 | 0.984 | 32.54 | 0.979 | 22.12 | 0.881 | 35.36
L = 4 | 35.83 | 0.984 | 32.93 | 0.982 | 30.39 | 0.969 | 21.06 | 0.855 | 34.38
L = 5 | 32.89 | 0.976 | 30.71 | 0.977 | 27.68 | 0.959 | 20.20 | 0.825 | 32.64
Linear | 27.77 | 0.973 | 23.10 | 0.921 | 26.68 | 0.959 | 22.27 | 0.922 | 17.39
Quadratic | 29.40 | 0.978 | 23.44 | 0.926 | 27.51 | 0.963 | 22.45 | 0.924 | 17.70
Cubic | 29.98 | 0.979 | 23.71 | 0.928 | 27.76 | 0.964 | 22.37 | 0.921 | 18.04
Spline (5) | 38.87 | 0.993 | 31.96 | 0.983 | 32.96 | 0.980 | 23.09 | 0.918 | 34.46
Spline (6) | 38.00 | 0.992 | 31.84 | 0.984 | 32.81 | 0.980 | 22.25 | 0.903 | 35.24
Linear (scale) | 38.32 | 0.993 | 30.91 | 0.980 | 32.55 | 0.984 | 23.87 | 0.930 | 34.43
", + "bbox": [ + 218, + 436, + 784, + 579 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "in terms of visual quality as measured by PSNR, SSIM, and LPIPS. Notably, it excels in training time, FPS, and memory size, achieving a rendering speed that is $300 \\times$ faster than that of K-Planes. Furthermore, our method surpasses both 3DGS and D-3DGS in terms of visual quality without compromising rendering speed. In the DyNeRF scenes experiment, detailed in Tab. 2, while our method does not exceed the baseline in reconstruction quality, it shows a substantial improvement in FPS. Since the DyNeRF scenes contain multi-view data, the D-3DGS baseline naturally improves static 3DGS, unlike monocular scenes. Our method even attains rendering speeds that exceed real-time performance at a high resolution of $1,354 \\times 1,014$ . For the challenging HyperNeRF dataset, which is captured by only two moving cameras and referenced in Tab. 3, our method not only demonstrates rapid rendering speeds but also achieves higher average PSNR and SSIM scores than the compared methods.", + "bbox": [ + 212, + 611, + 787, + 792 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Qualitative results. Figures 3 to 5 show that our method yields faithful reconstruction for the dynamic scenes. Unlike the structured representation, which has a fixed size of grids, the unstructured nature of 3D Gaussians enables adaptive control of the expres", + "bbox": [ + 212, + 794, + 787, + 840 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "K. Katsumata et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/c41a63e6504a068c6b8087083f95eaa993f6898c57b2029c9990377c962f6808.jpg", + "image_caption": [ + "Ours without $\\mathcal{L}_{\\mathrm{flow}}$", + "Ours" + ], + "image_footnote": [], + "bbox": [ + 238, + 157, + 763, + 277 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/d36c05713d738724be5412fe9c680427ac7ddb81f84b165665bd4a83725f0cad.jpg", + "image_caption": [ + "Fig.6: Qualitative comparison of disabled and enabled flow loss on DyNeRF. We highlight the difference by zoom view.", + "Fig.7: Composition of two scenes. Our method allows for the addition of adding 3D objects represented 3D Gaussians into a 3D Gaussian scene. We highlight the added object." + ], + "image_footnote": [], + "bbox": [ + 272, + 335, + 730, + 474 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "siveness of the representation, delivering sharper renderings. As seen with the results for BOUNCING BALLS, since our method has discrete primitives, it sometimes fails to reproduce smooth boundaries.", + "bbox": [ + 212, + 547, + 784, + 593 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Effect of the number of parameters $L$ . Table 4 shows per-scene PSNR and SSIM scores of K-Planes and our method with the different $L$ (Eq. (2)). It is observed that the optimal $L$ for novel view synthesis varies from scene to scene, highlighting the necessity for complex approximations to capture intricate motions effectively.", + "bbox": [ + 212, + 595, + 784, + 655 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Effect of flow loss. Additionally, visual comparisons drawn from our method without and with the flow loss (Fig. 
6) reveal that incorporating the flow loss mitigates ghostly artifacts and significantly enhances the accuracy of color reconstruction.", + "bbox": [ + 212, + 657, + 784, + 702 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Design choice. Our method is very flexible and allows for the use of arbitrary approximation functions and the choice of time-varying parameters. Table 4 also shows the experimental results of other options for the design of the model to facilitate future dynamic scene reconstruction. The linear, quadratic, and cubic baselines approximate time-varying 3D positions with polynomials of degrees one, two, and three, respectively. The Spline (5) and Spline (6) baselines approximate 3D positions with spline approximations of five and six points, respectively. The linear (scale) baseline approximates time-varying scales with the linear approximation in addition to positions and rotations. Although a Spline baseline gives minor performance gains in some cases, it", + "bbox": [ + 212, + 704, + 785, + 839 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "A Compact Dynamic 3D Gaussian", + "bbox": [ + 524, + 114, + 730, + 128 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 769, + 114, + 784, + 126 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "achieves only 91 FPS, rendering more slowly than the proposed method. The linear (scale) baseline does not show performance improvements that would justify its additional parameters. For time-varying 3D rotation, we also consider the approximation with slerp. Since it does not offer performance gains while causing numerical instability for static Gaussians, we use linear approximation for rotation. For faster rendering and compact representation, we use the Fourier approximation for 3D positions and model 3D positions and rotations as time-varying parameters.", + "bbox": [ + 215, + 146, + 785, + 252 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Scene composition. Since our dynamic representation still consists of pure 3D Gaussians, the learned representation facilitates straightforward editing of Gaussians. We demonstrate the composition of two scenes with our representation. Figure 7 illustrates this by combining the MUTANT scene from the D-NeRF dataset with the SEARED STEAK scene from the DyNeRF dataset. This demonstrates the capability of our method in editing dynamic 3D scenes.", + "bbox": [ + 215, + 252, + 785, + 343 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Discussion and Conclusion", + "text_level": 1, + "bbox": [ + 217, + 367, + 470, + 383 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Limitations and future directions. Our dynamic Gaussians are defined over the entire duration of the dynamic scene. This representation implicitly assumes that Gaussians exist at all times, which enables us to naturally model the rigid and non-rigid deformation in the scene. On the other hand, modeling changes in topology, i.e., the appearance and disappearance of Gaussians (e.g., for fluids), remains difficult. Static colors also make it difficult to model changes in illumination and color. The reconstruction capability of the method depends on the number of parameters: the scene representation is compact, but rendering quality degrades for very long sequences unless additional memory is spent for proper reconstruction. 
To overcome these limitations, modeling the lifetime of Gaussians, for example by adding start and end time parameters, would allow changes in scene topology to be modeled, and adaptively deciding the flexibility of the approximation would lead to better trade-offs between quality and memory size.", + "bbox": [ + 215, + 401, + 785, + 580 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Our Gaussian representation sacrifices the continuity and smoothness inherent in neural field-based volume rendering. Distilling NeRFs into our proposed representation in a manner similar to PlenOctree [69] is a potential extension of our method, promising to enhance rendering quality while maintaining the fast rendering advantage.", + "bbox": [ + 215, + 583, + 785, + 642 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Conclusion. We present a compact dynamic 3D Gaussian representation enabling faithful reconstruction and real-time rendering of dynamic scenes. We propose a representation for the position and rotation of 3D Gaussians as a function of time for modeling the motion of the scene. The parameterized functions of time introduce memory efficiency and robustness to the number of views per timestep. Furthermore, we introduce the flow loss constraining the scene flow of the learned Gaussian representation with the ground truth flow. Our experiments on synthetic and real datasets show that the proposed method achieves real-time dynamic scene rendering even at high resolutions.", + "bbox": [ + 215, + 642, + 785, + 763 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgements. This study was supported by JSPS/MEXT KAKENHI Grant Numbers JP24K20830, JP23KJ0381, JP23K28139, and JP22H05015, ROIS NII Open Collaborative Research 2024-24S1201, and the Institute of AI and Beyond of the University of Tokyo. The authors would like to thank D. Horita for carefully proofreading the manuscript and N. Umetani for providing helpful advice on the method's limitations.", + "bbox": [ + 215, + 763, + 785, + 839 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "K. Katsumata et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 217, + 143, + 310, + 160 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Abdal, R., Yifan, W., Shi, Z., Xu, Y., Po, R., Kuang, Z., Chen, Q., Yeung, D.Y., Wetzstein, G.: Gaussian Shell Maps for Efficient 3D Human Generation. In: CVPR. pp. 9441-9451 (2024)", + "2. Akhter, I., Sheikh, Y., Khan, S., Kanade, T.: Nonrigid Structure from Motion in Trajectory Space. In: NeurIPS (2008)", + "3. An, S., Xu, H., Shi, Y., Song, G., Ogras, U.Y., Luo, L.: PanoHead: Geometry-Aware 3D Full-Head Synthesis in 360 degree. In: CVPR. pp. 20950-20959 (2023)", + "4. Attal, B., Huang, J.B., Richardt, C., Zollhöefer, M., Kopf, J., O'Toole, M., Kim, C.: HyperReel: High-Fidelity 6-DoF Video with Ray-Conditioned Sampling. In: CVPR. pp. 16610-16620 (2023)", + "5. Bae, J., Kim, S., Yun, Y., Lee, H., Bang, G., Uh, Y.: Per-Gaussian Embedding-Based Deformation for Deformable 3D Gaussian Splatting. arXiv preprint arXiv:2404.03613 (2024)", + "6. Barron, J.T., Mildenhall, B., Tancik, M., Hedman, P., Martin-Brualla, R., Srinivasan, P.P.: Mip-NeRF: A Multiscale Representation for Anti-Aliasing Neural Radiance Fields. In: CVPR. pp. 5855-5864 (2021)", + "7. 
Butler, D.J., Wulff, J., Stanley, G.B., Black, M.J.: A Naturalistic Open Source Movie for Optical Flow Evaluation. In: ECCV. pp. 611-625 (2012)", + "8. Cabral, B., Max, N., Springmeyer, R.: Bidirectional Reflection Functions from Surface Bump Maps. SIGGRAPH 21(4), 273-281 (1987)", + "9. Cao, A., Johnson, J.: HexPlane: A Fast Representation for Dynamic Scenes. In: CVPR. pp. 130-141 (2023)", + "10. Chan, E.R., Lin, C.Z., Chan, M.A., Nagano, K., Pan, B., De Mello, S., Gallo, O., Guibas, L.J., Tremblay, J., Khamis, S., et al.: Efficient Geometry-Aware 3D Generative Adversarial Networks. In: CVPR. pp. 16123-16133 (2022)", + "11. Chen, A., Xu, Z., Geiger, A., Yu, J., Su, H.: TensoRF: Tensorial Radiance Fields. In: ECCV. pp. 333-350 (2022)", + "12. Chen, Z., Wang, F., Liu, H.: Text-to-3D Using Gaussian Splatting. arXiv preprint arXiv:2309.16585 (2023)", + "13. Das, D., Wewer, C., Yunus, R., Ilg, E., Lenssen, J.E.: Neural Parametric Gaussians for Monocular Non-Rigid Object Reconstruction. In: CVPR. pp. 10715-10725 (2024)", + "14. Dong, Z., Chen, X., Yang, J., Black, M.J., Hilliges, O., Geiger, A.: AG3D: Learning to Generate 3D Avatars from 2D Image Collections. In: ICCV. pp. 14916-14927 (2023)", + "15. Duan, Y., Wei, F., Dai, Q., He, Y., Chen, W., Chen, B.: 4D-Rotor Gaussian Splatting: Towards Efficient Novel View Synthesis for Dynamic Scenes. ACM TOG (2024)", + "16. Duisterhof, B.P., Mandi, Z., Yao, Y., Liu, J.W., Shou, M.Z., Song, S., Ichnowski, J.: MD-Splatting: Learning Metric Deformation from 4D Gaussians in Highly Deformable Scenes. arXiv preprint arXiv:2312.00583 (2023)", + "17. Fang, J., Yi, T., Wang, X., Xie, L., Zhang, X., Liu, W., Nießner, M., Tian, Q.: Fast Dynamic Radiance Fields with Time-Aware Neural Voxels. In: SIGGRAPH Asia (2022)", + "18. Fridovich-Keil, S., Meanti, G., Warburg, F.R., Recht, B., Kanazawa, A.: K-Planes: Explicit Radiance Fields in Space, Time, and Appearance. In: CVPR. pp. 12479-12488 (2023)", + "19. Fridovich-Keil, S., Yu, A., Tancik, M., Chen, Q., Recht, B., Kanazawa, A.: Plenoxels: Radiance Fields Without Neural Networks. In: CVPR. pp. 5501-5510 (2022)", + "20. Gan, W., Xu, H., Huang, Y., Chen, S., Yokoya, N.: V4D: Voxel for 4D Novel View Synthesis. IEEE TVCG 30(2), 1579-1591 (2024)", + "21. Gao, C., Saraf, A., Kopf, J., Huang, J.B.: Dynamic View Synthesis from Dynamic Monocular Video. In: ICCV. pp. 5712-5721 (2021)", + "22. Guo, Z., Zhou, W., Li, L., Wang, M., Li, H.: Motion-Aware 3D Gaussian Splatting for Efficient Dynamic Scene Reconstruction. arXiv preprint arXiv:2403.11447 (2024)" + ], + "bbox": [ + 225, + 176, + 785, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "A Compact Dynamic 3D Gaussian", + "bbox": [ + 524, + 114, + 730, + 128 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 769, + 114, + 784, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "23. He, H., Yang, Z., Li, S., Dai, B., Wu, W.: OrthoPlanes: A Novel Representation for Better 3D-Awareness of GANs. In: ICCV. pp. 22996-23007 (2023)", + "24. Huynh-Thu, Q., Ghanbari, M.: Scope of Validity of PSNR in Image/Video Quality Assessment. Electronics Letters 44(13), 800-801 (2008)", + "25. Jouppi, N., Kurian, G., Li, S., Ma, P., Nagarajan, R., Nai, L., Patil, N., Subramanian, S., Swing, A., Towles, B., Young, C., Zhou, X., Zhou, Z., Patterson, D.A.: TPU v4: An Optically Reconfigurable Supercomputer for Machine Learning with Hardware Support for Embeddings. 
In: Proceedings of the 50th Annual International Symposium on Computer Architecture. pp. 1-14 (2023)", + "26. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3D Gaussian Splitting for Real-Time Radiance Field Rendering. ACM TOG 42(4), 1-14 (2023)", + "27. Keselman, L., Hebert, M.: Approximate Differentiable Rendering with Algebraic Surfaces. In: ECCV. pp. 596-614 (2022)", + "28. Kopanas, G., Leimkuhler, T., Rainer, G., Jambon, C., Drettakis, G.: Neural Point Catacaustics for Novel-View Synthesis of Reflections. ACM TOG 41(6), 1-15 (2022)", + "29. Kopanas, G., Philip, J., Leimkuhler, T., Drettakis, G.: Point-Based Neural Rendering with Per-View Optimization. In: Comput. Graph. Forum. vol. 40, pp. 29-43 (2021)", + "30. Lei, J., Wang, Y., Pavlakos, G., Liu, L., Daniilidis, K.: GART: Gaussian Articulated Template Models. In: CVPR. pp. 19876-19887 (2024)", + "31. Li, T., Slavcheva, M., Zollhöefer, M., Green, S., Lassner, C., Kim, C., Schmidt, T., Lovegrove, S., Goesele, M., Newcombe, R., et al.: Neural 3D Video Synthesis from Multi-View Video. In: CVPR. pp. 5521-5531 (2022)", + "32. Li, Z., Chen, Z., Li, Z., Xu, Y.: Spacetime Gaussian Feature Splatting for Real-Time Dynamic View Synthesis. arXiv preprint arXiv:2312.16812 (2023)", + "33. Li, Z., Niklaus, S., Snavely, N., Wang, O.: Neural Scene Flow Fields for Space-Time View Synthesis of Dynamic Scenes. In: CVPR. pp. 6498-6508 (2021)", + "34. Li, Z., Wang, Q., Cole, F., Tucker, R., Snavely, N.: DynIBaR: Neural Dynamic Image-Based Rendering. In: CVPR. pp. 4273-4284 (2023)", + "35. Liang, Y., Khan, N., Li, Z., Nguyen-Phuoc, T., Lanman, D., Tompkin, J., Xiao, L.: Gaufre: Gaussian Deformation Fields for Real-Time Dynamic Novel View Synthesis. arXiv preprint arXiv:2312.11458 (2023)", + "36. Lu, Z., Guo, X., Hui, L., Chen, T., Yang, M., Tang, X., Zhu, F., Dai, Y.: 3D Geometry-Aware Deformable Gaussian Splitting for Dynamic View Synthesis. In: CVPR. pp. 8900-8910 (2024)", + "37. Lucas, B.D., Kanade, T.: An Iterative Image Registration Technique with an Application to Stereo Vision. In: IJCAI. pp. 674-679 (1981)", + "38. Luiten, J., Kopanas, G., Leibe, B., Ramanan, D.: Dynamic 3D Gaussians: Tracking by Persistent Dynamic View Synthesis. In: 3DV (2024)", + "39. Mayer, N., Ilg, E., Hausser, P., Fischer, P., Cremers, D., Dosovitskiy, A., Brox, T.: A Large Dataset to Train Convolutional Networks for Disparity, Optical Flow, and Scene Flow Estimation. In: CVPR. pp. 4040-4048 (2016)", + "40. Menze, M., Geiger, A.: Object Scene Flow for Autonomous Vehicles. In: CVPR. pp. 3061-3070 (2015)", + "41. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. In: ECCV (2020)", + "42. Müller, T., Evans, A., Schied, C., Keller, A.: Instant Neural Graphics Primitives with a Multiresolution Hash Encoding. ACM TOG 41(4), 1-15 (2022)", + "43. Park, K., Sinha, U., Barron, J.T., Bouaziz, S., Goldman, D.B., Seitz, S.M., Martin-Brualla, R.: Nerfies: Deformable Neural Radiance Fields. In: ICCV. pp. 5865-5874 (2021)" + ], + "bbox": [ + 215, + 147, + 785, + 839 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "K. Katsumata et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "44. 
Park, K., Sinha, U., Hedman, P., Barron, J.T., Bouaziz, S., Goldman, D.B., Martin-Brualla, R., Seitz, S.M.: HyperNeRF: a Higher-Dimensional Representation for Topologically Varying Neural Radiance Fields. ACM TOG 40(6), 1-12 (2021)", + "45. Pumarola, A., Corona, E., Pons-Moll, G., Moreno-Noguer, F.: D-NeRF: Neural Radiance Fields for Dynamic Scenes. In: CVPR. pp. 10318-10327 (2021)", + "46. Qian, S., Kirschstein, T., Schoneveld, L., Davoli, D., Giebenhain, S., Nießner, M.: GaussianAvatars: Photorealistic Head Avatars with Rigged 3D Gaussians. In: CVPR. pp. 20299-20309 (2024)", + "47. Qian, Z., Wang, S., Mihajlovic, M., Geiger, A., Tang, S.: 3DGS-Avatar: Animatable Avatars via Deformable 3D Gaussian Splatting. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5020-5030 (2024)", + "48. Reiser, C., Peng, S., Liao, Y., Geiger, A.: KiloNeRF: Speeding up Neural Radiance Fields with Thousands of Tiny MLPs. In: ICCV. pp. 14335-14345 (2021)", + "49. Reiser, C., Szeliski, R., Verbin, D., Srinivasan, P., Mildenhall, B., Geiger, A., Barron, J., Hedman, P.: MERF: Memory-Efficient Radiance Fields for Real-Time View Synthesis in Unbounded Scenes. ACM TOG 42(4), 1-12 (2023)", + "50. Schonberger, J.L., Zheng, E., Frahm, J.M., Pollefeys, M.: Pixelwise View Selection for Unstructured Multi-View Stereo. In: ECCV. pp. 501-518 (2016)", + "51. Schonberger, J.L., Frahm, J.M.: Structure-from-Motion Revisited. In: CVPR (2016)", + "52. Seitz, S.M., Curless, B., Diebel, J., Scharstein, D., Szeliski, R.: A Comparison and Evaluation of Multi-View Stereo Reconstruction Algorithms. In: CVPR. pp. 519-528 (2006)", + "53. Shao, R., Zheng, Z., Tu, H., Liu, B., Zhang, H., Liu, Y.: Tensor4D: Efficient Neural 4D Decomposition for High-Fidelity Dynamic Reconstruction and Rendering. In: CVPR. pp. 16632-16642 (2023)", + "54. Song, L., Chen, A., Li, Z., Chen, Z., Chen, L., Yuan, J., Xu, Y., Geiger, A.: NeRFPlayer: A Streamable Dynamic Scene Representation with Decomposed Neural Radiance Fields. IEEE TVCG 29(5), 2732-2742 (2023)", + "55. Tang, J., Ren, J., Zhou, H., Liu, Z., Zeng, G.: DreamGaussian: Generative Gaussian Splitting for Efficient 3D Content Creation. arXiv preprint arXiv:2309.16653 (2023)", + "56. Teed, Z., Deng, J.: RAFT: Recurrent All-Pairs Field Transforms for Optical Flow. In: ECCV. pp. 402-419 (2020)", + "57. Tewari, A., Thies, J., Mildenhall, B., Srinivasan, P., Tretschk, E., Wang, Y., Lassner, C., Sitzmann, V., Martin-Brualla, R., Lombardi, S., Simon, T., Theobalt, C., Nießner, M., Barron, J.T., Wetzstein, G., Zollhöefer, M., Golyanik, V.: Advances in Neural Rendering. In: Comput. Graph. Forum. vol. 41, pp. 703-735 (2022)", + "58. Tian, F., Du, S., Duan, Y.: MonoNeRF: Learning a Generalizable Dynamic Radiance Field from Monocular Videos. In: ICCV. pp. 17903-17913 (2023)", + "59. Tretschk, E., Tewari, A., Golyanik, V., Zollhöefer, M., Lassner, C., Theobalt, C.: Non-Rigid Neural Radiance Fields: Reconstruction and Novel View Synthesis of a Dynamic Scene From Monocular Video. In: ICCV (2021)", + "60. Tretschk, E., Tewari, A., Golyanik, V., Zollhöfer, M., Lassner, C., Theobalt, C.: Non-Rigid Neural Radiance Fields: Reconstruction and Novel View Synthesis of a Dynamic Scene from Monocular Video. In: ICCV. pp. 12959-12970 (2021)", + "61. Vedula, S., Baker, S., Rander, P., Collins, R., Kanade, T.: Three-Dimensional Scene Flow. In: CVPR. pp. 722-729 (1999)", + "62. Vedula, S., Rander, P., Collins, R., Kanade, T.: Three-Dimensional Scene Flow. 
IEEE TPAMI 27(3), 475-480 (2005)", + "63. Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image Quality Assessment: From Error Visibility to Structural Similarity. IEEE TIP 13(4), 600-612 (2004)" + ], + "bbox": [ + 212, + 146, + 787, + 839 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "A Compact Dynamic 3D Gaussian", + "bbox": [ + 524, + 114, + 730, + 128 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "64. Wu, G., Yi, T., Fang, J., Xie, L., Zhang, X., Wei, W., Liu, W., Tian, Q., Wang, X.: 4D Gaussian Splitting for Real-Time Dynamic Scene Rendering. arXiv preprint arXiv:2310.08528 (2023)", + "65. Xie, Y., Takikawa, T., Saito, S., Litany, O., Yan, S., Khan, N., Tombari, F., Tompkin, J., Sitzmann, V., Sridhar, S.: Neural Fields in Visual Computing and Beyond. Comput. Graph. Forum (2022)", + "66. Xu, D., Yuan, Y., Mardani, M., Liu, S., Song, J., Wang, Z., Vahdat, A.: AGG: Amortized Generative 3D Gaussians for Single Image to 3D. arXiv preprint arXiv:2401.04099 (2024)", + "67. Yang, Z., Yang, H., Pan, Z., Zhu, X., Zhang, L.: Real-Time Photorealistic Dynamic Scene Representation and Rendering with 4D Gaussian Splatting. arXiv preprint arXiv:2310.10642 (2023)", + "68. Yi, T., Fang, J., Wu, G., Xie, L., Zhang, X., Liu, W., Tian, Q., Wang, X.: GaussianDreamer: Fast Generation from Text to 3D Gaussian Splatting with Point Cloud Priors. arXiv preprint arXiv:2310.08529 (2023)", + "69. Yu, A., Li, R., Tancik, M., Li, H., Ng, R., Kanazawa, A.: PlenOctrees for Real-Time Rendering of Neural Radiance Fields. In: ICCV. pp. 5752-5761 (2021)", + "70. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The Unreasonable Effectiveness of Deep Features as a Perceptual Metric. In: CVPR. pp. 586-595 (2018)", + "71. Zheng, E., Ji, D., Dunn, E., Frahm, J.M.: Sparse Dynamic 3D Reconstruction from Unsynchronized Videos. In: CVPR. pp. 4435-4443 (2015)", + "72. Zielonka, W., Bagautdinov, T., Saito, S., Zollhöfer, M., Thies, J., Romero, J.: Drivable 3D Gaussian Avatars. arXiv preprint arXiv:2311.08581 (2023)", + "73. Zwicker, M., Pfister, H., Van Baar, J., Gross, M.: EWA Splitting. IEEE TVCG 8(3), 223-238 (2002)" + ], + "bbox": [ + 215, + 146, + 787, + 479 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "K. 
Katsumata et al.", + "bbox": [ + 271, + 114, + 387, + 127 + ], + "page_idx": 17 + } +] \ No newline at end of file diff --git a/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/7472726a-f5ca-4354-bd16-63b6aa3c1be0_model.json b/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/7472726a-f5ca-4354-bd16-63b6aa3c1be0_model.json new file mode 100644 index 0000000000000000000000000000000000000000..7b0a9734fb79168cabc7937a0a53eca3a86646d8 --- /dev/null +++ b/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/7472726a-f5ca-4354-bd16-63b6aa3c1be0_model.json @@ -0,0 +1,2590 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.25, + 0.141, + 0.756, + 0.187 + ], + "angle": 0, + "content": "A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis" + }, + { + "type": "text", + "bbox": [ + 0.309, + 0.213, + 0.695, + 0.228 + ], + "angle": 0, + "content": "Kai Katsumata, Duc Minh Vo, and Hideki Nakayama" + }, + { + "type": "text", + "bbox": [ + 0.281, + 0.24, + 0.724, + 0.27 + ], + "angle": 0, + "content": "The University of Tokyo, Japan {katsumata, vm Duc, nakayama}@nlab.ci.i.u-tokyo.ac.jp" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.304, + 0.744, + 0.498 + ], + "angle": 0, + "content": "Abstract. 3D Gaussian Splatting (3DGS) has shown remarkable success in synthesizing novel views given multiple views of a static scene. Yet, 3DGS faces challenges when applied to dynamic scenes because 3D Gaussian parameters need to be updated per timestep, requiring a large amount of memory and at least a dozen observations per timestep. To address these limitations, we present a compact dynamic 3D Gaussian representation that models positions and rotations as functions of time with a few parameter approximations while keeping other properties of 3DGS including scale, color, and opacity invariant. Our method can dramatically reduce memory usage and relax a strict multi-view assumption. In our experiments on monocular and multi-view scenarios, we show that our method not only matches state-of-the-art methods, often linked with slower rendering speeds, in terms of high rendering quality, but also significantly surpasses them by achieving a rendering speed of 118 frames per second at a resolution of \\(1,352 \\times 1,014\\) on a single GPU." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.522, + 0.357, + 0.538 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.554, + 0.793, + 0.719 + ], + "angle": 0, + "content": "The landscape of novel view synthesis of scenes captured through multiple images/videos has undergone a revolutionary transformation, owing principally to major breakthroughs in neural radiance field (NeRF) approaches [6,41,57]. Although they achieve remarkable visual quality, particularly in dynamic scenes [4,21,31,34,45], NeRFs inevitably confront hurdles in terms of high-speed training and rendering [41,43,44,48]. This limitation is attributed to their reliance on multi-layer perceptrons (MLPs). Recently, 3D Gaussian Splatting (3DGS) [26] introduced a differentiable 3D Gaussian representation and point-based rasterization, signaling a departure from neural network reliance. 3DGS has emerged as a promising solution that not only accelerates training and rendering processes but also delivers high-quality rendered scenes, rivaling the levels set by NeRF [41] on static scenes." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.72, + 0.79, + 0.841 + ], + "angle": 0, + "content": "Nonetheless, in the realm of dynamic scene synthesis, 3DGS faces challenges related to memory usage and the need for many observations [38]. In particular, a significant number of 3D Gaussian parameters must be stored per timestep, resulting in a non-negligible increase in memory usage and the need for numerous observations per timestep. This poses challenges in monocular or few-view setups, as their strict multiview assumption demands advanced facilities or expertise, limiting flexibility in capturing setups. Exploring 3DGS without multi-view assumption enables dynamic view synthesis with a simple and easy camera setup, which is the primary goal of this study." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.116, + 0.388, + 0.128 + ], + "angle": 0, + "content": "K. Katsumata et al." + }, + { + "type": "image", + "bbox": [ + 0.218, + 0.144, + 0.789, + 0.27 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.277, + 0.789, + 0.373 + ], + "angle": 0, + "content": "Fig. 1: We show examples of novel view synthesis on the MUTANT scene in the D-NeRF dataset, visual quality (PSNR), rendering speed (FPS), and memory used to store optimized parameters. Our method yields reconstruction fidelity competitive with SoTAs with real-time rendering, achieving \\(100 \\times\\) faster rendering speed than V4D and reasonable memory size. Non-obvious differences in quality are highlighted. **Bold typeface number** indicates the best result among the methods with the competitive rendering quality (excepting for 3DGS), and the **underline** one does the second best." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.38, + 0.789, + 0.681 + ], + "angle": 0, + "content": "To achieve memory-efficient real-time dynamic view synthesis from monocular and multi-view videos, we present a compact dynamic 3D Gaussian representation, containing time-invariant and time-varying parameters to capture dynamic motion effectively. Similarly to [26, 38], we use scaling factors in the covariance matrix, opacity, and color as time-invariant parameters. As modeling the change in positions over time is important to represent dynamic scenes [43-45], we express each 3D Gaussian position as a function of time to model the temporal change in the position. We also represent 3D Gaussian rotation as a time-varying parameter because the rotation of the objects in the world can be typically changed. Inspired by the studies that model motion as periodic [2, 71], we fit the position using the Fourier approximation. We fit the rotation using the linear approximation. The time-varying parameters make our representation dynamic, meaning that a 3D Gaussian moves and rotates over time. Moreover, as we use a function with a few parameters to represent the position, the small degree of freedom contributes to the smoothness of reconstructed scenes, enhancing the robustness against unseen views. Crucially, the memory consumption of our representation is solely determined by the number of 3D Gaussians and the number of the approximation function parameters, remaining independent of input length. 
Beyond optimizing Gaussian representations through image-level reconstruction, we further enhance temporal consistency by supervising the Gaussian with optical flow obtained from input videos. This ensures high-quality reconstruction and facilitates the generalization of the representation." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.682, + 0.789, + 0.803 + ], + "angle": 0, + "content": "Our experiments on dynamic datasets (D-NeRF [45], DyNeRF [31], and HyperNeRF [44]) demonstrate the effectiveness of optimizing our dynamic 3D Gaussian from both monocular and multi-view videos, showing that our proposed method achieves rendering quality that rivals that of previous NeRFs [17, 18, 20]. In addition to faithful rendering quality, the proposed method achieves rendering speeds similar to a fast radiance field method [26] while avoiding large memory increases caused by a dynamic extension (see Fig. 1). Finally, we show an editing application enabled by the explicit property of 3D Gaussian representations. In summary, our contributions are as follow:" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.81, + 0.791, + 0.842 + ], + "angle": 0, + "content": "- We present a compact dynamic 3D Gaussian representation with time-varying Gaussian parameters equipped with basis functions for representing dynamic scenes." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.525, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "A Compact Dynamic 3D Gaussian" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.116, + 0.787, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.147, + 0.786, + 0.191 + ], + "angle": 0, + "content": "- As a 3D Gaussian representation is defined over all the timesteps, the 3D Gaussian parameters can be optimized with the frames at all the timesteps, enabling dynamic scene reconstruction from monocular or few-view videos." + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.192, + 0.786, + 0.237 + ], + "angle": 0, + "content": "- Our dynamic 3D Gaussian representation facilitates real-time high-quality dynamic scene rendering of high-resolution images of \\(1,352 \\times 1,014\\) with a frame rate of 118 FPS using a single GPU." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.261, + 0.367, + 0.277 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.293, + 0.787, + 0.339 + ], + "angle": 0, + "content": "We briefly overview radiance fields for dynamic scenes and discuss recent efficient explicit representation methods (grid-, plane-, hash-, and point-based), contextualizing our work within real-time dynamic view synthesis." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.36, + 0.424, + 0.376 + ], + "angle": 0, + "content": "2.1 Dynamic view synthesis" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.386, + 0.789, + 0.598 + ], + "angle": 0, + "content": "Applications in virtual reality and computer vision often need reconstruction of dynamic scenes. Several works extend NeRF [41] to handle dynamic scenes in multi-view or monocular setups by time-varying NeRF [21, 31, 45, 60]. The regularization techniques for temporal smoothness enable suitable scene representations from monocular videos [33]. Additional sensory information is also useful for spatio-temporal regularization. Some attempts [21, 33, 58] employ depth or flow, which are observed or predicted with external networks to reconstruct from sparse observations. 
Deformation-based approaches [43, 44, 54, 59], another research direction in dynamic reconstruction, combine static NeRF and deformation fields. Although tremendous efforts show high visual quality for dynamic view synthesis, the frequent querying of MLP in NeRFs results in the drawback of slow optimization and rendering [65]. Our study aims to enable real-time dynamic view synthesis with high visual quality. We aim to extend 3DGS to dynamic scene reconstruction to achieve high-speed rendering while maintaining the rendering quality from sparse training views." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.619, + 0.429, + 0.634 + ], + "angle": 0, + "content": "2.2 Explicit Radiance Fields" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.644, + 0.789, + 0.842 + ], + "angle": 0, + "content": "Recent studies [11, 19, 69] have addressed the issue in implicit models (i.e., NeRFs) by exploring explicit models, reducing optimization and rendering time. Plenoxels [19] directly optimizes 3D grid representation instead of neural networks. Generally, explicit models sacrifice visual quality for fast training time [19]. Hybrid approaches [11, 17, 18, 20, 42, 53] aim to achieve better trade-offs between training time and visual quality. Instant-NGP allows for a compact MLP by exploiting a multi-level hash grid to encode positions to feature vectors [42]. Plane-based approaches are designed principally to represent bounded scenes [3, 9, 10, 14, 18, 23]. MERF [49] employs a multiresolution representation and a fast contraction function to reconstruct unbounded scenes. For dynamic scenes, K-planes [18] decomposes 4D dynamic volumes into multiple feature planes and employs an MLP-based feature decoder for determining color and density. Structured representations still grapple with the trade-off between rendering speed and quality. In this study, unstructured 3D Gaussians promise large gains in rendering speed." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.117, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.116, + 0.388, + 0.128 + ], + "angle": 0, + "content": "K. Katsumata et al." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.147, + 0.414, + 0.163 + ], + "angle": 0, + "content": "2.3 Point-based rendering" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.169, + 0.788, + 0.472 + ], + "angle": 0, + "content": "Points, which naturally come from depth sensors, Structure from Motion (SfM) [51], or common Multi-View Stereo (MVS) algorithms [50, 52], offer a useful representation of fine-grained scenes and complex objects, and facilitate computationally efficient rendering. Consequently, they have been studied comprehensively in the vision and graphics community. The differentiable pipeline for point-based rendering results in points can be used for reconstructing 3D scenes [26-29]. 3DGS [26] achieves real-time rendering with high visual quality for unbounded static scenes at the expense of the generalization performance derived from NeRF's continuous neural field representation. 3DGS is replacing NeRFs as the backbone of text-to-3D models, leading to faster 3D generation [1, 12, 55, 66, 68]. Recently, Dynamic 3D Gaussians [38] employs 3DGS for dynamic scenes, which models dynamic scenes by the Gaussian position and rotation at each timestamp. The position and rotation of Gaussians at every timestamp are effective in modeling scenes from dense multi-view dynamic scenes. 
However, this approach presents difficulties in reconstructing monocular dynamic scenes, resulting in excessive memory consumption, particularly for extended input sequences. Specifically, the space complexity of the method for a scene with \\( T \\) frames is \\( O(TN) \\), where \\( N \\) is the number of 3D Gaussians. Our goal is to reduce memory consumption by representing time-varying position and rotation with approximation using a few parameters. The space complexity of our method is \\( O(LN) \\), where \\( L \\) is the number of parameters of the approximation, and usually \\( L < T \\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.473, + 0.788, + 0.655 + ], + "angle": 0, + "content": "Concurrent works on dynamic view synthesis includes approaches combining Gaussian Splatting with MLPs [5,22,32,35,36], approaches focusing on Gaussian representation [13, 15, 16, 64, 67], and approaches for specific targets [30, 46, 47, 72]. SpacetimeGaussian [32] focuses on dynamic view synthesis from multiview videos, unlike this study, by combining Gaussian Splatting and MLPs. [64] aims to model motion by employing a deformation field network while sacrificing rendering speed. [67] splits Gaussians in a time direction, and each Gaussian only focuses on a local temporal space. Four-dimensional (4D) Rotor Gaussian Splatting [15] models a local temporal space via temporal slicing for fast rendering. We aim to build a memory-efficient Gaussian representation for dynamic scenes, even for monocular scenes, while maintaining pure 3D Gaussian representation in order not to sacrifice the gift of 3D Gaussians, such as outstanding rendering speed and ease of direct editing of the scene." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.675, + 0.317, + 0.691 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.705, + 0.788, + 0.842 + ], + "angle": 0, + "content": "Given images with timesteps and camera parameters obtained from videos, our task is to learn a 4D spatial-temporal representation of a dynamic scene that enables fast and high-quality view rendering. To achieve this, we use 3DGS in dynamic view synthesis. The original 3D Gaussian representation [26] is defined by a position (mean), a covariance matrix (decomposed into a rotation matrix and a scaling vector), a color (determined by spherical harmonics (SH) [8] coefficient), and an opacity. To represent dynamic scenes, each 3D Gaussian in our method (Fig. 2) regards the position and rotation as time-varying parameters and others as time-invariant parameters over time (Sec. 3.1). Given a set of 3D Gaussians, intrinsic and extrinsic camera parameters, and" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.525, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "A Compact Dynamic 3D Gaussian" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "image", + "bbox": [ + 0.25, + 0.144, + 0.752, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.26, + 0.788, + 0.343 + ], + "angle": 0, + "content": "Fig. 2: Overview of our dynamic view synthesis framework. Our dynamic 3D Gaussian representation takes temporal modeling of 3D centers and rotations with Fourier and linear approximation, respectively. 
Our representation parameters are shared over all the timesteps, and observations of each timestep hint at the representation for other timesteps, enabling compact representation and reconstruction of dynamic scenes from few-view videos. In this figure, we only illustrate the time-varying parameterization of one Gaussian for the sake of simplicity." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.372, + 0.788, + 0.477 + ], + "angle": 0, + "content": "a timestep, we render images with the 3DGS technique [26], which renders an image by employing Gaussians within the camera plane out of a set of 3D Gaussians (Sec. 3.2). We update the Gaussian parameters to decrease the distance between rendered and training images in image and flow spaces (Sec. 3.3). Flow reconstruction loss enhances the temporal consistency of the learned representation, resulting in plausible image reconstruction. The small degrees of freedom of our representation essentially facilitate the reconstruction of dynamic scenes from a few observations." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.499, + 0.518, + 0.515 + ], + "angle": 0, + "content": "3.1 Dynamic 3D Gaussian representation" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.523, + 0.788, + 0.612 + ], + "angle": 0, + "content": "One possible extension of 3DGS [38] to dynamic scenes is to model the scenes per timestep explicitly. Although that strategy allows for flexible modeling for dynamic scenes, it requires 3D Gaussian parameters per timestep, increasing the memory size proportionally to video length. Since the representation for each time is optimized by observations with the number of cameras, the strategy lacks sufficient observations in monocular or few-view video setups, limiting its effectiveness in such scenarios." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.614, + 0.788, + 0.72 + ], + "angle": 0, + "content": "To design a compact dynamic 3D Gaussian representation, we express 3D Gaussian parameters using only a few parameters to achieve faithful reconstruction without a large increase in parameters. Our dynamic scene representation comprises a set of dynamic 3D Gaussians, extending the static 3D Gaussian introduced in [26]. This representation allows 3D Gaussians to move through the scene over time, using time-varying parameters (center position and rotation factors) and time-invariant parameters (scale, color, and opacity). 
Each dynamic Gaussian encapsulates the following parameters:" + }, + { + "type": "text", + "bbox": [ + 0.233, + 0.723, + 0.556, + 0.741 + ], + "angle": 0, + "content": "1) a 3D center at time \\(t\\): \\([x(t),y(t),z(t)]^{\\top} \\in \\mathbb{R}^{3}\\)," + }, + { + "type": "text", + "bbox": [ + 0.233, + 0.741, + 0.588, + 0.755 + ], + "angle": 0, + "content": "2) a 3D rotation at time \\(t\\) represented by a quaternion:" + }, + { + "type": "list", + "bbox": [ + 0.233, + 0.723, + 0.588, + 0.755 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.233, + 0.754, + 0.457, + 0.77 + ], + "angle": 0, + "content": "\\[\n[ q _ {x} (t), q _ {y} (t), q _ {z} (t), q _ {w} (t) ] ^ {\\top} \\in \\mathbb {R} ^ {4},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.233, + 0.769, + 0.484, + 0.786 + ], + "angle": 0, + "content": "3) a scaling factor: \\([s_x, s_y, s_z]^{\\mathsf{T}} \\in \\mathbb{R}^3\\)," + }, + { + "type": "text", + "bbox": [ + 0.233, + 0.786, + 0.68, + 0.8 + ], + "angle": 0, + "content": "4) SH coefficients representing color with the degrees of freedom \\(k\\):" + }, + { + "type": "list", + "bbox": [ + 0.233, + 0.754, + 0.68, + 0.8 + ], + "angle": 0, + "content": null + }, + { + "type": "equation", + "bbox": [ + 0.233, + 0.8, + 0.338, + 0.816 + ], + "angle": 0, + "content": "\\[\nh \\in \\mathbb {R} ^ {3 \\times (k + 1) ^ {2}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.233, + 0.817, + 0.373, + 0.832 + ], + "angle": 0, + "content": "5) an opacity: \\(o \\in \\mathbb{R}\\)." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "K. Katsumata et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.146, + 0.788, + 0.193 + ], + "angle": 0, + "content": "Each Gaussian at time \\( t \\) is characterized by a 3D center \\( \\pmb{\\mu}(t) = [x(t), y(t), z(t)]^{\\top} \\) and a 3D covariance matrix \\( \\pmb{\\Sigma}(t) \\). The density of the 3D Gaussian at the intersection \\( \\pmb{x} \\) with a ray is obtained as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.374, + 0.197, + 0.787, + 0.218 + ], + "angle": 0, + "content": "\\[\nG _ {t} (\\boldsymbol {x}) = e ^ {- \\frac {1}{2} (\\boldsymbol {x} - \\boldsymbol {\\mu} (t)) ^ {\\top} \\boldsymbol {\\Sigma} (t) ^ {- 1} (\\boldsymbol {x} - \\boldsymbol {\\mu} (t))}. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.223, + 0.788, + 0.344 + ], + "angle": 0, + "content": "To constrain the covariance matrix \\(\\boldsymbol{\\Sigma}(t)\\) such that it is a positive semi-definite matrix during optimization, the covariance matrix \\(\\boldsymbol{\\Sigma}(t)\\) is decomposed by using a scaling matrix \\(\\mathbf{S} = \\mathrm{diag}(s_x, s_y, s_z)\\) and a rotation matrix \\(\\mathbf{R}(t)\\) as \\(\\boldsymbol{\\Sigma}(t) = \\mathbf{R}(t)\\mathbf{S}\\mathbf{S}^{\\top}\\mathbf{R}(t)^{\\top}\\). Here, the rotation matrix \\(\\mathbf{R}(t)\\) is represented by quaternion \\((q_x(t), q_y(t), q_z(t), q_w(t))\\). Since most parts of the dynamic scene hardly change in scale because the solid objects (e.g., humans, animals, and things) scarcely expand or shrink, we maintain the scale parameter as a constant to reduce the model size. In what follows, we formally define the 3D center and rotation." 
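To make the parameterization above concrete, the following minimal NumPy sketch builds the covariance \(\Sigma(t) = \mathbf{R}(t)\mathbf{S}\mathbf{S}^{\top}\mathbf{R}(t)^{\top}\) from a quaternion and a scaling factor and evaluates the density of Eq. (1). It is an illustration only, not the authors' implementation; the function names are ours.

```python
import numpy as np

def quat_to_rotmat(q):
    """Convert a quaternion (qx, qy, qz, qw) to a 3x3 rotation matrix (normalized first)."""
    x, y, z, w = q / np.linalg.norm(q)
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y)],
        [2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y)],
    ])

def covariance(q, s):
    """Sigma = R S S^T R^T with S = diag(sx, sy, sz); positive semi-definite by construction."""
    R = quat_to_rotmat(q)
    S = np.diag(s)
    return R @ S @ S.T @ R.T

def density(x, mu, sigma):
    """Eq. (1): unnormalized Gaussian density at a point x."""
    d = x - mu
    return float(np.exp(-0.5 * d @ np.linalg.inv(sigma) @ d))

# Toy example: a Gaussian rotated 90 degrees about the z-axis.
q = np.array([0.0, 0.0, np.sin(np.pi / 4), np.cos(np.pi / 4)])
sigma = covariance(q, np.array([0.1, 0.2, 0.3]))
print(density(np.array([0.05, 0.0, 0.0]), np.zeros(3), sigma))
```

The same construction applies at any timestep once the time-varying center and quaternion have been evaluated.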
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.344, + 0.788, + 0.405 + ], + "angle": 0, + "content": "Since motion in dynamic scenes is primarily described by changing the position of points like scene or optical flow [37, 61], we model the 3D center with an expressive approximation. We approximate the 3D position \\( x(t), y(t), z(t) \\) using Fourier approximation. At time \\( t \\), it is represented by" + }, + { + "type": "equation", + "bbox": [ + 0.306, + 0.41, + 0.786, + 0.473 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} x (t) = w _ {x, 0} + \\sum_ {i = 1} ^ {L} w _ {x, 2 i - 1} \\sin (2 i \\pi t) + w _ {x, 2 i} \\cos (2 i \\pi t), \\\\ y (t) = w _ {y, 0} + \\sum_ {i = 1} ^ {L} w _ {y, 2 i - 1} \\sin (2 i \\pi t) + w _ {y, 2 i} \\cos (2 i \\pi t), \\tag {2} \\\\ z (t) = w _ {z, 0} + \\sum_ {i = 1} ^ {L} w _ {z, 2 i - 1} \\sin (2 i \\pi t) + w _ {z, 2 i} \\cos (2 i \\pi t), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.478, + 0.788, + 0.538 + ], + "angle": 0, + "content": "where, \\( w_{\\cdot,0}, \\ldots, w_{\\cdot,2L} \\) are the intercept and coefficients of the position, and \\( L \\) is the number of terms (harmonics). We remark that a polynomial approximation is inadequate due to underfitting with a small number of bases and overfitting with higher-order polynomials. For these reasons, we choose the Fourier approximation." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.538, + 0.788, + 0.599 + ], + "angle": 0, + "content": "3DGS uses anisotropic 3D Gaussians, resulting in the need for dynamic modeling of Gaussian rotations. We approximate the 3D rotation (quaternion) over time using a linear approximation because a unit quaternion can be approximated locally as linear when considering its tangent plane. At time \\( t \\), it is defined as" + }, + { + "type": "equation", + "bbox": [ + 0.326, + 0.604, + 0.676, + 0.64 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} q _ {x} (t) = w _ {q x, 0} + w _ {q x, 1} t, \\quad q _ {y} (t) = w _ {q y, 0} + w _ {q y, 1} t, \\\\ q _ {z} (t) = w _ {q z, 0} + w _ {q z, 1} t, \\quad q _ {w} (t) = w _ {q w, 0} + w _ {q w, 1} t, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.644, + 0.788, + 0.69 + ], + "angle": 0, + "content": "where \\( w_{\\cdot,0} \\) and \\( w_{\\cdot,1} \\) are intercepts and coefficients of the rotation, respectively. We project the quaternion \\( q_{\\cdot}(t) \\) onto the unit quaternion by normalizing it: \\( q_{\\cdot}(t) / \\|q_{\\cdot}(t)\\| \\), to ensure that the quaternion at time \\( t \\) is a unit quaternion." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.69, + 0.788, + 0.841 + ], + "angle": 0, + "content": "For each Gaussian, the preceding definitions yield \\(3L + 8 + 3 + 3(k + 1)^2 + 1\\) parameters with respect to the 3D center, 3D rotation, scale, color, and opacity. Notably, the parameter count for each Gaussian is defined merely by the number of approximation terms and spherical harmonic degrees of freedom, with no regard to time length. Compared to methods that store parameters for each timestep, our approach saves on memory usage. Memory consumption in our dynamic scene representation is determined by two hyperparameters (i.e., \\(L\\) and \\(k\\)) and the number of Gaussians used. Furthermore, the representation defined as a function of time over continuous time inhibits discontinuous movement through time. This characteristic improves robustness in novel view synthesis settings." 
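A short sketch of the time-varying parameters defined above (the Fourier series of Eq. (2) for the center and the normalized linear quaternion for the rotation) may help. It is an illustrative approximation of the formulas with arbitrary coefficients, not the released code, and all names are ours.

```python
import numpy as np

def fourier_coordinate(w, t):
    """Eq. (2): one coordinate of the 3D center at time t.
    w = [w0, w1, ..., w_{2L}]: intercept followed by (sin, cos) coefficient pairs."""
    L = (len(w) - 1) // 2
    value = w[0]
    for i in range(1, L + 1):
        value += w[2 * i - 1] * np.sin(2 * i * np.pi * t) + w[2 * i] * np.cos(2 * i * np.pi * t)
    return value

def linear_quaternion(w0, w1, t):
    """Linear rotation model: q(t) = w0 + w1 * t, projected back onto the unit quaternion."""
    q = w0 + w1 * t  # (qx, qy, qz, qw)
    return q / np.linalg.norm(q)

# Toy example with L = 2 harmonics per coordinate (coefficients here are arbitrary).
wx = np.array([0.0, 0.10, 0.00, 0.05, 0.00])
wy = np.array([1.0, 0.00, 0.20, 0.00, 0.00])
wz = np.array([0.5, 0.00, 0.00, 0.00, 0.10])
q0, q1 = np.array([0.0, 0.0, 0.0, 1.0]), np.array([0.0, 0.0, 0.1, 0.0])
for t in (0.0, 0.25, 0.5):
    mu_t = np.array([fourier_coordinate(w, t) for w in (wx, wy, wz)])
    print(t, mu_t, linear_quaternion(q0, q1, t))
```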
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.525, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "A Compact Dynamic 3D Gaussian" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.147, + 0.517, + 0.163 + ], + "angle": 0, + "content": "3.2 Rendering via 3D Gaussian Splatting" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.173, + 0.788, + 0.249 + ], + "angle": 0, + "content": "Rendering with 3D Gaussians applies splatting techniques [26] to the Gaussians within the camera plane. Zwicker et al. [73] introduced the projection of the 3D covariance matrix to the 2D one. The 3D covariance matrix \(\pmb{\Sigma}\) is projected into a 2D one \(\pmb{\Sigma}'\) given a viewing transformation \(\mathbf{W}\) as \(\pmb{\Sigma}'(t) = \mathbf{J}\mathbf{W}\pmb{\Sigma}(t)\mathbf{W}^{\top}\mathbf{J}^{\top}\), where \(\mathbf{J}\) is the Jacobian of the affine approximation of the projective transformation at the Gaussian center \(\pmb{\mu}(t)\):" + }, + { + "type": "equation", + "bbox": [ + 0.433, + 0.261, + 0.786, + 0.311 + ], + "angle": 0, + "content": "\[\n\mathbf {J} = \left[ \begin{array}{c c c} \frac {1}{v _ {z}} & 0 & - \frac {v _ {x}}{v _ {z} ^ {2}} \\ 0 & \frac {1}{v _ {z}} & - \frac {v _ {y}}{v _ {z} ^ {2}} \\ 0 & 0 & 0 \end{array} \right], \tag {4}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.324, + 0.785, + 0.37 + ], + "angle": 0, + "content": "where \([v_x, v_y, v_z]^{\top} = \mathbf{W}\boldsymbol{\mu}(t)\) is the camera coordinate of the Gaussian center \(\boldsymbol{\mu}(t)\) obtained by the viewing transformation, which projects the points from world space to camera space." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.371, + 0.787, + 0.447 + ], + "angle": 0, + "content": "Similar to NeRF-style volumetric rendering, point-based rendering computes the color \( C \) of a pixel by evaluating the blending of \( N \) ordered points that overlap the pixel, \( C = \sum_{i=1}^{N} c_i \alpha_i \prod_{j=1}^{i-1} (1 - \alpha_j) \), where \( c_i \) represents the color of a Gaussian evaluated by SH coefficients, and \( \alpha_i \) represents the density that is calculated from a 2D Gaussian with the 2D covariance \( \pmb{\Sigma}' \) and 2D center \( \pmb{\mu}' \) at time \( t \) and the optimized opacity \( o \)." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.469, + 0.655, + 0.485 + ], + "angle": 0, + "content": "3.3 Optimization of the dynamic 3D Gaussian representation" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.495, + 0.787, + 0.572 + ], + "angle": 0, + "content": "We optimize the Gaussian parameters, i.e., intercepts and coefficients of position and rotation \( w \), a scaling factor \( s_x, s_y, s_z \), SH coefficients \( h \), and an opacity \( o \), based on the iterations of rendering and a comparison of the rendered images with training frames in the captured videos. 
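The projection and blending steps of Sec. 3.2 can be summarized in a few lines. The sketch below assumes the viewing transformation is given as a 3x3 rotation W plus a translation (the paper folds both into W); it is a simplified NumPy illustration of \(\Sigma' = \mathbf{J}\mathbf{W}\Sigma\mathbf{W}^{\top}\mathbf{J}^{\top}\) and front-to-back \(\alpha\)-blending, not the tile-based CUDA rasterizer used by 3DGS.

```python
import numpy as np

def project_covariance(sigma, mu, W, trans):
    """2D covariance Sigma' = J W Sigma W^T J^T, with J the Jacobian of Eq. (4)
    evaluated at the camera-space center (vx, vy, vz)."""
    vx, vy, vz = W @ mu + trans                     # Gaussian center in camera coordinates
    J = np.array([[1.0 / vz, 0.0, -vx / vz ** 2],
                  [0.0, 1.0 / vz, -vy / vz ** 2],
                  [0.0, 0.0, 0.0]])
    return (J @ W @ sigma @ W.T @ J.T)[:2, :2]      # keep the image-plane 2x2 block

def alpha_blend(colors, alphas):
    """Front-to-back compositing: C = sum_i c_i * alpha_i * prod_{j<i} (1 - alpha_j)."""
    C, transmittance = np.zeros(3), 1.0
    for c, a in zip(colors, alphas):
        C += np.asarray(c) * a * transmittance
        transmittance *= 1.0 - a
    return C

# Toy example: project one covariance and composite two overlapping Gaussians.
print(project_covariance(np.diag([0.01, 0.02, 0.03]), np.zeros(3), np.eye(3), np.array([0.0, 0.0, 2.0])))
print(alpha_blend([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], [0.6, 0.5]))
```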
To compare the rendered and training views, the loss function contains the L1 loss and the structural similarity (SSIM) [63] loss \\( \\mathcal{L}_{\\mathrm{D - SSIM}} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.371, + 0.582, + 0.786, + 0.599 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {r e c o n}} = (1 - \\lambda) | \\hat {I} - I | + \\lambda \\mathcal {L} _ {\\mathrm {D} - \\mathrm {S S I M}}, \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.613, + 0.788, + 0.779 + ], + "angle": 0, + "content": "where \\(\\hat{I}\\) and \\(I\\) are the rendered and target images, respectively. The loss function moves and rotates the anisotropic Gaussians and changes their color and opacity so that each Gaussian covers a homogeneous area. Since the loss just fixes incorrectly positioned Gaussians, the over- or under-representation of the set of Gaussians for the scene needs a mechanism for creating Gaussians that reconstruct the scene or destroy extra Gaussians. We also follow the divide and prune techniques in 3DGS for producing a compact and precise representation of the scene. We surveil the gradients of each Gaussian and densify Gaussians by splitting a Gaussian with a large gradient and a large scale into two small Gaussians, and cloning a Gaussian with a large gradient and a small scale to two Gaussians. Moreover, we remove transparent Gaussians with an opacity less than a threshold value of 0.005." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.78, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Following [26], we initialize a set of Gaussians using a set of sparse points from SfM [51] for real scenes, and we initialize a set of Gaussians randomly using a uniform distribution for synthetic scenes owing to the absence of the prior. We adopt a two-stage optimization strategy consisting of static and dynamic stages. Deeming the" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.228, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "K. Katsumata et al." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.237 + ], + "angle": 0, + "content": "frames in the captured datasets as static scenes, we optimize static representation in the static stage to learn the prior of Gaussians. In other words, we optimize the parameters that are consistent all over time (i.e., scale, SH coefficients, and opacity) and the intercepts for the center and rotation \\((w_{x,0}, w_{y,0}, w_{z,0}, w_{qx,0}, w_{qy,0}, w_{qz,0}, w_{qw,0})\\) among the Gaussian parameters in the static stage. After the static stage, we optimize all the parameters of the set of Gaussians to reconstruct a dynamic region as a dynamic stage." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.238, + 0.79, + 0.48 + ], + "angle": 0, + "content": "Another challenge in the dynamic scene reconstruction is ambiguity caused by the limited number of captured views at a timestep. Since a dynamic scene contains temporal changes, such as moving objects and changing shapes, sharing the scene information over frames with different timesteps is difficult. To overcome the ambiguity, we employ flow information. Similar to our 3D Gaussian, scene flow [39, 40, 62] is defined as the position of a point in 3D space and its motion. These 3D points originate from different mechanisms than those in 3D Gaussian, making matching in 3D space difficult. 
Since optical flow defined on the image plane can be directly matched with a 3D Gaussian and is readily to compute from monocular inputs, we supervise the flows of the estimizable Gaussians with the ground truth optical flows of the input frames. We use RAFT [56] to obtain ground truth flow for training views: forward flow \\( f_{\\mathrm{fwd}} \\) and backward flow \\( f_{\\mathrm{bwd}} \\) between two adjacent frames. The flow loss \\( \\mathcal{L}_{\\mathrm{flow}} \\) takes the L1 loss between the ground truth flows and the optical flow of the Gaussian for both directions of the flows. The flow loss gives our method spatial-temporal consistency without any additional computation cost in rendering. We combine the flow loss \\( \\mathcal{L}_{\\mathrm{flow}} \\) with the reconstruction loss that compares the rendered and training views:" + }, + { + "type": "equation", + "bbox": [ + 0.397, + 0.487, + 0.786, + 0.503 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {\\text {r e c o n}} + \\lambda_ {\\text {f l o w}} \\mathcal {L} _ {\\text {f l o w}} (\\hat {F}, F), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.513, + 0.788, + 0.59 + ], + "angle": 0, + "content": "where \\( F = \\{f_{\\mathrm{fwd}}, f_{\\mathrm{bwd}}\\} \\) and \\( \\hat{F} \\) are the ground truth flow and the flow of the Gaussians, respectively, and \\( \\lambda_{\\mathrm{flow}} \\) is a balancing hyperparameter for the flow term. Instead of applying an optical flow algorithm for rendering, we create pseudo optical flow from a Gaussian representation. Scene motion is represented solely by the 3D mean coefficients: \\( w_{x,1 \\leq i}, w_{y,1 \\leq i}, w_{z,1 \\leq i} \\). Scene flow in 3D space can be computed by" + }, + { + "type": "equation", + "bbox": [ + 0.321, + 0.599, + 0.684, + 0.616 + ], + "angle": 0, + "content": "\\[\n\\hat {f} _ {\\mathrm {f w d}} ^ {x} = x (t + \\Delta t) - x (t), \\quad \\hat {f} _ {\\mathrm {b w d}} ^ {x} = x (t) - x (t - \\Delta t),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.324, + 0.621, + 0.785, + 0.638 + ], + "angle": 0, + "content": "\\[\n\\hat {f} _ {\\mathrm {f w d}} ^ {y} = y (t + \\Delta t) - y (t), \\quad \\hat {f} _ {\\mathrm {b w d}} ^ {y} = y (t) - y (t - \\Delta t), \\tag {7}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.324, + 0.642, + 0.684, + 0.659 + ], + "angle": 0, + "content": "\\[\n\\hat {f} _ {\\mathrm {f w d}} ^ {\\tilde {z}} = z (t + \\Delta t) - z (t), \\quad \\hat {f} _ {\\mathrm {b w d}} ^ {\\tilde {z}} = z (t) - z (t - \\Delta t),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.666, + 0.788, + 0.697 + ], + "angle": 0, + "content": "where \\(\\Delta t\\) is the difference between the timesteps of the two image frames. The scene flow is projected into a 2D camera plane using" + }, + { + "type": "equation", + "bbox": [ + 0.328, + 0.703, + 0.786, + 0.723 + ], + "angle": 0, + "content": "\\[\n\\hat {f} _ {\\left\\{f w d, b w d \\right\\}} ^ {x y z} = \\mathbf {J} \\left[ \\hat {f} _ {\\left\\{f w d, b w d \\right\\}}, \\hat {f} _ {\\left\\{f w d, b w d \\right\\}} ^ {y}, \\hat {f} _ {\\left\\{f w d, b w d \\right\\}} ^ {z} \\right] ^ {\\top}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.73, + 0.788, + 0.775 + ], + "angle": 0, + "content": "where \\(\\mathbf{J}\\) is the Jacobian of the affine approximation of the projective transformation at the Gaussian center \\(\\mu\\) (Eq. (4)). 
Regarding scene flows on the camera plane as RGB colors, point-based rendering can compute an optical flow of a pixel through \\(\\alpha\\)-blending:" + }, + { + "type": "equation", + "bbox": [ + 0.392, + 0.796, + 0.786, + 0.837 + ], + "angle": 0, + "content": "\\[\n\\hat {f} _ {\\mathrm {f w d}} = \\sum_ {i = 1} ^ {N} \\hat {f} _ {\\mathrm {f w d}, i} ^ {\\mathrm {x y z}} \\alpha_ {i} \\prod_ {j = 1} ^ {i - 1} (1 - \\alpha_ {j}). \\tag {9}\n\\]" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.525, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "A Compact Dynamic 3D Gaussian" + }, + { + "type": "page_number", + "bbox": [ + 0.776, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "image", + "bbox": [ + 0.219, + 0.142, + 0.785, + 0.576 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.216, + 0.585, + 0.788, + 0.629 + ], + "angle": 0, + "content": "Fig. 3: Qualitative comparison on D-NeRF [45]. We highlight the differences by zoom view. Our method achieves competitive visual quality with strong baselines. While our method successfully reconstructs intricate details like hands, it causes a blurred sphere shape." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.659, + 0.788, + 0.721 + ], + "angle": 0, + "content": "The backward flow is calculated in the same way. The optical flow \\(\\hat{F}\\) consists of the forward flows \\(\\hat{f}_{\\mathrm{fwd}}\\) and backward flows \\(\\hat{f}_{\\mathrm{bwd}}\\) for all pixels. We exclude the flow loss for the D-NeRF dataset because the teleport of the cameras between adjacent frames causes difficulties in calculating ground truth flows." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.746, + 0.35, + 0.765 + ], + "angle": 0, + "content": "4 Experiment" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.782, + 0.37, + 0.795 + ], + "angle": 0, + "content": "4.1 Evaluation data" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.81, + 0.795, + 0.842 + ], + "angle": 0, + "content": "We evaluate our compact dynamic Gaussian representation using dynamic scene datasets: a synthetic one D-NeRF [45] and two real ones, i.e., DyNeRF [31] and HyperNeRF [44]." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "K. Katsumata et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.145, + 0.788, + 0.203 + ], + "angle": 0, + "content": "Table 1: Quantitative results on the D-NeRF dataset [45]. Our method performs competitively against NeRF approaches in terms of visual quality and achieves the fastest rendering speed among the highest-performing methods. Results except the FPS of [17, 18, 20] are adopted from the original papers. The best and second best scores among competing methods are highlighted." + }, + { + "type": "table", + "bbox": [ + 0.306, + 0.213, + 0.703, + 0.321 + ], + "angle": 0, + "content": "
| Method | PSNR↑ | MS-SSIM↑ | LPIPS↓ | FPS↑ | Train Time↓ | Mem↓ |
| TiNeuVox-S [17] | 30.75 | 0.96 | 0.07 | 0.32 | 8 mins | 8MB |
| TiNeuVox-B [17] | 32.67 | 0.97 | 0.04 | 0.13 | 28 mins | 48MB |
| K-Planes [18] | 31.61 | 0.97 | - | 0.54 | 52 mins | ~497MB |
| V4D [20] | 33.72 | 0.98 | 0.02 | 1.47 | 6.9 hrs | 1.2GB |
| 3DGS [26] | 20.51 | 0.89 | 0.07 | 170 | 6 mins | ~50MB |
| D-3DGS | 17.22 | 0.81 | 0.13 | 173 | 15 mins | ~913MB |
| Ours | 32.19 | 0.97 | 0.04 | 150 | 8 mins | ~159MB |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.333, + 0.788, + 0.404 + ], + "angle": 0, + "content": "Table 2: Quantitative results on the DyNeRF datasets [31]. Results excepting FPS of [18, 20] are adopted from the original papers. The best and second best scores among competing methods (excepting 3DGS) are highlighted. While our method matches NeRFs in terms of rendering quality, our method matches 3DGS in terms of rendering speed. Besides, our method is 20 times more compact than Dynamic3DGaussians." + }, + { + "type": "table", + "bbox": [ + 0.293, + 0.416, + 0.716, + 0.508 + ], + "angle": 0, + "content": "
| Method | PSNR↑ | MS-SSIM↑ | LPIPS↓ | FPS↑ | Train Time↓ | Mem↓ |
| K-Planes [18] | 31.63 | 0.964 | - | 0.31 | 1.8 hrs | ~309MB |
| V4D | 28.96 | 0.937 | 0.17 | 0.1 | 14 hrs | 1.2GB |
| 3DGS [26] | 20.94 | 0.800 | 0.29 | 109 | 20 mins | ~198MB |
| D-3DGS | 24.36 | 0.834 | 0.25 | 119 | 51 mins | ~2.3GB |
| Dynamic3DGaussians [38] | 27.79 | 0.869 | 0.23 | 51 | 2.1 hrs | ~6.6GB |
| Ours | 30.46 | 0.955 | 0.15 | 118 | 1 hr | ~338MB |
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.536, + 0.788, + 0.595 + ], + "angle": 0, + "content": "D-NeRF dataset [45]. This dataset comprises eight videos of varying lengths, ranging from 50 to 200 frames per video. The camera setup is designed to mimic a monocular camera setting by teleporting between adjacent timesteps. The test views are from novel camera positions. We train and render at the resolution of \\(800 \\times 800\\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.597, + 0.788, + 0.656 + ], + "angle": 0, + "content": "DyNeRF dataset [31]. The multi-camera dataset includes six 10-second videos captured at 30 FPS using 15-20 synchronized fixed cameras. For evaluation, a central camera is used, while training utilizes frames from the other cameras. The training and rendering resolution is set at \\(1,352 \\times 1,014\\)." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.657, + 0.788, + 0.702 + ], + "angle": 0, + "content": "HyperNeRF dataset [44]. This dataset encompasses videos ranging from 8 to \\(15\\mathrm{~s}\\), captured at 15 FPS using two Pixel 3 phones. The training and rendering processes are conducted at a resolution of \\(540 \\times 960\\)." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.725, + 0.421, + 0.74 + ], + "angle": 0, + "content": "4.2 Implementation details" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.788, + 0.843 + ], + "angle": 0, + "content": "We adhere to the experimental setup in the 3DGS paper [26]. The number of approximation terms of the Gaussian centers \\( L \\) is set to 2 for the D-NeRF dataset. For the other datasets, \\( L \\) is set to 5 from preliminary experiments. Our two-stage optimization process begins with an initial fitting of parameters, excluding the coefficients for Gaussian center and rotation. This initial stage spans 3,000 iterations and utilizes all training views in a static setting. Subsequently, we engage in a dynamic stage, adjusting all Gaussian" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.525, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "A Compact Dynamic 3D Gaussian" + }, + { + "type": "page_number", + "bbox": [ + 0.77, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "table_caption", + "bbox": [ + 0.215, + 0.145, + 0.788, + 0.201 + ], + "angle": 0, + "content": "Table 3: Quantitative results on the HyperNeRF dataset [44]. Our method demonstrates competitive performance in rendering quality across all scenes, surpassing the compared methods in rendering speed. Furthermore, our method is not inferior to the compared methods in training time and memory size." + }, + { + "type": "table", + "bbox": [ + 0.218, + 0.213, + 0.787, + 0.298 + ], + "angle": 0, + "content": "
| Method | FPS↑ | Train Time↓ | Mem↓ | BROOM | | 3D PRINTER | | CHICKEN | | PEEL BANANA | | Mean | |
| | | | | PSNR↑ | SSIM↑ | PSNR↑ | SSIM↑ | PSNR↑ | SSIM↑ | PSNR↑ | SSIM↑ | PSNR↑ | SSIM↑ |
| HyperNeRF [44] | 0.36 | 48 hrs† | 15MB | 19.3 | 0.591 | 20.0 | 0.821 | 26.9 | 0.948 | 23.3 | 0.896 | 22.2 | 0.811 |
| TiNeuVox-B [17] | 0.14 | 30 mins | 48MB | 21.5 | 0.686 | 22.8 | 0.841 | 28.3 | 0.947 | 24.4 | 0.873 | 24.3 | 0.837 |
| V4D [20] | 0.15 | 7 hrs | 1.2GB | 22.1 | 0.669 | 23.2 | 0.835 | 28.4 | 0.929 | 25.2 | 0.873 | 24.7 | 0.827 |
| Ours | 188 | 1 hr | ~720MB | 22.1 | 0.789 | 25.5 | 0.919 | 28.3 | 0.934 | 26.6 | 0.920 | 25.6 | 0.890 |
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.217, + 0.299, + 0.786, + 0.318 + ], + "angle": 0, + "content": "Train time of HyperNeRF [44] is estimated from their paper's descriptions. Originally reported as 8 hours on 4 TPU v4s [25], the TPU v4 is slightly faster than the A100 GPU, and the A100 GPU is at least 1.5 times faster than the A6000 GPU." + }, + { + "type": "image", + "bbox": [ + 0.229, + 0.324, + 0.777, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.23, + 0.482, + 0.773, + 0.497 + ], + "angle": 0, + "content": "Fig. 4: Qualitative comparison on the DyNeRF dataset [31]. The differences are zoomed in." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.527, + 0.788, + 0.589 + ], + "angle": 0, + "content": "parameters in 27,000 iterations. The entire optimization process encompasses 30,000 iterations. Following [26], \\(\\lambda\\) is set to 0.2. We set the flow loss weight \\(\\lambda_{\\mathrm{flow}}\\) to 1,000 and acquire ground truth flow through the RAFT pretrained on the Sintel dataset [7]. All experiments are conducted on a single RTX A6000 GPU." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.612, + 0.377, + 0.628 + ], + "angle": 0, + "content": "4.3 Evaluation setup" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.638, + 0.788, + 0.699 + ], + "angle": 0, + "content": "Compared methods. We benchmark our method against the following baseline methods: TiNeuVox [17], K-Planes [18], V4D [20], HyperNeRF [44], 3D Gaussian Splatting (3DGS) [26], Dynamic3DGaussians [38], and a D-3DGS baseline. D-3DGS is the dynamic extension of 3DGS, which stores both position and rotation for each timestep." + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.7, + 0.79, + 0.759 + ], + "angle": 0, + "content": "Evaluation metrics. We assess the methods using various metrics, including PSNR [24], SSIM [63], LPIPS [70], FPS, Training time, and memory used to store optimized parameters. Memory consumption includes the 3D Gaussian parameters, voxel/plane representation, and neural network parameters." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.784, + 0.405, + 0.799 + ], + "angle": 0, + "content": "4.4 Experimental results" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.81, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Quantitative results. The quantitative results on the D-NeRF dataset are detailed in Tab. 1. Our method demonstrates a performance comparable to TiNeuVox and K-Planes" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.116, + 0.388, + 0.128 + ], + "angle": 0, + "content": "K. Katsumata et al." + }, + { + "type": "image", + "bbox": [ + 0.227, + 0.143, + 0.777, + 0.284 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.25, + 0.295, + 0.751, + 0.31 + ], + "angle": 0, + "content": "Fig. 5: Qualitative comparison on HyperNeRF [44]. Our method offers sharp results." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.328, + 0.788, + 0.427 + ], + "angle": 0, + "content": "Table 4: Per-scene quantitative comparison on D-NeRF scenes of different \\( L \\), which stands for the number of harmonic terms in the Fourier approximation, and other design choices. The highest mean score is achieved with \\( L = 2 \\), but increasing the complexity \\( L \\) (the number of coefficients) improves visual quality in some scenes (JUMPING JACKS and T-REX). 
The spline approximations bring marginal improvements in some scenes but slower rendering. The time-varying scale (the last row) also provides minor gains in some cases and increases the memory size. The setting reported in Fig. 3 is highlighted with a gray background." + }, + { + "type": "table", + "bbox": [ + 0.22, + 0.437, + 0.785, + 0.58 + ], + "angle": 0, + "content": "
STAND UPJACKSBALLSLEGOWARRIORHOOKT-REXMUTANTMean
PSNR SSIMPSNR SSIMPSNR SSIMPSNR SSIMPSNR SSIMPSNR SSIMPSNR SSIMPSNR SSIMPSNR SSIM
L = 140.210.99427.220.95230.270.97224.260.94032.42
L = 239.100.99330.950.98033.290.98423.150.92234.15
L = 338.090.99032.780.98432.540.97922.120.88135.36
L = 435.830.98432.930.98230.390.96921.060.85534.38
L = 532.890.97630.710.97727.680.95920.200.82532.64
Linear27.770.97323.100.92126.680.95922.270.92217.39
Quadratic29.400.97823.440.92627.510.96322.450.92417.70
Cubic29.980.97923.710.92827.760.96422.370.92118.04
Spline (5)38.870.99331.960.98332.960.98023.090.91834.46
Spline (6)38.000.99231.840.98432.810.98022.250.90335.24
Linear (scale)38.320.99330.910.98032.550.98423.870.93034.43
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.612, + 0.789, + 0.793 + ], + "angle": 0, + "content": "in terms of visual quality as measured by PSNR, SSIM, and LPIPS. Notably, it excels in training time, FPS, and memory size, achieving a rendering speed that is \\(300 \\times\\) faster than that of K-Planes. Furthermore, our method surpasses both 3DGS and D-3DGS in terms of visual quality without compromising rendering speed. In the DyNeRF scenes experiment, detailed in Tab. 2, while our method does not exceed the baseline in reconstruction quality, it shows a substantial improvement in FPS. Since the DyNeRF scenes contain multi-view data, the D-3DGS baseline naturally improves static 3DGS, unlike monocular scenes. Our method even attains rendering speeds that exceed real-time performance at a high resolution of \\(1,354 \\times 1,014\\). For the challenging HyperNeRF dataset, which is captured by only two moving cameras and referenced in Tab. 3, our method not only demonstrates rapid rendering speeds but also achieves higher average PSNR and SSIM scores than the compared methods." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.789, + 0.842 + ], + "angle": 0, + "content": "Qualitative results. Figures 3 to 5 show that our method yields faithful reconstruction for the dynamic scenes. Unlike the structured representation, which has a fixed size of grids, the unstructured nature of 3D Gaussians enables adaptive control of the expres" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.525, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "A Compact Dynamic 3D Gaussian" + }, + { + "type": "page_number", + "bbox": [ + 0.77, + 0.116, + 0.785, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "image_caption", + "bbox": [ + 0.278, + 0.144, + 0.406, + 0.157 + ], + "angle": 0, + "content": "Ours without \\(\\mathcal{L}_{\\mathrm{flow}}\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.643, + 0.145, + 0.678, + 0.156 + ], + "angle": 0, + "content": "Ours" + }, + { + "type": "image", + "bbox": [ + 0.24, + 0.159, + 0.764, + 0.279 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.29, + 0.785, + 0.318 + ], + "angle": 0, + "content": "Fig.6: Qualitative comparison of disabled and enabled flow loss on DyNeRF. We highlight the difference by zoom view." + }, + { + "type": "image", + "bbox": [ + 0.274, + 0.336, + 0.731, + 0.476 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.215, + 0.488, + 0.785, + 0.516 + ], + "angle": 0, + "content": "Fig.7: Composition of two scenes. Our method allows for the addition of adding 3D objects represented 3D Gaussians into a 3D Gaussian scene. We highlight the added object." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.549, + 0.785, + 0.594 + ], + "angle": 0, + "content": "siveness of the representation, delivering sharper renderings. As seen with the results for BOUNCING BALLS, since our method has discrete primitives, it sometimes fails to reproduce smooth boundaries." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.596, + 0.785, + 0.656 + ], + "angle": 0, + "content": "Effect of the number of parameters \\( L \\). Table 4 shows per-scene PSNR and SSIM scores of K-Planes and our method with the different \\( L \\) (Eq. (2)). It is observed that the optimal \\( L \\) for novel view synthesis varies from scene to scene, highlighting the necessity for complex approximations to capture intricate motions effectively." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.658, + 0.785, + 0.703 + ], + "angle": 0, + "content": "Effect of flow loss. Additionally, visual comparisons drawn from our method without and with the flow loss (Fig. 6) reveal that incorporating the flow loss mitigates ghostly artifacts and significantly enhances the accuracy of color reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.705, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Design choice. Our method is very flexible and allows for the use of arbitrary approximation functions and the choice of time-varying parameters. Table 4 also shows the experimental results of other options for the design of the model to facilitate future dynamic scene reconstruction. The linear, quadratic, and cubic baselines approximate time-varying 3D positions with polynomials of degrees one, two, and three, respectively. The Spline (5) and Spline (6) baselines approximate 3D positions with spline approximations of five and six points, respectively. The linear (scale) baseline approximates time-varying scales with the linear approximation in addition to positions and rotations. Although a Spline baseline gives minor performance gains in some cases, it" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.116, + 0.388, + 0.128 + ], + "angle": 0, + "content": "K. Katsumata et al." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.147, + 0.787, + 0.253 + ], + "angle": 0, + "content": "achieves 91 FPS for rendering, showing slower rendering than the proposed method. The linear (scale) baseline does not show additional parameters that would result in performance improvements. For time-varying 3D rotation, we also consider the approximation with slerp. Since it does not offer performance gains while causing numerical instability for static Gaussians, we use linear approximation for rotation. For faster rendering and compact representation, we use the Fourier approximation for 3D positions and model 3D positions and rotations as time-varying parameters." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.253, + 0.787, + 0.344 + ], + "angle": 0, + "content": "Scene composition. Since our dynamic 3D Gaussian representation still uses pure 3D Gaussian representation, the learned representation facilitates straightforward editing of Gaussians. We demonstrate the composition of two scenes with our representation. Figure 7 illustrates this by combining the MUTANT scene from the D-NeRF dataset with the SEARED STEAK scene from the DyNeRF dataset. This demonstrates the capability of our method in editing dynamic 3D scenes." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.368, + 0.472, + 0.384 + ], + "angle": 0, + "content": "5 Discussion and Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.402, + 0.787, + 0.582 + ], + "angle": 0, + "content": "Limitations and future directions. Our dynamic Gaussians are defined through all times of the dynamic scene. This representation implicitly assumes Gaussians exist over all times of the scene. It enables us to naturally model the rigid and non-rigid deformation in the scene. On the other hand, for modeling the change in topology, the occurrence and extinction of Gaussians (e.g., fluid) is tough. Static colors cause difficulty in modeling changes in illumination and color. 
The reconstruction capability of the method depends on the number of parameters, so that the scene representation is compact but results in poor rendering quality for very long sequences, requiring additional memory consumption for proper reconstruction. To overcome these limitations, considering the lifetime of Gaussians, such as adding start and end time parameters, will allow for the modeling of changes in scene topology, and the adaptive decision of flexibility will leads to better trade-offs between quality and memory size." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.584, + 0.787, + 0.643 + ], + "angle": 0, + "content": "Our Gaussian representation sacrifices the continuity and smoothness inherent in neural field-based volume rendering. Distilling NeRFs into our proposed representation in a manner similar to PlenOctree [69] is a potential extension of our method, promising to enhance rendering quality while maintaining fast rendering advantage." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.643, + 0.787, + 0.765 + ], + "angle": 0, + "content": "Conclusion. We present a compact dynamic 3D Gaussian representation enabling faithful reconstruction and real-time rendering of dynamic scenes. We propose a representation for the position and rotation of 3D Gaussians as a function of time for modeling the motion of the scene. The parameterized functions of time introduce memory efficiency and robustness to the number of views per timestep. Furthermore, we introduce the flow loss constraining the scene flow of the learned Gaussian representation with the ground truth flow. Our experiments on synthetic and real datasets show that the proposed method achieves real-time dynamic scene rendering even at high resolutions." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.765, + 0.787, + 0.84 + ], + "angle": 0, + "content": "Acknowledgements This study was supported by JSPS/MEXT KAKENHI Grant Numbers JP24K20830, JP23KJ0381, JP23K28139, and JP22H05015, ROIS NII Open Collaborative Research 2024-24S1201, and the Institute of AI and Beyond of the University of Tokyo. The authors would like to thank D. Horita for carefully proofreading the manuscript and N. Umetani for providing helpful advice on the method's limitations." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.525, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "A Compact Dynamic 3D Gaussian" + }, + { + "type": "page_number", + "bbox": [ + 0.77, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.145, + 0.312, + 0.161 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.177, + 0.786, + 0.204 + ], + "angle": 0, + "content": "1. Abdal, R., Yifan, W., Shi, Z., Xu, Y., Po, R., Kuang, Z., Chen, Q., Yeung, D.Y., Wetzstein, G.: Gaussian Shell Maps for Efficient 3D Human Generation. In: CVPR. pp. 9441-9451 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.205, + 0.787, + 0.232 + ], + "angle": 0, + "content": "2. Akhter, I., Sheikh, Y., Khan, S., Kanade, T.: Nonrigid Structure from Motion in Trajectory Space. In: NeurIPS (2008)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.233, + 0.787, + 0.26 + ], + "angle": 0, + "content": "3. An, S., Xu, H., Shi, Y., Song, G., Ogras, U.Y., Luo, L.: PanoHead: Geometry-Aware 3D Full-Head Synthesis in 360 degree. In: CVPR. pp. 
20950-20959 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.261, + 0.787, + 0.3 + ], + "angle": 0, + "content": "4. Attal, B., Huang, J.B., Richardt, C., Zollhöefer, M., Kopf, J., O'Toole, M., Kim, C.: HyperReel: High-Fidelity 6-DoF Video with Ray-Conditioned Sampling. In: CVPR. pp. 16610-16620 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.301, + 0.787, + 0.328 + ], + "angle": 0, + "content": "5. Bae, J., Kim, S., Yun, Y., Lee, H., Bang, G., Uh, Y.: Per-Gaussian Embedding-Based Deformation for Deformable 3D Gaussian Splatting. arXiv preprint arXiv:2404.03613 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.33, + 0.787, + 0.37 + ], + "angle": 0, + "content": "6. Barron, J.T., Mildenhall, B., Tancik, M., Hedman, P., Martin-Brualla, R., Srinivasan, P.P.: Mip-NeRF: A Multiscale Representation for Anti-Aliasing Neural Radiance Fields. In: CVPR. pp. 5855-5864 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.371, + 0.787, + 0.398 + ], + "angle": 0, + "content": "7. Butler, D.J., Wulff, J., Stanley, G.B., Black, M.J.: A Naturalistic Open Source Movie for Optical Flow Evaluation. In: ECCV. pp. 611-625 (2012)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.399, + 0.787, + 0.425 + ], + "angle": 0, + "content": "8. Cabral, B., Max, N., Springmeyer, R.: Bidirectional Reflection Functions from Surface Bump Maps. SIGGRAPH 21(4), 273-281 (1987)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.426, + 0.787, + 0.452 + ], + "angle": 0, + "content": "9. Cao, A., Johnson, J.: HexPlane: A Fast Representation for Dynamic Scenes. In: CVPR. pp. 130-141 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.453, + 0.787, + 0.495 + ], + "angle": 0, + "content": "0. Chan, E.R., Lin, C.Z., Chan, M.A., Nagano, K., Pan, B., De Mello, S., Gallo, O., Guibas, L.J., Tremblay, J., Khamis, S., et al.: Efficient Geometry-Aware 3D Generative Adversarial Networks. In: CVPR. pp. 16123-16133 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.496, + 0.787, + 0.522 + ], + "angle": 0, + "content": "1. Chen, A., Xu, Z., Geiger, A., Yu, J., Su, H.: TensoRF: Tensorial Radiance Fields. In: ECCV. pp. 333-350 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.523, + 0.787, + 0.549 + ], + "angle": 0, + "content": "2. Chen, Z., Wang, F., Liu, H.: Text-to-3D Using Gaussian Splatting. arXiv preprint arXiv:2309.16585 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.55, + 0.787, + 0.578 + ], + "angle": 0, + "content": "3. Das, D., Wewer, C., Yunus, R., Ilg, E., Lenssen, J.E.: Neural Parametric Gaussians for Monocular Non-Rigid Object Reconstruction. In: CVPR. pp. 10715-10725 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.579, + 0.787, + 0.605 + ], + "angle": 0, + "content": "4. Dong, Z., Chen, X., Yang, J., Black, M.J., Hilliges, O., Geiger, A.: AG3D: Learning to Generate 3D Avatars from 2D Image Collections. In: ICCV. pp. 14916-14927 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.606, + 0.787, + 0.633 + ], + "angle": 0, + "content": "5. Duan, Y., Wei, F., Dai, Q., He, Y., Chen, W., Chen, B.: 4D-Rotor Gaussian Splitting: Towards Efficient Novel View Synthesis for Dynamic Scenes. ACM TOG (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.634, + 0.787, + 0.674 + ], + "angle": 0, + "content": "6. Duisterhof, B.P., Mandi, Z., Yao, Y., Liu, J.W., Shou, M.Z., Song, S., Ichnowski, J.: MD-Splatting: Learning Metric Deformation from 4D Gaussians in Highly Deformable Scenes. 
arXiv preprint arXiv:2312.00583 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.675, + 0.787, + 0.702 + ], + "angle": 0, + "content": "7. Fang, J., Yi, T., Wang, X., Xie, L., Zhang, X., Liu, W., Nießner, M., Tian, Q.: Fast Dynamic Radiance Fields with Time-Aware Neural Voxels. In: SIGGRAPH Asia (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.703, + 0.787, + 0.73 + ], + "angle": 0, + "content": "8. Fridovich-Keil, S., Meanti, G., Warburg, F.R., Recht, B., Kanazawa, A.: K-Planes: Explicit Radiance Fields in Space, Time, and Appearance. In: CVPR. pp. 12479-12488 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.731, + 0.787, + 0.757 + ], + "angle": 0, + "content": "9. Fridovich-Keil, S., Yu, A., Tancik, M., Chen, Q., Recht, B., Kanazawa, A.: Plenoxels: Radiance Fields Without Neural Networks. In: CVPR. pp. 5501-5510 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.758, + 0.787, + 0.784 + ], + "angle": 0, + "content": "20. Gan, W., Xu, H., Huang, Y., Chen, S., Yokoya, N.: V4D: Voxel for 4D Novel View Synthesis. IEEE TVCG 30(2), 1579-1591 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.785, + 0.787, + 0.812 + ], + "angle": 0, + "content": "21. Gao, C., Saraf, A., Kopf, J., Huang, J.B.: Dynamic View Synthesis from Dynamic Monocular Video. In: ICCV. pp. 5712-5721 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.813, + 0.787, + 0.84 + ], + "angle": 0, + "content": "22. Guo, Z., Zhou, W., Li, L., Wang, M., Li, H.: Motion-Aware 3D Gaussian Splitting for Efficient Dynamic Scene Reconstruction. arXiv preprint arXiv:2403.11447 (2024)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.177, + 0.787, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "K. Katsumata et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.148, + 0.786, + 0.175 + ], + "angle": 0, + "content": "23. He, H., Yang, Z., Li, S., Dai, B., Wu, W.: OrthoPlanes: A Novel Representation for Better 3D-Awareness of GANs. In: ICCV. pp. 22996-23007 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.177, + 0.786, + 0.203 + ], + "angle": 0, + "content": "24. Huynh-Thu, Q., Ghanbari, M.: Scope of Validity of PSNR in Image/Video Quality Assessment. Electronics Letters 44(13), 800-801 (2008)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.205, + 0.786, + 0.274 + ], + "angle": 0, + "content": "25. Jouppi, N., Kurian, G., Li, S., Ma, P., Nagarajan, R., Nai, L., Patil, N., Subramanian, S., Swing, A., Towles, B., Young, C., Zhou, X., Zhou, Z., Patterson, D.A.: TPU v4: An Optically Reconfigurable Supercomputer for Machine Learning with Hardware Support for Embeddings. In: Proceedings of the 50th Annual International Symposium on Computer Architecture. pp. 1-14 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.275, + 0.786, + 0.302 + ], + "angle": 0, + "content": "26. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3D Gaussian Splitting for Real-Time Radiance Field Rendering. ACM TOG 42(4), 1-14 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.303, + 0.786, + 0.33 + ], + "angle": 0, + "content": "27. Keselman, L., Hebert, M.: Approximate Differentiable Rendering with Algebraic Surfaces. In: ECCV. pp. 
596-614 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.332, + 0.786, + 0.359 + ], + "angle": 0, + "content": "28. Kopanas, G., Leimkuhler, T., Rainer, G., Jambon, C., Drettakis, G.: Neural Point Catacaustics for Novel-View Synthesis of Reflections. ACM TOG 41(6), 1-15 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.36, + 0.786, + 0.387 + ], + "angle": 0, + "content": "29. Kopanas, G., Philip, J., Leimkuhler, T., Drettakis, G.: Point-Based Neural Rendering with Per-View Optimization. In: Comput. Graph. Forum. vol. 40, pp. 29-43 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.388, + 0.786, + 0.416 + ], + "angle": 0, + "content": "30. Lei, J., Wang, Y., Pavlakos, G., Liu, L., Daniilidis, K.: GART: Gaussian Articulated Template Models. In: CVPR. pp. 19876-19887 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.417, + 0.786, + 0.457 + ], + "angle": 0, + "content": "31. Li, T., Slavcheva, M., Zollhöefer, M., Green, S., Lassner, C., Kim, C., Schmidt, T., Lovegrove, S., Goesele, M., Newcombe, R., et al.: Neural 3D Video Synthesis from Multi-View Video. In: CVPR. pp. 5521-5531 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.459, + 0.786, + 0.486 + ], + "angle": 0, + "content": "32. Li, Z., Chen, Z., Li, Z., Xu, Y.: Spacetime Gaussian Feature Splatting for Real-Time Dynamic View Synthesis. arXiv preprint arXiv:2312.16812 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.487, + 0.786, + 0.515 + ], + "angle": 0, + "content": "33. Li, Z., Niklaus, S., Snavely, N., Wang, O.: Neural Scene Flow Fields for Space-Time View Synthesis of Dynamic Scenes. In: CVPR. pp. 6498-6508 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.516, + 0.786, + 0.543 + ], + "angle": 0, + "content": "34. Li, Z., Wang, Q., Cole, F., Tucker, R., Snavely, N.: DynIBaR: Neural Dynamic Image-Based Rendering. In: CVPR. pp. 4273-4284 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.544, + 0.786, + 0.585 + ], + "angle": 0, + "content": "35. Liang, Y., Khan, N., Li, Z., Nguyen-Phuoc, T., Lanman, D., Tompkin, J., Xiao, L.: Gaufre: Gaussian Deformation Fields for Real-Time Dynamic Novel View Synthesis. arXiv preprint arXiv:2312.11458 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.587, + 0.786, + 0.627 + ], + "angle": 0, + "content": "36. Lu, Z., Guo, X., Hui, L., Chen, T., Yang, M., Tang, X., Zhu, F., Dai, Y.: 3D Geometry-Aware Deformable Gaussian Splitting for Dynamic View Synthesis. In: CVPR. pp. 8900-8910 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.629, + 0.786, + 0.656 + ], + "angle": 0, + "content": "37. Lucas, B.D., Kanade, T.: An Iterative Image Registration Technique with an Application to Stereo Vision. In: IJCAI. pp. 674-679 (1981)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.657, + 0.786, + 0.684 + ], + "angle": 0, + "content": "38. Luiten, J., Kopanas, G., Leibe, B., Ramanan, D.: Dynamic 3D Gaussians: Tracking by Persistent Dynamic View Synthesis. In: 3DV (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.686, + 0.786, + 0.727 + ], + "angle": 0, + "content": "39. Mayer, N., Ilg, E., Hausser, P., Fischer, P., Cremers, D., Dosovitskiy, A., Brox, T.: A Large Dataset to Train Convolutional Networks for Disparity, Optical Flow, and Scene Flow Estimation. In: CVPR. pp. 4040-4048 (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.728, + 0.786, + 0.754 + ], + "angle": 0, + "content": "40. 
Menze, M., Geiger, A.: Object Scene Flow for Autonomous Vehicles. In: CVPR. pp. 3061-3070 (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.756, + 0.786, + 0.783 + ], + "angle": 0, + "content": "41. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. In: ECCV (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.785, + 0.786, + 0.811 + ], + "angle": 0, + "content": "42. Müller, T., Evans, A., Schied, C., Keller, A.: Instant Neural Graphics Primitives with a Multiresolution Hash Encoding. ACM TOG 41(4), 1-15 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.812, + 0.786, + 0.84 + ], + "angle": 0, + "content": "43. Park, K., Sinha, U., Barron, J.T., Bouaziz, S., Goldman, D.B., Seitz, S.M., Martin-Brualla, R.: Nerfies: Deformable Neural Radiance Fields. In: ICCV. pp. 5865-5874 (2021)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.148, + 0.786, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.525, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "A Compact Dynamic 3D Gaussian" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.189 + ], + "angle": 0, + "content": "44. Park, K., Sinha, U., Hedman, P., Barron, J.T., Bouaziz, S., Goldman, D.B., Martin-Brualla, R., Seitz, S.M.: HyperNeRF: a Higher-Dimensional Representation for Topologically Varying Neural Radiance Fields. ACM TOG 40(6), 1-12 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.19, + 0.788, + 0.219 + ], + "angle": 0, + "content": "45. Pumarola, A., Corona, E., Pons-Moll, G., Moreno-Noguer, F.: D-NeRF: Neural Radiance Fields for Dynamic Scenes. In: CVPR. pp. 10318-10327 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.219, + 0.787, + 0.26 + ], + "angle": 0, + "content": "46. Qian, S., Kirschstein, T., Schoneveld, L., Davoli, D., Giebenhain, S., Nießner, M.: GaussianAvatars: Photorealistic Head Avatars with Rigged 3D Gaussians. In: CVPR. pp. 20299-20309 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.261, + 0.787, + 0.303 + ], + "angle": 0, + "content": "47. Qian, Z., Wang, S., Mihajlovic, M., Geiger, A., Tang, S.: 3DGS-Avatar: Animatable Avatars via Deformable 3D Gaussian Splatting. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5020-5030 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.304, + 0.787, + 0.332 + ], + "angle": 0, + "content": "48. Reiser, C., Peng, S., Liao, Y., Geiger, A.: KiloNeRF: Speeding up Neural Radiance Fields with Thousands of Tiny MLPs. In: ICCV. pp. 14335-14345 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.332, + 0.787, + 0.373 + ], + "angle": 0, + "content": "49. Reiser, C., Szeliski, R., Verbin, D., Srinivasan, P., Mildenhall, B., Geiger, A., Barron, J., Hedman, P.: MERF: Memory-Efficient Radiance Fields for Real-Time View Synthesis in Unbounded Scenes. ACM TOG 42(4), 1-12 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.374, + 0.787, + 0.402 + ], + "angle": 0, + "content": "50. Schonberger, J.L., Zheng, E., Frahm, J.M., Pollefeys, M.: Pixelwise View Selection for Unstructured Multi-View Stereo. In: ECCV. pp. 
501-518 (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.403, + 0.742, + 0.417 + ], + "angle": 0, + "content": "51. Schonberger, J.L., Frahm, J.M.: Structure-from-Motion Revisited. In: CVPR (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.418, + 0.787, + 0.445 + ], + "angle": 0, + "content": "52. Seitz, S.M., Curless, B., Diebel, J., Scharstein, D., Szeliski, R.: A Comparison and Evaluation of Multi-View Stereo Reconstruction Algorithms. In: CVPR. pp. 519-528 (2006)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.446, + 0.787, + 0.487 + ], + "angle": 0, + "content": "53. Shao, R., Zheng, Z., Tu, H., Liu, B., Zhang, H., Liu, Y.: Tensor4D: Efficient Neural 4D Decomposition for High-Fidelity Dynamic Reconstruction and Rendering. In: CVPR. pp. 16632-16642 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.488, + 0.787, + 0.529 + ], + "angle": 0, + "content": "54. Song, L., Chen, A., Li, Z., Chen, Z., Chen, L., Yuan, J., Xu, Y., Geiger, A.: NeRFPlayer: A Streamable Dynamic Scene Representation with Decomposed Neural Radiance Fields. IEEE TVCG 29(5), 2732-2742 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.53, + 0.787, + 0.558 + ], + "angle": 0, + "content": "55. Tang, J., Ren, J., Zhou, H., Liu, Z., Zeng, G.: DreamGaussian: Generative Gaussian Splitting for Efficient 3D Content Creation. arXiv preprint arXiv:2309.16653 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.559, + 0.787, + 0.586 + ], + "angle": 0, + "content": "56. Teed, Z., Deng, J.: RAFT: Recurrent All-Pairs Field Transforms for Optical Flow. In: ECCV. pp. 402-419 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.587, + 0.787, + 0.643 + ], + "angle": 0, + "content": "57. Tewari, A., Thies, J., Mildenhall, B., Srinivasan, P., Tretschk, E., Wang, Y., Lassner, C., Sitzmann, V., Martin-Brualla, R., Lombardi, S., Simon, T., Theobalt, C., Nießner, M., Barron, J.T., Wetzstein, G., Zollhöefer, M., Golyanik, V.: Advances in Neural Rendering. In: Comput. Graph. Forum. vol. 41, pp. 703-735 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.643, + 0.787, + 0.671 + ], + "angle": 0, + "content": "58. Tian, F., Du, S., Duan, Y.: MonoNeRF: Learning a Generalizable Dynamic Radiance Field from Monocular Videos. In: ICCV. pp. 17903-17913 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.671, + 0.787, + 0.712 + ], + "angle": 0, + "content": "59. Tretschk, E., Tewari, A., Golyanik, V., Zollhöefer, M., Lassner, C., Theobalt, C.: Non-Rigid Neural Radiance Fields: Reconstruction and Novel View Synthesis of a Dynamic Scene From Monocular Video. In: ICCV (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.713, + 0.787, + 0.755 + ], + "angle": 0, + "content": "60. Tretschk, E., Tewari, A., Golyanik, V., Zollhöfer, M., Lassner, C., Theobalt, C.: Non-Rigid Neural Radiance Fields: Reconstruction and Novel View Synthesis of a Dynamic Scene from Monocular Video. In: ICCV. pp. 12959-12970 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.756, + 0.787, + 0.783 + ], + "angle": 0, + "content": "61. Vedula, S., Baker, S., Rander, P., Collins, R., Kanade, T.: Three-Dimensional Scene Flow. In: CVPR. pp. 722-729 (1999)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.784, + 0.787, + 0.812 + ], + "angle": 0, + "content": "62. Vedula, S., Rander, P., Collins, R., Kanade, T.: Three-Dimensional Scene Flow. 
IEEE TPAMI 27(3), 475-480 (2005)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.813, + 0.787, + 0.84 + ], + "angle": 0, + "content": "63. Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image Quality Assessment: From Error Visibility to Structural Similarity. IEEE TIP 13(4), 600-612 (2004)" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "18" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.388, + 0.128 + ], + "angle": 0, + "content": "K. Katsumata et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.147, + 0.788, + 0.189 + ], + "angle": 0, + "content": "64. Wu, G., Yi, T., Fang, J., Xie, L., Zhang, X., Wei, W., Liu, W., Tian, Q., Wang, X.: 4D Gaussian Splitting for Real-Time Dynamic Scene Rendering. arXiv preprint arXiv:2310.08528 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.19, + 0.788, + 0.231 + ], + "angle": 0, + "content": "65. Xie, Y., Takikawa, T., Saito, S., Litany, O., Yan, S., Khan, N., Tombari, F., Tompkin, J., Sitzmann, V., Sridhar, S.: Neural Fields in Visual Computing and Beyond. Comput. Graph. Forum (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.232, + 0.787, + 0.259 + ], + "angle": 0, + "content": "66. Xu, D., Yuan, Y., Mardani, M., Liu, S., Song, J., Wang, Z., Vahdat, A.: AGG: Amortized Generative 3D Gaussians for Single Image to 3D. arXiv preprint arXiv:2401.04099 (2024)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.26, + 0.787, + 0.299 + ], + "angle": 0, + "content": "67. Yang, Z., Yang, H., Pan, Z., Zhu, X., Zhang, L.: Real-Time Photorealistic Dynamic Scene Representation and Rendering with 4D Gaussian Splatting. arXiv preprint arXiv:2310.10642 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.3, + 0.787, + 0.342 + ], + "angle": 0, + "content": "68. Yi, T., Fang, J., Wu, G., Xie, L., Zhang, X., Liu, W., Tian, Q., Wang, X.: GaussianDreamer: Fast Generation from Text to 3D Gaussian Splatting with Point Cloud Priors. arXiv preprint arXiv:2310.08529 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.343, + 0.787, + 0.37 + ], + "angle": 0, + "content": "69. Yu, A., Li, R., Tancik, M., Li, H., Ng, R., Kanazawa, A.: PlenOctrees for Real-Time Rendering of Neural Radiance Fields. In: ICCV. pp. 5752-5761 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.371, + 0.787, + 0.398 + ], + "angle": 0, + "content": "70. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The Unreasonable Effectiveness of Deep Features as a Perceptual Metric. In: CVPR. pp. 586-595 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.399, + 0.787, + 0.424 + ], + "angle": 0, + "content": "71. Zheng, E., Ji, D., Dunn, E., Frahm, J.M.: Sparse Dynamic 3D Reconstruction from Unsynchronized Videos. In: CVPR. pp. 4435-4443 (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.425, + 0.787, + 0.452 + ], + "angle": 0, + "content": "72. Zielonka, W., Bagautdinov, T., Saito, S., Zollhöfer, M., Thies, J., Romero, J.: Drivable 3D Gaussian Avatars. arXiv preprint arXiv:2311.08581 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.453, + 0.787, + 0.481 + ], + "angle": 0, + "content": "73. Zwicker, M., Pfister, H., Van Baar, J., Gross, M.: EWA Splitting. 
IEEE TVCG 8(3), 223-238 (2002)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.147, + 0.788, + 0.481 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/7472726a-f5ca-4354-bd16-63b6aa3c1be0_origin.pdf b/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/7472726a-f5ca-4354-bd16-63b6aa3c1be0_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8089a9dd38dccd9add65c18faa7b7a5ab30a5b71 --- /dev/null +++ b/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/7472726a-f5ca-4354-bd16-63b6aa3c1be0_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3ce29e723d86342089887cf5acb53c0b68ff03b51018b772967295c7de57f12 +size 16900017 diff --git a/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/full.md b/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/full.md new file mode 100644 index 0000000000000000000000000000000000000000..0c9b53e88a7783a7103d4a0c2037647e84ea2847 --- /dev/null +++ b/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/full.md @@ -0,0 +1,329 @@ +# A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis + +Kai Katsumata, Duc Minh Vo, and Hideki Nakayama + +The University of Tokyo, Japan {katsumata, vm Duc, nakayama}@nlab.ci.i.u-tokyo.ac.jp + +Abstract. 3D Gaussian Splatting (3DGS) has shown remarkable success in synthesizing novel views given multiple views of a static scene. Yet, 3DGS faces challenges when applied to dynamic scenes because 3D Gaussian parameters need to be updated per timestep, requiring a large amount of memory and at least a dozen observations per timestep. To address these limitations, we present a compact dynamic 3D Gaussian representation that models positions and rotations as functions of time with a few parameter approximations while keeping other properties of 3DGS including scale, color, and opacity invariant. Our method can dramatically reduce memory usage and relax a strict multi-view assumption. In our experiments on monocular and multi-view scenarios, we show that our method not only matches state-of-the-art methods, often linked with slower rendering speeds, in terms of high rendering quality, but also significantly surpasses them by achieving a rendering speed of 118 frames per second at a resolution of $1,352 \times 1,014$ on a single GPU. + +# 1 Introduction + +The landscape of novel view synthesis of scenes captured through multiple images/videos has undergone a revolutionary transformation, owing principally to major breakthroughs in neural radiance field (NeRF) approaches [6,41,57]. Although they achieve remarkable visual quality, particularly in dynamic scenes [4,21,31,34,45], NeRFs inevitably confront hurdles in terms of high-speed training and rendering [41,43,44,48]. This limitation is attributed to their reliance on multi-layer perceptrons (MLPs). Recently, 3D Gaussian Splatting (3DGS) [26] introduced a differentiable 3D Gaussian representation and point-based rasterization, signaling a departure from neural network reliance. 3DGS has emerged as a promising solution that not only accelerates training and rendering processes but also delivers high-quality rendered scenes, rivaling the levels set by NeRF [41] on static scenes. 
+ +Nonetheless, in the realm of dynamic scene synthesis, 3DGS faces challenges related to memory usage and the need for many observations [38]. In particular, a significant number of 3D Gaussian parameters must be stored per timestep, resulting in a non-negligible increase in memory usage and the need for numerous observations per timestep. This poses challenges in monocular or few-view setups, as their strict multiview assumption demands advanced facilities or expertise, limiting flexibility in capturing setups. Exploring 3DGS without multi-view assumption enables dynamic view synthesis with a simple and easy camera setup, which is the primary goal of this study. + +![](images/000059aa1fa1476365bb3f5609dfd948998b9607287e045b8a334c8e42f20b20.jpg) +Fig. 1: We show examples of novel view synthesis on the MUTANT scene in the D-NeRF dataset, visual quality (PSNR), rendering speed (FPS), and memory used to store optimized parameters. Our method yields reconstruction fidelity competitive with SoTAs with real-time rendering, achieving $100 \times$ faster rendering speed than V4D and reasonable memory size. Non-obvious differences in quality are highlighted. **Bold typeface number** indicates the best result among the methods with the competitive rendering quality (excepting for 3DGS), and the **underline** one does the second best. + +To achieve memory-efficient real-time dynamic view synthesis from monocular and multi-view videos, we present a compact dynamic 3D Gaussian representation, containing time-invariant and time-varying parameters to capture dynamic motion effectively. Similarly to [26, 38], we use scaling factors in the covariance matrix, opacity, and color as time-invariant parameters. As modeling the change in positions over time is important to represent dynamic scenes [43-45], we express each 3D Gaussian position as a function of time to model the temporal change in the position. We also represent 3D Gaussian rotation as a time-varying parameter because the rotation of the objects in the world can be typically changed. Inspired by the studies that model motion as periodic [2, 71], we fit the position using the Fourier approximation. We fit the rotation using the linear approximation. The time-varying parameters make our representation dynamic, meaning that a 3D Gaussian moves and rotates over time. Moreover, as we use a function with a few parameters to represent the position, the small degree of freedom contributes to the smoothness of reconstructed scenes, enhancing the robustness against unseen views. Crucially, the memory consumption of our representation is solely determined by the number of 3D Gaussians and the number of the approximation function parameters, remaining independent of input length. Beyond optimizing Gaussian representations through image-level reconstruction, we further enhance temporal consistency by supervising the Gaussian with optical flow obtained from input videos. This ensures high-quality reconstruction and facilitates the generalization of the representation. + +Our experiments on dynamic datasets (D-NeRF [45], DyNeRF [31], and HyperNeRF [44]) demonstrate the effectiveness of optimizing our dynamic 3D Gaussian from both monocular and multi-view videos, showing that our proposed method achieves rendering quality that rivals that of previous NeRFs [17, 18, 20]. 
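To make the parameterization described above concrete, the following is a minimal sketch, not the authors' implementation, of a per-Gaussian center modeled as an intercept plus a truncated Fourier series in time; the exact basis, normalization, and parameter layout of the paper's Eq. (2) may differ, and the names below are hypothetical.

```python
import numpy as np

def center_at_time(w0, w_cos, w_sin, t, period=1.0):
    """mu(t) = w0 + sum_{l=1..L} [ a_l * cos(2*pi*l*t/T) + b_l * sin(2*pi*l*t/T) ].
    w0: (3,) intercept; w_cos, w_sin: (L, 3) Fourier coefficients; t is a timestep in [0, T]."""
    mu = np.asarray(w0, dtype=float).copy()
    for l, (a, b) in enumerate(zip(w_cos, w_sin), start=1):
        phase = 2.0 * np.pi * l * t / period
        mu += np.asarray(a) * np.cos(phase) + np.asarray(b) * np.sin(phase)
    return mu

# Toy usage with L = 2 harmonics for a single Gaussian.
mu = center_at_time(w0=[0.0, 0.0, 0.0],
                    w_cos=[[0.10, 0.00, 0.00], [0.00, 0.05, 0.00]],
                    w_sin=[[0.00, 0.10, 0.00], [0.00, 0.00, 0.02]],
                    t=0.3)
```

In this sketch only 2L + 1 three-dimensional coefficient vectors are stored per Gaussian, which is what keeps the memory footprint independent of the video length.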
In addition to faithful rendering quality, the proposed method achieves rendering speeds similar to a fast radiance field method [26] while avoiding large memory increases caused by a dynamic extension (see Fig. 1). Finally, we show an editing application enabled by the explicit property of 3D Gaussian representations. In summary, our contributions are as follow: + +- We present a compact dynamic 3D Gaussian representation with time-varying Gaussian parameters equipped with basis functions for representing dynamic scenes. + +- As a 3D Gaussian representation is defined over all the timesteps, the 3D Gaussian parameters can be optimized with the frames at all the timesteps, enabling dynamic scene reconstruction from monocular or few-view videos. + +- Our dynamic 3D Gaussian representation facilitates real-time high-quality dynamic scene rendering of high-resolution images of $1,352 \times 1,014$ with a frame rate of 118 FPS using a single GPU. + +# 2 Related Work + +We briefly overview radiance fields for dynamic scenes and discuss recent efficient explicit representation methods (grid-, plane-, hash-, and point-based), contextualizing our work within real-time dynamic view synthesis. + +# 2.1 Dynamic view synthesis + +Applications in virtual reality and computer vision often need reconstruction of dynamic scenes. Several works extend NeRF [41] to handle dynamic scenes in multi-view or monocular setups by time-varying NeRF [21, 31, 45, 60]. The regularization techniques for temporal smoothness enable suitable scene representations from monocular videos [33]. Additional sensory information is also useful for spatio-temporal regularization. Some attempts [21, 33, 58] employ depth or flow, which are observed or predicted with external networks to reconstruct from sparse observations. Deformation-based approaches [43, 44, 54, 59], another research direction in dynamic reconstruction, combine static NeRF and deformation fields. Although tremendous efforts show high visual quality for dynamic view synthesis, the frequent querying of MLP in NeRFs results in the drawback of slow optimization and rendering [65]. Our study aims to enable real-time dynamic view synthesis with high visual quality. We aim to extend 3DGS to dynamic scene reconstruction to achieve high-speed rendering while maintaining the rendering quality from sparse training views. + +# 2.2 Explicit Radiance Fields + +Recent studies [11, 19, 69] have addressed the issue in implicit models (i.e., NeRFs) by exploring explicit models, reducing optimization and rendering time. Plenoxels [19] directly optimizes 3D grid representation instead of neural networks. Generally, explicit models sacrifice visual quality for fast training time [19]. Hybrid approaches [11, 17, 18, 20, 42, 53] aim to achieve better trade-offs between training time and visual quality. Instant-NGP allows for a compact MLP by exploiting a multi-level hash grid to encode positions to feature vectors [42]. Plane-based approaches are designed principally to represent bounded scenes [3, 9, 10, 14, 18, 23]. MERF [49] employs a multiresolution representation and a fast contraction function to reconstruct unbounded scenes. For dynamic scenes, K-planes [18] decomposes 4D dynamic volumes into multiple feature planes and employs an MLP-based feature decoder for determining color and density. Structured representations still grapple with the trade-off between rendering speed and quality. In this study, unstructured 3D Gaussians promise large gains in rendering speed. 
+ +# 2.3 Point-based rendering + +Points, which naturally come from depth sensors, Structure from Motion (SfM) [51], or common Multi-View Stereo (MVS) algorithms [50, 52], offer a useful representation of fine-grained scenes and complex objects, and facilitate computationally efficient rendering. Consequently, they have been studied comprehensively in the vision and graphics community. The differentiable pipeline for point-based rendering results in points can be used for reconstructing 3D scenes [26-29]. 3DGS [26] achieves real-time rendering with high visual quality for unbounded static scenes at the expense of the generalization performance derived from NeRF's continuous neural field representation. 3DGS is replacing NeRFs as the backbone of text-to-3D models, leading to faster 3D generation [1, 12, 55, 66, 68]. Recently, Dynamic 3D Gaussians [38] employs 3DGS for dynamic scenes, which models dynamic scenes by the Gaussian position and rotation at each timestamp. The position and rotation of Gaussians at every timestamp are effective in modeling scenes from dense multi-view dynamic scenes. However, this approach presents difficulties in reconstructing monocular dynamic scenes, resulting in excessive memory consumption, particularly for extended input sequences. Specifically, the space complexity of the method for a scene with $T$ frames is $O(TN)$ , where $N$ is the number of 3D Gaussians. Our goal is to reduce memory consumption by representing time-varying position and rotation with approximation using a few parameters. The space complexity of our method is $O(LN)$ , where $L$ is the number of parameters of the approximation, and usually $L < T$ . + +Concurrent works on dynamic view synthesis includes approaches combining Gaussian Splatting with MLPs [5,22,32,35,36], approaches focusing on Gaussian representation [13, 15, 16, 64, 67], and approaches for specific targets [30, 46, 47, 72]. SpacetimeGaussian [32] focuses on dynamic view synthesis from multiview videos, unlike this study, by combining Gaussian Splatting and MLPs. [64] aims to model motion by employing a deformation field network while sacrificing rendering speed. [67] splits Gaussians in a time direction, and each Gaussian only focuses on a local temporal space. Four-dimensional (4D) Rotor Gaussian Splatting [15] models a local temporal space via temporal slicing for fast rendering. We aim to build a memory-efficient Gaussian representation for dynamic scenes, even for monocular scenes, while maintaining pure 3D Gaussian representation in order not to sacrifice the gift of 3D Gaussians, such as outstanding rendering speed and ease of direct editing of the scene. + +# 3 Method + +Given images with timesteps and camera parameters obtained from videos, our task is to learn a 4D spatial-temporal representation of a dynamic scene that enables fast and high-quality view rendering. To achieve this, we use 3DGS in dynamic view synthesis. The original 3D Gaussian representation [26] is defined by a position (mean), a covariance matrix (decomposed into a rotation matrix and a scaling vector), a color (determined by spherical harmonics (SH) [8] coefficient), and an opacity. To represent dynamic scenes, each 3D Gaussian in our method (Fig. 2) regards the position and rotation as time-varying parameters and others as time-invariant parameters over time (Sec. 3.1). Given a set of 3D Gaussians, intrinsic and extrinsic camera parameters, and + +![](images/e865dcf72c9a9d8bdfad9c6614cda3a3456003052c368cedabb7a7105d446329.jpg) +Fig. 
2: Overview of our dynamic view synthesis framework. Our dynamic 3D Gaussian representation takes temporal modeling of 3D centers and rotations with Fourier and linear approximation, respectively. Our representation parameters are shared over all the timesteps, and observations of each timestep hint at the representation for other timesteps, enabling compact representation and reconstruction of dynamic scenes from few-view videos. In this figure, we only illustrate the time-varying parameterization of one Gaussian for the sake of simplicity. + +a timestep, we render images with the 3DGS technique [26], which renders an image by employing Gaussians within the camera plane out of a set of 3D Gaussians (Sec. 3.2). We update the Gaussian parameters to decrease the distance between rendered and training images in image and flow spaces (Sec. 3.3). Flow reconstruction loss enhances the temporal consistency of the learned representation, resulting in plausible image reconstruction. The small degrees of freedom of our representation essentially facilitate the reconstruction of dynamic scenes from a few observations. + +# 3.1 Dynamic 3D Gaussian representation + +One possible extension of 3DGS [38] to dynamic scenes is to model the scenes per timestep explicitly. Although that strategy allows for flexible modeling for dynamic scenes, it requires 3D Gaussian parameters per timestep, increasing the memory size proportionally to video length. Since the representation for each time is optimized by observations with the number of cameras, the strategy lacks sufficient observations in monocular or few-view video setups, limiting its effectiveness in such scenarios. + +To design a compact dynamic 3D Gaussian representation, we express 3D Gaussian parameters using only a few parameters to achieve faithful reconstruction without a large increase in parameters. Our dynamic scene representation comprises a set of dynamic 3D Gaussians, extending the static 3D Gaussian introduced in [26]. This representation allows 3D Gaussians to move through the scene over time, using time-varying parameters (center position and rotation factors) and time-invariant parameters (scale, color, and opacity). Each dynamic Gaussian encapsulates the following parameters: + +1) a 3D center at time $t$ : $[x(t),y(t),z(t)]^{\top} \in \mathbb{R}^{3}$ , +2) a 3D rotation at time $t$ represented by a quaternion: + +$$ +[ q _ {x} (t), q _ {y} (t), q _ {z} (t), q _ {w} (t) ] ^ {\top} \in \mathbb {R} ^ {4}, +$$ + +3) a scaling factor: $[s_x, s_y, s_z]^{\mathsf{T}} \in \mathbb{R}^3$ , +4) SH coefficients representing color with the degrees of freedom $k$ : + +$$ +h \in \mathbb {R} ^ {3 \times (k + 1) ^ {2}}, +$$ + +5) an opacity: $o \in \mathbb{R}$ . + +Each Gaussian at time $t$ is characterized by a 3D center $\pmb{\mu}(t) = [x(t), y(t), z(t)]^{\top}$ and a 3D covariance matrix $\pmb{\Sigma}(t)$ . The density of the 3D Gaussian at the intersection $\pmb{x}$ with a ray is obtained as follows: + +$$ +G _ {t} (\boldsymbol {x}) = e ^ {- \frac {1}{2} (\boldsymbol {x} - \boldsymbol {\mu} (t)) ^ {\top} \boldsymbol {\Sigma} (t) ^ {- 1} (\boldsymbol {x} - \boldsymbol {\mu} (t))}. 
\tag {1} +$$ + +To constrain the covariance matrix $\boldsymbol{\Sigma}(t)$ such that it is a positive semi-definite matrix during optimization, the covariance matrix $\boldsymbol{\Sigma}(t)$ is decomposed by using a scaling matrix $\mathbf{S} = \mathrm{diag}(s_x, s_y, s_z)$ and a rotation matrix $\mathbf{R}(t)$ as $\boldsymbol{\Sigma}(t) = \mathbf{R}(t)\mathbf{S}\mathbf{S}^{\top}\mathbf{R}(t)^{\top}$ . Here, the rotation matrix $\mathbf{R}(t)$ is represented by quaternion $(q_x(t), q_y(t), q_z(t), q_w(t))$ . Since most parts of the dynamic scene hardly change in scale because the solid objects (e.g., humans, animals, and things) scarcely expand or shrink, we maintain the scale parameter as a constant to reduce the model size. In what follows, we formally define the 3D center and rotation. + +Since motion in dynamic scenes is primarily described by changing the position of points like scene or optical flow [37, 61], we model the 3D center with an expressive approximation. We approximate the 3D position $x(t), y(t), z(t)$ using Fourier approximation. At time $t$ , it is represented by + +$$ +\begin{array}{l} x (t) = w _ {x, 0} + \sum_ {i = 1} ^ {L} w _ {x, 2 i - 1} \sin (2 i \pi t) + w _ {x, 2 i} \cos (2 i \pi t), \\ y (t) = w _ {y, 0} + \sum_ {i = 1} ^ {L} w _ {y, 2 i - 1} \sin (2 i \pi t) + w _ {y, 2 i} \cos (2 i \pi t), \tag {2} \\ z (t) = w _ {z, 0} + \sum_ {i = 1} ^ {L} w _ {z, 2 i - 1} \sin (2 i \pi t) + w _ {z, 2 i} \cos (2 i \pi t), \\ \end{array} +$$ + +where, $w_{\cdot,0}, \ldots, w_{\cdot,2L}$ are the intercept and coefficients of the position, and $L$ is the number of terms (harmonics). We remark that a polynomial approximation is inadequate due to underfitting with a small number of bases and overfitting with higher-order polynomials. For these reasons, we choose the Fourier approximation. + +3DGS uses anisotropic 3D Gaussians, resulting in the need for dynamic modeling of Gaussian rotations. We approximate the 3D rotation (quaternion) over time using a linear approximation because a unit quaternion can be approximated locally as linear when considering its tangent plane. At time $t$ , it is defined as + +$$ +\begin{array}{l} q _ {x} (t) = w _ {q x, 0} + w _ {q x, 1} t, \quad q _ {y} (t) = w _ {q y, 0} + w _ {q y, 1} t, \\ q _ {z} (t) = w _ {q z, 0} + w _ {q z, 1} t, \quad q _ {w} (t) = w _ {q w, 0} + w _ {q w, 1} t, \\ \end{array} +$$ + +where $w_{\cdot,0}$ and $w_{\cdot,1}$ are intercepts and coefficients of the rotation, respectively. We project the quaternion $q_{\cdot}(t)$ onto the unit quaternion by normalizing it: $q_{\cdot}(t) / \|q_{\cdot}(t)\|$ , to ensure that the quaternion at time $t$ is a unit quaternion. + +For each Gaussian, the preceding definitions yield $3L + 8 + 3 + 3(k + 1)^2 + 1$ parameters with respect to the 3D center, 3D rotation, scale, color, and opacity. Notably, the parameter count for each Gaussian is defined merely by the number of approximation terms and spherical harmonic degrees of freedom, with no regard to time length. Compared to methods that store parameters for each timestep, our approach saves on memory usage. Memory consumption in our dynamic scene representation is determined by two hyperparameters (i.e., $L$ and $k$ ) and the number of Gaussians used. Furthermore, the representation defined as a function of time over continuous time inhibits discontinuous movement through time. This characteristic improves robustness in novel view synthesis settings. 
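To make this parameterization concrete, the following minimal NumPy sketch evaluates the Fourier center of Eq. (2) and the linearly approximated, renormalized quaternion for a single Gaussian. The coefficient values and the assumption that time is normalized to $[0, 1]$ are illustrative only; they are not taken from the paper.

```python
import numpy as np

def fourier_center(w, t):
    """Evaluate one coordinate of Eq. (2): w holds the intercept w_0 followed by
    the sine/cosine coefficients w_1, ..., w_{2L}; t is a (normalized) timestep."""
    L = (len(w) - 1) // 2
    value = w[0]
    for i in range(1, L + 1):
        value += w[2 * i - 1] * np.sin(2 * i * np.pi * t)
        value += w[2 * i] * np.cos(2 * i * np.pi * t)
    return value

def linear_quaternion(w0, w1, t):
    """Per-component linear rotation model, projected back to a unit quaternion."""
    q = w0 + w1 * t                      # (q_x, q_y, q_z, q_w) before normalization
    return q / np.linalg.norm(q)

# Illustrative coefficients for one Gaussian with L = 2 harmonics per coordinate.
t = 0.25
w_x = np.array([0.10, 0.05, -0.02, 0.01, 0.00])          # 2L + 1 = 5 coefficients
x_t = fourier_center(w_x, t)                             # x(t); y(t), z(t) are analogous
q_t = linear_quaternion(np.array([0.0, 0.0, 0.0, 1.0]),  # intercepts w_{q*,0}
                        np.array([0.1, 0.0, 0.0, 0.0]),  # slopes     w_{q*,1}
                        t)
```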
# 3.2 Rendering via 3D Gaussian Splatting

Rendering with 3D Gaussians applies splatting techniques [26] to the Gaussians within the camera plane. Zwicker et al. [73] introduced the projection of the 3D covariance matrix to a 2D one. The 3D covariance matrix $\pmb{\Sigma}$ is projected into a 2D one $\pmb{\Sigma}'$ given a viewing transformation $\mathbf{W}$ as $\pmb{\Sigma}'(t) = \mathbf{J}\mathbf{W}\pmb{\Sigma}(t)\mathbf{W}^{\top}\mathbf{J}^{\top}$, where $\mathbf{J}$ is the Jacobian of the affine approximation of the projective transformation at the Gaussian center $\pmb{\mu}(t)$:

$$
\mathbf{J} = \left[\begin{array}{ccc} \frac{1}{v_z} & 0 & -\frac{v_x}{v_z^2} \\ 0 & \frac{1}{v_z} & -\frac{v_y}{v_z^2} \\ 0 & 0 & 0 \end{array}\right], \tag{4}
$$

where $[v_x, v_y, v_z]^{\top} = \mathbf{W}\pmb{\mu}(t)$ is the camera coordinate of the Gaussian center $\pmb{\mu}(t)$ obtained by the viewing transformation, which maps points from world space to camera space.

Similar to NeRF-style volumetric rendering, point-based rendering computes the color $C$ of a pixel by blending the $N$ ordered points that overlap the pixel, $C = \sum_{i=1}^{N} c_i \alpha_i \prod_{j=1}^{i-1} (1 - \alpha_j)$, where $c_i$ represents the color of a Gaussian evaluated from its SH coefficients, and $\alpha_i$ represents the density calculated from the 2D Gaussian with covariance $\pmb{\Sigma}'$ and center $\pmb{\mu}'$ at time $t$ and the optimized opacity $o$.

# 3.3 Optimization of the dynamic 3D Gaussian representation

We optimize the Gaussian parameters, i.e., the intercepts and coefficients of position and rotation $w$, the scaling factor $s_x, s_y, s_z$, the SH coefficients $h$, and the opacity $o$, by iterating rendering and comparing the rendered images with the training frames of the captured videos. To compare the rendered and training views, the loss function combines the L1 loss and the structural similarity (SSIM) [63] loss $\mathcal{L}_{\mathrm{D\text{-}SSIM}}$:

$$
\mathcal{L}_{\mathrm{recon}} = (1 - \lambda)\,|\hat{I} - I| + \lambda \mathcal{L}_{\mathrm{D\text{-}SSIM}}, \tag{5}
$$

where $\hat{I}$ and $I$ are the rendered and target images, respectively. This loss moves and rotates the anisotropic Gaussians and changes their color and opacity so that each Gaussian covers a homogeneous area. Since the loss can only adjust existing Gaussians, handling over- or under-representation of the scene requires a mechanism for creating Gaussians that better reconstruct the scene and for destroying superfluous ones. We therefore follow the densification and pruning techniques of 3DGS to produce a compact and precise representation of the scene. We monitor the gradients of each Gaussian and densify by splitting a Gaussian with a large gradient and a large scale into two smaller Gaussians, and by cloning a Gaussian with a large gradient and a small scale into two Gaussians. Moreover, we remove transparent Gaussians whose opacity falls below a threshold of 0.005.

Following [26], we initialize the set of Gaussians using a set of sparse points from SfM [51] for real scenes, and we initialize the set of Gaussians randomly using a uniform distribution for synthetic scenes, owing to the absence of such a prior.
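As a rough illustration of the optimization described above, the sketch below implements the reconstruction loss of Eq. (5) and boolean masks for the split/clone/prune rules. It is a simplified sketch rather than the authors' implementation: the SSIM uses a uniform window instead of the Gaussian window of [63], D-SSIM is taken as $1 - \mathrm{SSIM}$, and the gradient and scale thresholds are assumed values; only the 0.005 opacity threshold and $\lambda = 0.2$ come from the paper.

```python
import torch
import torch.nn.functional as F

def ssim(x, y, c1=0.01 ** 2, c2=0.03 ** 2, win=11):
    # x, y: (B, 3, H, W) images in [0, 1]; a uniform window stands in for the
    # Gaussian window of the original SSIM to keep the sketch short.
    mu_x = F.avg_pool2d(x, win, 1, win // 2)
    mu_y = F.avg_pool2d(y, win, 1, win // 2)
    var_x = F.avg_pool2d(x * x, win, 1, win // 2) - mu_x ** 2
    var_y = F.avg_pool2d(y * y, win, 1, win // 2) - mu_y ** 2
    cov = F.avg_pool2d(x * y, win, 1, win // 2) - mu_x * mu_y
    s = ((2 * mu_x * mu_y + c1) * (2 * cov + c2)) / (
        (mu_x ** 2 + mu_y ** 2 + c1) * (var_x + var_y + c2))
    return s.mean()

def reconstruction_loss(rendered, target, lam=0.2):
    """Eq. (5): (1 - lambda) * L1 + lambda * D-SSIM, with D-SSIM taken as 1 - SSIM."""
    return (1 - lam) * (rendered - target).abs().mean() + lam * (1.0 - ssim(rendered, target))

def adaptive_density_masks(view_grads, scales, opacities,
                           grad_thresh=2e-4, scale_thresh=0.01, opacity_thresh=0.005):
    """Boolean masks mirroring the split/clone/prune rules described above.

    view_grads: (N,) accumulated positional gradient magnitudes, scales: (N, 3)
    per-axis scales, opacities: (N,). The thresholds other than 0.005 are assumptions.
    """
    large_grad = view_grads > grad_thresh
    large_scale = scales.max(dim=-1).values > scale_thresh
    split_mask = large_grad & large_scale       # split large, high-gradient Gaussians
    clone_mask = large_grad & ~large_scale      # clone small, high-gradient Gaussians
    prune_mask = opacities < opacity_thresh     # remove nearly transparent Gaussians
    return split_mask, clone_mask, prune_mask
```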
We adopt a two-stage optimization strategy consisting of a static and a dynamic stage. Deeming the frames in the captured datasets as a static scene, we optimize a static representation in the static stage to learn a prior for the Gaussians. In other words, in the static stage we optimize the parameters that are constant over time (i.e., scale, SH coefficients, and opacity) together with the intercepts of the center and rotation $(w_{x,0}, w_{y,0}, w_{z,0}, w_{qx,0}, w_{qy,0}, w_{qz,0}, w_{qw,0})$. After the static stage, we optimize all the parameters of the set of Gaussians in the dynamic stage to reconstruct the dynamic regions.

Another challenge in dynamic scene reconstruction is the ambiguity caused by the limited number of captured views at each timestep. Since a dynamic scene contains temporal changes, such as moving objects and changing shapes, sharing scene information across frames with different timesteps is difficult. To overcome this ambiguity, we employ flow information. Similar to our 3D Gaussians, scene flow [39, 40, 62] is defined as the position of a point in 3D space and its motion. However, these 3D points originate from different mechanisms than our 3D Gaussians, making matching in 3D space difficult. Since optical flow defined on the image plane can be directly matched with a 3D Gaussian and is readily computed from monocular inputs, we supervise the estimated flows of the Gaussians with the ground truth optical flows of the input frames. We use RAFT [56] to obtain ground truth flows for the training views: the forward flow $f_{\mathrm{fwd}}$ and the backward flow $f_{\mathrm{bwd}}$ between two adjacent frames. The flow loss $\mathcal{L}_{\mathrm{flow}}$ takes the L1 loss between the ground truth flows and the optical flow of the Gaussians for both flow directions. The flow loss gives our method spatio-temporal consistency without any additional computation cost at rendering time. We combine the flow loss $\mathcal{L}_{\mathrm{flow}}$ with the reconstruction loss that compares the rendered and training views:

$$
\mathcal{L} = \mathcal{L}_{\mathrm{recon}} + \lambda_{\mathrm{flow}} \mathcal{L}_{\mathrm{flow}}(\hat{F}, F), \tag{6}
$$

where $F = \{f_{\mathrm{fwd}}, f_{\mathrm{bwd}}\}$ and $\hat{F}$ are the ground truth flow and the flow of the Gaussians, respectively, and $\lambda_{\mathrm{flow}}$ is a balancing hyperparameter for the flow term. Instead of applying an optical flow algorithm to rendered images, we create a pseudo optical flow directly from the Gaussian representation. Scene motion is represented solely by the 3D mean coefficients $w_{x, 1 \leq i}, w_{y, 1 \leq i}, w_{z, 1 \leq i}$. Scene flow in 3D space can be computed by

$$
\hat{f}_{\mathrm{fwd}}^{x} = x(t + \Delta t) - x(t), \quad \hat{f}_{\mathrm{bwd}}^{x} = x(t) - x(t - \Delta t),
$$
$$
\hat{f}_{\mathrm{fwd}}^{y} = y(t + \Delta t) - y(t), \quad \hat{f}_{\mathrm{bwd}}^{y} = y(t) - y(t - \Delta t), \tag{7}
$$
$$
\hat{f}_{\mathrm{fwd}}^{z} = z(t + \Delta t) - z(t), \quad \hat{f}_{\mathrm{bwd}}^{z} = z(t) - z(t - \Delta t),
$$

where $\Delta t$ is the difference between the timesteps of the two image frames. The scene flow is projected onto the 2D camera plane using

$$
\hat{f}_{\{\mathrm{fwd}, \mathrm{bwd}\}}^{xyz} = \mathbf{J} \left[ \hat{f}_{\{\mathrm{fwd}, \mathrm{bwd}\}}^{x}, \hat{f}_{\{\mathrm{fwd}, \mathrm{bwd}\}}^{y}, \hat{f}_{\{\mathrm{fwd}, \mathrm{bwd}\}}^{z} \right]^{\top}, \tag{8}
$$

where $\mathbf{J}$ is the Jacobian of the affine approximation of the projective transformation at the Gaussian center $\pmb{\mu}$ (Eq. (4)). Regarding the scene flows on the camera plane as RGB colors, point-based rendering can compute the optical flow of a pixel through $\alpha$-blending:

$$
\hat{f}_{\mathrm{fwd}} = \sum_{i=1}^{N} \hat{f}_{\mathrm{fwd}, i}^{xyz} \alpha_i \prod_{j=1}^{i-1} (1 - \alpha_j). \tag{9}
$$

The backward flow is calculated in the same way. The optical flow $\hat{F}$ consists of the forward flows $\hat{f}_{\mathrm{fwd}}$ and backward flows $\hat{f}_{\mathrm{bwd}}$ for all pixels. We exclude the flow loss for the D-NeRF dataset because the camera teleports between adjacent frames, which makes it difficult to compute reliable ground truth flows.
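The per-Gaussian part of this flow supervision can be sketched as follows. The helper `center_fn` stands for the Fourier center model of Eq. (2), the $(3, 4)$ shape assumed for the viewing transform $\mathbf{W}$ and the toy trajectory are illustrative, and, following Eq. (8), the Jacobian is applied directly to the scene flow; the per-pixel pseudo optical flow is then obtained by blending the projected flows with the same α weights as the colors (Eq. (9)).

```python
import numpy as np

def projection_jacobian(v):
    """Eq. (4): Jacobian of the affine approximation of the projective
    transformation at a camera-space point v = (v_x, v_y, v_z)."""
    vx, vy, vz = v
    return np.array([[1.0 / vz, 0.0, -vx / vz ** 2],
                     [0.0, 1.0 / vz, -vy / vz ** 2],
                     [0.0, 0.0, 0.0]])

def gaussian_image_flow(center_fn, t, dt, W):
    """Eqs. (7)-(8) for one Gaussian.

    center_fn(t) -> (3,) world-space center (e.g. the Fourier model of Eq. (2));
    W is assumed to be a (3, 4) world-to-camera viewing transform.
    """
    mu = center_fn(t)
    fwd = center_fn(t + dt) - mu          # forward scene flow, Eq. (7)
    bwd = mu - center_fn(t - dt)          # backward scene flow, Eq. (7)
    v = W @ np.append(mu, 1.0)            # camera-space center used in Eq. (4)
    J = projection_jacobian(v)
    # Eq. (8); per pixel these projected flows are alpha-blended exactly like
    # colors (Eq. (9)) to form the pseudo optical flow.
    return (J @ fwd)[:2], (J @ bwd)[:2]

# Illustrative usage with a toy linear trajectory (not values from the paper).
W = np.hstack([np.eye(3), np.array([[0.0], [0.0], [4.0]])])
f_fwd, f_bwd = gaussian_image_flow(lambda t: np.array([0.2 * t, 0.0, 0.0]),
                                   t=0.5, dt=1 / 30, W=W)
```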
![](images/4c650dafc6ec69e20921646eb9c6418afeadefc0584eb17191737df572c413dc.jpg)
Fig. 3: Qualitative comparison on D-NeRF [45]. We highlight the differences with zoomed views. Our method achieves visual quality competitive with strong baselines. While our method successfully reconstructs intricate details such as hands, it produces a blurred sphere shape.

# 4 Experiment

# 4.1 Evaluation data

We evaluate our compact dynamic Gaussian representation using dynamic scene datasets: a synthetic one, D-NeRF [45], and two real ones, DyNeRF [31] and HyperNeRF [44].

Table 1: Quantitative results on the D-NeRF dataset [45]. Our method performs competitively against NeRF approaches in terms of visual quality and achieves the fastest rendering speed among the highest-performing methods. Results except the FPS of [17, 18, 20] are adopted from the original papers. The best and second best scores among competing methods are highlighted.
| Method | PSNR↑ | MS-SSIM↑ | LPIPS↓ | FPS↑ | Train Time↓ | Mem↓ |
| --- | --- | --- | --- | --- | --- | --- |
| TiNeuVox-S [17] | 30.75 | 0.96 | 0.07 | 0.32 | 8 mins | 8MB |
| TiNeuVox-B [17] | 32.67 | 0.97 | 0.04 | 0.13 | 28 mins | 48MB |
| K-Planes [18] | 31.61 | 0.97 | - | 0.54 | 52 mins | ~497MB |
| V4D [20] | 33.72 | 0.98 | 0.02 | 1.47 | 6.9 hrs | 1.2GB |
| 3DGS [26] | 20.51 | 0.89 | 0.07 | 170 | 6 mins | ~50MB |
| D-3DGS | 17.22 | 0.81 | 0.13 | 173 | 15 mins | ~913MB |
| Ours | 32.19 | 0.97 | 0.04 | 150 | 8 mins | ~159MB |
Table 2: Quantitative results on the DyNeRF dataset [31]. Results except the FPS of [18, 20] are adopted from the original papers. The best and second best scores among competing methods (excepting 3DGS) are highlighted. Our method matches NeRFs in rendering quality while matching 3DGS in rendering speed. Moreover, our method is 20 times more compact than Dynamic3DGaussians.
| Method | PSNR↑ | MS-SSIM↑ | LPIPS↓ | FPS↑ | Train Time↓ | Mem↓ |
| --- | --- | --- | --- | --- | --- | --- |
| K-Planes [18] | 31.63 | 0.964 | - | 0.31 | 1.8 hrs | ~309MB |
| V4D | 28.96 | 0.937 | 0.17 | 0.11 | 4 hrs | 1.2GB |
| 3DGS [26] | 20.94 | 0.800 | 0.29 | 109 | 20 mins | ~198MB |
| D-3DGS | 24.36 | 0.834 | 0.25 | 119 | 51 mins | ~2.3GB |
| Dynamic3DGaussians [38] | 27.79 | 0.869 | 0.23 | 51 | 2.1 hrs | ~6.6GB |
| Ours | 30.46 | 0.955 | 0.15 | 118 | 1 hr | ~338MB |
+ +D-NeRF dataset [45]. This dataset comprises eight videos of varying lengths, ranging from 50 to 200 frames per video. The camera setup is designed to mimic a monocular camera setting by teleporting between adjacent timesteps. The test views are from novel camera positions. We train and render at the resolution of $800 \times 800$ . + +DyNeRF dataset [31]. The multi-camera dataset includes six 10-second videos captured at 30 FPS using 15-20 synchronized fixed cameras. For evaluation, a central camera is used, while training utilizes frames from the other cameras. The training and rendering resolution is set at $1,352 \times 1,014$ . + +HyperNeRF dataset [44]. This dataset encompasses videos ranging from 8 to $15\mathrm{~s}$ , captured at 15 FPS using two Pixel 3 phones. The training and rendering processes are conducted at a resolution of $540 \times 960$ . + +# 4.2 Implementation details + +We adhere to the experimental setup in the 3DGS paper [26]. The number of approximation terms of the Gaussian centers $L$ is set to 2 for the D-NeRF dataset. For the other datasets, $L$ is set to 5 from preliminary experiments. Our two-stage optimization process begins with an initial fitting of parameters, excluding the coefficients for Gaussian center and rotation. This initial stage spans 3,000 iterations and utilizes all training views in a static setting. Subsequently, we engage in a dynamic stage, adjusting all Gaussian + +Table 3: Quantitative results on the HyperNeRF dataset [44]. Our method demonstrates competitive performance in rendering quality across all scenes, surpassing the compared methods in rendering speed. Furthermore, our method is not inferior to the compared methods in training time and memory size. + +
| Method | FPS↑ | Train Time↓ | Mem↓ | BROOM (PSNR↑ / SSIM↑) | 3D PRINTER (PSNR↑ / SSIM↑) | CHICKEN (PSNR↑ / SSIM↑) | PEEL BANANA (PSNR↑ / SSIM↑) | Mean (PSNR↑ / SSIM↑) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| HyperNeRF [44] | 0.36 | 48 hrs† | 15MB | 19.3 / 0.591 | 20.0 / 0.821 | 26.9 / 0.948 | 23.3 / 0.896 | 22.2 / 0.811 |
| TiNeuVox-B [17] | 0.14 | 30 mins | 48MB | 21.5 / 0.686 | 22.8 / 0.841 | 28.3 / 0.947 | 24.4 / 0.873 | 24.3 / 0.837 |
| V4D [20] | 0.15 | 7 hrs | 1.2GB | 22.1 / 0.669 | 23.2 / 0.835 | 28.4 / 0.929 | 25.2 / 0.873 | 24.7 / 0.827 |
| Ours | 188 | 1 hr | ~720MB | 22.1 / 0.789 | 25.5 / 0.919 | 28.3 / 0.934 | 26.6 / 0.920 | 25.6 / 0.890 |
+ +Train time of HyperNeRF [44] is estimated from their paper's descriptions. Originally reported as 8 hours on 4 TPU v4s [25], the TPU v4 is slightly faster than the A100 GPU, and the A100 GPU is at least 1.5 times faster than the A6000 GPU. + +![](images/8646b14d601e95ac912fc9ca3a0b37c477b69db65020e2bb5588c4ff8ef2e31b.jpg) +Fig. 4: Qualitative comparison on the DyNeRF dataset [31]. The differences are zoomed in. + +parameters in 27,000 iterations. The entire optimization process encompasses 30,000 iterations. Following [26], $\lambda$ is set to 0.2. We set the flow loss weight $\lambda_{\mathrm{flow}}$ to 1,000 and acquire ground truth flow through the RAFT pretrained on the Sintel dataset [7]. All experiments are conducted on a single RTX A6000 GPU. + +# 4.3 Evaluation setup + +Compared methods. We benchmark our method against the following baseline methods: TiNeuVox [17], K-Planes [18], V4D [20], HyperNeRF [44], 3D Gaussian Splatting (3DGS) [26], Dynamic3DGaussians [38], and a D-3DGS baseline. D-3DGS is the dynamic extension of 3DGS, which stores both position and rotation for each timestep. + +Evaluation metrics. We assess the methods using various metrics, including PSNR [24], SSIM [63], LPIPS [70], FPS, Training time, and memory used to store optimized parameters. Memory consumption includes the 3D Gaussian parameters, voxel/plane representation, and neural network parameters. + +# 4.4 Experimental results + +Quantitative results. The quantitative results on the D-NeRF dataset are detailed in Tab. 1. Our method demonstrates a performance comparable to TiNeuVox and K-Planes + +![](images/2d1346f0d33ed12f3ce5ba01ba407a5ba140e4062cbebc5cb565decdb9f99f69.jpg) +Fig. 5: Qualitative comparison on HyperNeRF [44]. Our method offers sharp results. + +Table 4: Per-scene quantitative comparison on D-NeRF scenes of different $L$ , which stands for the number of harmonic terms in the Fourier approximation, and other design choices. The highest mean score is achieved with $L = 2$ , but increasing the complexity $L$ (the number of coefficients) improves visual quality in some scenes (JUMPING JACKS and T-REX). The spline approximations bring marginal improvements in some scenes but slower rendering. The time-varying scale (the last row) also provides minor gains in some cases and increases the memory size. The setting reported in Fig. 3 is highlighted with a gray background. + +
|  | STAND UP (PSNR / SSIM) | JACKS (PSNR / SSIM) | BALLS (PSNR / SSIM) | LEGO (PSNR / SSIM) | WARRIOR (PSNR / SSIM) | HOOK (PSNR / SSIM) | T-REX (PSNR / SSIM) | MUTANT (PSNR / SSIM) | Mean (PSNR / SSIM) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| L = 1 | 40.21 / 0.994 | 27.22 / 0.952 | 30.27 / 0.972 | 24.26 / 0.940 | 32.42 | | | | |
| L = 2 | 39.10 / 0.993 | 30.95 / 0.980 | 33.29 / 0.984 | 23.15 / 0.922 | 34.15 | | | | |
| L = 3 | 38.09 / 0.990 | 32.78 / 0.984 | 32.54 / 0.979 | 22.12 / 0.881 | 35.36 | | | | |
| L = 4 | 35.83 / 0.984 | 32.93 / 0.982 | 30.39 / 0.969 | 21.06 / 0.855 | 34.38 | | | | |
| L = 5 | 32.89 / 0.976 | 30.71 / 0.977 | 27.68 / 0.959 | 20.20 / 0.825 | 32.64 | | | | |
| Linear | 27.77 / 0.973 | 23.10 / 0.921 | 26.68 / 0.959 | 22.27 / 0.922 | 17.39 | | | | |
| Quadratic | 29.40 / 0.978 | 23.44 / 0.926 | 27.51 / 0.963 | 22.45 / 0.924 | 17.70 | | | | |
| Cubic | 29.98 / 0.979 | 23.71 / 0.928 | 27.76 / 0.964 | 22.37 / 0.921 | 18.04 | | | | |
| Spline (5) | 38.87 / 0.993 | 31.96 / 0.983 | 32.96 / 0.980 | 23.09 / 0.918 | 34.46 | | | | |
| Spline (6) | 38.00 / 0.992 | 31.84 / 0.984 | 32.81 / 0.980 | 22.25 / 0.903 | 35.24 | | | | |
| Linear (scale) | 38.32 / 0.993 | 30.91 / 0.980 | 32.55 / 0.984 | 23.87 / 0.930 | 34.43 | | | | |
in terms of visual quality as measured by PSNR, SSIM, and LPIPS. Notably, it excels in training time, FPS, and memory size, achieving a rendering speed that is $300 \times$ faster than that of K-Planes. Furthermore, our method surpasses both 3DGS and D-3DGS in terms of visual quality without compromising rendering speed. In the DyNeRF experiment, detailed in Tab. 2, while our method does not exceed the baseline in reconstruction quality, it shows a substantial improvement in FPS. Since the DyNeRF scenes contain multi-view data, the D-3DGS baseline naturally improves over static 3DGS, unlike in monocular scenes. Our method even attains rendering speeds that exceed real-time performance at a high resolution of $1,352 \times 1,014$. For the challenging HyperNeRF dataset, which is captured by only two moving cameras and reported in Tab. 3, our method not only demonstrates rapid rendering speeds but also achieves higher average PSNR and SSIM scores than the compared methods.

Qualitative results. Figures 3 to 5 show that our method yields faithful reconstructions of the dynamic scenes. Unlike structured representations, which have a fixed grid size, the unstructured nature of 3D Gaussians enables adaptive control of the expressiveness of the representation, delivering sharper renderings. As seen in the results for BOUNCING BALLS, since our method uses discrete primitives, it sometimes fails to reproduce smooth boundaries.

![](images/c41a63e6504a068c6b8087083f95eaa993f6898c57b2029c9990377c962f6808.jpg)
Ours without $\mathcal{L}_{\mathrm{flow}}$ / Ours

![](images/d36c05713d738724be5412fe9c680427ac7ddb81f84b165665bd4a83725f0cad.jpg)
Fig. 6: Qualitative comparison with the flow loss disabled and enabled on DyNeRF. We highlight the differences with zoomed views.
Fig. 7: Composition of two scenes. Our method allows for adding 3D objects represented by 3D Gaussians into a 3D Gaussian scene. We highlight the added object.

Effect of the number of parameters $L$. Table 4 shows per-scene PSNR and SSIM scores of K-Planes and our method with different $L$ (Eq. (2)). It is observed that the optimal $L$ for novel view synthesis varies from scene to scene, highlighting the necessity of sufficiently complex approximations to capture intricate motions effectively.

Effect of flow loss. Additionally, visual comparisons of our method without and with the flow loss (Fig. 6) reveal that incorporating the flow loss mitigates ghostly artifacts and significantly enhances the accuracy of color reconstruction.

Design choice. Our method is very flexible and allows for the use of arbitrary approximation functions and choices of time-varying parameters. Table 4 also shows the experimental results of other design options to facilitate future work on dynamic scene reconstruction. The linear, quadratic, and cubic baselines approximate time-varying 3D positions with polynomials of degrees one, two, and three, respectively. The Spline (5) and Spline (6) baselines approximate 3D positions with spline approximations of five and six points, respectively. The linear (scale) baseline additionally approximates time-varying scales with a linear approximation, on top of positions and rotations. Although a Spline baseline gives minor performance gains in some cases, it achieves 91 FPS for rendering, which is slower than the proposed method. The linear (scale) baseline does not show performance improvements that would justify its additional parameters.
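To put the cost of these design choices in perspective, the sketch below counts per-Gaussian parameters directly from the parameterization in Sec. 3.1 (2L + 1 Fourier coefficients per coordinate and two linear coefficients per quaternion component) and contrasts the proposed model with the cubic polynomial baseline and with storing the center and rotation at every timestep. The SH degree $k = 3$ and $T = 300$ frames are illustrative assumptions.

```python
def params_per_gaussian(pos_coeffs_per_axis: int, rot_coeffs_per_component: int, k: int = 3) -> int:
    """Per-Gaussian parameter count: 3D center, quaternion rotation, scale (3),
    SH color 3*(k+1)^2, and opacity (1)."""
    return 3 * pos_coeffs_per_axis + 4 * rot_coeffs_per_component + 3 + 3 * (k + 1) ** 2 + 1

L, T = 5, 300
fourier      = params_per_gaussian(2 * L + 1, 2)  # proposed: Fourier center, linear rotation
cubic        = params_per_gaussian(4, 2)          # Cubic baseline from Table 4
per_timestep = params_per_gaussian(T, T)          # storing center/rotation for every frame
print(fourier, cubic, per_timestep)               # 93, 72, and 2152 floats per Gaussian
```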
For time-varying 3D rotation, we also considered a slerp-based approximation. Since it does not offer performance gains and causes numerical instability for static Gaussians, we use the linear approximation for rotation. For fast rendering and a compact representation, we use the Fourier approximation for 3D positions and model 3D positions and rotations as time-varying parameters.

Scene composition. Since our dynamic 3D Gaussian representation still uses a pure 3D Gaussian representation, the learned representation facilitates straightforward editing of the Gaussians. We demonstrate the composition of two scenes with our representation. Figure 7 illustrates this by combining the MUTANT scene from the D-NeRF dataset with the SEARED STEAK scene from the DyNeRF dataset. This demonstrates the capability of our method for editing dynamic 3D scenes.

# 5 Discussion and Conclusion

Limitations and future directions. Our dynamic Gaussians are defined over the entire duration of the dynamic scene. This representation implicitly assumes that every Gaussian exists at all times of the scene. It enables us to naturally model rigid and non-rigid deformation in the scene. On the other hand, modeling changes in topology that involve the appearance and disappearance of Gaussians (e.g., fluids) is difficult. Static colors also make it difficult to model changes in illumination and color. The reconstruction capability of the method depends on the number of parameters: the scene representation is compact, but it yields poor rendering quality for very long sequences, and proper reconstruction then requires additional memory. To overcome these limitations, considering the lifetime of Gaussians, such as adding start and end time parameters, would allow modeling changes in scene topology, and adaptively deciding the flexibility of the approximation would lead to better trade-offs between quality and memory size.

Our Gaussian representation sacrifices the continuity and smoothness inherent in neural field-based volume rendering. Distilling NeRFs into our proposed representation, in a manner similar to PlenOctree [69], is a potential extension of our method, promising to enhance rendering quality while maintaining the fast rendering advantage.

Conclusion. We present a compact dynamic 3D Gaussian representation enabling faithful reconstruction and real-time rendering of dynamic scenes. We propose representing the position and rotation of 3D Gaussians as functions of time to model the motion of the scene. The parameterized functions of time introduce memory efficiency and robustness to the number of views per timestep. Furthermore, we introduce a flow loss constraining the scene flow of the learned Gaussian representation with the ground truth flow. Our experiments on synthetic and real datasets show that the proposed method achieves real-time dynamic scene rendering even at high resolutions.

Acknowledgements This study was supported by JSPS/MEXT KAKENHI Grant Numbers JP24K20830, JP23KJ0381, JP23K28139, and JP22H05015, ROIS NII Open Collaborative Research 2024-24S1201, and the Institute of AI and Beyond of the University of Tokyo. The authors would like to thank D. Horita for carefully proofreading the manuscript and N. Umetani for providing helpful advice on the method's limitations.

# References

1. Abdal, R., Yifan, W., Shi, Z., Xu, Y., Po, R., Kuang, Z., Chen, Q., Yeung, D.Y., Wetzstein, G.: Gaussian Shell Maps for Efficient 3D Human Generation. In: CVPR. pp. 9441-9451 (2024)
2.
Akhter, I., Sheikh, Y., Khan, S., Kanade, T.: Nonrigid Structure from Motion in Trajectory Space. In: NeurIPS (2008)
3. An, S., Xu, H., Shi, Y., Song, G., Ogras, U.Y., Luo, L.: PanoHead: Geometry-Aware 3D Full-Head Synthesis in 360 degree. In: CVPR. pp. 20950-20959 (2023)
4. Attal, B., Huang, J.B., Richardt, C., Zollhöefer, M., Kopf, J., O'Toole, M., Kim, C.: HyperReel: High-Fidelity 6-DoF Video with Ray-Conditioned Sampling. In: CVPR. pp. 16610-16620 (2023)
5. Bae, J., Kim, S., Yun, Y., Lee, H., Bang, G., Uh, Y.: Per-Gaussian Embedding-Based Deformation for Deformable 3D Gaussian Splatting. arXiv preprint arXiv:2404.03613 (2024)
6. Barron, J.T., Mildenhall, B., Tancik, M., Hedman, P., Martin-Brualla, R., Srinivasan, P.P.: Mip-NeRF: A Multiscale Representation for Anti-Aliasing Neural Radiance Fields. In: CVPR. pp. 5855-5864 (2021)
7. Butler, D.J., Wulff, J., Stanley, G.B., Black, M.J.: A Naturalistic Open Source Movie for Optical Flow Evaluation. In: ECCV. pp. 611-625 (2012)
8. Cabral, B., Max, N., Springmeyer, R.: Bidirectional Reflection Functions from Surface Bump Maps. SIGGRAPH 21(4), 273-281 (1987)
9. Cao, A., Johnson, J.: HexPlane: A Fast Representation for Dynamic Scenes. In: CVPR. pp. 130-141 (2023)
10. Chan, E.R., Lin, C.Z., Chan, M.A., Nagano, K., Pan, B., De Mello, S., Gallo, O., Guibas, L.J., Tremblay, J., Khamis, S., et al.: Efficient Geometry-Aware 3D Generative Adversarial Networks. In: CVPR. pp. 16123-16133 (2022)
11. Chen, A., Xu, Z., Geiger, A., Yu, J., Su, H.: TensoRF: Tensorial Radiance Fields. In: ECCV. pp. 333-350 (2022)
12. Chen, Z., Wang, F., Liu, H.: Text-to-3D Using Gaussian Splatting. arXiv preprint arXiv:2309.16585 (2023)
13. Das, D., Wewer, C., Yunus, R., Ilg, E., Lenssen, J.E.: Neural Parametric Gaussians for Monocular Non-Rigid Object Reconstruction. In: CVPR. pp. 10715-10725 (2024)
14. Dong, Z., Chen, X., Yang, J., Black, M.J., Hilliges, O., Geiger, A.: AG3D: Learning to Generate 3D Avatars from 2D Image Collections. In: ICCV. pp. 14916-14927 (2023)
15. Duan, Y., Wei, F., Dai, Q., He, Y., Chen, W., Chen, B.: 4D-Rotor Gaussian Splatting: Towards Efficient Novel View Synthesis for Dynamic Scenes. ACM TOG (2024)
16. Duisterhof, B.P., Mandi, Z., Yao, Y., Liu, J.W., Shou, M.Z., Song, S., Ichnowski, J.: MD-Splatting: Learning Metric Deformation from 4D Gaussians in Highly Deformable Scenes. arXiv preprint arXiv:2312.00583 (2023)
17. Fang, J., Yi, T., Wang, X., Xie, L., Zhang, X., Liu, W., Nießner, M., Tian, Q.: Fast Dynamic Radiance Fields with Time-Aware Neural Voxels. In: SIGGRAPH Asia (2022)
18. Fridovich-Keil, S., Meanti, G., Warburg, F.R., Recht, B., Kanazawa, A.: K-Planes: Explicit Radiance Fields in Space, Time, and Appearance. In: CVPR. pp. 12479-12488 (2023)
19. Fridovich-Keil, S., Yu, A., Tancik, M., Chen, Q., Recht, B., Kanazawa, A.: Plenoxels: Radiance Fields Without Neural Networks. In: CVPR. pp. 5501-5510 (2022)
20. Gan, W., Xu, H., Huang, Y., Chen, S., Yokoya, N.: V4D: Voxel for 4D Novel View Synthesis. IEEE TVCG 30(2), 1579-1591 (2024)
21. Gao, C., Saraf, A., Kopf, J., Huang, J.B.: Dynamic View Synthesis from Dynamic Monocular Video. In: ICCV. pp. 5712-5721 (2021)
22. Guo, Z., Zhou, W., Li, L., Wang, M., Li, H.: Motion-Aware 3D Gaussian Splatting for Efficient Dynamic Scene Reconstruction. arXiv preprint arXiv:2403.11447 (2024)
23. He, H., Yang, Z., Li, S., Dai, B., Wu, W.: OrthoPlanes: A Novel Representation for Better 3D-Awareness of GANs. In: ICCV. pp. 22996-23007 (2023)
24.
Huynh-Thu, Q., Ghanbari, M.: Scope of Validity of PSNR in Image/Video Quality Assessment. Electronics Letters 44(13), 800-801 (2008) +25. Jouppi, N., Kurian, G., Li, S., Ma, P., Nagarajan, R., Nai, L., Patil, N., Subramanian, S., Swing, A., Towles, B., Young, C., Zhou, X., Zhou, Z., Patterson, D.A.: TPU v4: An Optically Reconfigurable Supercomputer for Machine Learning with Hardware Support for Embeddings. In: Proceedings of the 50th Annual International Symposium on Computer Architecture. pp. 1-14 (2023) +26. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3D Gaussian Splitting for Real-Time Radiance Field Rendering. ACM TOG 42(4), 1-14 (2023) +27. Keselman, L., Hebert, M.: Approximate Differentiable Rendering with Algebraic Surfaces. In: ECCV. pp. 596-614 (2022) +28. Kopanas, G., Leimkuhler, T., Rainer, G., Jambon, C., Drettakis, G.: Neural Point Catacaustics for Novel-View Synthesis of Reflections. ACM TOG 41(6), 1-15 (2022) +29. Kopanas, G., Philip, J., Leimkuhler, T., Drettakis, G.: Point-Based Neural Rendering with Per-View Optimization. In: Comput. Graph. Forum. vol. 40, pp. 29-43 (2021) +30. Lei, J., Wang, Y., Pavlakos, G., Liu, L., Daniilidis, K.: GART: Gaussian Articulated Template Models. In: CVPR. pp. 19876-19887 (2024) +31. Li, T., Slavcheva, M., Zollhöefer, M., Green, S., Lassner, C., Kim, C., Schmidt, T., Lovegrove, S., Goesele, M., Newcombe, R., et al.: Neural 3D Video Synthesis from Multi-View Video. In: CVPR. pp. 5521-5531 (2022) +32. Li, Z., Chen, Z., Li, Z., Xu, Y.: Spacetime Gaussian Feature Splatting for Real-Time Dynamic View Synthesis. arXiv preprint arXiv:2312.16812 (2023) +33. Li, Z., Niklaus, S., Snavely, N., Wang, O.: Neural Scene Flow Fields for Space-Time View Synthesis of Dynamic Scenes. In: CVPR. pp. 6498-6508 (2021) +34. Li, Z., Wang, Q., Cole, F., Tucker, R., Snavely, N.: DynIBaR: Neural Dynamic Image-Based Rendering. In: CVPR. pp. 4273-4284 (2023) +35. Liang, Y., Khan, N., Li, Z., Nguyen-Phuoc, T., Lanman, D., Tompkin, J., Xiao, L.: Gaufre: Gaussian Deformation Fields for Real-Time Dynamic Novel View Synthesis. arXiv preprint arXiv:2312.11458 (2023) +36. Lu, Z., Guo, X., Hui, L., Chen, T., Yang, M., Tang, X., Zhu, F., Dai, Y.: 3D Geometry-Aware Deformable Gaussian Splitting for Dynamic View Synthesis. In: CVPR. pp. 8900-8910 (2024) +37. Lucas, B.D., Kanade, T.: An Iterative Image Registration Technique with an Application to Stereo Vision. In: IJCAI. pp. 674-679 (1981) +38. Luiten, J., Kopanas, G., Leibe, B., Ramanan, D.: Dynamic 3D Gaussians: Tracking by Persistent Dynamic View Synthesis. In: 3DV (2024) +39. Mayer, N., Ilg, E., Hausser, P., Fischer, P., Cremers, D., Dosovitskiy, A., Brox, T.: A Large Dataset to Train Convolutional Networks for Disparity, Optical Flow, and Scene Flow Estimation. In: CVPR. pp. 4040-4048 (2016) +40. Menze, M., Geiger, A.: Object Scene Flow for Autonomous Vehicles. In: CVPR. pp. 3061-3070 (2015) +41. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. In: ECCV (2020) +42. Müller, T., Evans, A., Schied, C., Keller, A.: Instant Neural Graphics Primitives with a Multiresolution Hash Encoding. ACM TOG 41(4), 1-15 (2022) +43. Park, K., Sinha, U., Barron, J.T., Bouaziz, S., Goldman, D.B., Seitz, S.M., Martin-Brualla, R.: Nerfies: Deformable Neural Radiance Fields. In: ICCV. pp. 5865-5874 (2021) + +44. 
Park, K., Sinha, U., Hedman, P., Barron, J.T., Bouaziz, S., Goldman, D.B., Martin-Brualla, R., Seitz, S.M.: HyperNeRF: a Higher-Dimensional Representation for Topologically Varying Neural Radiance Fields. ACM TOG 40(6), 1-12 (2021) +45. Pumarola, A., Corona, E., Pons-Moll, G., Moreno-Noguer, F.: D-NeRF: Neural Radiance Fields for Dynamic Scenes. In: CVPR. pp. 10318-10327 (2021) +46. Qian, S., Kirschstein, T., Schoneveld, L., Davoli, D., Giebenhain, S., Nießner, M.: GaussianAvatars: Photorealistic Head Avatars with Rigged 3D Gaussians. In: CVPR. pp. 20299-20309 (2024) +47. Qian, Z., Wang, S., Mihajlovic, M., Geiger, A., Tang, S.: 3DGS-Avatar: Animatable Avatars via Deformable 3D Gaussian Splatting. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5020-5030 (2024) +48. Reiser, C., Peng, S., Liao, Y., Geiger, A.: KiloNeRF: Speeding up Neural Radiance Fields with Thousands of Tiny MLPs. In: ICCV. pp. 14335-14345 (2021) +49. Reiser, C., Szeliski, R., Verbin, D., Srinivasan, P., Mildenhall, B., Geiger, A., Barron, J., Hedman, P.: MERF: Memory-Efficient Radiance Fields for Real-Time View Synthesis in Unbounded Scenes. ACM TOG 42(4), 1-12 (2023) +50. Schonberger, J.L., Zheng, E., Frahm, J.M., Pollefeys, M.: Pixelwise View Selection for Unstructured Multi-View Stereo. In: ECCV. pp. 501-518 (2016) +51. Schonberger, J.L., Frahm, J.M.: Structure-from-Motion Revisited. In: CVPR (2016) +52. Seitz, S.M., Curless, B., Diebel, J., Scharstein, D., Szeliski, R.: A Comparison and Evaluation of Multi-View Stereo Reconstruction Algorithms. In: CVPR. pp. 519-528 (2006) +53. Shao, R., Zheng, Z., Tu, H., Liu, B., Zhang, H., Liu, Y.: Tensor4D: Efficient Neural 4D Decomposition for High-Fidelity Dynamic Reconstruction and Rendering. In: CVPR. pp. 16632-16642 (2023) +54. Song, L., Chen, A., Li, Z., Chen, Z., Chen, L., Yuan, J., Xu, Y., Geiger, A.: NeRFPlayer: A Streamable Dynamic Scene Representation with Decomposed Neural Radiance Fields. IEEE TVCG 29(5), 2732-2742 (2023) +55. Tang, J., Ren, J., Zhou, H., Liu, Z., Zeng, G.: DreamGaussian: Generative Gaussian Splitting for Efficient 3D Content Creation. arXiv preprint arXiv:2309.16653 (2023) +56. Teed, Z., Deng, J.: RAFT: Recurrent All-Pairs Field Transforms for Optical Flow. In: ECCV. pp. 402-419 (2020) +57. Tewari, A., Thies, J., Mildenhall, B., Srinivasan, P., Tretschk, E., Wang, Y., Lassner, C., Sitzmann, V., Martin-Brualla, R., Lombardi, S., Simon, T., Theobalt, C., Nießner, M., Barron, J.T., Wetzstein, G., Zollhöefer, M., Golyanik, V.: Advances in Neural Rendering. In: Comput. Graph. Forum. vol. 41, pp. 703-735 (2022) +58. Tian, F., Du, S., Duan, Y.: MonoNeRF: Learning a Generalizable Dynamic Radiance Field from Monocular Videos. In: ICCV. pp. 17903-17913 (2023) +59. Tretschk, E., Tewari, A., Golyanik, V., Zollhöefer, M., Lassner, C., Theobalt, C.: Non-Rigid Neural Radiance Fields: Reconstruction and Novel View Synthesis of a Dynamic Scene From Monocular Video. In: ICCV (2021) +60. Tretschk, E., Tewari, A., Golyanik, V., Zollhöfer, M., Lassner, C., Theobalt, C.: Non-Rigid Neural Radiance Fields: Reconstruction and Novel View Synthesis of a Dynamic Scene from Monocular Video. In: ICCV. pp. 12959-12970 (2021) +61. Vedula, S., Baker, S., Rander, P., Collins, R., Kanade, T.: Three-Dimensional Scene Flow. In: CVPR. pp. 722-729 (1999) +62. Vedula, S., Rander, P., Collins, R., Kanade, T.: Three-Dimensional Scene Flow. IEEE TPAMI 27(3), 475-480 (2005) +63. 
Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image Quality Assessment: From Error Visibility to Structural Similarity. IEEE TIP 13(4), 600-612 (2004) + +64. Wu, G., Yi, T., Fang, J., Xie, L., Zhang, X., Wei, W., Liu, W., Tian, Q., Wang, X.: 4D Gaussian Splitting for Real-Time Dynamic Scene Rendering. arXiv preprint arXiv:2310.08528 (2023) +65. Xie, Y., Takikawa, T., Saito, S., Litany, O., Yan, S., Khan, N., Tombari, F., Tompkin, J., Sitzmann, V., Sridhar, S.: Neural Fields in Visual Computing and Beyond. Comput. Graph. Forum (2022) +66. Xu, D., Yuan, Y., Mardani, M., Liu, S., Song, J., Wang, Z., Vahdat, A.: AGG: Amortized Generative 3D Gaussians for Single Image to 3D. arXiv preprint arXiv:2401.04099 (2024) +67. Yang, Z., Yang, H., Pan, Z., Zhu, X., Zhang, L.: Real-Time Photorealistic Dynamic Scene Representation and Rendering with 4D Gaussian Splatting. arXiv preprint arXiv:2310.10642 (2023) +68. Yi, T., Fang, J., Wu, G., Xie, L., Zhang, X., Liu, W., Tian, Q., Wang, X.: GaussianDreamer: Fast Generation from Text to 3D Gaussian Splatting with Point Cloud Priors. arXiv preprint arXiv:2310.08529 (2023) +69. Yu, A., Li, R., Tancik, M., Li, H., Ng, R., Kanazawa, A.: PlenOctrees for Real-Time Rendering of Neural Radiance Fields. In: ICCV. pp. 5752-5761 (2021) +70. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The Unreasonable Effectiveness of Deep Features as a Perceptual Metric. In: CVPR. pp. 586-595 (2018) +71. Zheng, E., Ji, D., Dunn, E., Frahm, J.M.: Sparse Dynamic 3D Reconstruction from Unsynchronized Videos. In: CVPR. pp. 4435-4443 (2015) +72. Zielonka, W., Bagautdinov, T., Saito, S., Zollhöfer, M., Thies, J., Romero, J.: Drivable 3D Gaussian Avatars. arXiv preprint arXiv:2311.08581 (2023) +73. Zwicker, M., Pfister, H., Van Baar, J., Gross, M.: EWA Splitting. 
IEEE TVCG 8(3), 223-238 (2002) \ No newline at end of file diff --git a/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/images.zip b/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..383c66a27f145aaf6397f1232d4a773ec36bf55c --- /dev/null +++ b/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dd1de0eb9d50974839e6de933cbece551aaf130b4a81540810accaa35d595d1 +size 681532 diff --git a/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/layout.json b/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..377add533f98336f90f437efdf3bb65c213ca6a9 --- /dev/null +++ b/2024/A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis/layout.json @@ -0,0 +1,9804 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 153, + 111, + 462, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 111, + 462, + 148 + ], + "spans": [ + { + "bbox": [ + 153, + 111, + 462, + 148 + ], + "type": "text", + "content": "A Compact Dynamic 3D Gaussian Representation for Real-Time Dynamic View Synthesis" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 189, + 168, + 425, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 168, + 425, + 180 + ], + "spans": [ + { + "bbox": [ + 189, + 168, + 425, + 180 + ], + "type": "text", + "content": "Kai Katsumata, Duc Minh Vo, and Hideki Nakayama" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 171, + 190, + 443, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 190, + 443, + 213 + ], + "spans": [ + { + "bbox": [ + 171, + 190, + 443, + 213 + ], + "type": "text", + "content": "The University of Tokyo, Japan {katsumata, vm Duc, nakayama}@nlab.ci.i.u-tokyo.ac.jp" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 160, + 240, + 455, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 240, + 455, + 394 + ], + "spans": [ + { + "bbox": [ + 160, + 240, + 455, + 394 + ], + "type": "text", + "content": "Abstract. 3D Gaussian Splatting (3DGS) has shown remarkable success in synthesizing novel views given multiple views of a static scene. Yet, 3DGS faces challenges when applied to dynamic scenes because 3D Gaussian parameters need to be updated per timestep, requiring a large amount of memory and at least a dozen observations per timestep. To address these limitations, we present a compact dynamic 3D Gaussian representation that models positions and rotations as functions of time with a few parameter approximations while keeping other properties of 3DGS including scale, color, and opacity invariant. Our method can dramatically reduce memory usage and relax a strict multi-view assumption. 
In our experiments on monocular and multi-view scenarios, we show that our method not only matches state-of-the-art methods, often linked with slower rendering speeds, in terms of high rendering quality, but also significantly surpasses them by achieving a rendering speed of 118 frames per second at a resolution of " + }, + { + "bbox": [ + 160, + 240, + 455, + 394 + ], + "type": "inline_equation", + "content": "1,352 \\times 1,014" + }, + { + "bbox": [ + 160, + 240, + 455, + 394 + ], + "type": "text", + "content": " on a single GPU." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 413, + 218, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 413, + 218, + 426 + ], + "spans": [ + { + "bbox": [ + 133, + 413, + 218, + 426 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 438, + 485, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 438, + 485, + 569 + ], + "spans": [ + { + "bbox": [ + 130, + 438, + 485, + 569 + ], + "type": "text", + "content": "The landscape of novel view synthesis of scenes captured through multiple images/videos has undergone a revolutionary transformation, owing principally to major breakthroughs in neural radiance field (NeRF) approaches [6,41,57]. Although they achieve remarkable visual quality, particularly in dynamic scenes [4,21,31,34,45], NeRFs inevitably confront hurdles in terms of high-speed training and rendering [41,43,44,48]. This limitation is attributed to their reliance on multi-layer perceptrons (MLPs). Recently, 3D Gaussian Splatting (3DGS) [26] introduced a differentiable 3D Gaussian representation and point-based rasterization, signaling a departure from neural network reliance. 3DGS has emerged as a promising solution that not only accelerates training and rendering processes but also delivers high-quality rendered scenes, rivaling the levels set by NeRF [41] on static scenes." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 570, + 483, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 570, + 483, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 570, + 483, + 666 + ], + "type": "text", + "content": "Nonetheless, in the realm of dynamic scene synthesis, 3DGS faces challenges related to memory usage and the need for many observations [38]. In particular, a significant number of 3D Gaussian parameters must be stored per timestep, resulting in a non-negligible increase in memory usage and the need for numerous observations per timestep. This poses challenges in monocular or few-view setups, as their strict multiview assumption demands advanced facilities or expertise, limiting flexibility in capturing setups. Exploring 3DGS without multi-view assumption enables dynamic view synthesis with a simple and easy camera setup, which is the primary goal of this study." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 133, + 114, + 482, + 213 + ], + "blocks": [ + { + "bbox": [ + 133, + 114, + 482, + 213 + ], + "lines": [ + { + "bbox": [ + 133, + 114, + 482, + 213 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 482, + 213 + ], + "type": "image", + "image_path": "000059aa1fa1476365bb3f5609dfd948998b9607287e045b8a334c8e42f20b20.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 219, + 482, + 295 + ], + "lines": [ + { + "bbox": [ + 130, + 219, + 482, + 295 + ], + "spans": [ + { + "bbox": [ + 130, + 219, + 482, + 295 + ], + "type": "text", + "content": "Fig. 1: We show examples of novel view synthesis on the MUTANT scene in the D-NeRF dataset, visual quality (PSNR), rendering speed (FPS), and memory used to store optimized parameters. Our method yields reconstruction fidelity competitive with SoTAs with real-time rendering, achieving " + }, + { + "bbox": [ + 130, + 219, + 482, + 295 + ], + "type": "inline_equation", + "content": "100 \\times" + }, + { + "bbox": [ + 130, + 219, + 482, + 295 + ], + "type": "text", + "content": " faster rendering speed than V4D and reasonable memory size. Non-obvious differences in quality are highlighted. **Bold typeface number** indicates the best result among the methods with the competitive rendering quality (excepting for 3DGS), and the **underline** one does the second best." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 300, + 482, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 300, + 482, + 539 + ], + "spans": [ + { + "bbox": [ + 130, + 300, + 482, + 539 + ], + "type": "text", + "content": "To achieve memory-efficient real-time dynamic view synthesis from monocular and multi-view videos, we present a compact dynamic 3D Gaussian representation, containing time-invariant and time-varying parameters to capture dynamic motion effectively. Similarly to [26, 38], we use scaling factors in the covariance matrix, opacity, and color as time-invariant parameters. As modeling the change in positions over time is important to represent dynamic scenes [43-45], we express each 3D Gaussian position as a function of time to model the temporal change in the position. We also represent 3D Gaussian rotation as a time-varying parameter because the rotation of the objects in the world can be typically changed. Inspired by the studies that model motion as periodic [2, 71], we fit the position using the Fourier approximation. We fit the rotation using the linear approximation. The time-varying parameters make our representation dynamic, meaning that a 3D Gaussian moves and rotates over time. Moreover, as we use a function with a few parameters to represent the position, the small degree of freedom contributes to the smoothness of reconstructed scenes, enhancing the robustness against unseen views. Crucially, the memory consumption of our representation is solely determined by the number of 3D Gaussians and the number of the approximation function parameters, remaining independent of input length. Beyond optimizing Gaussian representations through image-level reconstruction, we further enhance temporal consistency by supervising the Gaussian with optical flow obtained from input videos. 
This ensures high-quality reconstruction and facilitates the generalization of the representation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 540, + 482, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 540, + 482, + 635 + ], + "spans": [ + { + "bbox": [ + 130, + 540, + 482, + 635 + ], + "type": "text", + "content": "Our experiments on dynamic datasets (D-NeRF [45], DyNeRF [31], and HyperNeRF [44]) demonstrate the effectiveness of optimizing our dynamic 3D Gaussian from both monocular and multi-view videos, showing that our proposed method achieves rendering quality that rivals that of previous NeRFs [17, 18, 20]. In addition to faithful rendering quality, the proposed method achieves rendering speeds similar to a fast radiance field method [26] while avoiding large memory increases caused by a dynamic extension (see Fig. 1). Finally, we show an editing application enabled by the explicit property of 3D Gaussian representations. In summary, our contributions are as follow:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 641, + 484, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 641, + 484, + 666 + ], + "spans": [ + { + "bbox": [ + 138, + 641, + 484, + 666 + ], + "type": "text", + "content": "- We present a compact dynamic 3D Gaussian representation with time-varying Gaussian parameters equipped with basis functions for representing dynamic scenes." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "K. Katsumata et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 138, + 116, + 481, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 116, + 481, + 151 + ], + "spans": [ + { + "bbox": [ + 138, + 116, + 481, + 151 + ], + "type": "text", + "content": "- As a 3D Gaussian representation is defined over all the timesteps, the 3D Gaussian parameters can be optimized with the frames at all the timesteps, enabling dynamic scene reconstruction from monocular or few-view videos." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 152, + 481, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 152, + 481, + 187 + ], + "spans": [ + { + "bbox": [ + 138, + 152, + 481, + 187 + ], + "type": "text", + "content": "- Our dynamic 3D Gaussian representation facilitates real-time high-quality dynamic scene rendering of high-resolution images of " + }, + { + "bbox": [ + 138, + 152, + 481, + 187 + ], + "type": "inline_equation", + "content": "1,352 \\times 1,014" + }, + { + "bbox": [ + 138, + 152, + 481, + 187 + ], + "type": "text", + "content": " with a frame rate of 118 FPS using a single GPU." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 206, + 224, + 219 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 206, + 224, + 219 + ], + "spans": [ + { + "bbox": [ + 132, + 206, + 224, + 219 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 232, + 481, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 232, + 481, + 268 + ], + "spans": [ + { + "bbox": [ + 130, + 232, + 481, + 268 + ], + "type": "text", + "content": "We briefly overview radiance fields for dynamic scenes and discuss recent efficient explicit representation methods (grid-, plane-, hash-, and point-based), contextualizing our work within real-time dynamic view synthesis." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 285, + 259, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 285, + 259, + 297 + ], + "spans": [ + { + "bbox": [ + 132, + 285, + 259, + 297 + ], + "type": "text", + "content": "2.1 Dynamic view synthesis" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 305, + 482, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 305, + 482, + 473 + ], + "spans": [ + { + "bbox": [ + 130, + 305, + 482, + 473 + ], + "type": "text", + "content": "Applications in virtual reality and computer vision often need reconstruction of dynamic scenes. Several works extend NeRF [41] to handle dynamic scenes in multi-view or monocular setups by time-varying NeRF [21, 31, 45, 60]. The regularization techniques for temporal smoothness enable suitable scene representations from monocular videos [33]. Additional sensory information is also useful for spatio-temporal regularization. Some attempts [21, 33, 58] employ depth or flow, which are observed or predicted with external networks to reconstruct from sparse observations. Deformation-based approaches [43, 44, 54, 59], another research direction in dynamic reconstruction, combine static NeRF and deformation fields. Although tremendous efforts show high visual quality for dynamic view synthesis, the frequent querying of MLP in NeRFs results in the drawback of slow optimization and rendering [65]. Our study aims to enable real-time dynamic view synthesis with high visual quality. We aim to extend 3DGS to dynamic scene reconstruction to achieve high-speed rendering while maintaining the rendering quality from sparse training views." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 490, + 262, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 490, + 262, + 502 + ], + "spans": [ + { + "bbox": [ + 132, + 490, + 262, + 502 + ], + "type": "text", + "content": "2.2 Explicit Radiance Fields" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 482, + 666 + ], + "type": "text", + "content": "Recent studies [11, 19, 69] have addressed the issue in implicit models (i.e., NeRFs) by exploring explicit models, reducing optimization and rendering time. Plenoxels [19] directly optimizes 3D grid representation instead of neural networks. Generally, explicit models sacrifice visual quality for fast training time [19]. Hybrid approaches [11, 17, 18, 20, 42, 53] aim to achieve better trade-offs between training time and visual quality. 
Instant-NGP allows for a compact MLP by exploiting a multi-level hash grid to encode positions to feature vectors [42]. Plane-based approaches are designed principally to represent bounded scenes [3, 9, 10, 14, 18, 23]. MERF [49] employs a multiresolution representation and a fast contraction function to reconstruct unbounded scenes. For dynamic scenes, K-planes [18] decomposes 4D dynamic volumes into multiple feature planes and employs an MLP-based feature decoder for determining color and density. Structured representations still grapple with the trade-off between rendering speed and quality. In this study, unstructured 3D Gaussians promise large gains in rendering speed." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "type": "text", + "content": "A Compact Dynamic 3D Gaussian" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 253, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 253, + 129 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 253, + 129 + ], + "type": "text", + "content": "2.3 Point-based rendering" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 133, + 482, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 133, + 482, + 373 + ], + "spans": [ + { + "bbox": [ + 130, + 133, + 482, + 373 + ], + "type": "text", + "content": "Points, which naturally come from depth sensors, Structure from Motion (SfM) [51], or common Multi-View Stereo (MVS) algorithms [50, 52], offer a useful representation of fine-grained scenes and complex objects, and facilitate computationally efficient rendering. Consequently, they have been studied comprehensively in the vision and graphics community. Differentiable point-based rendering pipelines enable points to be used for reconstructing 3D scenes [26-29]. 3DGS [26] achieves real-time rendering with high visual quality for unbounded static scenes at the expense of the generalization performance derived from NeRF's continuous neural field representation. 3DGS is replacing NeRFs as the backbone of text-to-3D models, leading to faster 3D generation [1, 12, 55, 66, 68]. Recently, Dynamic 3D Gaussians [38] has employed 3DGS for dynamic scenes, modeling them with a Gaussian position and rotation stored at each timestamp. Storing the position and rotation of Gaussians at every timestamp is effective in modeling dynamic scenes captured from dense multi-view setups. However, this approach has difficulty reconstructing monocular dynamic scenes and results in excessive memory consumption, particularly for extended input sequences. 
Specifically, the space complexity of the method for a scene with " + }, + { + "bbox": [ + 130, + 133, + 482, + 373 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 130, + 133, + 482, + 373 + ], + "type": "text", + "content": " frames is " + }, + { + "bbox": [ + 130, + 133, + 482, + 373 + ], + "type": "inline_equation", + "content": "O(TN)" + }, + { + "bbox": [ + 130, + 133, + 482, + 373 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 133, + 482, + 373 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 130, + 133, + 482, + 373 + ], + "type": "text", + "content": " is the number of 3D Gaussians. Our goal is to reduce memory consumption by representing time-varying position and rotation with approximation using a few parameters. The space complexity of our method is " + }, + { + "bbox": [ + 130, + 133, + 482, + 373 + ], + "type": "inline_equation", + "content": "O(LN)" + }, + { + "bbox": [ + 130, + 133, + 482, + 373 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 130, + 133, + 482, + 373 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 130, + 133, + 482, + 373 + ], + "type": "text", + "content": " is the number of parameters of the approximation, and usually " + }, + { + "bbox": [ + 130, + 133, + 482, + 373 + ], + "type": "inline_equation", + "content": "L < T" + }, + { + "bbox": [ + 130, + 133, + 482, + 373 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 374, + 482, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 374, + 482, + 518 + ], + "spans": [ + { + "bbox": [ + 130, + 374, + 482, + 518 + ], + "type": "text", + "content": "Concurrent works on dynamic view synthesis include approaches combining Gaussian Splatting with MLPs [5,22,32,35,36], approaches focusing on Gaussian representation [13, 15, 16, 64, 67], and approaches for specific targets [30, 46, 47, 72]. SpacetimeGaussian [32] focuses on dynamic view synthesis from multiview videos, unlike this study, by combining Gaussian Splatting and MLPs. [64] aims to model motion by employing a deformation field network while sacrificing rendering speed. [67] splits Gaussians in a time direction, and each Gaussian only focuses on a local temporal space. Four-dimensional (4D) Rotor Gaussian Splatting [15] models a local temporal space via temporal slicing for fast rendering. We aim to build a memory-efficient Gaussian representation for dynamic scenes, even for monocular scenes, while maintaining a pure 3D Gaussian representation so as not to sacrifice the benefits of 3D Gaussians, such as outstanding rendering speed and ease of direct editing of the scene." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 534, + 194, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 534, + 194, + 547 + ], + "spans": [ + { + "bbox": [ + 132, + 534, + 194, + 547 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 558, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 558, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 558, + 482, + 666 + ], + "type": "text", + "content": "Given images with timesteps and camera parameters obtained from videos, our task is to learn a 4D spatial-temporal representation of a dynamic scene that enables fast and high-quality view rendering. 
To achieve this, we use 3DGS in dynamic view synthesis. The original 3D Gaussian representation [26] is defined by a position (mean), a covariance matrix (decomposed into a rotation matrix and a scaling vector), a color (determined by spherical harmonics (SH) [8] coefficient), and an opacity. To represent dynamic scenes, each 3D Gaussian in our method (Fig. 2) regards the position and rotation as time-varying parameters and the others as time-invariant parameters (Sec. 3.1). Given a set of 3D Gaussians, intrinsic and extrinsic camera parameters, and" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 92, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 92, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 92, + 140, + 100 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "K. Katsumata et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 153, + 114, + 460, + 196 + ], + "blocks": [ + { + "bbox": [ + 153, + 114, + 460, + 196 + ], + "lines": [ + { + "bbox": [ + 153, + 114, + 460, + 196 + ], + "spans": [ + { + "bbox": [ + 153, + 114, + 460, + 196 + ], + "type": "image", + "image_path": "e865dcf72c9a9d8bdfad9c6614cda3a3456003052c368cedabb7a7105d446329.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 205, + 482, + 271 + ], + "lines": [ + { + "bbox": [ + 130, + 205, + 482, + 271 + ], + "spans": [ + { + "bbox": [ + 130, + 205, + 482, + 271 + ], + "type": "text", + "content": "Fig. 2: Overview of our dynamic view synthesis framework. Our dynamic 3D Gaussian representation models 3D centers and rotations over time with Fourier and linear approximations, respectively. Our representation parameters are shared over all the timesteps, and observations of each timestep hint at the representation for other timesteps, enabling compact representation and reconstruction of dynamic scenes from few-view videos. In this figure, we only illustrate the time-varying parameterization of one Gaussian for the sake of simplicity." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 294, + 482, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 294, + 482, + 377 + ], + "spans": [ + { + "bbox": [ + 130, + 294, + 482, + 377 + ], + "type": "text", + "content": "a timestep, we render images with the 3DGS technique [26], which renders an image by employing the Gaussians within the camera plane out of a set of 3D Gaussians (Sec. 3.2). We update the Gaussian parameters to decrease the distance between rendered and training images in image and flow spaces (Sec. 3.3). Flow reconstruction loss enhances the temporal consistency of the learned representation, resulting in plausible image reconstruction. The small degrees of freedom of our representation essentially facilitate the reconstruction of dynamic scenes from a few observations."
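To make the overall pipeline concrete, here is a minimal sketch of one optimization step implied by this overview. It is an illustration, not the authors' code: `rasterize`, `rasterize_flow`, and `d_ssim` are hypothetical stand-ins for a differentiable 3DGS renderer, its flow variant, and an SSIM-based dissimilarity, and the weights `lam` and `lam_flow` are illustrative values.

```python
# Sketch of one training step under the assumptions stated above.
import torch
from typing import Callable

def train_step(params: dict, camera, t: float, frame: torch.Tensor,
               flow_gt: torch.Tensor, rasterize: Callable, rasterize_flow: Callable,
               d_ssim: Callable, opt: torch.optim.Optimizer,
               lam: float = 0.2, lam_flow: float = 0.1) -> torch.Tensor:
    image = rasterize(params, camera, t)        # splat the Gaussians evaluated at time t
    l1 = (image - frame).abs().mean()           # image-space L1 term
    recon = (1 - lam) * l1 + lam * d_ssim(image, frame)
    flow = rasterize_flow(params, camera, t)    # alpha-blended pseudo optical flow (Sec. 3.3)
    loss = recon + lam_flow * (flow - flow_gt).abs().mean()  # flow-space supervision
    opt.zero_grad()
    loss.backward()
    opt.step()
    return loss
```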
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 395, + 317, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 395, + 317, + 407 + ], + "spans": [ + { + "bbox": [ + 131, + 395, + 317, + 407 + ], + "type": "text", + "content": "3.1 Dynamic 3D Gaussian representation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 414, + 482, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 414, + 482, + 484 + ], + "spans": [ + { + "bbox": [ + 130, + 414, + 482, + 484 + ], + "type": "text", + "content": "One possible extension of 3DGS [38] to dynamic scenes is to model the scenes per timestep explicitly. Although that strategy allows for flexible modeling for dynamic scenes, it requires 3D Gaussian parameters per timestep, increasing the memory size proportionally to video length. Since the representation for each time is optimized by observations with the number of cameras, the strategy lacks sufficient observations in monocular or few-view video setups, limiting its effectiveness in such scenarios." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 486, + 482, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 486, + 482, + 570 + ], + "spans": [ + { + "bbox": [ + 130, + 486, + 482, + 570 + ], + "type": "text", + "content": "To design a compact dynamic 3D Gaussian representation, we express 3D Gaussian parameters using only a few parameters to achieve faithful reconstruction without a large increase in parameters. Our dynamic scene representation comprises a set of dynamic 3D Gaussians, extending the static 3D Gaussian introduced in [26]. This representation allows 3D Gaussians to move through the scene over time, using time-varying parameters (center position and rotation factors) and time-invariant parameters (scale, color, and opacity). 
Each dynamic Gaussian encapsulates the following parameters:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 142, + 572, + 359, + 597 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 142, + 572, + 340, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 572, + 340, + 586 + ], + "spans": [ + { + "bbox": [ + 142, + 572, + 340, + 586 + ], + "type": "text", + "content": "1) a 3D center at time " + }, + { + "bbox": [ + 142, + 572, + 340, + 586 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 142, + 572, + 340, + 586 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 142, + 572, + 340, + 586 + ], + "type": "inline_equation", + "content": "[x(t),y(t),z(t)]^{\\top} \\in \\mathbb{R}^{3}" + }, + { + "bbox": [ + 142, + 572, + 340, + 586 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 142, + 586, + 359, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 586, + 359, + 597 + ], + "spans": [ + { + "bbox": [ + 142, + 586, + 359, + 597 + ], + "type": "text", + "content": "2) a 3D rotation at time " + }, + { + "bbox": [ + 142, + 586, + 359, + 597 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 142, + 586, + 359, + 597 + ], + "type": "text", + "content": " represented by a quaternion:" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 142, + 597, + 279, + 609 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 597, + 279, + 609 + ], + "spans": [ + { + "bbox": [ + 142, + 597, + 279, + 609 + ], + "type": "interline_equation", + "content": "[ q _ {x} (t), q _ {y} (t), q _ {z} (t), q _ {w} (t) ] ^ {\\top} \\in \\mathbb {R} ^ {4},", + "image_path": "721e1e9bc8dde7d5ae9f3e5755bdc067b57cfbe54c6e50a37f4b8c003e0eca6a.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 142, + 597, + 416, + 633 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 142, + 609, + 296, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 609, + 296, + 622 + ], + "spans": [ + { + "bbox": [ + 142, + 609, + 296, + 622 + ], + "type": "text", + "content": "3) a scaling factor: " + }, + { + "bbox": [ + 142, + 609, + 296, + 622 + ], + "type": "inline_equation", + "content": "[s_x, s_y, s_z]^{\\mathsf{T}} \\in \\mathbb{R}^3" + }, + { + "bbox": [ + 142, + 609, + 296, + 622 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 142, + 622, + 416, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 622, + 416, + 633 + ], + "spans": [ + { + "bbox": [ + 142, + 622, + 416, + 633 + ], + "type": "text", + "content": "4) SH coefficients representing color with the degrees of freedom " + }, + { + "bbox": [ + 142, + 622, + 416, + 633 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 142, + 622, + 416, + 633 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 142, + 633, + 206, + 646 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 633, + 206, + 646 + ], + "spans": [ + { + "bbox": [ + 142, + 633, + 206, + 646 + ], + "type": "interline_equation", + "content": "h \\in \\mathbb {R} ^ {3 \\times (k + 1) ^ {2}},", + "image_path": "a11ea4349d6f5750a9f7b795657aa789f97200f1cf172a7a0affa70df7adb9a9.jpg" + } + ] + } + ], + 
"index": 15 + }, + { + "bbox": [ + 142, + 647, + 228, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 647, + 228, + 658 + ], + "spans": [ + { + "bbox": [ + 142, + 647, + 228, + 658 + ], + "type": "text", + "content": "5) an opacity: " + }, + { + "bbox": [ + 142, + 647, + 228, + 658 + ], + "type": "inline_equation", + "content": "o \\in \\mathbb{R}" + }, + { + "bbox": [ + 142, + 647, + 228, + 658 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "type": "text", + "content": "A Compact Dynamic 3D Gaussian" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 115, + 482, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 115, + 482, + 152 + ], + "spans": [ + { + "bbox": [ + 130, + 115, + 482, + 152 + ], + "type": "text", + "content": "Each Gaussian at time " + }, + { + "bbox": [ + 130, + 115, + 482, + 152 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 130, + 115, + 482, + 152 + ], + "type": "text", + "content": " is characterized by a 3D center " + }, + { + "bbox": [ + 130, + 115, + 482, + 152 + ], + "type": "inline_equation", + "content": "\\pmb{\\mu}(t) = [x(t), y(t), z(t)]^{\\top}" + }, + { + "bbox": [ + 130, + 115, + 482, + 152 + ], + "type": "text", + "content": " and a 3D covariance matrix " + }, + { + "bbox": [ + 130, + 115, + 482, + 152 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}(t)" + }, + { + "bbox": [ + 130, + 115, + 482, + 152 + ], + "type": "text", + "content": ". The density of the 3D Gaussian at the intersection " + }, + { + "bbox": [ + 130, + 115, + 482, + 152 + ], + "type": "inline_equation", + "content": "\\pmb{x}" + }, + { + "bbox": [ + 130, + 115, + 482, + 152 + ], + "type": "text", + "content": " with a ray is obtained as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 228, + 156, + 481, + 172 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 156, + 481, + 172 + ], + "spans": [ + { + "bbox": [ + 228, + 156, + 481, + 172 + ], + "type": "interline_equation", + "content": "G _ {t} (\\boldsymbol {x}) = e ^ {- \\frac {1}{2} (\\boldsymbol {x} - \\boldsymbol {\\mu} (t)) ^ {\\top} \\boldsymbol {\\Sigma} (t) ^ {- 1} (\\boldsymbol {x} - \\boldsymbol {\\mu} (t))}. 
\\tag {1}", + "image_path": "5e388f4d07207363e47d84109b454b6ce54c37dc1661f951ebafd465781fca5b.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 176, + 482, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 176, + 482, + 272 + ], + "spans": [ + { + "bbox": [ + 130, + 176, + 482, + 272 + ], + "type": "text", + "content": "To constrain the covariance matrix " + }, + { + "bbox": [ + 130, + 176, + 482, + 272 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}(t)" + }, + { + "bbox": [ + 130, + 176, + 482, + 272 + ], + "type": "text", + "content": " such that it is a positive semi-definite matrix during optimization, the covariance matrix " + }, + { + "bbox": [ + 130, + 176, + 482, + 272 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}(t)" + }, + { + "bbox": [ + 130, + 176, + 482, + 272 + ], + "type": "text", + "content": " is decomposed by using a scaling matrix " + }, + { + "bbox": [ + 130, + 176, + 482, + 272 + ], + "type": "inline_equation", + "content": "\\mathbf{S} = \\mathrm{diag}(s_x, s_y, s_z)" + }, + { + "bbox": [ + 130, + 176, + 482, + 272 + ], + "type": "text", + "content": " and a rotation matrix " + }, + { + "bbox": [ + 130, + 176, + 482, + 272 + ], + "type": "inline_equation", + "content": "\\mathbf{R}(t)" + }, + { + "bbox": [ + 130, + 176, + 482, + 272 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 130, + 176, + 482, + 272 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\Sigma}(t) = \\mathbf{R}(t)\\mathbf{S}\\mathbf{S}^{\\top}\\mathbf{R}(t)^{\\top}" + }, + { + "bbox": [ + 130, + 176, + 482, + 272 + ], + "type": "text", + "content": ". Here, the rotation matrix " + }, + { + "bbox": [ + 130, + 176, + 482, + 272 + ], + "type": "inline_equation", + "content": "\\mathbf{R}(t)" + }, + { + "bbox": [ + 130, + 176, + 482, + 272 + ], + "type": "text", + "content": " is represented by quaternion " + }, + { + "bbox": [ + 130, + 176, + 482, + 272 + ], + "type": "inline_equation", + "content": "(q_x(t), q_y(t), q_z(t), q_w(t))" + }, + { + "bbox": [ + 130, + 176, + 482, + 272 + ], + "type": "text", + "content": ". Since most parts of the dynamic scene hardly change in scale because the solid objects (e.g., humans, animals, and things) scarcely expand or shrink, we maintain the scale parameter as a constant to reduce the model size. In what follows, we formally define the 3D center and rotation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 272, + 482, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 272, + 482, + 320 + ], + "spans": [ + { + "bbox": [ + 130, + 272, + 482, + 320 + ], + "type": "text", + "content": "Since motion in dynamic scenes is primarily described by changing the position of points like scene or optical flow [37, 61], we model the 3D center with an expressive approximation. We approximate the 3D position " + }, + { + "bbox": [ + 130, + 272, + 482, + 320 + ], + "type": "inline_equation", + "content": "x(t), y(t), z(t)" + }, + { + "bbox": [ + 130, + 272, + 482, + 320 + ], + "type": "text", + "content": " using Fourier approximation. 
At time " + }, + { + "bbox": [ + 130, + 272, + 482, + 320 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 130, + 272, + 482, + 320 + ], + "type": "text", + "content": ", it is represented by" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 187, + 324, + 481, + 374 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 324, + 481, + 374 + ], + "spans": [ + { + "bbox": [ + 187, + 324, + 481, + 374 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} x (t) = w _ {x, 0} + \\sum_ {i = 1} ^ {L} w _ {x, 2 i - 1} \\sin (2 i \\pi t) + w _ {x, 2 i} \\cos (2 i \\pi t), \\\\ y (t) = w _ {y, 0} + \\sum_ {i = 1} ^ {L} w _ {y, 2 i - 1} \\sin (2 i \\pi t) + w _ {y, 2 i} \\cos (2 i \\pi t), \\tag {2} \\\\ z (t) = w _ {z, 0} + \\sum_ {i = 1} ^ {L} w _ {z, 2 i - 1} \\sin (2 i \\pi t) + w _ {z, 2 i} \\cos (2 i \\pi t), \\\\ \\end{array}", + "image_path": "35022c39ee0be4afd0f5ed2b8d75cf4ff9e0a75823aa666f5f679ad09ce37c0a.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 378, + 482, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 378, + 482, + 426 + ], + "spans": [ + { + "bbox": [ + 130, + 378, + 482, + 426 + ], + "type": "text", + "content": "where, " + }, + { + "bbox": [ + 130, + 378, + 482, + 426 + ], + "type": "inline_equation", + "content": "w_{\\cdot,0}, \\ldots, w_{\\cdot,2L}" + }, + { + "bbox": [ + 130, + 378, + 482, + 426 + ], + "type": "text", + "content": " are the intercept and coefficients of the position, and " + }, + { + "bbox": [ + 130, + 378, + 482, + 426 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 130, + 378, + 482, + 426 + ], + "type": "text", + "content": " is the number of terms (harmonics). We remark that a polynomial approximation is inadequate due to underfitting with a small number of bases and overfitting with higher-order polynomials. For these reasons, we choose the Fourier approximation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 426, + 482, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 426, + 482, + 474 + ], + "spans": [ + { + "bbox": [ + 130, + 426, + 482, + 474 + ], + "type": "text", + "content": "3DGS uses anisotropic 3D Gaussians, resulting in the need for dynamic modeling of Gaussian rotations. We approximate the 3D rotation (quaternion) over time using a linear approximation because a unit quaternion can be approximated locally as linear when considering its tangent plane. 
At time " + }, + { + "bbox": [ + 130, + 426, + 482, + 474 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 130, + 426, + 482, + 474 + ], + "type": "text", + "content": ", it is defined as" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 199, + 478, + 413, + 506 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 199, + 478, + 413, + 506 + ], + "spans": [ + { + "bbox": [ + 199, + 478, + 413, + 506 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} q _ {x} (t) = w _ {q x, 0} + w _ {q x, 1} t, \\quad q _ {y} (t) = w _ {q y, 0} + w _ {q y, 1} t, \\\\ q _ {z} (t) = w _ {q z, 0} + w _ {q z, 1} t, \\quad q _ {w} (t) = w _ {q w, 0} + w _ {q w, 1} t, \\\\ \\end{array}", + "image_path": "c75411a4b10eb36d8764213924006e71a6284e16b85d66a6c1e4c41b1fb90728.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 510, + 482, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 510, + 482, + 546 + ], + "spans": [ + { + "bbox": [ + 130, + 510, + 482, + 546 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 510, + 482, + 546 + ], + "type": "inline_equation", + "content": "w_{\\cdot,0}" + }, + { + "bbox": [ + 130, + 510, + 482, + 546 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 510, + 482, + 546 + ], + "type": "inline_equation", + "content": "w_{\\cdot,1}" + }, + { + "bbox": [ + 130, + 510, + 482, + 546 + ], + "type": "text", + "content": " are intercepts and coefficients of the rotation, respectively. We project the quaternion " + }, + { + "bbox": [ + 130, + 510, + 482, + 546 + ], + "type": "inline_equation", + "content": "q_{\\cdot}(t)" + }, + { + "bbox": [ + 130, + 510, + 482, + 546 + ], + "type": "text", + "content": " onto the unit quaternion by normalizing it: " + }, + { + "bbox": [ + 130, + 510, + 482, + 546 + ], + "type": "inline_equation", + "content": "q_{\\cdot}(t) / \\|q_{\\cdot}(t)\\|" + }, + { + "bbox": [ + 130, + 510, + 482, + 546 + ], + "type": "text", + "content": ", to ensure that the quaternion at time " + }, + { + "bbox": [ + 130, + 510, + 482, + 546 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 130, + 510, + 482, + 546 + ], + "type": "text", + "content": " is a unit quaternion." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "type": "text", + "content": "For each Gaussian, the preceding definitions yield " + }, + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "type": "inline_equation", + "content": "3L + 8 + 3 + 3(k + 1)^2 + 1" + }, + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "type": "text", + "content": " parameters with respect to the 3D center, 3D rotation, scale, color, and opacity. Notably, the parameter count for each Gaussian is defined merely by the number of approximation terms and spherical harmonic degrees of freedom, with no regard to time length. Compared to methods that store parameters for each timestep, our approach saves on memory usage. 
Memory consumption in our dynamic scene representation is determined by two hyperparameters (i.e., " + }, + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 130, + 546, + 482, + 666 + ], + "type": "text", + "content": ") and the number of Gaussians used. Furthermore, the representation, defined as a function over continuous time, inhibits discontinuous movement through time. This characteristic improves robustness in novel view synthesis settings." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "K. Katsumata et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 316, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 316, + 129 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 316, + 129 + ], + "type": "text", + "content": "3.2 Rendering via 3D Gaussian Splatting" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 131, + 137, + 482, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 137, + 482, + 197 + ], + "spans": [ + { + "bbox": [ + 131, + 137, + 482, + 197 + ], + "type": "text", + "content": "Rendering with 3D Gaussians applies splatting techniques [26] to the Gaussians within the camera plane. Zwicker et al. [73] introduced the projection of the 3D covariance matrix to the 2D one. 
The 3D covariance matrix " + }, + { + "bbox": [ + 131, + 137, + 482, + 197 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}" + }, + { + "bbox": [ + 131, + 137, + 482, + 197 + ], + "type": "text", + "content": " is projected into a 2D one " + }, + { + "bbox": [ + 131, + 137, + 482, + 197 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}'" + }, + { + "bbox": [ + 131, + 137, + 482, + 197 + ], + "type": "text", + "content": " given a viewing transformation " + }, + { + "bbox": [ + 131, + 137, + 482, + 197 + ], + "type": "inline_equation", + "content": "\\mathbf{W}" + }, + { + "bbox": [ + 131, + 137, + 482, + 197 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 131, + 137, + 482, + 197 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}'(t) = \\mathbf{J}\\mathbf{W}\\pmb{\\Sigma}(t)\\mathbf{W}^{\\top}\\mathbf{J}^{\\top}" + }, + { + "bbox": [ + 131, + 137, + 482, + 197 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 131, + 137, + 482, + 197 + ], + "type": "inline_equation", + "content": "\\mathbf{J}" + }, + { + "bbox": [ + 131, + 137, + 482, + 197 + ], + "type": "text", + "content": " is the Jacobian of the affine approximation of the projective transformation at Gaussian center " + }, + { + "bbox": [ + 131, + 137, + 482, + 197 + ], + "type": "inline_equation", + "content": "\\pmb{\\mu}(t)" + }, + { + "bbox": [ + 131, + 137, + 482, + 197 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 264, + 206, + 481, + 246 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 206, + 481, + 246 + ], + "spans": [ + { + "bbox": [ + 264, + 206, + 481, + 246 + ], + "type": "interline_equation", + "content": "\\mathbf {J} = \\left[ \\begin{array}{c c c} \\frac {1}{v _ {z}} & 0 & - \\frac {v _ {x}}{v _ {z} ^ {2}} \\\\ 0 & \\frac {1}{v _ {z}} & - \\frac {v _ {y}}{v _ {z} ^ {2}} \\\\ 0 & 0 & 0 \\end{array} \\right], \\tag {4}", + "image_path": "48cc9100c1a44f708d7afdd352a342e95d16474fc2a94740f0ba7d7b6270a498.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 131, + 256, + 480, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 256, + 480, + 293 + ], + "spans": [ + { + "bbox": [ + 131, + 256, + 480, + 293 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 256, + 480, + 293 + ], + "type": "inline_equation", + "content": "[v_x, v_y, v_z]^{\\top} = \\mathbf{W}\\boldsymbol{\\mu}(t)" + }, + { + "bbox": [ + 131, + 256, + 480, + 293 + ], + "type": "text", + "content": " is the camera coordinate of the Gaussian center " + }, + { + "bbox": [ + 131, + 256, + 480, + 293 + ], + "type": "inline_equation", + "content": "\\boldsymbol{\\mu}(t)" + }, + { + "bbox": [ + 131, + 256, + 480, + 293 + ], + "type": "text", + "content": " obtained by the viewing transformation, which projects the points from world space to camera space." 
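A small self-contained sketch of these two steps, assuming a 4x4 world-to-camera matrix `W` and the quaternion convention [q_x, q_y, q_z, q_w]; this is illustrative, not the paper's code:

```python
# Build Sigma(t) = R(t) S S^T R(t)^T and project it with Sigma' = J W Sigma W^T J^T.
import numpy as np

def quat_to_rot(q: np.ndarray) -> np.ndarray:
    """Rotation matrix from a unit quaternion q = [qx, qy, qz, qw]."""
    x, y, z, w = q
    return np.array([
        [1 - 2*(y*y + z*z), 2*(x*y - w*z),     2*(x*z + w*y)],
        [2*(x*y + w*z),     1 - 2*(x*x + z*z), 2*(y*z - w*x)],
        [2*(x*z - w*y),     2*(y*z + w*x),     1 - 2*(x*x + y*y)],
    ])

def covariance_3d(q: np.ndarray, scale: np.ndarray) -> np.ndarray:
    R = quat_to_rot(q / np.linalg.norm(q))
    S = np.diag(scale)                      # time-invariant scaling matrix
    return R @ S @ S.T @ R.T                # positive semi-definite by construction

def project_covariance(sigma: np.ndarray, W: np.ndarray, mu: np.ndarray) -> np.ndarray:
    vx, vy, vz = W[:3, :3] @ mu + W[:3, 3]  # camera-space Gaussian center
    J = np.array([[1/vz, 0, -vx/vz**2],     # Jacobian of Eq. (4)
                  [0, 1/vz, -vy/vz**2],
                  [0, 0, 0]])
    return J @ W[:3, :3] @ sigma @ W[:3, :3].T @ J.T
```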
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "spans": [ + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "text", + "content": "Similar to NeRF style volumetric rendering, point-based rendering computes the color " + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "text", + "content": " of a pixel by evaluating the blending of " + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "text", + "content": " ordered points that overlap the pixel " + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "inline_equation", + "content": "C = \\sum_{i=1}^{N} c_i \\alpha_i \\prod_{j=1}^{i-1} (1 - \\alpha_j)" + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "inline_equation", + "content": "c_i" + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "text", + "content": " represents the color of a Gaussian evaluated by SH coefficients, and " + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "inline_equation", + "content": "\\alpha_i" + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "text", + "content": " represents the density that is calculated from a 2D Gaussian with the 2D covariance " + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "inline_equation", + "content": "\\pmb{\\Sigma}'" + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "text", + "content": " and 2D center " + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "inline_equation", + "content": "\\pmb{\\mu}'" + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "text", + "content": " at time " + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "text", + "content": " and the optimized opacity " + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "inline_equation", + "content": "o" + }, + { + "bbox": [ + 131, + 293, + 481, + 354 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 371, + 400, + 384 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 371, + 400, + 384 + ], + "spans": [ + { + "bbox": [ + 132, + 371, + 400, + 384 + ], + "type": "text", + "content": "3.3 Optimization of the dynamic 3D Gaussian representation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 131, + 392, + 481, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 392, + 481, + 453 + ], + "spans": [ + { + "bbox": [ + 131, + 392, + 481, + 453 + ], + "type": "text", + "content": "We optimize the Gaussian parameters, i.e., intercepts and coefficients of position and rotation " + }, + { + "bbox": [ + 131, + 392, + 481, + 453 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 131, + 392, + 481, + 453 + ], + "type": "text", + "content": ", a scaling factor " + }, + { + "bbox": [ + 131, + 392, + 481, + 453 + ], + "type": "inline_equation", + "content": "s_x, s_y, s_z" + }, + { + "bbox": [ + 131, + 392, + 481, + 453 + ], + "type": "text", + "content": ", SH coefficients " + }, + { + "bbox": [ + 131, + 392, + 481, + 453 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 131, + 392, + 481, + 453 + ], + "type": "text", + "content": ", and an opacity " + }, + { + "bbox": [ + 131, + 392, + 481, + 453 + ], + "type": "inline_equation", + "content": "o" + }, + { + "bbox": [ + 131, + 392, + 481, + 453 + ], + "type": "text", + "content": ", based on the iterations of rendering and a comparison of the rendered images with training frames in the captured videos. To compare the rendered and training views, the loss function contains the L1 loss and the structural similarity (SSIM) [63] loss " + }, + { + "bbox": [ + 131, + 392, + 481, + 453 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{D - SSIM}}" + }, + { + "bbox": [ + 131, + 392, + 481, + 453 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 227, + 460, + 481, + 474 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 460, + 481, + 474 + ], + "spans": [ + { + "bbox": [ + 227, + 460, + 481, + 474 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {r e c o n}} = (1 - \\lambda) | \\hat {I} - I | + \\lambda \\mathcal {L} _ {\\mathrm {D} - \\mathrm {S S I M}}, \\tag {5}", + "image_path": "e57d209218e3e9b0d0cf054c1709e41665c78884b6e86d3d6d725619d9dd96c6.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 485, + 482, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 485, + 482, + 616 + ], + "spans": [ + { + "bbox": [ + 131, + 485, + 482, + 616 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 131, + 485, + 482, + 616 + ], + "type": "inline_equation", + "content": "\\hat{I}" + }, + { + "bbox": [ + 131, + 485, + 482, + 616 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 131, + 485, + 482, + 616 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 131, + 485, + 482, + 616 + ], + "type": "text", + "content": " are the rendered and target images, respectively. The loss function moves and rotates the anisotropic Gaussians and changes their color and opacity so that each Gaussian covers a homogeneous area. 
Since the loss only adjusts incorrectly positioned Gaussians, handling over- or under-representation of the scene requires a mechanism that creates Gaussians where they are needed and destroys extra ones. We also follow the densification and pruning techniques in 3DGS for producing a compact and precise representation of the scene. We monitor the gradients of each Gaussian and densify Gaussians by splitting a Gaussian with a large gradient and a large scale into two small Gaussians, and cloning a Gaussian with a large gradient and a small scale into two Gaussians. Moreover, we remove transparent Gaussians with an opacity less than a threshold value of 0.005." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 131, + 617, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 617, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 131, + 617, + 482, + 666 + ], + "type": "text", + "content": "Following [26], we initialize a set of Gaussians using a set of sparse points from SfM [51] for real scenes, and we initialize a set of Gaussians randomly using a uniform distribution for synthetic scenes owing to the absence of the prior. We adopt a two-stage optimization strategy consisting of static and dynamic stages. Treating the" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "type": "text", + "content": "A Compact Dynamic 3D Gaussian" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 187 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 187 + ], + "type": "text", + "content": "frames in the captured datasets as static scenes, we optimize a static representation in the static stage to learn a prior for the Gaussians. In other words, we optimize the parameters that are consistent over time (i.e., scale, SH coefficients, and opacity) and the intercepts for the center and rotation " + }, + { + "bbox": [ + 130, + 116, + 482, + 187 + ], + "type": "inline_equation", + "content": "(w_{x,0}, w_{y,0}, w_{z,0}, w_{qx,0}, w_{qy,0}, w_{qz,0}, w_{qw,0})" + }, + { + "bbox": [ + 130, + 116, + 482, + 187 + ], + "type": "text", + "content": " among the Gaussian parameters in the static stage. After the static stage, we optimize all the parameters of the set of Gaussians to reconstruct the dynamic regions in the dynamic stage." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 188, + 483, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 188, + 483, + 380 + ], + "spans": [ + { + "bbox": [ + 130, + 188, + 483, + 380 + ], + "type": "text", + "content": "Another challenge in dynamic scene reconstruction is the ambiguity caused by the limited number of captured views at a timestep. 
Since a dynamic scene contains temporal changes, such as moving objects and changing shapes, sharing the scene information over frames with different timesteps is difficult. To overcome the ambiguity, we employ flow information. Similar to our 3D Gaussian, scene flow [39, 40, 62] is defined as the position of a point in 3D space and its motion. These 3D points originate from different mechanisms than those in 3D Gaussians, making matching in 3D space difficult. Since optical flow defined on the image plane can be directly matched with a 3D Gaussian and is readily computed from monocular inputs, we supervise the flows induced by the Gaussians with the ground truth optical flows of the input frames. We use RAFT [56] to obtain ground truth flow for training views: forward flow " + }, + { + "bbox": [ + 130, + 188, + 483, + 380 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{fwd}}" + }, + { + "bbox": [ + 130, + 188, + 483, + 380 + ], + "type": "text", + "content": " and backward flow " + }, + { + "bbox": [ + 130, + 188, + 483, + 380 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{bwd}}" + }, + { + "bbox": [ + 130, + 188, + 483, + 380 + ], + "type": "text", + "content": " between two adjacent frames. The flow loss " + }, + { + "bbox": [ + 130, + 188, + 483, + 380 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{flow}}" + }, + { + "bbox": [ + 130, + 188, + 483, + 380 + ], + "type": "text", + "content": " takes the L1 loss between the ground truth flows and the optical flow of the Gaussian for both directions of the flows. The flow loss gives our method spatial-temporal consistency without any additional computation cost in rendering. We combine the flow loss " + }, + { + "bbox": [ + 130, + 188, + 483, + 380 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{flow}}" + }, + { + "bbox": [ + 130, + 188, + 483, + 380 + ], + "type": "text", + "content": " with the reconstruction loss that compares the rendered and training views:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 242, + 385, + 481, + 398 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 385, + 481, + 398 + ], + "spans": [ + { + "bbox": [ + 242, + 385, + 481, + 398 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\mathcal {L} _ {\\text {r e c o n}} + \\lambda_ {\\text {f l o w}} \\mathcal {L} _ {\\text {f l o w}} (\\hat {F}, F), \\tag {6}", + "image_path": "ed2e8a9042a14e0b2b4c0e64f9336387437d217929f41a0e4c76526f1d8d3573.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 406, + 482, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 406, + 482, + 467 + ], + "spans": [ + { + "bbox": [ + 130, + 406, + 482, + 467 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 406, + 482, + 467 + ], + "type": "inline_equation", + "content": "F = \\{f_{\\mathrm{fwd}}, f_{\\mathrm{bwd}}\\}" + }, + { + "bbox": [ + 130, + 406, + 482, + 467 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 406, + 482, + 467 + ], + "type": "inline_equation", + "content": "\\hat{F}" + }, + { + "bbox": [ + 130, + 406, + 482, + 467 + ], + "type": "text", + "content": " are the ground truth flow and the flow of the Gaussians, respectively, and " + }, + { + "bbox": [ + 130, + 406, + 482, + 467 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{flow}}" + }, + { + "bbox": [ + 130, + 406, + 482, + 467 + ], + "type": "text", + "content": " is a 
balancing hyperparameter for the flow term. Instead of applying an optical flow algorithm for rendering, we create pseudo optical flow from a Gaussian representation. Scene motion is represented solely by the 3D mean coefficients: " + }, + { + "bbox": [ + 130, + 406, + 482, + 467 + ], + "type": "inline_equation", + "content": "w_{x,1 \\leq i}, w_{y,1 \\leq i}, w_{z,1 \\leq i}" + }, + { + "bbox": [ + 130, + 406, + 482, + 467 + ], + "type": "text", + "content": ". Scene flow in 3D space can be computed by" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 196, + 474, + 418, + 487 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 196, + 474, + 418, + 487 + ], + "spans": [ + { + "bbox": [ + 196, + 474, + 418, + 487 + ], + "type": "interline_equation", + "content": "\\hat {f} _ {\\mathrm {f w d}} ^ {x} = x (t + \\Delta t) - x (t), \\quad \\hat {f} _ {\\mathrm {b w d}} ^ {x} = x (t) - x (t - \\Delta t),", + "image_path": "3e2c1ad1321c8bc98b137fc0d793641dba355520f3ca8db2fa9aacc7e4984580.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 198, + 491, + 480, + 505 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 491, + 480, + 505 + ], + "spans": [ + { + "bbox": [ + 198, + 491, + 480, + 505 + ], + "type": "interline_equation", + "content": "\\hat {f} _ {\\mathrm {f w d}} ^ {y} = y (t + \\Delta t) - y (t), \\quad \\hat {f} _ {\\mathrm {b w d}} ^ {y} = y (t) - y (t - \\Delta t), \\tag {7}", + "image_path": "fa90007b6841ce8227e4db05373cc0bbd3caca5c4cec95e555625791b454adc3.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 198, + 508, + 418, + 521 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 508, + 418, + 521 + ], + "spans": [ + { + "bbox": [ + 198, + 508, + 418, + 521 + ], + "type": "interline_equation", + "content": "\\hat {f} _ {\\mathrm {f w d}} ^ {z} = z (t + \\Delta t) - z (t), \\quad \\hat {f} _ {\\mathrm {b w d}} ^ {z} = z (t) - z (t - \\Delta t),", + "image_path": "ef5a3f27d50056556c177e35be05d19537b8599c7e142718578058731e98bf10.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 527, + 482, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 527, + 482, + 552 + ], + "spans": [ + { + "bbox": [ + 130, + 527, + 482, + 552 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 527, + 482, + 552 + ], + "type": "inline_equation", + "content": "\\Delta t" + }, + { + "bbox": [ + 130, + 527, + 482, + 552 + ], + "type": "text", + "content": " is the difference between the timesteps of the two image frames. 
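Concretely, the per-Gaussian scene flow of Eq. (7) falls out of evaluating the Fourier center model at neighboring timesteps; a sketch (same assumed coefficient layout as the earlier center example, for illustration only):

```python
# Forward/backward 3D scene flow from the Fourier center model, Eq. (7).
import numpy as np

def center_at(t: float, w: np.ndarray) -> np.ndarray:
    """w has shape (3, 2L+1): per axis, [intercept, sin_1, cos_1, ..., sin_L, cos_L]."""
    L = (w.shape[1] - 1) // 2
    basis = [1.0]
    for i in range(1, L + 1):
        basis += [np.sin(2 * i * np.pi * t), np.cos(2 * i * np.pi * t)]
    return w @ np.array(basis)

def scene_flow(t: float, dt: float, w: np.ndarray):
    fwd = center_at(t + dt, w) - center_at(t, w)   # forward flow in 3D
    bwd = center_at(t, w) - center_at(t - dt, w)   # backward flow in 3D
    # These are then projected to the image plane with J (Eq. 8) and
    # alpha-blended per pixel (Eq. 9).
    return fwd, bwd
```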
The scene flow is projected into a 2D camera plane using" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 200, + 556, + 481, + 572 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 556, + 481, + 572 + ], + "spans": [ + { + "bbox": [ + 200, + 556, + 481, + 572 + ], + "type": "interline_equation", + "content": "\\hat {f} _ {\\left\\{f w d, b w d \\right\\}} ^ {x y z} = \\mathbf {J} \\left[ \\hat {f} _ {\\left\\{f w d, b w d \\right\\}} ^ {x}, \\hat {f} _ {\\left\\{f w d, b w d \\right\\}} ^ {y}, \\hat {f} _ {\\left\\{f w d, b w d \\right\\}} ^ {z} \\right] ^ {\\top}, \\tag {8}", + "image_path": "cd24d1ce69d5812f6d723b46965a734f7f1b6040b4a2fd7404ac11b4beb2e7d1.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 578, + 482, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 578, + 482, + 613 + ], + "spans": [ + { + "bbox": [ + 130, + 578, + 482, + 613 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 578, + 482, + 613 + ], + "type": "inline_equation", + "content": "\\mathbf{J}" + }, + { + "bbox": [ + 130, + 578, + 482, + 613 + ], + "type": "text", + "content": " is the Jacobian of the affine approximation of the projective transformation at the Gaussian center " + }, + { + "bbox": [ + 130, + 578, + 482, + 613 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 130, + 578, + 482, + 613 + ], + "type": "text", + "content": " (Eq. (4)). Regarding scene flows on the camera plane as RGB colors, point-based rendering can compute an optical flow of a pixel through " + }, + { + "bbox": [ + 130, + 578, + 482, + 613 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 130, + 578, + 482, + 613 + ], + "type": "text", + "content": "-blending:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 239, + 630, + 481, + 662 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 630, + 481, + 662 + ], + "spans": [ + { + "bbox": [ + 239, + 630, + 481, + 662 + ], + "type": "interline_equation", + "content": "\\hat {f} _ {\\mathrm {f w d}} = \\sum_ {i = 1} ^ {N} \\hat {f} _ {\\mathrm {f w d}, i} ^ {\\mathrm {x y z}} \\alpha_ {i} \\prod_ {j = 1} ^ {i - 1} (1 - \\alpha_ {j}). \\tag {9}", + "image_path": "a8b36e978443ee06b3e58116e5906307249cbb8d5387a24197c5f15399a6b68c.jpg" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 139, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 139, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 139, + 100 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "K. Katsumata et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 134, + 112, + 480, + 456 + ], + "blocks": [ + { + "bbox": [ + 134, + 112, + 480, + 456 + ], + "lines": [ + { + "bbox": [ + 134, + 112, + 480, + 456 + ], + "spans": [ + { + "bbox": [ + 134, + 112, + 480, + 456 + ], + "type": "image", + "image_path": "4c650dafc6ec69e20921646eb9c6418afeadefc0584eb17191737df572c413dc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 463, + 482, + 498 + ], + "lines": [ + { + "bbox": [ + 132, + 463, + 482, + 498 + ], + "spans": [ + { + "bbox": [ + 132, + 463, + 482, + 498 + ], + "type": "text", + "content": "Fig. 3: Qualitative comparison on D-NeRF [45]. We highlight the differences by zoom view. Our method achieves competitive visual quality with strong baselines. While our method successfully reconstructs intricate details like hands, it causes a blurred sphere shape." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 521, + 482, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 521, + 482, + 571 + ], + "spans": [ + { + "bbox": [ + 130, + 521, + 482, + 571 + ], + "type": "text", + "content": "The backward flow is calculated in the same way. The optical flow " + }, + { + "bbox": [ + 130, + 521, + 482, + 571 + ], + "type": "inline_equation", + "content": "\\hat{F}" + }, + { + "bbox": [ + 130, + 521, + 482, + 571 + ], + "type": "text", + "content": " consists of the forward flows " + }, + { + "bbox": [ + 130, + 521, + 482, + 571 + ], + "type": "inline_equation", + "content": "\\hat{f}_{\\mathrm{fwd}}" + }, + { + "bbox": [ + 130, + 521, + 482, + 571 + ], + "type": "text", + "content": " and backward flows " + }, + { + "bbox": [ + 130, + 521, + 482, + 571 + ], + "type": "inline_equation", + "content": "\\hat{f}_{\\mathrm{bwd}}" + }, + { + "bbox": [ + 130, + 521, + 482, + 571 + ], + "type": "text", + "content": " for all pixels. We exclude the flow loss for the D-NeRF dataset because the teleport of the cameras between adjacent frames causes difficulties in calculating ground truth flows." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 590, + 214, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 590, + 214, + 605 + ], + "spans": [ + { + "bbox": [ + 132, + 590, + 214, + 605 + ], + "type": "text", + "content": "4 Experiment" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 619, + 226, + 629 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 619, + 226, + 629 + ], + "spans": [ + { + "bbox": [ + 132, + 619, + 226, + 629 + ], + "type": "text", + "content": "4.1 Evaluation data" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 641, + 486, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 641, + 486, + 666 + ], + "spans": [ + { + "bbox": [ + 132, + 641, + 486, + 666 + ], + "type": "text", + "content": "We evaluate our compact dynamic Gaussian representation using dynamic scene datasets: a synthetic one D-NeRF [45] and two real ones, i.e., DyNeRF [31] and HyperNeRF [44]." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "type": "text", + "content": "A Compact Dynamic 3D Gaussian" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 474, + 91, + 481, + 100 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 187, + 168, + 430, + 254 + ], + "blocks": [ + { + "bbox": [ + 130, + 114, + 482, + 160 + ], + "lines": [ + { + "bbox": [ + 130, + 114, + 482, + 160 + ], + "spans": [ + { + "bbox": [ + 130, + 114, + 482, + 160 + ], + "type": "text", + "content": "Table 1: Quantitative results on the D-NeRF dataset [45]. Our method performs competitively against NeRF approaches in terms of visual quality and achieves the fastest rendering speed among the highest-performing methods. Results except the FPS of [17, 18, 20] are adopted from the original papers. The best and second best scores among competing methods are highlighted." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 187, + 168, + 430, + 254 + ], + "lines": [ + { + "bbox": [ + 187, + 168, + 430, + 254 + ], + "spans": [ + { + "bbox": [ + 187, + 168, + 430, + 254 + ], + "type": "table", + "html": "
<table><tr><th></th><th>PSNR↑</th><th>MS-SSIM↑</th><th>LPIPS↓</th><th>FPS↑</th><th>Train Time↓</th><th>Mem↓</th></tr>
<tr><td>TiNeuVox-S [17]</td><td>30.75</td><td>0.96</td><td>0.07</td><td>0.3</td><td>28 mins</td><td>8MB</td></tr>
<tr><td>TiNeuVox-B [17]</td><td>32.67</td><td>0.97</td><td>0.04</td><td>0.13</td><td>28 mins</td><td>48MB</td></tr>
<tr><td>K-Planes [18]</td><td>31.61</td><td>0.97</td><td>-</td><td>0.54</td><td>52 mins</td><td>~497MB</td></tr>
<tr><td>V4D [20]</td><td>33.72</td><td>0.98</td><td>0.02</td><td>1.47</td><td>6.9 hrs</td><td>1.2GB</td></tr>
<tr><td>3DGS [26]</td><td>20.51</td><td>0.89</td><td>0.07</td><td>170</td><td>6 mins</td><td>~50MB</td></tr>
<tr><td>D-3DGS</td><td>17.22</td><td>0.81</td><td>0.13</td><td>173</td><td>15 mins</td><td>~913MB</td></tr>
<tr><td>Ours</td><td>32.19</td><td>0.97</td><td>0.04</td><td>150</td><td>8 mins</td><td>~159MB</td></tr></table>
", + "image_path": "dd5399393ebdb39bfe1aa531f5e1d7d2eb3da185eb83c736c001caf11a1e7572.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 179, + 329, + 438, + 402 + ], + "blocks": [ + { + "bbox": [ + 130, + 263, + 482, + 319 + ], + "lines": [ + { + "bbox": [ + 130, + 263, + 482, + 319 + ], + "spans": [ + { + "bbox": [ + 130, + 263, + 482, + 319 + ], + "type": "text", + "content": "Table 2: Quantitative results on the DyNeRF datasets [31]. Results excepting FPS of [18, 20] are adopted from the original papers. The best and second best scores among competing methods (excepting 3DGS) are highlighted. While our method matches NeRFs in terms of rendering quality, our method matches 3DGS in terms of rendering speed. Besides, our method is 20 times more compact than Dynamic3DGaussians." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 179, + 329, + 438, + 402 + ], + "lines": [ + { + "bbox": [ + 179, + 329, + 438, + 402 + ], + "spans": [ + { + "bbox": [ + 179, + 329, + 438, + 402 + ], + "type": "table", + "html": "
<table><tr><th></th><th>PSNR↑</th><th>MS-SSIM↑</th><th>LPIPS↓</th><th>FPS↑</th><th>Train Time↓</th><th>Mem↓</th></tr>
<tr><td>K-Planes [18]</td><td>31.63</td><td>0.964</td><td>-</td><td>0.3</td><td>1.8 hrs</td><td>~309MB</td></tr>
<tr><td>V4D [20]</td><td>28.96</td><td>0.937</td><td>0.17</td><td>0.1</td><td>14 hrs</td><td>1.2GB</td></tr>
<tr><td>3DGS [26]</td><td>20.94</td><td>0.800</td><td>0.29</td><td>109</td><td>20 mins</td><td>~198MB</td></tr>
<tr><td>D-3DGS</td><td>24.36</td><td>0.834</td><td>0.25</td><td>119</td><td>51 mins</td><td>~2.3GB</td></tr>
<tr><td>Dynamic3DGaussians [38]</td><td>27.79</td><td>0.869</td><td>0.23</td><td>51</td><td>2.1 hrs</td><td>~6.6GB</td></tr>
<tr><td>Ours</td><td>30.46</td><td>0.955</td><td>0.15</td><td>118</td><td>1 hr</td><td>~338MB</td></tr></table>
", + "image_path": "9d7be7ed78f5c4919310db7d60d24d5f352961dcc136cbaee4d77b28a0c5ceb3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 424, + 482, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 424, + 482, + 471 + ], + "spans": [ + { + "bbox": [ + 130, + 424, + 482, + 471 + ], + "type": "text", + "content": "D-NeRF dataset [45]. This dataset comprises eight videos of varying lengths, ranging from 50 to 200 frames per video. The camera setup is designed to mimic a monocular camera setting by teleporting between adjacent timesteps. The test views are from novel camera positions. We train and render at the resolution of " + }, + { + "bbox": [ + 130, + 424, + 482, + 471 + ], + "type": "inline_equation", + "content": "800 \\times 800" + }, + { + "bbox": [ + 130, + 424, + 482, + 471 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 472, + 482, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 472, + 482, + 519 + ], + "spans": [ + { + "bbox": [ + 130, + 472, + 482, + 519 + ], + "type": "text", + "content": "DyNeRF dataset [31]. The multi-camera dataset includes six 10-second videos captured at 30 FPS using 15-20 synchronized fixed cameras. For evaluation, a central camera is used, while training utilizes frames from the other cameras. The training and rendering resolution is set at " + }, + { + "bbox": [ + 130, + 472, + 482, + 519 + ], + "type": "inline_equation", + "content": "1,352 \\times 1,014" + }, + { + "bbox": [ + 130, + 472, + 482, + 519 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 520, + 482, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 520, + 482, + 555 + ], + "spans": [ + { + "bbox": [ + 130, + 520, + 482, + 555 + ], + "type": "text", + "content": "HyperNeRF dataset [44]. This dataset encompasses videos ranging from 8 to " + }, + { + "bbox": [ + 130, + 520, + 482, + 555 + ], + "type": "inline_equation", + "content": "15\\mathrm{~s}" + }, + { + "bbox": [ + 130, + 520, + 482, + 555 + ], + "type": "text", + "content": ", captured at 15 FPS using two Pixel 3 phones. The training and rendering processes are conducted at a resolution of " + }, + { + "bbox": [ + 130, + 520, + 482, + 555 + ], + "type": "inline_equation", + "content": "540 \\times 960" + }, + { + "bbox": [ + 130, + 520, + 482, + 555 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 131, + 574, + 257, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 574, + 257, + 586 + ], + "spans": [ + { + "bbox": [ + 131, + 574, + 257, + 586 + ], + "type": "text", + "content": "4.2 Implementation details" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "spans": [ + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "text", + "content": "We adhere to the experimental setup in the 3DGS paper [26]. The number of approximation terms of the Gaussian centers " + }, + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "text", + "content": " is set to 2 for the D-NeRF dataset. 
For the other datasets, " + }, + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 130, + 594, + 482, + 667 + ], + "type": "text", + "content": " is set to 5 from preliminary experiments. Our two-stage optimization process begins with an initial fitting of parameters, excluding the coefficients for Gaussian center and rotation. This initial stage spans 3,000 iterations and utilizes all training views in a static setting. Subsequently, we engage in a dynamic stage, adjusting all Gaussian" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "K. Katsumata et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 133, + 168, + 481, + 236 + ], + "blocks": [ + { + "bbox": [ + 131, + 114, + 482, + 159 + ], + "lines": [ + { + "bbox": [ + 131, + 114, + 482, + 159 + ], + "spans": [ + { + "bbox": [ + 131, + 114, + 482, + 159 + ], + "type": "text", + "content": "Table 3: Quantitative results on the HyperNeRF dataset [44]. Our method demonstrates competitive performance in rendering quality across all scenes, surpassing the compared methods in rendering speed. Furthermore, our method is not inferior to the compared methods in training time and memory size." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 133, + 168, + 481, + 236 + ], + "lines": [ + { + "bbox": [ + 133, + 168, + 481, + 236 + ], + "spans": [ + { + "bbox": [ + 133, + 168, + 481, + 236 + ], + "type": "table", + "html": "
<table><tr><th rowspan="2"></th><th rowspan="2">FPS↑</th><th rowspan="2">Train Time↓</th><th rowspan="2">Mem↓</th><th colspan="2">BROOM</th><th colspan="2">3D PRINTER</th><th colspan="2">CHICKEN</th><th colspan="2">PEEL BANANA</th><th colspan="2">Mean</th></tr>
<tr><th>PSNR↑</th><th>SSIM↑</th><th>PSNR↑</th><th>SSIM↑</th><th>PSNR↑</th><th>SSIM↑</th><th>PSNR↑</th><th>SSIM↑</th><th>PSNR↑</th><th>SSIM↑</th></tr>
<tr><td>HyperNeRF [44]</td><td>0.36</td><td>48 hrs†</td><td>15MB</td><td>19.3</td><td>0.591</td><td>20.0</td><td>0.821</td><td>26.9</td><td>0.948</td><td>23.3</td><td>0.896</td><td>22.2</td><td>0.811</td></tr>
<tr><td>TiNeuVox-B [17]</td><td>0.14</td><td>30 mins</td><td>48MB</td><td>21.5</td><td>0.686</td><td>22.8</td><td>0.841</td><td>28.3</td><td>0.947</td><td>24.4</td><td>0.873</td><td>24.3</td><td>0.837</td></tr>
<tr><td>V4D [20]</td><td>0.15</td><td>7 hrs</td><td>1.2GB</td><td>22.1</td><td>0.669</td><td>23.2</td><td>0.835</td><td>28.4</td><td>0.929</td><td>25.2</td><td>0.873</td><td>24.7</td><td>0.827</td></tr>
<tr><td>Ours</td><td>188</td><td>1 hr</td><td>~720MB</td><td>22.1</td><td>0.789</td><td>25.5</td><td>0.919</td><td>28.3</td><td>0.934</td><td>26.6</td><td>0.920</td><td>25.6</td><td>0.890</td></tr></table>
", + "image_path": "019dcf24817e81245bfa51ac7c218b9ce5a82f30621c0ee413ee714fe766d0a3.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 132, + 236, + 481, + 251 + ], + "lines": [ + { + "bbox": [ + 132, + 236, + 481, + 251 + ], + "spans": [ + { + "bbox": [ + 132, + 236, + 481, + 251 + ], + "type": "text", + "content": "Train time of HyperNeRF [44] is estimated from their paper's descriptions. Originally reported as 8 hours on 4 TPU v4s [25], the TPU v4 is slightly faster than the A100 GPU, and the A100 GPU is at least 1.5 times faster than the A6000 GPU." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 140, + 256, + 475, + 373 + ], + "blocks": [ + { + "bbox": [ + 140, + 256, + 475, + 373 + ], + "lines": [ + { + "bbox": [ + 140, + 256, + 475, + 373 + ], + "spans": [ + { + "bbox": [ + 140, + 256, + 475, + 373 + ], + "type": "image", + "image_path": "8646b14d601e95ac912fc9ca3a0b37c477b69db65020e2bb5588c4ff8ef2e31b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 140, + 381, + 473, + 393 + ], + "lines": [ + { + "bbox": [ + 140, + 381, + 473, + 393 + ], + "spans": [ + { + "bbox": [ + 140, + 381, + 473, + 393 + ], + "type": "text", + "content": "Fig. 4: Qualitative comparison on the DyNeRF dataset [31]. The differences are zoomed in." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 131, + 417, + 482, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 417, + 482, + 466 + ], + "spans": [ + { + "bbox": [ + 131, + 417, + 482, + 466 + ], + "type": "text", + "content": "parameters in 27,000 iterations. The entire optimization process encompasses 30,000 iterations. Following [26], " + }, + { + "bbox": [ + 131, + 417, + 482, + 466 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 131, + 417, + 482, + 466 + ], + "type": "text", + "content": " is set to 0.2. We set the flow loss weight " + }, + { + "bbox": [ + 131, + 417, + 482, + 466 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{flow}}" + }, + { + "bbox": [ + 131, + 417, + 482, + 466 + ], + "type": "text", + "content": " to 1,000 and acquire ground truth flow through the RAFT pretrained on the Sintel dataset [7]. All experiments are conducted on a single RTX A6000 GPU." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 484, + 230, + 497 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 484, + 230, + 497 + ], + "spans": [ + { + "bbox": [ + 132, + 484, + 230, + 497 + ], + "type": "text", + "content": "4.3 Evaluation setup" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 505, + 482, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 505, + 482, + 553 + ], + "spans": [ + { + "bbox": [ + 130, + 505, + 482, + 553 + ], + "type": "text", + "content": "Compared methods. We benchmark our method against the following baseline methods: TiNeuVox [17], K-Planes [18], V4D [20], HyperNeRF [44], 3D Gaussian Splatting (3DGS) [26], Dynamic3DGaussians [38], and a D-3DGS baseline. D-3DGS is the dynamic extension of 3DGS, which stores both position and rotation for each timestep." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 554, + 483, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 554, + 483, + 601 + ], + "spans": [ + { + "bbox": [ + 131, + 554, + 483, + 601 + ], + "type": "text", + "content": "Evaluation metrics. We assess the methods using various metrics, including PSNR [24], SSIM [63], LPIPS [70], FPS, Training time, and memory used to store optimized parameters. Memory consumption includes the 3D Gaussian parameters, voxel/plane representation, and neural network parameters." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 620, + 247, + 632 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 620, + 247, + 632 + ], + "spans": [ + { + "bbox": [ + 132, + 620, + 247, + 632 + ], + "type": "text", + "content": "4.4 Experimental results" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 131, + 641, + 481, + 665 + ], + "type": "text", + "content": "Quantitative results. The quantitative results on the D-NeRF dataset are detailed in Tab. 1. Our method demonstrates a performance comparable to TiNeuVox and K-Planes" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "type": "text", + "content": "A Compact Dynamic 3D Gaussian" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 471, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 471, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 471, + 91, + 480, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 138, + 113, + 475, + 224 + ], + "blocks": [ + { + "bbox": [ + 138, + 113, + 475, + 224 + ], + "lines": [ + { + "bbox": [ + 138, + 113, + 475, + 224 + ], + "spans": [ + { + "bbox": [ + 138, + 113, + 475, + 224 + ], + "type": "image", + "image_path": "2d1346f0d33ed12f3ce5ba01ba407a5ba140e4062cbebc5cb565decdb9f99f69.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 153, + 233, + 459, + 245 + ], + "lines": [ + { + "bbox": [ + 153, + 233, + 459, + 245 + ], + "spans": [ + { + "bbox": [ + 153, + 233, + 459, + 245 + ], + "type": "text", + "content": "Fig. 5: Qualitative comparison on HyperNeRF [44]. Our method offers sharp results." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 259, + 482, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 259, + 482, + 338 + ], + "spans": [ + { + "bbox": [ + 132, + 259, + 482, + 338 + ], + "type": "text", + "content": "Table 4: Per-scene quantitative comparison on D-NeRF scenes of different " + }, + { + "bbox": [ + 132, + 259, + 482, + 338 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 132, + 259, + 482, + 338 + ], + "type": "text", + "content": ", which stands for the number of harmonic terms in the Fourier approximation, and other design choices. 
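Of the metrics listed in Sec. 4.3, PSNR [24] is the only one simple enough to state inline; SSIM [63] and LPIPS [70] need their reference implementations. A minimal sketch for images scaled to [0, 1]:

```python
import numpy as np

def psnr(img, ref):
    """PSNR in dB for [0, 1]-scaled arrays: 10 * log10(MAX^2 / MSE), MAX = 1."""
    mse = np.mean((img - ref) ** 2)
    return 10.0 * np.log10(1.0 / mse)
```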
The highest mean score is achieved with " + }, + { + "bbox": [ + 132, + 259, + 482, + 338 + ], + "type": "inline_equation", + "content": "L = 2" + }, + { + "bbox": [ + 132, + 259, + 482, + 338 + ], + "type": "text", + "content": ", but increasing the complexity " + }, + { + "bbox": [ + 132, + 259, + 482, + 338 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 132, + 259, + 482, + 338 + ], + "type": "text", + "content": " (the number of coefficients) improves visual quality in some scenes (JUMPING JACKS and T-REX). The spline approximations bring marginal improvements in some scenes but slower rendering. The time-varying scale (the last row) also provides minor gains in some cases and increases the memory size. The setting reported in Fig. 3 is highlighted with a gray background." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 134, + 346, + 480, + 459 + ], + "blocks": [ + { + "bbox": [ + 134, + 346, + 480, + 459 + ], + "lines": [ + { + "bbox": [ + 134, + 346, + 480, + 459 + ], + "spans": [ + { + "bbox": [ + 134, + 346, + 480, + 459 + ], + "type": "table", + "html": "
<table><tr><th rowspan="2"></th><th colspan="2">STAND UP</th><th colspan="2">JACKS</th><th colspan="2">BALLS</th><th colspan="2">LEGO</th><th colspan="2">WARRIOR</th><th colspan="2">HOOK</th><th colspan="2">T-REX</th><th colspan="2">MUTANT</th><th colspan="2">Mean</th></tr>
<tr><th>PSNR</th><th>SSIM</th><th>PSNR</th><th>SSIM</th><th>PSNR</th><th>SSIM</th><th>PSNR</th><th>SSIM</th><th>PSNR</th><th>SSIM</th><th>PSNR</th><th>SSIM</th><th>PSNR</th><th>SSIM</th><th>PSNR</th><th>SSIM</th><th>PSNR</th><th>SSIM</th></tr>
<tr><td>L = 1</td><td>40.21</td><td>0.994</td><td>27.22</td><td>0.952</td><td>30.27</td><td>0.972</td><td>24.26</td><td>0.940</td><td>32.42</td><td colspan="9"></td></tr>
<tr bgcolor="#eeeeee"><td>L = 2</td><td>39.10</td><td>0.993</td><td>30.95</td><td>0.980</td><td>33.29</td><td>0.984</td><td>23.15</td><td>0.922</td><td>34.15</td><td colspan="9"></td></tr>
<tr><td>L = 3</td><td>38.09</td><td>0.990</td><td>32.78</td><td>0.984</td><td>32.54</td><td>0.979</td><td>22.12</td><td>0.881</td><td>35.36</td><td colspan="9"></td></tr>
<tr><td>L = 4</td><td>35.83</td><td>0.984</td><td>32.93</td><td>0.982</td><td>30.39</td><td>0.969</td><td>21.06</td><td>0.855</td><td>34.38</td><td colspan="9"></td></tr>
<tr><td>L = 5</td><td>32.89</td><td>0.976</td><td>30.71</td><td>0.977</td><td>27.68</td><td>0.959</td><td>20.20</td><td>0.825</td><td>32.64</td><td colspan="9"></td></tr>
<tr><td>Linear</td><td>27.77</td><td>0.973</td><td>23.10</td><td>0.921</td><td>26.68</td><td>0.959</td><td>22.27</td><td>0.922</td><td>17.39</td><td colspan="9"></td></tr>
<tr><td>Quadratic</td><td>29.40</td><td>0.978</td><td>23.44</td><td>0.926</td><td>27.51</td><td>0.963</td><td>22.45</td><td>0.924</td><td>17.70</td><td colspan="9"></td></tr>
<tr><td>Cubic</td><td>29.98</td><td>0.979</td><td>23.71</td><td>0.928</td><td>27.76</td><td>0.964</td><td>22.37</td><td>0.921</td><td>18.04</td><td colspan="9"></td></tr>
<tr><td>Spline (5)</td><td>38.87</td><td>0.993</td><td>31.96</td><td>0.983</td><td>32.96</td><td>0.980</td><td>23.09</td><td>0.918</td><td>34.46</td><td colspan="9"></td></tr>
<tr><td>Spline (6)</td><td>38.00</td><td>0.992</td><td>31.84</td><td>0.984</td><td>32.81</td><td>0.980</td><td>22.25</td><td>0.903</td><td>35.24</td><td colspan="9"></td></tr>
<tr><td>Linear (scale)</td><td>38.32</td><td>0.993</td><td>30.91</td><td>0.980</td><td>32.55</td><td>0.984</td><td>23.87</td><td>0.930</td><td>34.43</td><td colspan="9"></td></tr></table><!-- Values after the WARRIOR PSNR column (WARRIOR SSIM through Mean) did not survive extraction and are left blank. -->
", + "image_path": "df3d03184746249722433f929dd3d8ed6119f13587c3874e0281b6c07578f790.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 484, + 482, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 484, + 482, + 628 + ], + "spans": [ + { + "bbox": [ + 130, + 484, + 482, + 628 + ], + "type": "text", + "content": "in terms of visual quality as measured by PSNR, SSIM, and LPIPS. Notably, it excels in training time, FPS, and memory size, achieving a rendering speed that is " + }, + { + "bbox": [ + 130, + 484, + 482, + 628 + ], + "type": "inline_equation", + "content": "300 \\times" + }, + { + "bbox": [ + 130, + 484, + 482, + 628 + ], + "type": "text", + "content": " faster than that of K-Planes. Furthermore, our method surpasses both 3DGS and D-3DGS in terms of visual quality without compromising rendering speed. In the DyNeRF scenes experiment, detailed in Tab. 2, while our method does not exceed the baseline in reconstruction quality, it shows a substantial improvement in FPS. Since the DyNeRF scenes contain multi-view data, the D-3DGS baseline naturally improves static 3DGS, unlike monocular scenes. Our method even attains rendering speeds that exceed real-time performance at a high resolution of " + }, + { + "bbox": [ + 130, + 484, + 482, + 628 + ], + "type": "inline_equation", + "content": "1,354 \\times 1,014" + }, + { + "bbox": [ + 130, + 484, + 482, + 628 + ], + "type": "text", + "content": ". For the challenging HyperNeRF dataset, which is captured by only two moving cameras and referenced in Tab. 3, our method not only demonstrates rapid rendering speeds but also achieves higher average PSNR and SSIM scores than the compared methods." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 482, + 666 + ], + "type": "text", + "content": "Qualitative results. Figures 3 to 5 show that our method yields faithful reconstruction for the dynamic scenes. Unlike the structured representation, which has a fixed size of grids, the unstructured nature of 3D Gaussians enables adaptive control of the expres" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "K. Katsumata et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 146, + 125, + 467, + 220 + ], + "blocks": [ + { + "bbox": [ + 170, + 114, + 248, + 124 + ], + "lines": [ + { + "bbox": [ + 170, + 114, + 248, + 124 + ], + "spans": [ + { + "bbox": [ + 170, + 114, + 248, + 124 + ], + "type": "text", + "content": "Ours without " + }, + { + "bbox": [ + 170, + 114, + 248, + 124 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{flow}}" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 393, + 114, + 414, + 123 + ], + "lines": [ + { + "bbox": [ + 393, + 114, + 414, + 123 + ], + "spans": [ + { + "bbox": [ + 393, + 114, + 414, + 123 + ], + "type": "text", + "content": "Ours" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 146, + 125, + 467, + 220 + ], + "lines": [ + { + "bbox": [ + 146, + 125, + 467, + 220 + ], + "spans": [ + { + "bbox": [ + 146, + 125, + 467, + 220 + ], + "type": "image", + "image_path": "c41a63e6504a068c6b8087083f95eaa993f6898c57b2029c9990377c962f6808.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 167, + 266, + 447, + 376 + ], + "blocks": [ + { + "bbox": [ + 131, + 229, + 480, + 251 + ], + "lines": [ + { + "bbox": [ + 131, + 229, + 480, + 251 + ], + "spans": [ + { + "bbox": [ + 131, + 229, + 480, + 251 + ], + "type": "text", + "content": "Fig. 6: Qualitative comparison of disabled and enabled flow loss on DyNeRF. We highlight the differences by zoom view." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 167, + 266, + 447, + 376 + ], + "lines": [ + { + "bbox": [ + 167, + 266, + 447, + 376 + ], + "spans": [ + { + "bbox": [ + 167, + 266, + 447, + 376 + ], + "type": "image", + "image_path": "d36c05713d738724be5412fe9c680427ac7ddb81f84b165665bd4a83725f0cad.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 386, + 480, + 408 + ], + "lines": [ + { + "bbox": [ + 131, + 386, + 480, + 408 + ], + "spans": [ + { + "bbox": [ + 131, + 386, + 480, + 408 + ], + "type": "text", + "content": "Fig. 7: Composition of two scenes. Our method allows for the addition of 3D objects, represented as 3D Gaussians, into a 3D Gaussian scene. We highlight the added object." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 434, + 480, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 434, + 480, + 470 + ], + "spans": [ + { + "bbox": [ + 130, + 434, + 480, + 470 + ], + "type": "text", + "content": "siveness of the representation, delivering sharper renderings. As seen with the results for BOUNCING BALLS, since our method has discrete primitives, it sometimes fails to reproduce smooth boundaries." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 472, + 480, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 472, + 480, + 519 + ], + "spans": [ + { + "bbox": [ + 130, + 472, + 480, + 519 + ], + "type": "text", + "content": "Effect of the number of parameters " + }, + { + "bbox": [ + 130, + 472, + 480, + 519 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 130, + 472, + 480, + 519 + ], + "type": "text", + "content": ". 
Table 4 shows per-scene PSNR and SSIM scores of K-Planes and our method with the different " + }, + { + "bbox": [ + 130, + 472, + 480, + 519 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 130, + 472, + 480, + 519 + ], + "type": "text", + "content": " (Eq. (2)). It is observed that the optimal " + }, + { + "bbox": [ + 130, + 472, + 480, + 519 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 130, + 472, + 480, + 519 + ], + "type": "text", + "content": " for novel view synthesis varies from scene to scene, highlighting the necessity for complex approximations to capture intricate motions effectively." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 521, + 480, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 521, + 480, + 556 + ], + "spans": [ + { + "bbox": [ + 130, + 521, + 480, + 556 + ], + "type": "text", + "content": "Effect of flow loss. Additionally, visual comparisons drawn from our method without and with the flow loss (Fig. 6) reveal that incorporating the flow loss mitigates ghostly artifacts and significantly enhances the accuracy of color reconstruction." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 130, + 558, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 558, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 558, + 481, + 665 + ], + "type": "text", + "content": "Design choice. Our method is very flexible and allows for the use of arbitrary approximation functions and the choice of time-varying parameters. Table 4 also shows the experimental results of other options for the design of the model to facilitate future dynamic scene reconstruction. The linear, quadratic, and cubic baselines approximate time-varying 3D positions with polynomials of degrees one, two, and three, respectively. The Spline (5) and Spline (6) baselines approximate 3D positions with spline approximations of five and six points, respectively. The linear (scale) baseline approximates time-varying scales with the linear approximation in addition to positions and rotations. Although a Spline baseline gives minor performance gains in some cases, it" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "type": "text", + "content": "A Compact Dynamic 3D Gaussian" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 471, + 91, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 471, + 91, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 471, + 91, + 480, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 481, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 481, + 200 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 481, + 200 + ], + "type": "text", + "content": "achieves 91 FPS for rendering, showing slower rendering than the proposed method. The linear (scale) baseline does not show additional parameters that would result in performance improvements. For time-varying 3D rotation, we also consider the approximation with slerp. 
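To make this design space concrete, the Fourier parameterization of Gaussian centers (the L harmonic terms varied in Table 4) and the polynomial baselines can be sketched as below; the names, the basis normalization, and the use of normalized time are assumptions for illustration, not the authors' code.

```python
import numpy as np

def fourier_center(t, mu0, cos_coef, sin_coef):
    """Illustrative L-term Fourier approximation of a Gaussian center mu(t).

    t:        normalized time in [0, 1]
    mu0:      (3,) constant (static) position term
    cos_coef: (L, 3) cosine coefficients, one row per harmonic
    sin_coef: (L, 3) sine coefficients, one row per harmonic
    """
    ls = np.arange(1, cos_coef.shape[0] + 1)[:, None]   # harmonic indices 1..L
    mu = mu0 + (cos_coef * np.cos(2.0 * np.pi * ls * t)).sum(axis=0)
    return mu + (sin_coef * np.sin(2.0 * np.pi * ls * t)).sum(axis=0)

def polynomial_center(t, coef):
    """Illustrative degree-d polynomial baseline (Linear, Quadratic, Cubic
    in Table 4): mu(t) = c_0 + c_1 * t + ... + c_d * t**d.

    coef: (d + 1, 3) array; coef[k] multiplies t**k
    """
    powers = t ** np.arange(coef.shape[0])   # [1, t, t**2, ..., t**d]
    return powers @ coef                     # (3,) center at time t
```

Whatever the basis, each Gaussian's trajectory costs a fixed handful of scalars (3 + 6L for the Fourier form) regardless of sequence length, which is where the memory saving over storing a position per timestep, as D-3DGS does, comes from.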
Since slerp does not offer performance gains and causes numerical instability for static Gaussians, we use a linear approximation for rotation. For faster rendering and a compact representation, we use the Fourier approximation for 3D positions and model 3D positions and rotations as time-varying parameters." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 200, + 481, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 200, + 481, + 272 + ], + "spans": [ + { + "bbox": [ + 132, + 200, + 481, + 272 + ], + "type": "text", + "content": "Scene composition. Since our dynamic representation is still built from pure 3D Gaussians, the learned representation facilitates straightforward editing of Gaussians. We demonstrate the composition of two scenes with our representation. Figure 7 illustrates this by combining the MUTANT scene from the D-NeRF dataset with the SEARED STEAK scene from the DyNeRF dataset. This demonstrates the capability of our method for editing dynamic 3D scenes." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 291, + 288, + 304 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 291, + 288, + 304 + ], + "spans": [ + { + "bbox": [ + 133, + 291, + 288, + 304 + ], + "type": "text", + "content": "5 Discussion and Conclusion" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 318, + 481, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 318, + 481, + 460 + ], + "spans": [ + { + "bbox": [ + 132, + 318, + 481, + 460 + ], + "type": "text", + "content": "Limitations and future directions. Our dynamic Gaussians are defined over the entire duration of the dynamic scene; the representation implicitly assumes that every Gaussian exists at all times. This lets us naturally model rigid and non-rigid deformation in the scene. On the other hand, changes in topology that require Gaussians to appear or vanish (e.g., fluids) are difficult to model. Static colors likewise make changes in illumination and color hard to capture. Moreover, the reconstruction capability depends on the number of parameters: the scene representation is compact, but very long sequences render poorly unless additional memory is spent on a more flexible approximation. To overcome these limitations, modeling the lifetime of Gaussians, for example by adding start and end time parameters, would allow changes in scene topology, and adaptively choosing the flexibility would lead to better trade-offs between quality and memory size." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 462, + 481, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 462, + 481, + 509 + ], + "spans": [ + { + "bbox": [ + 132, + 462, + 481, + 509 + ], + "type": "text", + "content": "Our Gaussian representation sacrifices the continuity and smoothness inherent in neural field-based volume rendering. Distilling NeRFs into our proposed representation, in a manner similar to PlenOctree [69], is a potential extension of our method, promising to enhance rendering quality while maintaining the fast-rendering advantage." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 509, + 481, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 509, + 481, + 605 + ], + "spans": [ + { + "bbox": [ + 132, + 509, + 481, + 605 + ], + "type": "text", + "content": "Conclusion. 
We present a compact dynamic 3D Gaussian representation enabling faithful reconstruction and real-time rendering of dynamic scenes. We propose a representation for the position and rotation of 3D Gaussians as a function of time for modeling the motion of the scene. The parameterized functions of time introduce memory efficiency and robustness to the number of views per timestep. Furthermore, we introduce the flow loss constraining the scene flow of the learned Gaussian representation with the ground truth flow. Our experiments on synthetic and real datasets show that the proposed method achieves real-time dynamic scene rendering even at high resolutions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 605, + 481, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 605, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 605, + 481, + 665 + ], + "type": "text", + "content": "Acknowledgements This study was supported by JSPS/MEXT KAKENHI Grant Numbers JP24K20830, JP23KJ0381, JP23K28139, and JP22H05015, ROIS NII Open Collaborative Research 2024-24S1201, and the Institute of AI and Beyond of the University of Tokyo. The authors would like to thank D. Horita for carefully proofreading the manuscript and N. Umetani for providing helpful advice on the method's limitations." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "K. Katsumata et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 114, + 190, + 127 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 114, + 190, + 127 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 190, + 127 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 140, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 138, + 140, + 481, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 140, + 481, + 161 + ], + "spans": [ + { + "bbox": [ + 138, + 140, + 481, + 161 + ], + "type": "text", + "content": "1. Abdal, R., Yifan, W., Shi, Z., Xu, Y., Po, R., Kuang, Z., Chen, Q., Yeung, D.Y., Wetzstein, G.: Gaussian Shell Maps for Efficient 3D Human Generation. In: CVPR. pp. 9441-9451 (2024)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 138, + 162, + 481, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 162, + 481, + 183 + ], + "spans": [ + { + "bbox": [ + 138, + 162, + 481, + 183 + ], + "type": "text", + "content": "2. Akhter, I., Sheikh, Y., Khan, S., Kanade, T.: Nonrigid Structure from Motion in Trajectory Space. In: NeurIPS (2008)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 184, + 481, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 184, + 481, + 205 + ], + "spans": [ + { + "bbox": [ + 138, + 184, + 481, + 205 + ], + "type": "text", + "content": "3. 
An, S., Xu, H., Shi, Y., Song, G., Ogras, U.Y., Luo, L.: PanoHead: Geometry-Aware 3D Full-Head Synthesis in 360 degree. In: CVPR. pp. 20950-20959 (2023)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 206, + 481, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 206, + 481, + 237 + ], + "spans": [ + { + "bbox": [ + 138, + 206, + 481, + 237 + ], + "type": "text", + "content": "4. Attal, B., Huang, J.B., Richardt, C., Zollhöefer, M., Kopf, J., O'Toole, M., Kim, C.: HyperReel: High-Fidelity 6-DoF Video with Ray-Conditioned Sampling. In: CVPR. pp. 16610-16620 (2023)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 238, + 481, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 238, + 481, + 259 + ], + "spans": [ + { + "bbox": [ + 138, + 238, + 481, + 259 + ], + "type": "text", + "content": "5. Bae, J., Kim, S., Yun, Y., Lee, H., Bang, G., Uh, Y.: Per-Gaussian Embedding-Based Deformation for Deformable 3D Gaussian Splatting. arXiv preprint arXiv:2404.03613 (2024)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 261, + 481, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 261, + 481, + 293 + ], + "spans": [ + { + "bbox": [ + 138, + 261, + 481, + 293 + ], + "type": "text", + "content": "6. Barron, J.T., Mildenhall, B., Tancik, M., Hedman, P., Martin-Brualla, R., Srinivasan, P.P.: Mip-NeRF: A Multiscale Representation for Anti-Aliasing Neural Radiance Fields. In: CVPR. pp. 5855-5864 (2021)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 293, + 481, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 293, + 481, + 315 + ], + "spans": [ + { + "bbox": [ + 138, + 293, + 481, + 315 + ], + "type": "text", + "content": "7. Butler, D.J., Wulff, J., Stanley, G.B., Black, M.J.: A Naturalistic Open Source Movie for Optical Flow Evaluation. In: ECCV. pp. 611-625 (2012)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 316, + 481, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 316, + 481, + 336 + ], + "spans": [ + { + "bbox": [ + 138, + 316, + 481, + 336 + ], + "type": "text", + "content": "8. Cabral, B., Max, N., Springmeyer, R.: Bidirectional Reflection Functions from Surface Bump Maps. SIGGRAPH 21(4), 273-281 (1987)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 337, + 481, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 337, + 481, + 357 + ], + "spans": [ + { + "bbox": [ + 138, + 337, + 481, + 357 + ], + "type": "text", + "content": "9. Cao, A., Johnson, J.: HexPlane: A Fast Representation for Dynamic Scenes. In: CVPR. pp. 130-141 (2023)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 358, + 481, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 358, + 481, + 392 + ], + "spans": [ + { + "bbox": [ + 138, + 358, + 481, + 392 + ], + "type": "text", + "content": "0. Chan, E.R., Lin, C.Z., Chan, M.A., Nagano, K., Pan, B., De Mello, S., Gallo, O., Guibas, L.J., Tremblay, J., Khamis, S., et al.: Efficient Geometry-Aware 3D Generative Adversarial Networks. In: CVPR. pp. 16123-16133 (2022)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 392, + 481, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 392, + 481, + 413 + ], + "spans": [ + { + "bbox": [ + 138, + 392, + 481, + 413 + ], + "type": "text", + "content": "1. 
Chen, A., Xu, Z., Geiger, A., Yu, J., Su, H.: TensoRF: Tensorial Radiance Fields. In: ECCV. pp. 333-350 (2022)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 414, + 481, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 414, + 481, + 434 + ], + "spans": [ + { + "bbox": [ + 138, + 414, + 481, + 434 + ], + "type": "text", + "content": "2. Chen, Z., Wang, F., Liu, H.: Text-to-3D Using Gaussian Splatting. arXiv preprint arXiv:2309.16585 (2023)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 435, + 481, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 435, + 481, + 457 + ], + "spans": [ + { + "bbox": [ + 138, + 435, + 481, + 457 + ], + "type": "text", + "content": "3. Das, D., Wewer, C., Yunus, R., Ilg, E., Lenssen, J.E.: Neural Parametric Gaussians for Monocular Non-Rigid Object Reconstruction. In: CVPR. pp. 10715-10725 (2024)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 458, + 481, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 458, + 481, + 479 + ], + "spans": [ + { + "bbox": [ + 138, + 458, + 481, + 479 + ], + "type": "text", + "content": "4. Dong, Z., Chen, X., Yang, J., Black, M.J., Hilliges, O., Geiger, A.: AG3D: Learning to Generate 3D Avatars from 2D Image Collections. In: ICCV. pp. 14916-14927 (2023)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 479, + 481, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 479, + 481, + 501 + ], + "spans": [ + { + "bbox": [ + 138, + 479, + 481, + 501 + ], + "type": "text", + "content": "5. Duan, Y., Wei, F., Dai, Q., He, Y., Chen, W., Chen, B.: 4D-Rotor Gaussian Splitting: Towards Efficient Novel View Synthesis for Dynamic Scenes. ACM TOG (2024)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 138, + 502, + 481, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 502, + 481, + 533 + ], + "spans": [ + { + "bbox": [ + 138, + 502, + 481, + 533 + ], + "type": "text", + "content": "6. Duisterhof, B.P., Mandi, Z., Yao, Y., Liu, J.W., Shou, M.Z., Song, S., Ichnowski, J.: MD-Splatting: Learning Metric Deformation from 4D Gaussians in Highly Deformable Scenes. arXiv preprint arXiv:2312.00583 (2023)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 138, + 534, + 481, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 534, + 481, + 555 + ], + "spans": [ + { + "bbox": [ + 138, + 534, + 481, + 555 + ], + "type": "text", + "content": "7. Fang, J., Yi, T., Wang, X., Xie, L., Zhang, X., Liu, W., Nießner, M., Tian, Q.: Fast Dynamic Radiance Fields with Time-Aware Neural Voxels. In: SIGGRAPH Asia (2022)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 138, + 556, + 481, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 556, + 481, + 578 + ], + "spans": [ + { + "bbox": [ + 138, + 556, + 481, + 578 + ], + "type": "text", + "content": "8. Fridovich-Keil, S., Meanti, G., Warburg, F.R., Recht, B., Kanazawa, A.: K-Planes: Explicit Radiance Fields in Space, Time, and Appearance. In: CVPR. pp. 12479-12488 (2023)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 138, + 578, + 481, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 578, + 481, + 599 + ], + "spans": [ + { + "bbox": [ + 138, + 578, + 481, + 599 + ], + "type": "text", + "content": "9. 
Fridovich-Keil, S., Yu, A., Tancik, M., Chen, Q., Recht, B., Kanazawa, A.: Plenoxels: Radiance Fields Without Neural Networks. In: CVPR. pp. 5501-5510 (2022)" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 138, + 600, + 481, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 600, + 481, + 620 + ], + "spans": [ + { + "bbox": [ + 138, + 600, + 481, + 620 + ], + "type": "text", + "content": "20. Gan, W., Xu, H., Huang, Y., Chen, S., Yokoya, N.: V4D: Voxel for 4D Novel View Synthesis. IEEE TVCG 30(2), 1579-1591 (2024)" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 138, + 621, + 481, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 621, + 481, + 643 + ], + "spans": [ + { + "bbox": [ + 138, + 621, + 481, + 643 + ], + "type": "text", + "content": "21. Gao, C., Saraf, A., Kopf, J., Huang, J.B.: Dynamic View Synthesis from Dynamic Monocular Video. In: ICCV. pp. 5712-5721 (2021)" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 138, + 643, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 643, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 643, + 481, + 665 + ], + "type": "text", + "content": "22. Guo, Z., Zhou, W., Li, L., Wang, M., Li, H.: Motion-Aware 3D Gaussian Splitting for Efficient Dynamic Scene Reconstruction. arXiv preprint arXiv:2403.11447 (2024)" + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "type": "text", + "content": "A Compact Dynamic 3D Gaussian" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 471, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 471, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 471, + 92, + 480, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 117, + 481, + 665 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 132, + 117, + 481, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 117, + 481, + 138 + ], + "spans": [ + { + "bbox": [ + 132, + 117, + 481, + 138 + ], + "type": "text", + "content": "23. He, H., Yang, Z., Li, S., Dai, B., Wu, W.: OrthoPlanes: A Novel Representation for Better 3D-Awareness of GANs. In: ICCV. pp. 22996-23007 (2023)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 140, + 481, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 140, + 481, + 160 + ], + "spans": [ + { + "bbox": [ + 132, + 140, + 481, + 160 + ], + "type": "text", + "content": "24. Huynh-Thu, Q., Ghanbari, M.: Scope of Validity of PSNR in Image/Video Quality Assessment. Electronics Letters 44(13), 800-801 (2008)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 162, + 481, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 162, + 481, + 217 + ], + "spans": [ + { + "bbox": [ + 132, + 162, + 481, + 217 + ], + "type": "text", + "content": "25. 
Jouppi, N., Kurian, G., Li, S., Ma, P., Nagarajan, R., Nai, L., Patil, N., Subramanian, S., Swing, A., Towles, B., Young, C., Zhou, X., Zhou, Z., Patterson, D.A.: TPU v4: An Optically Reconfigurable Supercomputer for Machine Learning with Hardware Support for Embeddings. In: Proceedings of the 50th Annual International Symposium on Computer Architecture. pp. 1-14 (2023)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 217, + 481, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 217, + 481, + 239 + ], + "spans": [ + { + "bbox": [ + 132, + 217, + 481, + 239 + ], + "type": "text", + "content": "26. Kerbl, B., Kopanas, G., Leimkuhler, T., Drettakis, G.: 3D Gaussian Splitting for Real-Time Radiance Field Rendering. ACM TOG 42(4), 1-14 (2023)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 239, + 481, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 239, + 481, + 261 + ], + "spans": [ + { + "bbox": [ + 132, + 239, + 481, + 261 + ], + "type": "text", + "content": "27. Keselman, L., Hebert, M.: Approximate Differentiable Rendering with Algebraic Surfaces. In: ECCV. pp. 596-614 (2022)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 262, + 481, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 262, + 481, + 284 + ], + "spans": [ + { + "bbox": [ + 132, + 262, + 481, + 284 + ], + "type": "text", + "content": "28. Kopanas, G., Leimkuhler, T., Rainer, G., Jambon, C., Drettakis, G.: Neural Point Catacaustics for Novel-View Synthesis of Reflections. ACM TOG 41(6), 1-15 (2022)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 285, + 481, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 285, + 481, + 306 + ], + "spans": [ + { + "bbox": [ + 132, + 285, + 481, + 306 + ], + "type": "text", + "content": "29. Kopanas, G., Philip, J., Leimkuhler, T., Drettakis, G.: Point-Based Neural Rendering with Per-View Optimization. In: Comput. Graph. Forum. vol. 40, pp. 29-43 (2021)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 307, + 481, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 307, + 481, + 329 + ], + "spans": [ + { + "bbox": [ + 132, + 307, + 481, + 329 + ], + "type": "text", + "content": "30. Lei, J., Wang, Y., Pavlakos, G., Liu, L., Daniilidis, K.: GART: Gaussian Articulated Template Models. In: CVPR. pp. 19876-19887 (2024)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 330, + 481, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 330, + 481, + 361 + ], + "spans": [ + { + "bbox": [ + 132, + 330, + 481, + 361 + ], + "type": "text", + "content": "31. Li, T., Slavcheva, M., Zollhöefer, M., Green, S., Lassner, C., Kim, C., Schmidt, T., Lovegrove, S., Goesele, M., Newcombe, R., et al.: Neural 3D Video Synthesis from Multi-View Video. In: CVPR. pp. 5521-5531 (2022)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 363, + 481, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 363, + 481, + 384 + ], + "spans": [ + { + "bbox": [ + 132, + 363, + 481, + 384 + ], + "type": "text", + "content": "32. Li, Z., Chen, Z., Li, Z., Xu, Y.: Spacetime Gaussian Feature Splatting for Real-Time Dynamic View Synthesis. 
arXiv preprint arXiv:2312.16812 (2023)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 385, + 481, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 385, + 481, + 407 + ], + "spans": [ + { + "bbox": [ + 132, + 385, + 481, + 407 + ], + "type": "text", + "content": "33. Li, Z., Niklaus, S., Snavely, N., Wang, O.: Neural Scene Flow Fields for Space-Time View Synthesis of Dynamic Scenes. In: CVPR. pp. 6498-6508 (2021)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 408, + 481, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 408, + 481, + 430 + ], + "spans": [ + { + "bbox": [ + 132, + 408, + 481, + 430 + ], + "type": "text", + "content": "34. Li, Z., Wang, Q., Cole, F., Tucker, R., Snavely, N.: DynIBaR: Neural Dynamic Image-Based Rendering. In: CVPR. pp. 4273-4284 (2023)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 430, + 481, + 463 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 430, + 481, + 463 + ], + "spans": [ + { + "bbox": [ + 132, + 430, + 481, + 463 + ], + "type": "text", + "content": "35. Liang, Y., Khan, N., Li, Z., Nguyen-Phuoc, T., Lanman, D., Tompkin, J., Xiao, L.: Gaufre: Gaussian Deformation Fields for Real-Time Dynamic Novel View Synthesis. arXiv preprint arXiv:2312.11458 (2023)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 464, + 481, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 464, + 481, + 496 + ], + "spans": [ + { + "bbox": [ + 132, + 464, + 481, + 496 + ], + "type": "text", + "content": "36. Lu, Z., Guo, X., Hui, L., Chen, T., Yang, M., Tang, X., Zhu, F., Dai, Y.: 3D Geometry-Aware Deformable Gaussian Splitting for Dynamic View Synthesis. In: CVPR. pp. 8900-8910 (2024)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 498, + 481, + 519 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 498, + 481, + 519 + ], + "spans": [ + { + "bbox": [ + 132, + 498, + 481, + 519 + ], + "type": "text", + "content": "37. Lucas, B.D., Kanade, T.: An Iterative Image Registration Technique with an Application to Stereo Vision. In: IJCAI. pp. 674-679 (1981)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 520, + 481, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 520, + 481, + 541 + ], + "spans": [ + { + "bbox": [ + 132, + 520, + 481, + 541 + ], + "type": "text", + "content": "38. Luiten, J., Kopanas, G., Leibe, B., Ramanan, D.: Dynamic 3D Gaussians: Tracking by Persistent Dynamic View Synthesis. In: 3DV (2024)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 543, + 481, + 575 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 543, + 481, + 575 + ], + "spans": [ + { + "bbox": [ + 132, + 543, + 481, + 575 + ], + "type": "text", + "content": "39. Mayer, N., Ilg, E., Hausser, P., Fischer, P., Cremers, D., Dosovitskiy, A., Brox, T.: A Large Dataset to Train Convolutional Networks for Disparity, Optical Flow, and Scene Flow Estimation. In: CVPR. pp. 4040-4048 (2016)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 132, + 576, + 481, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 576, + 481, + 597 + ], + "spans": [ + { + "bbox": [ + 132, + 576, + 481, + 597 + ], + "type": "text", + "content": "40. Menze, M., Geiger, A.: Object Scene Flow for Autonomous Vehicles. In: CVPR. pp. 
3061-3070 (2015)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 132, + 598, + 481, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 598, + 481, + 620 + ], + "spans": [ + { + "bbox": [ + 132, + 598, + 481, + 620 + ], + "type": "text", + "content": "41. Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. In: ECCV (2020)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 132, + 621, + 481, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 621, + 481, + 642 + ], + "spans": [ + { + "bbox": [ + 132, + 621, + 481, + 642 + ], + "type": "text", + "content": "42. Müller, T., Evans, A., Schied, C., Keller, A.: Instant Neural Graphics Primitives with a Multiresolution Hash Encoding. ACM TOG 41(4), 1-15 (2022)" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 132, + 643, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 643, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 643, + 481, + 665 + ], + "type": "text", + "content": "43. Park, K., Sinha, U., Barron, J.T., Bouaziz, S., Goldman, D.B., Seitz, S.M., Martin-Brualla, R.: Nerfies: Deformable Neural Radiance Fields. In: ICCV. pp. 5865-5874 (2021)" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "K. Katsumata et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 665 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 149 + ], + "type": "text", + "content": "44. Park, K., Sinha, U., Hedman, P., Barron, J.T., Bouaziz, S., Goldman, D.B., Martin-Brualla, R., Seitz, S.M.: HyperNeRF: a Higher-Dimensional Representation for Topologically Varying Neural Radiance Fields. ACM TOG 40(6), 1-12 (2021)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 150, + 482, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 150, + 482, + 173 + ], + "spans": [ + { + "bbox": [ + 130, + 150, + 482, + 173 + ], + "type": "text", + "content": "45. Pumarola, A., Corona, E., Pons-Moll, G., Moreno-Noguer, F.: D-NeRF: Neural Radiance Fields for Dynamic Scenes. In: CVPR. pp. 10318-10327 (2021)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 173, + 481, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 173, + 481, + 205 + ], + "spans": [ + { + "bbox": [ + 132, + 173, + 481, + 205 + ], + "type": "text", + "content": "46. Qian, S., Kirschstein, T., Schoneveld, L., Davoli, D., Giebenhain, S., Nießner, M.: GaussianAvatars: Photorealistic Head Avatars with Rigged 3D Gaussians. In: CVPR. pp. 
20299-20309 (2024)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 206, + 481, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 206, + 481, + 239 + ], + "spans": [ + { + "bbox": [ + 132, + 206, + 481, + 239 + ], + "type": "text", + "content": "47. Qian, Z., Wang, S., Mihajlovic, M., Geiger, A., Tang, S.: 3DGS-Avatar: Animatable Avatars via Deformable 3D Gaussian Splatting. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5020-5030 (2024)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 240, + 481, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 240, + 481, + 262 + ], + "spans": [ + { + "bbox": [ + 132, + 240, + 481, + 262 + ], + "type": "text", + "content": "48. Reiser, C., Peng, S., Liao, Y., Geiger, A.: KiloNeRF: Speeding up Neural Radiance Fields with Thousands of Tiny MLPs. In: ICCV. pp. 14335-14345 (2021)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 262, + 481, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 262, + 481, + 295 + ], + "spans": [ + { + "bbox": [ + 132, + 262, + 481, + 295 + ], + "type": "text", + "content": "49. Reiser, C., Szeliski, R., Verbin, D., Srinivasan, P., Mildenhall, B., Geiger, A., Barron, J., Hedman, P.: MERF: Memory-Efficient Radiance Fields for Real-Time View Synthesis in Unbounded Scenes. ACM TOG 42(4), 1-12 (2023)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 296, + 481, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 296, + 481, + 318 + ], + "spans": [ + { + "bbox": [ + 132, + 296, + 481, + 318 + ], + "type": "text", + "content": "50. Schonberger, J.L., Zheng, E., Frahm, J.M., Pollefeys, M.: Pixelwise View Selection for Unstructured Multi-View Stereo. In: ECCV. pp. 501-518 (2016)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 319, + 454, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 319, + 454, + 330 + ], + "spans": [ + { + "bbox": [ + 132, + 319, + 454, + 330 + ], + "type": "text", + "content": "51. Schonberger, J.L., Frahm, J.M.: Structure-from-Motion Revisited. In: CVPR (2016)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 331, + 481, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 331, + 481, + 352 + ], + "spans": [ + { + "bbox": [ + 132, + 331, + 481, + 352 + ], + "type": "text", + "content": "52. Seitz, S.M., Curless, B., Diebel, J., Scharstein, D., Szeliski, R.: A Comparison and Evaluation of Multi-View Stereo Reconstruction Algorithms. In: CVPR. pp. 519-528 (2006)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 353, + 481, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 353, + 481, + 385 + ], + "spans": [ + { + "bbox": [ + 132, + 353, + 481, + 385 + ], + "type": "text", + "content": "53. Shao, R., Zheng, Z., Tu, H., Liu, B., Zhang, H., Liu, Y.: Tensor4D: Efficient Neural 4D Decomposition for High-Fidelity Dynamic Reconstruction and Rendering. In: CVPR. pp. 16632-16642 (2023)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 386, + 481, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 386, + 481, + 418 + ], + "spans": [ + { + "bbox": [ + 132, + 386, + 481, + 418 + ], + "type": "text", + "content": "54. 
Song, L., Chen, A., Li, Z., Chen, Z., Chen, L., Yuan, J., Xu, Y., Geiger, A.: NeRFPlayer: A Streamable Dynamic Scene Representation with Decomposed Neural Radiance Fields. IEEE TVCG 29(5), 2732-2742 (2023)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 419, + 481, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 419, + 481, + 441 + ], + "spans": [ + { + "bbox": [ + 132, + 419, + 481, + 441 + ], + "type": "text", + "content": "55. Tang, J., Ren, J., Zhou, H., Liu, Z., Zeng, G.: DreamGaussian: Generative Gaussian Splitting for Efficient 3D Content Creation. arXiv preprint arXiv:2309.16653 (2023)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 442, + 481, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 442, + 481, + 464 + ], + "spans": [ + { + "bbox": [ + 132, + 442, + 481, + 464 + ], + "type": "text", + "content": "56. Teed, Z., Deng, J.: RAFT: Recurrent All-Pairs Field Transforms for Optical Flow. In: ECCV. pp. 402-419 (2020)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 464, + 481, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 464, + 481, + 509 + ], + "spans": [ + { + "bbox": [ + 132, + 464, + 481, + 509 + ], + "type": "text", + "content": "57. Tewari, A., Thies, J., Mildenhall, B., Srinivasan, P., Tretschk, E., Wang, Y., Lassner, C., Sitzmann, V., Martin-Brualla, R., Lombardi, S., Simon, T., Theobalt, C., Nießner, M., Barron, J.T., Wetzstein, G., Zollhöefer, M., Golyanik, V.: Advances in Neural Rendering. In: Comput. Graph. Forum. vol. 41, pp. 703-735 (2022)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 509, + 481, + 531 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 509, + 481, + 531 + ], + "spans": [ + { + "bbox": [ + 132, + 509, + 481, + 531 + ], + "type": "text", + "content": "58. Tian, F., Du, S., Duan, Y.: MonoNeRF: Learning a Generalizable Dynamic Radiance Field from Monocular Videos. In: ICCV. pp. 17903-17913 (2023)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 531, + 481, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 531, + 481, + 563 + ], + "spans": [ + { + "bbox": [ + 132, + 531, + 481, + 563 + ], + "type": "text", + "content": "59. Tretschk, E., Tewari, A., Golyanik, V., Zollhöefer, M., Lassner, C., Theobalt, C.: Non-Rigid Neural Radiance Fields: Reconstruction and Novel View Synthesis of a Dynamic Scene From Monocular Video. In: ICCV (2021)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 564, + 481, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 564, + 481, + 597 + ], + "spans": [ + { + "bbox": [ + 132, + 564, + 481, + 597 + ], + "type": "text", + "content": "60. Tretschk, E., Tewari, A., Golyanik, V., Zollhöfer, M., Lassner, C., Theobalt, C.: Non-Rigid Neural Radiance Fields: Reconstruction and Novel View Synthesis of a Dynamic Scene from Monocular Video. In: ICCV. pp. 12959-12970 (2021)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 132, + 598, + 481, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 598, + 481, + 620 + ], + "spans": [ + { + "bbox": [ + 132, + 598, + 481, + 620 + ], + "type": "text", + "content": "61. Vedula, S., Baker, S., Rander, P., Collins, R., Kanade, T.: Three-Dimensional Scene Flow. In: CVPR. pp. 
722-729 (1999)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 132, + 620, + 481, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 620, + 481, + 643 + ], + "spans": [ + { + "bbox": [ + 132, + 620, + 481, + 643 + ], + "type": "text", + "content": "62. Vedula, S., Rander, P., Collins, R., Kanade, T.: Three-Dimensional Scene Flow. IEEE TPAMI 27(3), 475-480 (2005)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 132, + 643, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 643, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 643, + 481, + 665 + ], + "type": "text", + "content": "63. Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image Quality Assessment: From Error Visibility to Structural Similarity. IEEE TIP 13(4), 600-612 (2004)" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 321, + 91, + 447, + 102 + ], + "type": "text", + "content": "A Compact Dynamic 3D Gaussian" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 116, + 482, + 380 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 132, + 116, + 482, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 116, + 482, + 149 + ], + "spans": [ + { + "bbox": [ + 132, + 116, + 482, + 149 + ], + "type": "text", + "content": "64. Wu, G., Yi, T., Fang, J., Xie, L., Zhang, X., Wei, W., Liu, W., Tian, Q., Wang, X.: 4D Gaussian Splitting for Real-Time Dynamic Scene Rendering. arXiv preprint arXiv:2310.08528 (2023)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 150, + 482, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 150, + 482, + 182 + ], + "spans": [ + { + "bbox": [ + 132, + 150, + 482, + 182 + ], + "type": "text", + "content": "65. Xie, Y., Takikawa, T., Saito, S., Litany, O., Yan, S., Khan, N., Tombari, F., Tompkin, J., Sitzmann, V., Sridhar, S.: Neural Fields in Visual Computing and Beyond. Comput. Graph. Forum (2022)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 183, + 481, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 183, + 481, + 205 + ], + "spans": [ + { + "bbox": [ + 132, + 183, + 481, + 205 + ], + "type": "text", + "content": "66. Xu, D., Yuan, Y., Mardani, M., Liu, S., Song, J., Wang, Z., Vahdat, A.: AGG: Amortized Generative 3D Gaussians for Single Image to 3D. arXiv preprint arXiv:2401.04099 (2024)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 205, + 481, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 205, + 481, + 236 + ], + "spans": [ + { + "bbox": [ + 132, + 205, + 481, + 236 + ], + "type": "text", + "content": "67. Yang, Z., Yang, H., Pan, Z., Zhu, X., Zhang, L.: Real-Time Photorealistic Dynamic Scene Representation and Rendering with 4D Gaussian Splatting. 
arXiv preprint arXiv:2310.10642 (2023)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 237, + 481, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 237, + 481, + 270 + ], + "spans": [ + { + "bbox": [ + 132, + 237, + 481, + 270 + ], + "type": "text", + "content": "68. Yi, T., Fang, J., Wu, G., Xie, L., Zhang, X., Liu, W., Tian, Q., Wang, X.: GaussianDreamer: Fast Generation from Text to 3D Gaussian Splatting with Point Cloud Priors. arXiv preprint arXiv:2310.08529 (2023)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 271, + 481, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 271, + 481, + 293 + ], + "spans": [ + { + "bbox": [ + 132, + 271, + 481, + 293 + ], + "type": "text", + "content": "69. Yu, A., Li, R., Tancik, M., Li, H., Ng, R., Kanazawa, A.: PlenOctrees for Real-Time Rendering of Neural Radiance Fields. In: ICCV. pp. 5752-5761 (2021)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 293, + 481, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 293, + 481, + 315 + ], + "spans": [ + { + "bbox": [ + 132, + 293, + 481, + 315 + ], + "type": "text", + "content": "70. Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The Unreasonable Effectiveness of Deep Features as a Perceptual Metric. In: CVPR. pp. 586-595 (2018)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 316, + 481, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 316, + 481, + 335 + ], + "spans": [ + { + "bbox": [ + 132, + 316, + 481, + 335 + ], + "type": "text", + "content": "71. Zheng, E., Ji, D., Dunn, E., Frahm, J.M.: Sparse Dynamic 3D Reconstruction from Unsynchronized Videos. In: CVPR. pp. 4435-4443 (2015)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 336, + 481, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 336, + 481, + 357 + ], + "spans": [ + { + "bbox": [ + 132, + 336, + 481, + 357 + ], + "type": "text", + "content": "72. Zielonka, W., Bagautdinov, T., Saito, S., Zollhöfer, M., Thies, J., Romero, J.: Drivable 3D Gaussian Avatars. arXiv preprint arXiv:2311.08581 (2023)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 358, + 481, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 358, + 481, + 380 + ], + "spans": [ + { + "bbox": [ + 132, + 358, + 481, + 380 + ], + "type": "text", + "content": "73. Zwicker, M., Pfister, H., Van Baar, J., Gross, M.: EWA Splitting. IEEE TVCG 8(3), 223-238 (2002)" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 237, + 101 + ], + "type": "text", + "content": "K. Katsumata et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/76c19445-7741-420c-b1a4-d913d41c13ff_content_list.json b/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/76c19445-7741-420c-b1a4-d913d41c13ff_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..d203d4b14b7950246db85934f425bc58a88a1f0e --- /dev/null +++ b/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/76c19445-7741-420c-b1a4-d913d41c13ff_content_list.json @@ -0,0 +1,1739 @@ +[ + { + "type": "text", + "text": "A Comparative Study of Image Restoration Networks for General Backbone Network Design", + "text_level": 1, + "bbox": [ + 223, + 141, + 779, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiangyu Chen $^{1,2,3*}$ Zheyuan Li $^{2,1*}$ Yuandong $\\mathrm{Pu}^{3,4*}$ Yihao Liu $^{2,3}$ \nJiantao Zhou $^{1\\dagger}$ Yu Qiao $^{2,3}$ Chao Dong $^{2,3,5\\dagger}$", + "bbox": [ + 264, + 210, + 736, + 243 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1University of Macau 2Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences 3Shanghai Artificial Intelligence Laboratory 4Shanghai Jiao Tong University 5Shenzhen University of Advanced Technology https://github.com/Andrew0613/X-Restormer", + "bbox": [ + 238, + 253, + 764, + 311 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract. Despite the significant progress made by deep models in various image restoration tasks, existing image restoration networks still face challenges in terms of task generality. An intuitive manifestation is that networks which excel in certain tasks often fail to deliver satisfactory results in others. To illustrate this point, we select five representative networks and conduct a comparative study on five classic image restoration tasks. First, we provide a detailed explanation of the characteristics of different image restoration tasks and backbone networks. Following this, we present the benchmark results and analyze the reasons behind the performance disparity of different models across various tasks. Drawing from this comparative study, we propose that a general image restoration backbone network needs to meet the functional requirements of diverse tasks. Based on this principle, we design a new general image restoration backbone network, X-Restormer. Extensive experiments demonstrate that X-Restormer possesses good task generality and achieves state-of-the-art performance across a variety of tasks.", + "bbox": [ + 261, + 351, + 743, + 571 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 215, + 599, + 375, + 616 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Image restoration aims to generate high-quality images from degraded images. In recent years, deep learning has achieved great success in this field, with numerous networks being proposed to address various image restoration tasks. Initially, networks are primarily designed to solve specific restoration tasks and are typically validated only on selected tasks. 
As deep learning techniques have continued to evolve, there has been an increasing focus on the development of general-purpose networks that can be applied to a broad range of tasks. This trend is particularly evident in the high-level vision field, where new backbone networks are being designed to support multiple tasks [17, 30], including classification, detection and segmentation. For image restoration, although more and more backbone networks can handle multiple restoration tasks, their task generality is still limited, as illustrated in Fig. 1. For instance, SwinIR [25] achieves", + "bbox": [ + 212, + 633, + 787, + 815 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Equal contributions, † Corresponding author.", + "bbox": [ + 230, + 824, + 550, + 839 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/5121f81d60afe460e046d73c19696fcb689f97a14c00e0a08e365e2b499eb441.jpg", + "image_caption": [ + "Fig. 1: Relative performance difference of different backbone networks on five image restoration tasks1. The existing representative networks exhibit diverse performance on these tasks, while our method presents superior task generality." + ], + "image_footnote": [], + "bbox": [ + 263, + 143, + 736, + 358 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "state-of-the-art performance on image super-resolution (SR) but falls short on image deblurring and dehazing. Conversely, Restormer [48] performs exceptionally well on image dehazing and deraining but is less effective on image SR. This discrepancy can be attributed to the fact that the characteristics of image degradation vary across different image restoration tasks. While all image restoration tasks involve mapping degraded images to clean images, the requirements for the capability of backbone networks differ depending on specific tasks.", + "bbox": [ + 212, + 438, + 787, + 545 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Designing a general image restoration backbone network presents a significant challenge. However, the development of such a network holds considerable value, as it has the potential to greatly reduce costs associated with research and application. To achieve this goal, we first conduct a comparative study of mainstream backbone networks on the representative tasks, including image SR, denoising, deblurring, deraining and dehazing. These five tasks are chosen due to the distinct characteristics of their degradation. Five representative backbone networks are selected in the study, including MPRNet [49], Uformer [43], SwinIR [25], Restormer [48] and NAFNet [5]. These five networks encompass classic architectures such as U-shape architecture, plain residual-in-residual architecture and multi-stage progressive architecture. They also employ several common operators, including convolution, spatial self-attention and transposed self-attention [48]. We benchmark the five representative methods on the selected five tasks. The experimental results clearly reflect the performance disparity of different backbone networks on different tasks. 
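For concreteness, the footnote-1 normalization behind Fig. 1 can be written out directly. A minimal sketch, assuming hypothetical per-task average PSNR scores (not the paper's numbers); only the formula $(P^{(i)} - P_{lower}^{(i)}) / P_{upper}^{(i)}$ itself comes from the text:

```python
# Sketch of the Fig. 1 normalization from footnote 1; the scores below are
# hypothetical placeholders, not results reported in the paper.
scores = {"MPRNet": 30.1, "SwinIR": 29.5, "Uformer": 29.0,
          "Restormer": 30.4, "X-Restormer": 30.8}  # avg PSNR on one task

p_lower = min(scores.values())   # worst average performance on this task
p_upper = scores["X-Restormer"]  # upper bound: X-Restormer's average

relative = {name: (p - p_lower) / p_upper for name, p in scores.items()}
print(relative)  # the worst-performing network maps to 0.0
```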
We then conduct a detailed anal", + "bbox": [ + 212, + 546, + 787, + 772 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "X.Chen et al.", + "bbox": [ + 271, + 114, + 362, + 126 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "1 We set the minimum average performance of the networks on test sets in Tab. 2 for the task (i) as the lower bound $P_{lower}^{(i)}$ , and set the average performance of X-Restormer for each task as the upper bound $P_{upper}^{(i)}$ . The ordinate of each point in the figure with performance $P^{(i)}$ is calculated by $(P^{(i)} - P_{lower}^{(i)}) / P_{upper}^{(i)}$ .", + "bbox": [ + 217, + 779, + 787, + 842 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "ysis of the characteristics of these tasks and these backbone networks to explain the reasons behind the performance differences. Based on the comparative study, we propose that a general backbone network must be highly comprehensive in terms of functionality that meets the diverse needs of various tasks.", + "bbox": [ + 212, + 146, + 782, + 205 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "It is noteworthy that Restormer stands out in the comparative study, ranking within the top two across all five tasks. This superior performance can be attributed to several key designs. First, Restormer's U-shape architecture allows it to process large-size inputs, which is crucial for the tasks that deal with large areas of degradation. Then, the network employs transposed self-attention that utilizes channel-wise features as tokens, achieving the information interaction among channels and enabling the mapping with a global receptive field. Additionally, the incorporation of numerous depth-wise convolutions activates the considerable spatial information interaction ability of the network. From a functional perspective, Restormer integrates the key capabilities of the other compared networks, thereby exhibiting commendable task generality in the comparative study. However, the spatial mapping ability of Restormer still appears to be somewhat deficient, as indicated by its quantitatively and qualitatively subpar performance in comparison to SwinIR for $\\mathrm{SR}^2$ .", + "bbox": [ + 212, + 209, + 784, + 419 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This inferiority is hypothesized to originate from the inherent challenge of detail reconstruction posed by the U-shape architecture, coupled with the relatively weak spatial mapping capability of depth-wise convolution, particularly when compared to spatial self-attention (i.e., window-based self-attention in SwinIR). To address this limitation, a plausible solution is the introduction of spatial self-attention to Restormer. To achieve this design, we alternately replace half of transposed self-attention blocks with overlapping cross-attention blocks [8], which are proven to have strong spatial information interaction capability, to construct a new network, X-Restormer. Extensive experiments show that this simple modification can significantly enhance the performance of Restormer without increasing the number of parameters. 
Moreover, our X-Restormer obtains state-of-the-art performance on all five tasks, exhibiting the best task generality.", + "bbox": [ + 212, + 422, + 785, + 603 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our main contributions can be summarized as follows:", + "bbox": [ + 238, + 606, + 632, + 619 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We conduct a comparative study by constructing an image restoration benchmark, highlighting the challenges faced by existing image restoration backbone networks in task generality.", + "- Based on the benchmark results, we perform a detailed analysis of the characteristics of different degradations and networks. We emphasize that the general image restoration backbone network design must meet the functional requirements of diverse tasks.", + "- By further enhancing the spatial mapping ability of Restormer, we design a preliminary general backbone network, X-Restormer. Without additional parameters, X-Restormer achieves significant performance improvement over existing networks and exhibits superior task generality." + ], + "bbox": [ + 227, + 638, + 784, + 809 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "A Comparative Study of Image Restoration Networks", + "bbox": [ + 372, + 114, + 732, + 128 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "2 In general, models' SR performance is highly related to the spatial mapping ability.", + "bbox": [ + 217, + 824, + 782, + 839 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/dfce3516a7c62209483df252920268c964d654ff7cf1841b229c8360d8213d50.jpg", + "image_caption": [ + "Ground Truth Degraded Image" + ], + "image_footnote": [], + "bbox": [ + 245, + 145, + 344, + 258 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/60c46dfaace279ff5bd78cde89737b63a4c2c285b32ffced04b6e6750ab47a73.jpg", + "image_caption": [ + "Fig. 2: Selected five representative image restoration tasks with various degradation." + ], + "image_footnote": [], + "bbox": [ + 351, + 145, + 450, + 258 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/9631d7f38300254ed04c362a007b078bee3f608514120ff2beb444ddfced14a2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 455, + 145, + 557, + 258 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/2c69e4e3833e263071b317e9764f710a62b21f367f88e1916d30200daf536809.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 562, + 145, + 663, + 258 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/1669b773fca3c5f36a3e5cd4fd774c31497adf8c3ab666d54383587de80b88ac.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 669, + 145, + 771, + 258 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 215, + 311, + 387, + 328 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Image restoration networks. In the past years, numerous deep networks have been proposed for various image restoration tasks such as image SR [8,26,59], denoising [43,53,54], deblurring [1,6], deraining [4,29,45] and dehazing [40,41,46]. Initially, most deep networks are designed for specific tasks [3,14,34,55,57]. 
Recently, with increasing attention to the task generality of networks, more and more methods have been developed to tackle multiple image restoration tasks. For instance, Zamir et al. [49] builds a multi-stage CNN for deraining, deblurring and denoising. Wang et al. [43] designs a U-shape Transformer for deraining, deblurring and denoising. Liang et al. [25] implements a Swin Transformer-based network that achieves state-of-the-art performance on SR, denoising and compression artifact reduction. Zamir et al. [48] proposes a novel transposed self-attention to build a U-shape network for deraining, deblurring and denoising. Chen et al. [5] constructs a U-shape CNN for denoising and deblurring. While existing methods have demonstrated some ability to generalize across several restoration tasks, their task generality remains limited.", + "bbox": [ + 212, + 340, + 787, + 568 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Difference from the previous network design research. While previous works have proposed networks that excel in various image restoration tasks, their primary focus is on constructing stronger networks to achieve performance breakthroughs on specific tasks. In contrast, this work pays more attention to the task generality of the backbone network, possessing a vision different from previous works. More specifically, our objective is to explore the design principles and directions of general image restoration networks. We are not seeking to create powerful networks for peak performance on a single or some specific tasks, but rather to ensure satisfactory performance across a diverse range of tasks. Regarding the concrete implementation, we do not intend to construct complex network architectures or modules. Our preference, rather, is to enhance task generality through the use of the simplest methodology available.", + "bbox": [ + 212, + 568, + 787, + 750 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "There are concurrent works that adopt similar ideas for specific image restoration tasks. DAT [9] combines spatial-window self-attention and channel-wise self-attention to handle image SR. IPT-V2 [23] designs a spatial-channel Transformer block to build a denoising network and obtains the winner award in the NTIRE 2023 image denoising challenge [23]. However, the motivation and specific network implementation of our work are distinct from these studies.", + "bbox": [ + 212, + 750, + 787, + 839 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 217, + 116, + 228, + 126 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "X.Chen et al.", + "bbox": [ + 271, + 114, + 362, + 126 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Image Restoration Benchmark", + "text_level": 1, + "bbox": [ + 215, + 143, + 547, + 162 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we first briefly introduce several image restoration tasks, each with its own representative degradation characteristics. Subsequently, we classify mainstream image restoration networks based on two key aspects: architecture and core operator. On this basis, we select five representative networks and conduct a benchmark experiment across five different tasks. We describe the experimental setup and explain its rationality. 
Finally, we present the benchmark results and conduct a detailed analysis of them.", + "bbox": [ + 212, + 179, + 787, + 285 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 Overview of Image Restoration Tasks", + "text_level": 1, + "bbox": [ + 215, + 310, + 571, + 325 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We select five representative tasks for the benchmark experiments. These tasks, exemplified in Fig. 2, are chosen based on two primary reasons. First, they are very common image restoration tasks with widely accepted evaluation schemes. Second, the degradation characteristics of these tasks are diverse and differ greatly from each other. As such, they can provide a robust way to evaluate the task generality of image restoration backbone networks.", + "bbox": [ + 212, + 338, + 784, + 428 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Let $I_{GT}$ denote the ground truth image and $I_{LQ}$ denote the degraded image, where $I_{GT} \\in \\mathbb{R}^{H \\times W \\times 3}$ . The degradation model of classic image SR can be represented as:", + "bbox": [ + 212, + 429, + 785, + 473 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nI _ {L Q} = \\left(I _ {G T} \\otimes k\\right) \\downarrow_ {s}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 429, + 476, + 784, + 491 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $I_{LQ} \\in \\mathbb{R}^{\\frac{H}{s} \\times \\frac{W}{s} \\times 3}$ represents the low-resolution image. $k$ denotes the bicubic downsampling kernel and $\\downarrow_{s}$ represents the downscaling factor. This degradation is highly correlated to local information and leads to a significant loss of high-frequency information. Thus, SR networks emphasize strong spatial information interaction capability to reconstruct as many details as possible.", + "bbox": [ + 212, + 500, + 784, + 577 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The degradation model of image denoising can be denoted as:", + "bbox": [ + 238, + 578, + 686, + 593 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nI _ {L Q} = I _ {G T} + n, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 446, + 608, + 784, + 622 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $n \\in \\mathbb{R}^{H \\times W \\times 3}$ represents the noise map. For Gaussian denoising, noise values are content-independent. The downsampling-upsampling process of U-shape architecture inherently aids noise removal. Besides, strong spatial information interaction capability can also enhance high-frequency content reconstruction for denoising networks.", + "bbox": [ + 212, + 633, + 784, + 710 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The degradation model of image deblurring (for motion deblurring) can be denoted as:", + "bbox": [ + 212, + 710, + 785, + 739 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nI _ {L Q} = \\sum_ {t} \\left(f _ {\\text {m o t i o n}} ^ {t} \\left(I _ {G T}\\right)\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 410, + 741, + 784, + 770 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $f_{motion}^{t}(\\cdot)$ represents the motion function under different continuous exposure times. This degradation is related to the global motion offset of the image. 
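To make the degradation models in Eqs. (1)-(3) concrete, a toy sketch follows; it is illustrative only (NumPy/Pillow, motion blur approximated by frame averaging) and not the exact synthesis pipelines behind the benchmark datasets:

```python
# Toy implementations of the degradation models in Eqs. (1)-(3).
import numpy as np
from PIL import Image

def degrade_sr(img: Image.Image, s: int = 4) -> Image.Image:
    # Eq. (1): bicubic downsampling by scale factor s (the paper uses
    # MATLAB's bicubic kernel; Pillow's is a close stand-in).
    return img.resize((img.width // s, img.height // s), Image.BICUBIC)

def degrade_gaussian_noise(img: np.ndarray, sigma: float = 50.0) -> np.ndarray:
    # Eq. (2): additive, content-independent Gaussian noise (sigma = 50
    # matches the benchmark's denoising setting).
    return img + np.random.normal(0.0, sigma, size=img.shape)

def degrade_motion_blur(frames: list) -> np.ndarray:
    # Eq. (3): blur accumulated over the exposure time, approximated here
    # by averaging consecutive sharp frames (GoPro-style synthesis).
    return np.mean(np.stack(frames, axis=0), axis=0)
```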
Therefore, the ability to utilize large-range information and even global information is important for deblurring networks.", + "bbox": [ + 212, + 779, + 787, + 840 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "A Comparative Study of Image Restoration Networks", + "bbox": [ + 372, + 114, + 732, + 128 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/b3157a9bd87d6feb1c78d54c7598778996395ea992c1fbae68ae01a9d0a75670.jpg", + "image_caption": [ + "Fig. 3: The core operators in image restoration networks." + ], + "image_footnote": [], + "bbox": [ + 243, + 142, + 759, + 208 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The degradation model of image deraining can be simply denoted as:", + "bbox": [ + 238, + 263, + 740, + 279 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nI _ {L Q} = I _ {G T} + R, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 441, + 294, + 785, + 310 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $R$ denotes the additive rain streak, simulated by the physics models, such as [24, 28]. The difference between this degradation and Gaussian noise is that the added $R$ is not evenly distributed on the image and has a correlation with the image content. Complicated rain streaks also places high demands on the complexity of deraining networks.", + "bbox": [ + 214, + 323, + 782, + 398 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The degradation model of image dehazing, based on the atmospheric scattering model, can be denoted as:", + "bbox": [ + 214, + 400, + 782, + 431 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nI _ {L Q} = I _ {G T} * t \\left(I _ {G T}\\right) + A \\left(1 - t \\left(I _ {G T}\\right)\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 366, + 445, + 785, + 462 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $t(\\cdot)$ represents the transmission function and $t(I_{GT})$ is associated with the distance from the scene point to the camera. This degradation is intrinsically linked to the depth information within the image. Consequently, the incorporation of global information is important for dehazing networks.", + "bbox": [ + 214, + 474, + 787, + 537 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 Characteristics of Typical Backbone Networks", + "text_level": 1, + "bbox": [ + 215, + 564, + 643, + 579 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The architectures of mainstream image restoration networks can be broadly classified into three categories: U-shape encoder-decoder, plain residual-in-residual and multi-stage progressive. Schematic diagrams of these architectures are provided in $\\text{Supp}$ . The U-shape encoder-decoder architecture performs down-sampling and up-sampling operations on features, enabling networks to handle features of varying scales. This architecture allows networks to accept large-size input, and the effective receptive field of the network expands rapidly with down-sampling. Typical U-shape networks include Uformer [43], Restormer [48]. The multi-stage architecture divides the entire network into several sub-networks and progressively processes features, which are primarily used for image deraining and deblurring. Common networks based on this architecture include MPRNet [49] and HINet [6]. 
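The deraining and dehazing models in Eqs. (4)-(5) admit the same treatment. In the sketch below, the transmission map t = exp(-beta * depth) is an assumption consistent with the atmospheric scattering model, and rain_streaks/depth are illustrative inputs:

```python
# Toy implementations of the degradation models in Eqs. (4)-(5).
import numpy as np

def degrade_rain(img: np.ndarray, rain_streaks: np.ndarray) -> np.ndarray:
    # Eq. (4): additive rain layer R. Unlike Gaussian noise, R is spatially
    # non-uniform and correlated with the image content.
    return img + rain_streaks

def degrade_haze(img: np.ndarray, depth: np.ndarray,
                 beta: float = 1.0, airlight: float = 0.8) -> np.ndarray:
    # Eq. (5): I_LQ = I_GT * t + A * (1 - t), assuming the standard
    # transmission t = exp(-beta * depth), which ties the haze to depth.
    t = np.exp(-beta * depth)[..., None]  # (H, W, 1), broadcast over RGB
    return img * t + airlight * (1.0 - t)
```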
The plain residual-in-residual architecture is composed of several residual groups, each of which consists of several residual blocks. This architecture maintains the original size when processing features, which is favorable for the reconstruction of high-frequency information, but it comes at a high computational cost. Typical networks include RCAN [57] and SwinIR [25].", + "bbox": [ + 214, + 595, + 792, + 839 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "X.Chen et al.", + "bbox": [ + 271, + 114, + 362, + 127 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The core operators for constructing an image restoration network can be mainly categorized into three types: convolution, spatial self-attention and transposed self-attention. These operators are shown in Fig. 3. The convolution calculates a fixed-size filter and processes the entire fea", + "bbox": [ + 212, + 146, + 460, + 281 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/e66aa4ef380485c97e7b1ff28ad20cd2e9cc68616c4f86e1b0c7807c6ec588be.jpg", + "table_caption": [ + "Table 1: Architectures and core operators of the five selected backbone networks." + ], + "table_footnote": [], + "table_body": "
Network | Architecture | Core operator
MPRNet | Multi-Stage | Convolution
Uformer | U-Shape | Spatial self-attention
SwinIR | Plain residual-in-residual | Spatial self-attention
Restormer | U-Shape | Transposed self-attention
NAFNet | U-Shape | Convolution
", + "bbox": [ + 473, + 189, + 784, + 267 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ture map through a sliding window, which is the major component of many networks, such as RDN [60]. Spatial self-attention is typically implemented as window self-attention in image restoration tasks. It calculates the attention matrix within a fixed window size, generating content-aware weights that are functionally similar to a large kernel dynamic filter. This operator has strong local fitting ability and shows superior advantages on SR and denoising [7]. Transposed self-attention treats the entire feature of each channel as a token to calculate the attention matrix on the channel dimension. This operator directly deals with global features, and when combined with depth-wise convolution, it shows remarkable performance in multiple restoration tasks [48]. The selected five representative backbone networks for the benchmark experiment encompass the abovementioned architectures and core operators, as presented in Tab. 1.", + "bbox": [ + 212, + 282, + 787, + 464 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3 Experimental Settings", + "text_level": 1, + "bbox": [ + 214, + 482, + 444, + 497 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For image SR, we conduct experiments on upscaling factor $\\times 4$ . We use the DF2K dataset (the same as SwinIR [25]) to train models. Low-resolution images are generated from the ground truth images using bicubic downsampling in MATLAB. For U-shape networks, we first up-sample the input low-resolution images through bilinear interpolation. The performance is reported on the Y channel. For denoising, we adopt the DFWB dataset for training. Noisy images are generated by adding Gaussian noise with a noise level of 50. For deblurring, we use the motion deblurring dataset GoPro [34] to train the models. For deraining, we conduct experiments using the synthetic rain dataset Rain13K and calculate the performance on the Y channel, following Restormer [48]. For dehazing, we use the indoor training set (ITS) of the RESIDE dataset [21], the same as [40].", + "bbox": [ + 212, + 502, + 787, + 669 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To maximize the capability of these networks, we use the official codes and training configurations provided by different methods to train the models3. Note that all models are trained without using any pre-training strategy (e.g., $\\times 2$ pretraining for SR) or special tricks (e.g., EMA in SwinIR and TLC in NAFNet) for fair comparison. In addition, we find that different methods may not use exactly the same test sets and the same metrics calculation in their papers to report performance. Therefore, we retest all models based on exactly the same data and calculate metrics using the popular open-source toolbox BasicSR [42].", + "bbox": [ + 212, + 669, + 787, + 791 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "A Comparative Study of Image Restoration Networks", + "bbox": [ + 372, + 114, + 732, + 128 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "3 We tried to train all networks with a unified configuration, but find it unreasonable. 
The performance of networks may vary greatly with different training configurations and optimization strategies, making it difficult to determine a fair unified setting.", + "bbox": [ + 217, + 796, + 785, + 839 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/d23af968c0ed91f6d0b19eadaec21574784afea58425bd08c08f6151f101c394.jpg", + "table_caption": [ + "Table 2: Quantitative results on PSNR(dB) of the benchmark experiments. The best and second-best performance results are in **bold** and **underline**." + ], + "table_footnote": [], + "table_body": "
Method | SR (Set14) | SR (Urban100) | Denoising (CBSD68) | Denoising (Urban100) | Deblurring (GoPro) | Deblurring (HIDE) | Deraining (Test100) | Deraining (Rain100H) | Dehazing (SOTS Indoor)
MPRNet | 28.90 | 26.88 | 28.48 | 29.71 | 32.66 | 30.96 | 30.29 | 30.43 | 40.34
SwinIR | 29.07 | 27.47 | 28.56 | 29.88 | 31.66 | 29.41 | 30.05 | 30.45 | 29.14
Uformer | 27.14 | 25.60 | 28.55 | 29.98 | 33.05 | 30.89 | 27.93 | 24.06 | 33.58
Restormer | 29.06 | 27.32 | 28.60 | 30.02 | 32.92 | 31.22 | 32.03 | 31.48 | 41.87
NAFNet | 29.03 | 27.00 | 28.52 | 29.65 | 33.08 | 31.22 | 30.33 | 32.83 | 38.97
", + "bbox": [ + 220, + 184, + 785, + 277 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.4 Benchmark Results", + "text_level": 1, + "bbox": [ + 215, + 303, + 421, + 316 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We present the quantitative results of the benchmark experiments in Tab. 2. (Due to space constraints, complete results are provided in $\\text{Supp.}$ ) Several important observations can be made from the results: 1) Different networks exhibit varying performance on different tasks. For instance, SwinIR performs best on SR but worst on deblurring and dehazing. Uformer excels on denoising and deblurring but performs poorly on deraining and SR. 2) Networks with U-shape and multi-stage architectures present clear advantages on deblurring and dehazing. 3) MPRNet and NAFNet, which are mainly based on convolution operators, exhibit moderate performance across all tasks without outstanding results. 4) SwinIR, which employs plain architecture and spatial self-attention operators, outperforms other networks by a significant margin on SR. 5) The overall performance of Restormer is outstanding. Except for consistently being weaker than SwinIR on SR, it obtains considerable performance on almost all other tasks.", + "bbox": [ + 212, + 327, + 787, + 523 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.5 Analysis", + "text_level": 1, + "bbox": [ + 215, + 544, + 333, + 559 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we explain the above observations by analyzing the characteristics of different tasks and backbone networks.", + "bbox": [ + 212, + 568, + 785, + 595 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The degradation of SR lies in the compression of local information, resulting in a large loss of high-frequency details. Therefore, SR networks often require strong spatial information interaction capability, or even generative capability. The U-shape architecture, which incorporates multiple downsampling operations, may undermine the reconstruction of high-frequency information and intuitively escalates the difficulty of detail reconstruction. In contrast, the plain architecture that maintains feature sizes benefits SR. Besides, window self-attention has demonstrated a superior local fitting ability than convolution [8]. As a result, SwinIR, which is based on a plain structure and employs spatial self-attention operators, exhibits a distinct advantage on SR.", + "bbox": [ + 212, + 598, + 787, + 748 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Denoising entails smoothing the image to eliminate high-frequency noise and integrating low-frequency information to reconstruct a clear image. This task places no explicit unique requirement for the network, while its performance intuitively benefits from effective spatial information interaction. It is conjectured that the high performance of Restormer on denoising can be attributed to its ability to better smooth noise through channel-wise processing, akin to operating", + "bbox": [ + 212, + 750, + 787, + 840 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 217, + 114, + 228, + 126 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "X.Chen et al.", + "bbox": [ + 271, + 114, + 362, + 127 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/3494763775ec3beba53de9575a31a98f5ecf262539990d3a8ea3e5232d45557d.jpg", + "image_caption": [ + "Fig. 
4: Visual and LAM [16] comparisons between Restormer and SwinIR. The LAM results and DI values indicate that Restormer exploits significantly more information than SwinIR. However, SwinIR reconstructs much more details than Restormer." + ], + "image_footnote": [], + "bbox": [ + 215, + 141, + 787, + 320 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "in the frequency domain. In contrast, SwinIR and Uformer perform well due to their robust spatial information interaction ability of the spatial self-attention.", + "bbox": [ + 212, + 407, + 784, + 436 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Deblurring (specifically for motion blur here) involves addressing global motion shifts in the image. As a result, the ability to handle large-size inputs and the use of global or multi-scale information are necessary for deblurring networks. Thus, the networks based on the U-shape architecture all perform well on this task. Conversely, SwinIR, which employs the plain architecture and focuses more on local information processing, performs much worse than other networks.", + "bbox": [ + 212, + 439, + 784, + 530 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Similar phenomena can be observed for dehazing. Due to the involvement of the depth information in the haze model, the ability to use large-range or even global information is crucial. Besides, dehazing networks are required to handle low-frequency transformations, including alterations in color and contrast, both of which constitute global mappings. Therefore, SwinIR and Uformer, which rely more on local spatial information interaction, perform poorly on this task. On the contrary, Restormer exhibits exceptional performance.", + "bbox": [ + 212, + 532, + 787, + 638 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Deraining is relatively unique in that the rain is unevenly distributed in images, with significant differences between different raindrops and streaks. Thus, there is no clear pattern in the performance of different networks on deraining. Nevertheless, networks with higher complexity present better performance.", + "bbox": [ + 212, + 641, + 787, + 702 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Based on the above results and analysis, we can infer that the acceptable performance of a backbone network on a specific task is predicated on meeting the functional requirements of that task. It is notable that Restormer obtains exceptional task generality. This can be attributed to several factors: 1) The U-shape architecture enables the network to accommodate large-size input. 2) The transposed self-attention allows direct interaction of global information. 3) The presence of depth-wise convolution enables the network to process spatial information effectively. In summary, due to Restormer's comprehensive functionality, it is capable of meeting the diverse requirements of different tasks.", + "bbox": [ + 212, + 704, + 787, + 840 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "A Comparative Study of Image Restoration Networks", + "bbox": [ + 372, + 114, + 732, + 128 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 774, + 116, + 784, + 126 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/bc5a579a8cac12e0125a4ebb36e809b789459ae709e31330f172fa66f04d43b8.jpg", + "image_caption": [ + "Fig. 5: The network structure of X-Restormer. 
To enhance the spatial mapping ability of Restormer and create a more general network, we replace half of the transposed self-attention blocks in Restormer with spatial self-attention blocks. For TSA, we retain the preliminary multi-Dconv head transposed attention (MDTA) used in Restormer. For SSA, we adopt the overlapping cross-attention (OCA) in HAT [8]." + ], + "image_footnote": [], + "bbox": [ + 220, + 143, + 782, + 308 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4 General Backbone Network Design", + "text_level": 1, + "bbox": [ + 215, + 416, + 591, + 434 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Based on the benchmark experiments, we believe that the principle of designing a general backbone network should be to ensure that the network can fulfill the functional requirements of all tasks. As Restormer shows relatively good task generality, we select it as the starting point to design a more general network. By pinpointing and addressing the limitation of Restormer, we present an initial version of a general image restoration backbone network in this section.", + "bbox": [ + 212, + 446, + 787, + 537 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Limitation of Restormer. In the benchmark experiments, Restormer shows inferior performance to SwinIR on SR, particularly on Urban100. The qualitative comparisons also indicate this phenomenon in Fig. 4. From the visual and LAM [16] results, We can observe that Restormer can exploit large-range and even global information for the reconstruction. However, compared to SwinIR, it fails to reconstruct fine textures, even for self-repeated patterns. This discrepancy can be attributed to the U-shape architecture adopted by Restormer on the one hand, which increases the difficulty of reconstructing high-frequency information. On the other hand, Restormer relies on depth-wise convolution for spatial information interaction, whose spatial mapping capability is relatively weaker than the spatial self-attention in SwinIR. Considering that the U-shape architecture is indispensable for some tasks, we still need to retain this architectural design for task generality. To overcome the limitation of Restormer and design a more powerful backbone network, we choose to further enhance its spatial information interaction ability. An intuitive and feasible solution is to incorporate the spatial self-attention module into Restormer.", + "bbox": [ + 212, + 537, + 787, + 779 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Network structure. In Fig. 5, we present the structure of our proposed backbone network, denoted as X-Restormer. We choose the U-shape architecture to build the network. In contrast to Restormer, we replace half of the transposed self-attention blocks (TSAB) with spatial self-attention blocks (SSAB) to", + "bbox": [ + 212, + 779, + 787, + 840 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "X.Chen et al.", + "bbox": [ + 271, + 114, + 362, + 126 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "enhance the ability of spatial information interaction. 
Given an input feature $F_{in}$ , the two blocks process it alternately as:", + "bbox": [ + 215, + 146, + 782, + 176 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nF _ {t} = F _ {i n} + T S A \\left(L N \\left(F _ {i n}\\right)\\right), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 403, + 184, + 784, + 199 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nF _ {t \\_ o u t} = F _ {t} + F F N (L N (F _ {t})), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 202, + 784, + 217 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nF _ {s} = F _ {t \\_ o u t} + S S A (L N \\left(F _ {t \\_ o u t}\\right)), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 385, + 220, + 782, + 237 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nF _ {o u t} = F _ {s} + F F N \\left(L N \\left(F _ {s}\\right)\\right), \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 400, + 239, + 782, + 255 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where $F_{t}$ , $F_{t\\_out}$ , $F_{s}$ and $F_{s\\_out}$ represent the intermediate feature in TSAB, the output of TSAB, the intermediate feature in SSAB and the output of SSAB. $F_{out}$ means the output of the two consecutive blocks, and also serves as the input for the following two blocks. $TSA(\\cdot)$ and $SSA(\\cdot)$ indicate transposed self-attention (TSA) and spatial self-attention (SSA) modules. $LN(\\cdot)$ denotes layer normalization and $FFN(\\cdot)$ represents the feed-forward network.", + "bbox": [ + 212, + 260, + 784, + 349 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Specifically, we adopt the Multi-Dconv Transpose Attention (MDTA) as the TSA module. It first generates query $(Q)$ , key $(K)$ and value $(V)$ by applying $1 \\times 1$ convolutions followed by $3 \\times 3$ depth-wise convolutions. Then, the channel attention matrix of size $\\mathbb{R}^{C \\times C}$ is calculated by the dot-product of reshaped $Q$ and $K$ followed by a Softmax function. The schematic of TSA is shown in Fig. 3. Finally, the result is generated by the dot-product of the attention matrix and $V$ . For SSA, we adopt the Overlapping Cross-Attention (OCA) introduced in the HAT model [8]. We choose OCA because the shifted window mechanism in SwinIR is not intuitively suitable for our TSA-SSA consecutive blocks, and HAT demonstrates the effectiveness and superiority of OCA. For the specific calculation, $Q$ is produced by partitioning the input into non-overlapping windows, while $K$ and $V$ are generated by partitioning the input into overlapping windows with a manually set overlapping size. Apart from the different window partition methods, the calculation of OCA is essentially identical to that of standard window self-attention. For FFN, we employ the Gated-Dconv Feed-forward Network (GDFN) architecture, as used in Restormer. Instead of using two $1 \\times 1$ convolutions to construct an MLP, GDFN first processes input features through two $3 \\times 3$ depth-wise convolutions and $1 \\times 1$ convolutions. Then, the resulting features are combined via element-wise multiplication and pass through another $1 \\times 1$ convolution to produce the final output.", + "bbox": [ + 212, + 351, + 785, + 652 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We have also tried multiple design choices for SSAB and TSAB. Experiments can be found in $\\text{Supp}$ . 
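A minimal structural sketch of Eqs. (6)-(9) may help here; tsa and ssa are placeholders for MDTA and OCA (full definitions omitted), and a plain MLP stands in for GDFN, so this captures the block ordering rather than X-Restormer's exact modules:

```python
# Structural sketch of the consecutive TSAB -> SSAB computation, Eqs. (6)-(9).
import torch
import torch.nn as nn

class ConsecutiveBlocks(nn.Module):
    def __init__(self, dim: int, tsa: nn.Module, ssa: nn.Module):
        super().__init__()
        self.tsa, self.ssa = tsa, ssa  # stand-ins for MDTA and OCA
        self.ffn1 = nn.Sequential(nn.Linear(dim, 2 * dim), nn.GELU(),
                                  nn.Linear(2 * dim, dim))  # stand-in for GDFN
        self.ffn2 = nn.Sequential(nn.Linear(dim, 2 * dim), nn.GELU(),
                                  nn.Linear(2 * dim, dim))
        self.norms = nn.ModuleList([nn.LayerNorm(dim) for _ in range(4)])

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = x + self.tsa(self.norms[0](x))   # Eq. (6): F_t
        x = x + self.ffn1(self.norms[1](x))  # Eq. (7): F_t_out
        x = x + self.ssa(self.norms[2](x))   # Eq. (8): F_s
        x = x + self.ffn2(self.norms[3](x))  # Eq. (9): F_out
        return x

# Identity placeholders keep the sketch runnable end to end:
blocks = ConsecutiveBlocks(48, tsa=nn.Identity(), ssa=nn.Identity())
out = blocks(torch.randn(1, 64, 48))  # (batch, tokens, channels)
```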
We emphasize that our design of X-Restormer is not to develop novel architectures or modules to improve the performance on certain tasks, but to enhance the task generality of the network according to the principle of general backbone network design through as simple means as possible.", + "bbox": [ + 212, + 652, + 784, + 729 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "5 Experiments", + "text_level": 1, + "bbox": [ + 215, + 747, + 375, + 763 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "5.1 Experimental Setup", + "text_level": 1, + "bbox": [ + 215, + 773, + 426, + 789 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We conduct experiments of the proposed X-Restormer on the same datasets used in the benchmark experiment. For the network implementation, the network employs a 4-level encoder-decoder with three times down-sampling and", + "bbox": [ + 212, + 794, + 784, + 839 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "A Comparative Study of Image Restoration Networks", + "bbox": [ + 372, + 114, + 732, + 128 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 767, + 116, + 782, + 126 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/6a556fb570e72e6956897830b6b5d907dadbc2bcc360e0588e4e8fa37436de28.jpg", + "table_caption": [ + "Table 3: Quantitative results on $\\times 4$ image SR. * means the model pretrained on $\\times 2\\mathrm{{SR}}$ ." + ], + "table_footnote": [], + "table_body": "
Model | Set5 | Set14 | BSD100 | Urban100 | Manga109
RCAN | 32.63/0.9002 | 28.87/0.7889 | 27.77/0.7436 | 26.82/0.8087 | 31.22/0.9173
RCAN-it | 32.69/0.9007 | 28.99/0.7922 | 27.87/0.7459 | 27.16/0.8168 | 31.78/0.9217
SwinIR* | 32.92/0.9044 | 29.09/0.7950 | 27.92/0.7489 | 27.45/0.8254 | 32.03/0.9260
IPT | 32.64/- | 29.01/- | 27.82/- | 27.26/- | -/-
EDT | 32.82/0.9031 | 29.09/0.7939 | 27.91/0.7483 | 27.46/0.8246 | 32.05/0.9254
NAFNet | 32.79/0.9010 | 29.03/0.7919 | 27.86/0.7463 | 27.00/0.8112 | 31.77/0.9216
SwinIR | 32.88/0.9041 | 29.07/0.7944 | 27.93/0.7490 | 27.47/0.8258 | 31.96/0.9255
Restormer | 32.94/0.9039 | 29.06/0.7934 | 27.91/0.7482 | 27.32/0.8199 | 31.96/0.9244
X-Restormer | 33.16/0.9058 | 29.17/0.7963 | 28.00/0.7512 | 27.66/0.8291 | 32.38/0.9279
", + "bbox": [ + 218, + 185, + 524, + 265 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/360f87f234d35f297f415b082aba6ed13fd7e22e41404a2bd8faed090b8a1856.jpg", + "table_caption": [ + "Table 4: Quantitative results on image denoising with the noise level $\\sigma = 50$" + ], + "table_footnote": [], + "table_body": "
Model | CBSD68 | Kodak24 | McMaster | Urban100
FFDNet | 27.96/- | 28.98/- | 29.18/- | 28.05/-
RNAN | 28.27/- | 29.58/- | 29.72/- | 29.08/-
RDN | 28.31/- | 29.66/- | -/- | 29.38/-
IPT | 28.39/- | 29.64/- | 29.98/- | 29.71/-
DRUNet | 28.51/- | 29.86/- | 30.08/- | 29.61/-
SwinIR | 28.56/0.8118 | 29.95/0.8221 | 30.20/0.8489 | 29.88/0.8861
Uformer | 28.55/0.8130 | 29.97/0.8244 | 30.16/0.8485 | 29.98/0.8900
Restormer | 28.60/0.8130 | 30.01/0.8237 | 30.30/0.8517 | 30.02/0.8898
X-Restormer | 28.63/0.8138 | 30.05/0.8245 | 30.33/0.8518 | 30.24/0.8928
", + "bbox": [ + 527, + 185, + 785, + 265 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/16d32f30c1df4b972783ec42405c5c8c2c8029720a133721b1ae340ca2287c35.jpg", + "table_caption": [ + "Table 5: Quantitative results on image Table 6: Quantitative results on image deraindeblurring (motion blur). ing." + ], + "table_footnote": [], + "table_body": "
Model | GoPro | HIDE | RealBlur-R | RealBlur-J
SPAIR | 32.06/0.953 | 30.29/0.931 | -/- | 28.81/0.875
MIMO-UNet+ | 32.45/0.957 | 29.99/0.930 | 35.54/0.947 | 27.63/0.837
IPT | 32.52/- | -/- | -/- | -/-
MPRNet | 32.66/0.959 | 30.96/0.939 | 35.99/0.952 | 28.70/0.873
Uformer | 33.05/0.942 | 30.89/0.920 | 36.19/0.956 | 29.09/0.886
NAFNet | 33.08/0.942 | 31.22/0.924 | 35.97/0.952 | 28.32/0.857
Restormer | 32.92/0.940 | 31.22/0.923 | 36.19/0.957 | 28.96/0.879
X-Restormer | 33.44/0.946 | 31.76/0.930 | 36.27/0.958 | 28.87/0.878
", + "bbox": [ + 218, + 318, + 785, + 397 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/89f4a8927a215e30130e8db74b87425cd2de4f8ea4b71ae98519145785c77361.jpg", + "table_caption": [ + "Table 7: Quantitative results on image dehazing." + ], + "table_footnote": [], + "table_body": "
ModelPFDNFFA-NetAECR-NeMAXIMDehazeFormerMPRNetNAFNetRestormerX-Restormer
SOTS Indoor32.68/0.97636.39/0.98937.17/0.99039.72/-40.05/0.99640.34/0.99438.97/0.99441.97/0.99442.90/0.995
", + "bbox": [ + 220, + 436, + 785, + 467 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "up-sampling. To maintain a similar number of parameters as Restormer, from level-1 to level-4 (i.e., $L_{1} \\sim L_{4}$ in the figure) the numbers of consecutive blocks (containing a TSAB and a SSAB) are [2, 4, 4, 4] and the number of refinement blocks (i.e., $L_{r}$ ) is 4. Attention heads in TSA and SSA are both [1, 2, 4, 8], and channel numbers are [48, 96, 192, 384]. For OCA, the window size and the overlapping ratio are set to 8 and 0.5 as in HAT. The channel expansion factor in GDFN is 2.66. The overall parameters are 26.06M, slightly less than Restormer of 26.13M. We adopt the same training settings as Restormer in the benchmark experiment to optimize the model. We use the AdamW optimizer with $\\beta_{1} = 0.9$ and $\\beta_{2} = 0.99$ , utilizing an initial learning rate of $3e^{-4}$ . The learning rate decay follows a cosine scheduler with intervals at 92k and 208k iterations, and the total training iterations are 300K. The input patch size is $256 \\times 256$ and the batch size is 32. For data augmentation, we use horizontal and vertical flips. We utilize the $L_{1}$ loss function to train the model. Notably, we do not adopt any training tricks (e.g., $\\times 2$ SR pretraining or EMA strategy) or testing tricks (e.g., TLC [11]).", + "bbox": [ + 212, + 493, + 787, + 720 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5.2 Experimental Results", + "text_level": 1, + "bbox": [ + 215, + 739, + 441, + 755 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We compare our X-Restormer with the top three models in the benchmark experiments (based on the same test configurations) as well as several state-of-the-art approaches for each task (based on the reported performance in their papers) in this section. PSNR(dB)/SSIM is provided in following tables. The best and second-best performance results are in **bold** and **underline**.", + "bbox": [ + 212, + 763, + 785, + 840 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "X.Chen et al.", + "bbox": [ + 271, + 114, + 362, + 127 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Image SR. In Tab. 3, we present the quantitative results of $\\times 4$ SR on five benchmark datasets: Set5 [2], Set14 [50], BSD100 [31], Urban100 [18] and Manga109 [33]. The state-of-the-art approaches, including RCAN [57], RCAN-it [27], SwinIR [35], IPT [4] and EDT [22] are compared in this experiment. X-Restormer significantly outperforms Restormer by $0.22\\mathrm{dB}$ on Set5, $0.34\\mathrm{dB}$ on Urban100 and $0.42\\mathrm{dB}$ on Manga109. This demonstrates the effectiveness of our design in enhancing the spatial mapping ability of Restormer. Furthermore, X-Restormer surpasses the SOTA method EDT by $0.2\\mathrm{dB}$ on Urban100 and $0.35\\mathrm{dB}$ on Manga109, indicating the effectiveness of X-Restormer on SR. Despite this, we point out that our method still cannot beat the most powerful SR approaches, e.g., HAT. This is due to the inevitable weakening of SR performance for the U-shape architecture. In terms of SR, the plain residual in residual architecture is still more effective.", + "bbox": [ + 212, + 145, + 789, + 327 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Image denoising. In Tab. 
4, we provide the quantitative results of Gaussian denoising with the noise level $\\sigma = 50$ on four benchmark datasets: CBSD68 [32], Kodak24 [13], McMaster [56] and Urban100 [18]. The state-of-the-art methods: FFDNet [55], RNAN [58], RDN [60], IPT [4] and DRUNet [52] are compared in this experiment. X-Restormer achieves the state-of-the-art performance, surpassing SwinIR by 0.36dB and outperforming Restormer by 0.22dB on Urban100. This demonstrates the superiority of X-Restormer on image denoising.", + "bbox": [ + 212, + 330, + 787, + 435 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Image deblurring. In Tab. 5, we compare the results of X-Restormer with the state-of-the-art methods: SPAIR [36], MIMO-UNet+ [10], IPT [4] and MPR-Net [49] on both synthetic datasets (Gopro [34] and HIDE [39]) and real-world datasets (RealBlur-R and RealBlur-J [38]). X-Restormer achieves large performance gains over the other models on synthetic datasets, with an improvement of $0.36\\mathrm{dB}$ on Gopro compared to $\\mathrm{NAFNet}^4$ and $0.54\\mathrm{dB}$ on HIDE compared to Restormer. Besides, our X-Restormer obtains the state-of-the-art performance on RealBlur-R and considerable performance on RealBlur-J, showing the effectiveness of our method on real-world motion deblurring scenarios.", + "bbox": [ + 212, + 438, + 787, + 574 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Image deraining. In Tab. 6, we present the quantitative results of deraining on Test100 [19], Rain100L [47], Rain100H [47], Test1200 [51] and Test2800 [15]. The state-of-the-art methods: PreNet [37], MSPFN [20], MPRNet [49] and SPAIR [36] are compared. X-Restormer outperforms the other models on Test100, Rain100H and Rain100L but performs inferior to Restormer on Test1200 and Test2800. This discrepancy is due to the variations in degradation produced by different rain models. Nonetheless, X-Restormer exhibits comparable performance to state-of-the-art methods, showing its effectiveness on image deraining.", + "bbox": [ + 212, + 575, + 787, + 698 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Image dehazing. In Tab. 7, we provide the quantitative results on SOTS Indoor [21]. We compare the state-of-the-art approaches: PFDN [12], FFA-Net [46], AECR-Net [44], MAXIM [41] and DehazeFormer [40] in this experiment. Notably, X-Restormer model significantly outperforms Restormer by a large margin of $0.93\\mathrm{dB}$ . When compared to the state-of-the-art dehazing method DehazeFormer, our method achieves a breakthrough performance gain of $2.85\\mathrm{dB}$ . These results demonstrate the superiority of X-Restormer for image dehazing.", + "bbox": [ + 212, + 700, + 787, + 806 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "A Comparative Study of Image Restoration Networks", + "bbox": [ + 372, + 114, + 730, + 128 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 767, + 114, + 785, + 126 + ], + "page_idx": 12 + }, + { + "type": "page_footnote", + "text": "4 By using TLC, on Gopro/HIDE, NAFNet: 33.69/31.32, X-Restormer: 33.89/31.87.", + "bbox": [ + 217, + 824, + 781, + 839 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/90710e3a2bff86da7730c6d009a9eb241699bbbf6e525cde0b7894789accdcbc.jpg", + "table_caption": [ + "Table 8: Quantitative results on All-in-One restoration." + ], + "table_footnote": [], + "table_body": "
ModelSRDenoisingDeblurringDerainingDehazing
×2×4σ = 15σ = 25σ = 50
MPRNet33.68/0.930028.17/0.804334.27/0.928031.82/0.890128.60/0.811930.00/0.881231.20/0.906835.06/0.9874
SwinIR33.83/0.930128.14/0.804334.27/0.928331.83/0.890628.59/0.814329.06/0.851930.03/0.898331.48/0.9823
Uformer29.99/0.880527.88/0.794933.86/0.925431.42/0.886327.87/0.789129.64/0.872527.53/0.856929.92/0.9714
Restormer34.51/0.934128.70/0.817934.43/0.930332.02/0.894228.87/0.822230.54/0.890231.91/0.913436.95/0.9897
NAFNet34.12/0.931428.17/0.808734.18/0.928131.76/0.890828.64/0.818730.38/0.891131.56/0.914930.84/0.9797
X-Restormer34.72/0.936028.81/0.821734.67/0.933032.26/0.898329.12/0.829330.85/0.898332.27/0.922938.24/0.9914
", + "bbox": [ + 218, + 171, + 787, + 253 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "All-in-One Restoration. We conduct experiments on an all-in-one restoration setting to show the effectiveness of different backbone networks in addressing various tasks simultaneously. Networks are trained on five tasks with varying degradation levels (i.e., $\\times 2$ , $\\times 4$ for SR and $\\sigma \\in (0,50)$ random level for denoising). The sampling probability for each task is the same, and the average performance on benchmark datasets is calculated. As shown in Tab. 8, with the relatively better task generality among the existing networks, Restormer exhibits better performance on the all-in-one restoration. By overcoming the limitation of Restormer, our X-Restormer demonstrates further advantages in handling multiple tasks concurrently, with its performance far exceeding other networks on all tasks. In contrast, the other networks are more or less affected by optimization conflicts across different tasks (e.g., SwinIR performs inferior to Restormer even on SR). These indicate that a general backbone network is of great significance for building a general model that process multiple image restoration tasks, which can effectively mitigate task conflicts with the performance drops.", + "bbox": [ + 212, + 282, + 785, + 508 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Summary. With enhanced spatial mapping capability, our X-Restormer can significantly outperform Restormer. Specifically, X-Restormer obtains performance gains against Restormer of 0.42dB (Manga109), 0.22dB (Urban100), 0.54dB (HIDE), 0.61dB (Rain100H) and 0.93dB (SOTS Indoor) on image SR, denoising, deblurring, deraining and dehazing, respectively, showing the effectiveness of our design. Despite its simplicity, X-Restormer obtains state-of-the-art performance on all these five tasks and present the best task generality among the compared methods. Furthermore, we show that a more general backbone network can also better handle multiple restoration tasks simultaneously. We hope it can inspire more works on the general image restoration backbone network design.", + "bbox": [ + 212, + 508, + 785, + 660 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 215, + 685, + 359, + 702 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In this paper, we conduct a comparative study of existing image restoration backbone networks to design a general backbone network. Five representative networks are chosen for the benchmark experiment across selected five tasks. The results indicate that comprehensive functionality is crucial for designing a general restoration backbone network. We select Restormer as the baseline and introduce spatial self-attention into it to enhance the spatial information interaction capability. 
Experimental results show that our X-Restormer achieves significant performance improvement and presents the best task generality.", + "bbox": [ + 212, + 719, + 785, + 840 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "X.Chen et al.", + "bbox": [ + 271, + 114, + 361, + 126 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 217, + 143, + 401, + 162 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This work was partially supported by National Natural Science Foundation of China (Grant No.62276251, 62272450), and the Joint Lab of CAS-HK. This work was also supported in part by Macau Science and Technology Development Fund under SKLIOTSC-2021-2023 and 0022/2022/A.", + "bbox": [ + 215, + 172, + 785, + 233 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 217, + 253, + 321, + 270 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1. Abuolaim, A., Brown, M.S.: Defocus deblurring using dual-pixel data. In: European Conference on Computer Vision. pp. 111-126. Springer (2020)", + "2. Bevilacqua, M., Roumy, A., Guillemot, C., Morel, M.L.A.: Low-complexity single-image super-resolution based on nonnegative neighbor embedding. In: British Machine Vision Conference (BMVC) (2012)", + "3. Cai, B., Xu, X., Jia, K., Qing, C., Tao, D.: Dehazenet: An end-to-end system for single image haze removal. IEEE transactions on image processing 25(11), 5187-5198 (2016)", + "4. Chen, H., Wang, Y., Guo, T., Xu, C., Deng, Y., Liu, Z., Ma, S., Xu, C., Xu, C., Gao, W.: Pre-trained image processing transformer. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 12299-12310 (2021)", + "5. Chen, L., Chu, X., Zhang, X., Sun, J.: Simple baselines for image restoration. In: European Conference on Computer Vision. pp. 17-33. Springer (2022)", + "6. Chen, L., Lu, X., Zhang, J., Chu, X., Chen, C.: Hinet: Half instance normalization network for image restoration. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 182-192 (2021)", + "7. Chen, X., Wang, X., Zhang, W., Kong, X., Qiao, Y., Zhou, J., Dong, C.: Hat: Hybrid attention transformer for image restoration. arXiv preprint arXiv:2309.05239 (2023)", + "8. Chen, X., Wang, X., Zhou, J., Qiao, Y., Dong, C.: Activating more pixels in image super-resolution transformer. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 22367-22377 (2023)", + "9. Chen, Z., Zhang, Y., Gu, J., Kong, L., Yang, X., Yu, F.: Dual aggregation transformer for image super-resolution. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 12312-12321 (2023)", + "0. Cho, S.J., Ji, S.W., Hong, J.P., Jung, S.W., Ko, S.J.: Rethinking coarse-to-fine approach in single image deblurring. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 4641-4650 (2021)", + "1. Chu, X., Chen, L., Chen, C., Lu, X.: Improving image restoration by revisiting global information aggregation. In: European Conference on Computer Vision. pp. 53-71. Springer (2022)", + "2. Dong, J., Pan, J.: Physics-based feature dehazing networks. In: European Conference on Computer Vision. pp. 188-204. Springer (2020)", + "3. Franzen, R.: Kodak lossless true color image suite. 
source: http://r0k.us/graphics/kodak 4(2) (1999)", + "4. Fu, X., Huang, J., Zeng, D., Huang, Y., Ding, X., Paisley, J.: Removing rain from single images via a deep detail network. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3855-3863 (2017)", + "5. Fu, X., Huang, J., Zeng, D., Huang, Y., Ding, X., Paisley, J.: Removing rain from single images via a deep detail network. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3855-3863 (2017)" + ], + "bbox": [ + 225, + 282, + 784, + 839 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "A Comparative Study of Image Restoration Networks", + "bbox": [ + 372, + 114, + 730, + 128 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "16. Gu, J., Dong, C.: Interpreting super-resolution networks with local attribution maps. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9199-9208 (2021)", + "17. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)", + "18. Huang, J.B., Singh, A., Ahuja, N.: Single image super-resolution from transformed self-exemplars. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 5197-5206 (2015)", + "19. Huang, J.B., Singh, A., Ahuja, N.: Single image super-resolution from transformed self-exemplars. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 5197-5206 (2015)", + "20. Jiang, K., Wang, Z., Yi, P., Chen, C., Huang, B., Luo, Y., Ma, J., Jiang, J.: Multiscale progressive fusion network for single image deraining. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8346-8355 (2020)", + "21. Li, B., Ren, W., Fu, D., Tao, D., Feng, D., Zeng, W., Wang, Z.: Benchmarking single-image dehazing and beyond. IEEE Transactions on Image Processing 28(1), 492-505 (2018)", + "22. Li, W., Lu, X., Qian, S., Lu, J., Zhang, X., Jia, J.: On efficient transformer-based image pre-training for low-level vision. arXiv preprint arXiv:2112.10175 (2021)", + "23. Li, Y., Zhang, Y., Timofte, R., Van Gool, L., Tu, Z., Du, K., Wang, H., Chen, H., Li, W., Wang, X., et al.: Ntire 2023 challenge on image denoising: Methods and results. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 1904-1920 (2023)", + "24. Li, Y., Tan, R.T., Guo, X., Lu, J., Brown, M.S.: Rain streak removal using layer priors. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2736-2744 (2016)", + "25. Liang, J., Cao, J., Sun, G., Zhang, K., Van Gool, L., Timofte, R.: Swinir: Image restoration using swin transformer. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 1833-1844 (2021)", + "26. Lim, B., Son, S., Kim, H., Nah, S., Mu Lee, K.: Enhanced deep residual networks for single image super-resolution. In: Proceedings of the IEEE conference on computer vision and pattern recognition workshops. pp. 136-144 (2017)", + "27. Lin, Z., Garg, P., Banerjee, A., Magid, S.A., Sun, D., Zhang, Y., Van Gool, L., Wei, D., Pfister, H.: Revisiting rcan: Improved training for image super-resolution (2022)", + "28. Liu, J., Yang, W., Yang, S., Guo, Z.: Erase or fill? 
deep joint recurrent rain removal and reconstruction in videos. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3233-3242 (2018)", + "29. Liu, L., Xie, L., Zhang, X., Yuan, S., Chen, X., Zhou, W., Li, H., Tian, Q.: Tape: Task-agnostic prior embedding for image restoration. In: European Conference on Computer Vision. pp. 447-464. Springer (2022)", + "30. Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10012-10022 (2021)", + "31. Martin, D., Fowlkes, C., Tal, D., Malik, J.: A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In: Proceedings of the IEEE International Conference on Computer Vision. vol. 2, pp. 416-423. IEEE (2001)" + ], + "bbox": [ + 215, + 147, + 784, + 839 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "X.Chen et al.", + "bbox": [ + 271, + 114, + 361, + 126 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "32. Martin, D., Fowlkes, C., Tal, D., Malik, J.: A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In: Proceedings of the IEEE International Conference on Computer Vision. vol. 2, pp. 416-423. IEEE (2001)", + "33. Matsui, Y., Ito, K., Aramaki, Y., Fujimoto, A., Ogawa, T., Yamasaki, T., Aizawa, K.: Sketch-based manga retrieval using manga109 dataset. Multimedia Tools and Applications 76(20), 21811-21838 (2017)", + "34. Nah, S., Hyun Kim, T., Mu Lee, K.: Deep multi-scale convolutional neural network for dynamic scene deblurring. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3883-3891 (2017)", + "35. Niu, B., Wen, W., Ren, W., Zhang, X., Yang, L., Wang, S., Zhang, K., Cao, X., Shen, H.: Single image super-resolution via a holistic attention network. In: European Conference on Computer Vision. pp. 191-207. Springer (2020)", + "36. Purohit, K., Suin, M., Rajagopalan, A., Boddeti, V.N.: Spatially-adaptive image restoration using distortion-guided networks. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 2309-2319 (2021)", + "37. Ren, D., Zuo, W., Hu, Q., Zhu, P., Meng, D.: Progressive image deraining networks: A better and simpler baseline. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 3937-3946 (2019)", + "38. Rim, J., Lee, H., Won, J., Cho, S.: Real-world blur dataset for learning and benchmarking deblurring algorithms. In: European conference on computer vision. pp. 184-201. Springer (2020)", + "39. Shen, Z., Wang, W., Lu, X., Shen, J., Ling, H., Xu, T., Shao, L.: Human-aware motion deblurring. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5572-5581 (2019)", + "40. Song, Y., He, Z., Qian, H., Du, X.: Vision transformers for single image dehazing. IEEE Transactions on Image Processing 32, 1927-1941 (2023)", + "41. Tu, Z., Talebi, H., Zhang, H., Yang, F., Milanfar, P., Bovik, A., Li, Y.: Maxim: Multi-axis mlp for image processing. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5769-5780 (2022)", + "42. 
Wang, X., Xie, L., Yu, K., Chan, K.C., Loy, C.C., Dong, C.: BasicSR: Open source image and video restoration toolbox. https://github.com/XPixelGroup/BasicSR (2022)", + "43. Wang, Z., Cun, X., Bao, J., Zhou, W., Liu, J., Li, H.: Uformer: A general u-shaped transformer for image restoration. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 17683-17693 (2022)", + "44. Wu, H., Qu, Y., Lin, S., Zhou, J., Qiao, R., Zhang, Z., Xie, Y., Ma, L.: Contrastive learning for compact single image dehazing. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 10551-10560 (2021)", + "45. Yang, W., Tan, R.T., Feng, J., Liu, J., Guo, Z., Yan, S.: Deep joint rain detection and removal from a single image. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1357-1366 (2017)", + "46. Yang, W., Tan, R.T., Feng, J., Liu, J., Guo, Z., Yan, S.: Deep joint rain detection and removal from a single image. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1357-1366 (2017)", + "47. Yang, W., Tan, R.T., Feng, J., Liu, J., Guo, Z., Yan, S.: Deep joint rain detection and removal from a single image. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1357-1366 (2017)", + "48. Zamir, S.W., Arora, A., Khan, S., Hayat, M., Khan, F.S., Yang, M.H.: Restormer: Efficient transformer for high-resolution image restoration. In: Proceedings of the" + ], + "bbox": [ + 212, + 146, + 787, + 839 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "A Comparative Study of Image Restoration Networks", + "bbox": [ + 372, + 114, + 732, + 128 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 767, + 116, + 784, + 126 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5728-5739 (2022)", + "49. Zamir, S.W., Arora, A., Khan, S., Hayat, M., Khan, F.S., Yang, M.H., Shao, L.: Multi-stage progressive image restoration. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 14821-14831 (2021)", + "50. Zeyde, R., Elad, M., Protter, M.: On single image scale-up using sparse-representations. In: International conference on curves and surfaces. pp. 711-730. Springer (2010)", + "51. Zhang, H., Patel, V.M.: Density-aware single image de-raining using a multi-stream dense network. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 695-704 (2018)", + "52. Zhang, K., Li, Y., Zuo, W., Zhang, L., Van Gool, L., Timofte, R.: Plug-and-play image restoration with deep denoiser prior. IEEE Transactions on Pattern Analysis and Machine Intelligence 44(10), 6360-6376 (2021)", + "53. Zhang, K., Zuo, W., Chen, Y., Meng, D., Zhang, L.: Beyond a gaussian denoiser: Residual learning of deep cnn for image denoising. IEEE transactions on image processing 26(7), 3142-3155 (2017)", + "54. Zhang, K., Zuo, W., Gu, S., Zhang, L.: Learning deep cnn denoiser prior for image restoration. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3929-3938 (2017)", + "55. Zhang, K., Zuo, W., Zhang, L.: Ffdnet: Toward a fast and flexible solution for cnn-based image denoising. IEEE Transactions on Image Processing 27(9), 4608-4622 (2018)", + "56. 
Zhang, L., Wu, X., Buades, A., Li, X.: Color demosaicking by local directional interpolation and nonlocal adaptive thresholding. Journal of Electronic imaging 20(2), 023016-023016 (2011)", + "57. Zhang, Y., Li, K., Li, K., Wang, L., Zhong, B., Fu, Y.: Image super-resolution using very deep residual channel attention networks. In: European conference on computer vision. pp. 286-301. Springer (2018)", + "58. Zhang, Y., Li, K., Li, K., Zhong, B., Fu, Y.: Residual non-local attention networks for image restoration. arXiv preprint arXiv:1903.10082 (2019)", + "59. Zhang, Y., Tian, Y., Kong, Y., Zhong, B., Fu, Y.: Residual dense network for image super-resolution. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2472-2481 (2018)", + "60. Zhang, Y., Tian, Y., Kong, Y., Zhong, B., Fu, Y.: Residual dense network for image restoration. IEEE transactions on pattern analysis and machine intelligence 43(7), 2480-2495 (2020)" + ], + "bbox": [ + 217, + 147, + 784, + 659 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 217, + 114, + 235, + 126 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "X.Chen et al.", + "bbox": [ + 271, + 114, + 361, + 126 + ], + "page_idx": 17 + } +] \ No newline at end of file diff --git a/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/76c19445-7741-420c-b1a4-d913d41c13ff_model.json b/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/76c19445-7741-420c-b1a4-d913d41c13ff_model.json new file mode 100644 index 0000000000000000000000000000000000000000..f4d2f23926a2ef8fa32ea20e35cf0049a77a171f --- /dev/null +++ b/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/76c19445-7741-420c-b1a4-d913d41c13ff_model.json @@ -0,0 +1,2458 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.225, + 0.142, + 0.78, + 0.187 + ], + "angle": 0, + "content": "A Comparative Study of Image Restoration Networks for General Backbone Network Design" + }, + { + "type": "text", + "bbox": [ + 0.265, + 0.212, + 0.737, + 0.244 + ], + "angle": 0, + "content": "Xiangyu Chen\\(^{1,2,3*}\\) Zheyuan Li\\(^{2,1*}\\) Yuandong \\(\\mathrm{Pu}^{3,4*}\\) Yihao Liu\\(^{2,3}\\) \nJiantao Zhou\\(^{1\\dagger}\\) Yu Qiao\\(^{2,3}\\) Chao Dong\\(^{2,3,5\\dagger}\\)" + }, + { + "type": "text", + "bbox": [ + 0.239, + 0.255, + 0.765, + 0.313 + ], + "angle": 0, + "content": "1University of Macau 2Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences 3Shanghai Artificial Intelligence Laboratory 4Shanghai Jiao Tong University 5Shenzhen University of Advanced Technology https://github.com/Andrew0613/X-Restormer" + }, + { + "type": "text", + "bbox": [ + 0.262, + 0.352, + 0.744, + 0.573 + ], + "angle": 0, + "content": "Abstract. Despite the significant progress made by deep models in various image restoration tasks, existing image restoration networks still face challenges in terms of task generality. An intuitive manifestation is that networks which excel in certain tasks often fail to deliver satisfactory results in others. To illustrate this point, we select five representative networks and conduct a comparative study on five classic image restoration tasks. First, we provide a detailed explanation of the characteristics of different image restoration tasks and backbone networks. 
Following this, we present the benchmark results and analyze the reasons behind the performance disparity of different models across various tasks. Drawing from this comparative study, we propose that a general image restoration backbone network needs to meet the functional requirements of diverse tasks. Based on this principle, we design a new general image restoration backbone network, X-Restormer. Extensive experiments demonstrate that X-Restormer possesses good task generality and achieves state-of-the-art performance across a variety of tasks." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.601, + 0.377, + 0.617 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.634, + 0.788, + 0.816 + ], + "angle": 0, + "content": "Image restoration aims to generate high-quality images from degraded images. In recent years, deep learning has achieved great success in this field, with numerous networks being proposed to address various image restoration tasks. Initially, networks are primarily designed to solve specific restoration tasks and are typically validated only on selected tasks. As deep learning techniques have continued to evolve, there has been an increasing focus on the development of general-purpose networks that can be applied to a broad range of tasks. This trend is particularly evident in the high-level vision field, where new backbone networks are being designed to support multiple tasks [17, 30], including classification, detection and segmentation. For image restoration, although more and more backbone networks can handle multiple restoration tasks, their task generality is still limited, as illustrated in Fig. 1. For instance, SwinIR [25] achieves" + }, + { + "type": "page_footnote", + "bbox": [ + 0.232, + 0.825, + 0.551, + 0.84 + ], + "angle": 0, + "content": "* Equal contributions, † Corresponding author." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.364, + 0.127 + ], + "angle": 0, + "content": "X.Chen et al." + }, + { + "type": "image", + "bbox": [ + 0.264, + 0.145, + 0.737, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.367, + 0.788, + 0.41 + ], + "angle": 0, + "content": "Fig. 1: Relative performance difference of different backbone networks on five image restoration tasks1. The existing representative networks exhibit diverse performance on these tasks, while our method presents superior task generality." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.439, + 0.788, + 0.546 + ], + "angle": 0, + "content": "state-of-the-art performance on image super-resolution (SR) but falls short on image deblurring and dehazing. Conversely, Restormer [48] performs exceptionally well on image dehazing and deraining but is less effective on image SR. This discrepancy can be attributed to the fact that the characteristics of image degradation vary across different image restoration tasks. While all image restoration tasks involve mapping degraded images to clean images, the requirements for the capability of backbone networks differ depending on specific tasks." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.547, + 0.789, + 0.773 + ], + "angle": 0, + "content": "Designing a general image restoration backbone network presents a significant challenge. 
However, the development of such a network holds considerable value, as it has the potential to greatly reduce costs associated with research and application. To achieve this goal, we first conduct a comparative study of mainstream backbone networks on the representative tasks, including image SR, denoising, deblurring, deraining and dehazing. These five tasks are chosen due to the distinct characteristics of their degradation. Five representative backbone networks are selected in the study, including MPRNet [49], Uformer [43], SwinIR [25], Restormer [48] and NAFNet [5]. These five networks encompass classic architectures such as U-shape architecture, plain residual-in-residual architecture and multi-stage progressive architecture. They also employ several common operators, including convolution, spatial self-attention and transposed self-attention [48]. We benchmark the five representative methods on the selected five tasks. The experimental results clearly reflect the performance disparity of different backbone networks on different tasks. We then conduct a detailed anal" + }, + { + "type": "page_footnote", + "bbox": [ + 0.218, + 0.78, + 0.788, + 0.843 + ], + "angle": 0, + "content": "1 We set the minimum average performance of the networks on test sets in Tab. 2 for the task (i) as the lower bound \\( P_{lower}^{(i)} \\), and set the average performance of X-Restormer for each task as the upper bound \\( P_{upper}^{(i)} \\). The ordinate of each point in the figure with performance \\( P^{(i)} \\) is calculated by \\( (P^{(i)} - P_{lower}^{(i)}) / P_{upper}^{(i)} \\)." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.374, + 0.115, + 0.733, + 0.13 + ], + "angle": 0, + "content": "A Comparative Study of Image Restoration Networks" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.784, + 0.207 + ], + "angle": 0, + "content": "ysis of the characteristics of these tasks and these backbone networks to explain the reasons behind the performance differences. Based on the comparative study, we propose that a general backbone network must be highly comprehensive in terms of functionality that meets the diverse needs of various tasks." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.21, + 0.785, + 0.42 + ], + "angle": 0, + "content": "It is noteworthy that Restormer stands out in the comparative study, ranking within the top two across all five tasks. This superior performance can be attributed to several key designs. First, Restormer's U-shape architecture allows it to process large-size inputs, which is crucial for the tasks that deal with large areas of degradation. Then, the network employs transposed self-attention that utilizes channel-wise features as tokens, achieving the information interaction among channels and enabling the mapping with a global receptive field. Additionally, the incorporation of numerous depth-wise convolutions activates the considerable spatial information interaction ability of the network. From a functional perspective, Restormer integrates the key capabilities of the other compared networks, thereby exhibiting commendable task generality in the comparative study. However, the spatial mapping ability of Restormer still appears to be somewhat deficient, as indicated by its quantitatively and qualitatively subpar performance in comparison to SwinIR for \\(\\mathrm{SR}^2\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.423, + 0.787, + 0.604 + ], + "angle": 0, + "content": "This inferiority is hypothesized to originate from the inherent challenge of detail reconstruction posed by the U-shape architecture, coupled with the relatively weak spatial mapping capability of depth-wise convolution, particularly when compared to spatial self-attention (i.e., window-based self-attention in SwinIR). To address this limitation, a plausible solution is the introduction of spatial self-attention to Restormer. To achieve this design, we alternately replace half of transposed self-attention blocks with overlapping cross-attention blocks [8], which are proven to have strong spatial information interaction capability, to construct a new network, X-Restormer. Extensive experiments show that this simple modification can significantly enhance the performance of Restormer without increasing the number of parameters. Moreover, our X-Restormer obtains state-of-the-art performance on all five tasks, exhibiting the best task generality." + }, + { + "type": "text", + "bbox": [ + 0.24, + 0.607, + 0.633, + 0.621 + ], + "angle": 0, + "content": "Our main contributions can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.64, + 0.784, + 0.685 + ], + "angle": 0, + "content": "- We conduct a comparative study by constructing an image restoration benchmark, highlighting the challenges faced by existing image restoration backbone networks in task generality." + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.688, + 0.785, + 0.747 + ], + "angle": 0, + "content": "- Based on the benchmark results, we perform a detailed analysis of the characteristics of different degradations and networks. We emphasize that the general image restoration backbone network design must meet the functional requirements of diverse tasks." + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.751, + 0.784, + 0.81 + ], + "angle": 0, + "content": "- By further enhancing the spatial mapping ability of Restormer, we design a preliminary general backbone network, X-Restormer. Without additional parameters, X-Restormer achieves significant performance improvement over existing networks and exhibits superior task generality." + }, + { + "type": "list", + "bbox": [ + 0.228, + 0.64, + 0.785, + 0.81 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.218, + 0.825, + 0.783, + 0.84 + ], + "angle": 0, + "content": "2 In general, models' SR performance is highly related to the spatial mapping ability." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.117, + 0.23, + 0.127 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.116, + 0.364, + 0.127 + ], + "angle": 0, + "content": "X.Chen et al." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.229, + 0.146, + 0.241, + 0.259 + ], + "angle": 270, + "content": "Ground Truth Degraded Image" + }, + { + "type": "image", + "bbox": [ + 0.246, + 0.146, + 0.346, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.352, + 0.146, + 0.452, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.456, + 0.146, + 0.558, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.563, + 0.146, + 0.665, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.671, + 0.146, + 0.772, + 0.26 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.218, + 0.272, + 0.782, + 0.287 + ], + "angle": 0, + "content": "Fig. 2: Selected five representative image restoration tasks with various degradation." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.313, + 0.388, + 0.329 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.342, + 0.788, + 0.569 + ], + "angle": 0, + "content": "Image restoration networks. In the past years, numerous deep networks have been proposed for various image restoration tasks such as image SR [8,26,59], denoising [43,53,54], deblurring [1,6], deraining [4,29,45] and dehazing [40,41,46]. Initially, most deep networks are designed for specific tasks [3,14,34,55,57]. Recently, with increasing attention to the task generality of networks, more and more methods have been developed to tackle multiple image restoration tasks. For instance, Zamir et al. [49] builds a multi-stage CNN for deraining, deblurring and denoising. Wang et al. [43] designs a U-shape Transformer for deraining, deblurring and denoising. Liang et al. [25] implements a Swin Transformer-based network that achieves state-of-the-art performance on SR, denoising and compression artifact reduction. Zamir et al. [48] proposes a novel transposed self-attention to build a U-shape network for deraining, deblurring and denoising. Chen et al. [5] constructs a U-shape CNN for denoising and deblurring. While existing methods have demonstrated some ability to generalize across several restoration tasks, their task generality remains limited." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.569, + 0.788, + 0.75 + ], + "angle": 0, + "content": "Difference from the previous network design research. While previous works have proposed networks that excel in various image restoration tasks, their primary focus is on constructing stronger networks to achieve performance breakthroughs on specific tasks. In contrast, this work pays more attention to the task generality of the backbone network, possessing a vision different from previous works. More specifically, our objective is to explore the design principles and directions of general image restoration networks. We are not seeking to create powerful networks for peak performance on a single or some specific tasks, but rather to ensure satisfactory performance across a diverse range of tasks. Regarding the concrete implementation, we do not intend to construct complex network architectures or modules. Our preference, rather, is to enhance task generality through the use of the simplest methodology available." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.788, + 0.84 + ], + "angle": 0, + "content": "There are concurrent works that adopt similar ideas for specific image restoration tasks. 
DAT [9] combines spatial-window self-attention and channel-wise self-attention to handle image SR. IPT-V2 [23] designs a spatial-channel Transformer block to build a denoising network and obtains the winner award in the NTIRE 2023 image denoising challenge [23]. However, the motivation and specific network implementation of our work are distinct from these studies." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.374, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "A Comparative Study of Image Restoration Networks" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "5" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.145, + 0.548, + 0.163 + ], + "angle": 0, + "content": "3 Image Restoration Benchmark" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.18, + 0.788, + 0.286 + ], + "angle": 0, + "content": "In this section, we first briefly introduce several image restoration tasks, each with its own representative degradation characteristics. Subsequently, we classify mainstream image restoration networks based on two key aspects: architecture and core operator. On this basis, we select five representative networks and conduct a benchmark experiment across five different tasks. We describe the experimental setup and explain its rationality. Finally, we present the benchmark results and conduct a detailed analysis of them." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.311, + 0.573, + 0.327 + ], + "angle": 0, + "content": "3.1 Overview of Image Restoration Tasks" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.339, + 0.785, + 0.429 + ], + "angle": 0, + "content": "We select five representative tasks for the benchmark experiments. These tasks, exemplified in Fig. 2, are chosen based on two primary reasons. First, they are very common image restoration tasks with widely accepted evaluation schemes. Second, the degradation characteristics of these tasks are diverse and differ greatly from each other. As such, they can provide a robust way to evaluate the task generality of image restoration backbone networks." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.43, + 0.786, + 0.474 + ], + "angle": 0, + "content": "Let \\( I_{GT} \\) denote the ground truth image and \\( I_{LQ} \\) denote the degraded image, where \\( I_{GT} \\in \\mathbb{R}^{H \\times W \\times 3} \\). The degradation model of classic image SR can be represented as:" + }, + { + "type": "equation", + "bbox": [ + 0.431, + 0.477, + 0.785, + 0.492 + ], + "angle": 0, + "content": "\\[\nI _ {L Q} = \\left(I _ {G T} \\otimes k\\right) \\downarrow_ {s}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.501, + 0.785, + 0.578 + ], + "angle": 0, + "content": "where \\( I_{LQ} \\in \\mathbb{R}^{\\frac{H}{s} \\times \\frac{W}{s} \\times 3} \\) represents the low-resolution image. \\( k \\) denotes the bicubic downsampling kernel and \\( \\downarrow_{s} \\) represents the downscaling factor. This degradation is highly correlated to local information and leads to a significant loss of high-frequency information. Thus, SR networks emphasize strong spatial information interaction capability to reconstruct as many details as possible." 
+ }, + { + "type": "text", + "bbox": [ + 0.239, + 0.579, + 0.687, + 0.594 + ], + "angle": 0, + "content": "The degradation model of image denoising can be denoted as:" + }, + { + "type": "equation", + "bbox": [ + 0.447, + 0.609, + 0.785, + 0.623 + ], + "angle": 0, + "content": "\\[\nI _ {L Q} = I _ {G T} + n, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.635, + 0.785, + 0.711 + ], + "angle": 0, + "content": "where \\( n \\in \\mathbb{R}^{H \\times W \\times 3} \\) represents the noise map. For Gaussian denoising, noise values are content-independent. The downsampling-upsampling process of U-shape architecture inherently aids noise removal. Besides, strong spatial information interaction capability can also enhance high-frequency content reconstruction for denoising networks." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.712, + 0.786, + 0.741 + ], + "angle": 0, + "content": "The degradation model of image deblurring (for motion deblurring) can be denoted as:" + }, + { + "type": "equation", + "bbox": [ + 0.411, + 0.742, + 0.785, + 0.771 + ], + "angle": 0, + "content": "\\[\nI _ {L Q} = \\sum_ {t} \\left(f _ {\\text {m o t i o n}} ^ {t} \\left(I _ {G T}\\right)\\right), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.78, + 0.788, + 0.841 + ], + "angle": 0, + "content": "where \\( f_{motion}^{t}(\\cdot) \\) represents the motion function under different continuous exposure times. This degradation is related to the global motion offset of the image. Therefore, the ability to utilize large-range information and even global information is important for deblurring networks." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.364, + 0.128 + ], + "angle": 0, + "content": "X.Chen et al." + }, + { + "type": "image", + "bbox": [ + 0.244, + 0.143, + 0.761, + 0.209 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.308, + 0.218, + 0.695, + 0.232 + ], + "angle": 0, + "content": "Fig. 3: The core operators in image restoration networks." + }, + { + "type": "text", + "bbox": [ + 0.24, + 0.265, + 0.741, + 0.28 + ], + "angle": 0, + "content": "The degradation model of image deraining can be simply denoted as:" + }, + { + "type": "equation", + "bbox": [ + 0.442, + 0.295, + 0.786, + 0.311 + ], + "angle": 0, + "content": "\\[\nI _ {L Q} = I _ {G T} + R, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.324, + 0.784, + 0.4 + ], + "angle": 0, + "content": "where \\( R \\) denotes the additive rain streak, simulated by the physics models, such as [24, 28]. The difference between this degradation and Gaussian noise is that the added \\( R \\) is not evenly distributed on the image and has a correlation with the image content. Complicated rain streaks also places high demands on the complexity of deraining networks." 
+ }, + { + "type": "text", + "bbox": [ + 0.215, + 0.401, + 0.784, + 0.432 + ], + "angle": 0, + "content": "The degradation model of image dehazing, based on the atmospheric scattering model, can be denoted as:" + }, + { + "type": "equation", + "bbox": [ + 0.367, + 0.446, + 0.786, + 0.463 + ], + "angle": 0, + "content": "\\[\nI _ {L Q} = I _ {G T} * t \\left(I _ {G T}\\right) + A \\left(1 - t \\left(I _ {G T}\\right)\\right), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.476, + 0.788, + 0.538 + ], + "angle": 0, + "content": "where \\( t(\\cdot) \\) represents the transmission function and \\( t(I_{GT}) \\) is associated with the distance from the scene point to the camera. This degradation is intrinsically linked to the depth information within the image. Consequently, the incorporation of global information is important for dehazing networks." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.565, + 0.644, + 0.58 + ], + "angle": 0, + "content": "3.2 Characteristics of Typical Backbone Networks" + }, + { + "type": "text", + "bbox": [ + 0.215, + 0.596, + 0.794, + 0.84 + ], + "angle": 0, + "content": "The architectures of mainstream image restoration networks can be broadly classified into three categories: U-shape encoder-decoder, plain residual-in-residual and multi-stage progressive. Schematic diagrams of these architectures are provided in \\( \\text{Supp} \\). The U-shape encoder-decoder architecture performs down-sampling and up-sampling operations on features, enabling networks to handle features of varying scales. This architecture allows networks to accept large-size input, and the effective receptive field of the network expands rapidly with down-sampling. Typical U-shape networks include Uformer [43], Restormer [48]. The multi-stage architecture divides the entire network into several sub-networks and progressively processes features, which are primarily used for image deraining and deblurring. Common networks based on this architecture include MPRNet [49] and HINet [6]. The plain residual-in-residual architecture is composed of several residual groups, each of which consists of several residual blocks. This architecture maintains the original size when processing features, which is favorable for the reconstruction of high-frequency information, but it comes at a high computational cost. Typical networks include RCAN [57] and SwinIR [25]." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.374, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "A Comparative Study of Image Restoration Networks" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.147, + 0.461, + 0.282 + ], + "angle": 0, + "content": "The core operators for constructing an image restoration network can be mainly categorized into three types: convolution, spatial self-attention and transposed self-attention. These operators are shown in Fig. 3. The convolution calculates a fixed-size filter and processes the entire fea" + }, + { + "type": "table_caption", + "bbox": [ + 0.47, + 0.163, + 0.788, + 0.189 + ], + "angle": 0, + "content": "Table 1: Architectures and core operators of the five selected backbone networks." + }, + { + "type": "table", + "bbox": [ + 0.474, + 0.19, + 0.785, + 0.268 + ], + "angle": 0, + "content": "
NetworkArchitectureCore operator
MPRNetMulti-StageConvolution
UformerU-ShapeSpatial self-attention
SwinIRPlain residual-in-residualSpatial self-attention
RestormerU-ShapeTransposed self-attention
NAFNetU-ShapeConvolution
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.283, + 0.788, + 0.465 + ], + "angle": 0, + "content": "ture map through a sliding window, which is the major component of many networks, such as RDN [60]. Spatial self-attention is typically implemented as window self-attention in image restoration tasks. It calculates the attention matrix within a fixed window size, generating content-aware weights that are functionally similar to a large kernel dynamic filter. This operator has strong local fitting ability and shows superior advantages on SR and denoising [7]. Transposed self-attention treats the entire feature of each channel as a token to calculate the attention matrix on the channel dimension. This operator directly deals with global features, and when combined with depth-wise convolution, it shows remarkable performance in multiple restoration tasks [48]. The selected five representative backbone networks for the benchmark experiment encompass the abovementioned architectures and core operators, as presented in Tab. 1." + }, + { + "type": "title", + "bbox": [ + 0.215, + 0.483, + 0.446, + 0.498 + ], + "angle": 0, + "content": "3.3 Experimental Settings" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.503, + 0.788, + 0.67 + ], + "angle": 0, + "content": "For image SR, we conduct experiments on upscaling factor \\(\\times 4\\). We use the DF2K dataset (the same as SwinIR [25]) to train models. Low-resolution images are generated from the ground truth images using bicubic downsampling in MATLAB. For U-shape networks, we first up-sample the input low-resolution images through bilinear interpolation. The performance is reported on the Y channel. For denoising, we adopt the DFWB dataset for training. Noisy images are generated by adding Gaussian noise with a noise level of 50. For deblurring, we use the motion deblurring dataset GoPro [34] to train the models. For deraining, we conduct experiments using the synthetic rain dataset Rain13K and calculate the performance on the Y channel, following Restormer [48]. For dehazing, we use the indoor training set (ITS) of the RESIDE dataset [21], the same as [40]." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.67, + 0.788, + 0.792 + ], + "angle": 0, + "content": "To maximize the capability of these networks, we use the official codes and training configurations provided by different methods to train the models3. Note that all models are trained without using any pre-training strategy (e.g., \\(\\times 2\\) pretraining for SR) or special tricks (e.g., EMA in SwinIR and TLC in NAFNet) for fair comparison. In addition, we find that different methods may not use exactly the same test sets and the same metrics calculation in their papers to report performance. Therefore, we retest all models based on exactly the same data and calculate metrics using the popular open-source toolbox BasicSR [42]." + }, + { + "type": "page_footnote", + "bbox": [ + 0.218, + 0.797, + 0.787, + 0.84 + ], + "angle": 0, + "content": "3 We tried to train all networks with a unified configuration, but find it unreasonable. The performance of networks may vary greatly with different training configurations and optimization strategies, making it difficult to determine a fair unified setting." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.23, + 0.127 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.363, + 0.128 + ], + "angle": 0, + "content": "X.Chen et al." 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.788, + 0.174 + ], + "angle": 0, + "content": "Table 2: Quantitative results on PSNR(dB) of the benchmark experiments. The best and second-best performance results are in **bold** and **underline**." + }, + { + "type": "table", + "bbox": [ + 0.222, + 0.185, + 0.787, + 0.278 + ], + "angle": 0, + "content": "
MethodSRDenoisingDeblurringDerainingDehazing
Set14Urban100CBSD68Urban100GoProHIDETest100Rain100HSOTS Indoor
MPRNet28.9026.8828.4829.7132.6630.9630.2930.4340.34
SwinIR29.0727.4728.5629.8831.6629.4130.0530.4529.14
Uformer27.1425.6028.5529.9833.0530.8927.9324.0633.58
Restormer29.0627.3228.6030.0232.9231.2232.0331.4841.87
NAFNet29.0327.0028.5229.6533.0831.2230.3332.8338.97
" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.304, + 0.423, + 0.318 + ], + "angle": 0, + "content": "3.4 Benchmark Results" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.328, + 0.788, + 0.525 + ], + "angle": 0, + "content": "We present the quantitative results of the benchmark experiments in Tab. 2. (Due to space constraints, complete results are provided in \\( \\text{Supp.} \\)) Several important observations can be made from the results: 1) Different networks exhibit varying performance on different tasks. For instance, SwinIR performs best on SR but worst on deblurring and dehazing. Uformer excels on denoising and deblurring but performs poorly on deraining and SR. 2) Networks with U-shape and multi-stage architectures present clear advantages on deblurring and dehazing. 3) MPRNet and NAFNet, which are mainly based on convolution operators, exhibit moderate performance across all tasks without outstanding results. 4) SwinIR, which employs plain architecture and spatial self-attention operators, outperforms other networks by a significant margin on SR. 5) The overall performance of Restormer is outstanding. Except for consistently being weaker than SwinIR on SR, it obtains considerable performance on almost all other tasks." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.545, + 0.334, + 0.56 + ], + "angle": 0, + "content": "3.5 Analysis" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.569, + 0.787, + 0.597 + ], + "angle": 0, + "content": "In this section, we explain the above observations by analyzing the characteristics of different tasks and backbone networks." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.599, + 0.788, + 0.749 + ], + "angle": 0, + "content": "The degradation of SR lies in the compression of local information, resulting in a large loss of high-frequency details. Therefore, SR networks often require strong spatial information interaction capability, or even generative capability. The U-shape architecture, which incorporates multiple downsampling operations, may undermine the reconstruction of high-frequency information and intuitively escalates the difficulty of detail reconstruction. In contrast, the plain architecture that maintains feature sizes benefits SR. Besides, window self-attention has demonstrated a superior local fitting ability than convolution [8]. As a result, SwinIR, which is based on a plain structure and employs spatial self-attention operators, exhibits a distinct advantage on SR." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.75, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Denoising entails smoothing the image to eliminate high-frequency noise and integrating low-frequency information to reconstruct a clear image. This task places no explicit unique requirement for the network, while its performance intuitively benefits from effective spatial information interaction. 
It is conjectured that the high performance of Restormer on denoising can be attributed to its ability to better smooth noise through channel-wise processing, akin to operating" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.374, + 0.115, + 0.733, + 0.13 + ], + "angle": 0, + "content": "A Comparative Study of Image Restoration Networks" + }, + { + "type": "page_number", + "bbox": [ + 0.775, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "9" + }, + { + "type": "image", + "bbox": [ + 0.217, + 0.142, + 0.788, + 0.321 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.33, + 0.788, + 0.374 + ], + "angle": 0, + "content": "Fig. 4: Visual and LAM [16] comparisons between Restormer and SwinIR. The LAM results and DI values indicate that Restormer exploits significantly more information than SwinIR. However, SwinIR reconstructs much more details than Restormer." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.408, + 0.785, + 0.438 + ], + "angle": 0, + "content": "in the frequency domain. In contrast, SwinIR and Uformer perform well due to their robust spatial information interaction ability of the spatial self-attention." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.44, + 0.785, + 0.531 + ], + "angle": 0, + "content": "Deblurring (specifically for motion blur here) involves addressing global motion shifts in the image. As a result, the ability to handle large-size inputs and the use of global or multi-scale information are necessary for deblurring networks. Thus, the networks based on the U-shape architecture all perform well on this task. Conversely, SwinIR, which employs the plain architecture and focuses more on local information processing, performs much worse than other networks." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.533, + 0.788, + 0.64 + ], + "angle": 0, + "content": "Similar phenomena can be observed for dehazing. Due to the involvement of the depth information in the haze model, the ability to use large-range or even global information is crucial. Besides, dehazing networks are required to handle low-frequency transformations, including alterations in color and contrast, both of which constitute global mappings. Therefore, SwinIR and Uformer, which rely more on local spatial information interaction, perform poorly on this task. On the contrary, Restormer exhibits exceptional performance." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.642, + 0.788, + 0.703 + ], + "angle": 0, + "content": "Deraining is relatively unique in that the rain is unevenly distributed in images, with significant differences between different raindrops and streaks. Thus, there is no clear pattern in the performance of different networks on deraining. Nevertheless, networks with higher complexity present better performance." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.705, + 0.788, + 0.841 + ], + "angle": 0, + "content": "Based on the above results and analysis, we can infer that the acceptable performance of a backbone network on a specific task is predicated on meeting the functional requirements of that task. It is notable that Restormer obtains exceptional task generality. This can be attributed to several factors: 1) The U-shape architecture enables the network to accommodate large-size input. 2) The transposed self-attention allows direct interaction of global information. 3) The presence of depth-wise convolution enables the network to process spatial information effectively. 
In summary, due to Restormer's comprehensive functionality, it is capable of meeting the diverse requirements of different tasks." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.364, + 0.127 + ], + "angle": 0, + "content": "X.Chen et al." + }, + { + "type": "image", + "bbox": [ + 0.222, + 0.144, + 0.784, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.319, + 0.788, + 0.391 + ], + "angle": 0, + "content": "Fig. 5: The network structure of X-Restormer. To enhance the spatial mapping ability of Restormer and create a more general network, we replace half of the transposed self-attention blocks in Restormer with spatial self-attention blocks. For TSA, we retain the preliminary multi-Dconv head transposed attention (MDTA) used in Restormer. For SSA, we adopt the overlapping cross-attention (OCA) in HAT [8]." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.417, + 0.593, + 0.435 + ], + "angle": 0, + "content": "4 General Backbone Network Design" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.448, + 0.788, + 0.538 + ], + "angle": 0, + "content": "Based on the benchmark experiments, we believe that the principle of designing a general backbone network should be to ensure that the network can fulfill the functional requirements of all tasks. As Restormer shows relatively good task generality, we select it as the starting point to design a more general network. By pinpointing and addressing the limitation of Restormer, we present an initial version of a general image restoration backbone network in this section." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.539, + 0.789, + 0.78 + ], + "angle": 0, + "content": "Limitation of Restormer. In the benchmark experiments, Restormer shows inferior performance to SwinIR on SR, particularly on Urban100. The qualitative comparisons also indicate this phenomenon in Fig. 4. From the visual and LAM [16] results, We can observe that Restormer can exploit large-range and even global information for the reconstruction. However, compared to SwinIR, it fails to reconstruct fine textures, even for self-repeated patterns. This discrepancy can be attributed to the U-shape architecture adopted by Restormer on the one hand, which increases the difficulty of reconstructing high-frequency information. On the other hand, Restormer relies on depth-wise convolution for spatial information interaction, whose spatial mapping capability is relatively weaker than the spatial self-attention in SwinIR. Considering that the U-shape architecture is indispensable for some tasks, we still need to retain this architectural design for task generality. To overcome the limitation of Restormer and design a more powerful backbone network, we choose to further enhance its spatial information interaction ability. An intuitive and feasible solution is to incorporate the spatial self-attention module into Restormer." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.78, + 0.789, + 0.842 + ], + "angle": 0, + "content": "Network structure. In Fig. 5, we present the structure of our proposed backbone network, denoted as X-Restormer. We choose the U-shape architecture to build the network. 
In contrast to Restormer, we replace half of the transposed self-attention blocks (TSAB) with spatial self-attention blocks (SSAB) to" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.374, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "A Comparative Study of Image Restoration Networks" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.784, + 0.127 + ], + "angle": 0, + "content": "11" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.147, + 0.784, + 0.178 + ], + "angle": 0, + "content": "enhance the ability of spatial information interaction. Given an input feature \\( F_{in} \\), the two blocks process it alternately as:" + }, + { + "type": "equation", + "bbox": [ + 0.405, + 0.185, + 0.785, + 0.2 + ], + "angle": 0, + "content": "\\[\nF_{t} = F_{in} + TSA(LN(F_{in})), \\tag{6}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.397, + 0.203, + 0.785, + 0.218 + ], + "angle": 0, + "content": "\\[\nF_{t\\_out} = F_{t} + FFN(LN(F_{t})), \\tag{7}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.387, + 0.222, + 0.784, + 0.238 + ], + "angle": 0, + "content": "\\[\nF_{s} = F_{t\\_out} + SSA(LN(F_{t\\_out})), \\tag{8}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.401, + 0.241, + 0.784, + 0.256 + ], + "angle": 0, + "content": "\\[\nF_{out} = F_{s} + FFN(LN(F_{s})), \\tag{9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.261, + 0.785, + 0.351 + ], + "angle": 0, + "content": "where \\( F_{t} \\), \\( F_{t\\_out} \\), \\( F_{s} \\) and \\( F_{out} \\) represent the intermediate feature in TSAB, the output of TSAB, the intermediate feature in SSAB and the output of SSAB, respectively. \\( F_{out} \\) is also the output of the two consecutive blocks, and serves as the input for the following two blocks. \\( TSA(\\cdot) \\) and \\( SSA(\\cdot) \\) indicate the transposed self-attention (TSA) and spatial self-attention (SSA) modules. \\( LN(\\cdot) \\) denotes layer normalization and \\( FFN(\\cdot) \\) represents the feed-forward network." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.352, + 0.787, + 0.653 + ], + "angle": 0, + "content": "Specifically, we adopt the Multi-Dconv Transpose Attention (MDTA) as the TSA module. It first generates query \\((Q)\\), key \\((K)\\) and value \\((V)\\) by applying \\(1 \\times 1\\) convolutions followed by \\(3 \\times 3\\) depth-wise convolutions. Then, the channel attention matrix in \\(\\mathbb{R}^{C \\times C}\\) is calculated by the dot-product of reshaped \\(Q\\) and \\(K\\) followed by a Softmax function. The schematic of TSA is shown in Fig. 3. Finally, the result is generated by the dot-product of the attention matrix and \\(V\\). For SSA, we adopt the Overlapping Cross-Attention (OCA) introduced in the HAT model [8]. We choose OCA because the shifted window mechanism in SwinIR is not intuitively suitable for our TSA-SSA consecutive blocks, and HAT demonstrates the effectiveness and superiority of OCA. For the specific calculation, \\(Q\\) is produced by partitioning the input into non-overlapping windows, while \\(K\\) and \\(V\\) are generated by partitioning the input into overlapping windows with a manually set overlapping size. Apart from the different window partition methods, the calculation of OCA is essentially identical to that of standard window self-attention. For FFN, we employ the Gated-Dconv Feed-forward Network (GDFN) architecture, as used in Restormer. 
Instead of using two \\(1 \\times 1\\) convolutions to construct an MLP, GDFN first processes input features through two \\(3 \\times 3\\) depth-wise convolutions and \\(1 \\times 1\\) convolutions. Then, the resulting features are combined via element-wise multiplication and pass through another \\(1 \\times 1\\) convolution to produce the final output." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.654, + 0.785, + 0.73 + ], + "angle": 0, + "content": "We have also tried multiple design choices for SSAB and TSAB. Experiments can be found in \\( \\text{Supp} \\). We emphasize that our design of X-Restormer is not to develop novel architectures or modules to improve the performance on certain tasks, but to enhance the task generality of the network according to the principle of general backbone network design through as simple means as possible." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.748, + 0.376, + 0.765 + ], + "angle": 0, + "content": "5 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.775, + 0.428, + 0.79 + ], + "angle": 0, + "content": "5.1 Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.795, + 0.785, + 0.84 + ], + "angle": 0, + "content": "We conduct experiments of the proposed X-Restormer on the same datasets used in the benchmark experiment. For the network implementation, the network employs a 4-level encoder-decoder with three times down-sampling and" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "12" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.364, + 0.128 + ], + "angle": 0, + "content": "X.Chen et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.145, + 0.525, + 0.173 + ], + "angle": 0, + "content": "Table 3: Quantitative results on \\( \\times 4 \\) image SR. * means the model pretrained on \\( \\times 2\\mathrm{{SR}} \\) ." + }, + { + "type": "table_caption", + "bbox": [ + 0.527, + 0.145, + 0.787, + 0.173 + ], + "angle": 0, + "content": "Table 4: Quantitative results on image denoising with the noise level \\(\\sigma = 50\\)" + }, + { + "type": "table", + "bbox": [ + 0.22, + 0.186, + 0.526, + 0.266 + ], + "angle": 0, + "content": "
| Model | Set5 | Set14 | BSD100 | Urban100 | Manga109 |
| --- | --- | --- | --- | --- | --- |
| RCAN | 32.63/0.9002 | 28.87/0.7889 | 27.77/0.7436 | 26.82/0.8087 | 31.22/0.9173 |
| RCAN-it | 32.69/0.9007 | 28.99/0.7922 | 27.87/0.7459 | 27.16/0.8168 | 31.78/0.9217 |
| SwinIR* | 32.92/0.9044 | 29.09/0.7950 | 27.92/0.7489 | 27.45/0.8254 | 32.03/0.9260 |
| IPT | 32.64/- | 29.01/- | 27.82/- | 27.26/- | -/- |
| EDT | 32.82/0.9031 | 29.09/0.7939 | 27.91/0.7483 | 27.46/0.8246 | 32.05/0.9254 |
| NAFNet | 32.79/0.9010 | 29.03/0.7919 | 27.86/0.7463 | 27.00/0.8112 | 31.77/0.9216 |
| SwinIR | 32.88/0.9041 | 29.07/0.7944 | 27.93/0.7490 | 27.47/0.8258 | 31.96/0.9255 |
| Restormer | 32.94/0.9039 | 29.06/0.7934 | 27.91/0.7482 | 27.32/0.8199 | 31.96/0.9244 |
| X-Restormer | 33.16/0.9058 | 29.17/0.7963 | 28.00/0.7512 | 27.66/0.8291 | 32.38/0.9279 |
" + }, + { + "type": "table", + "bbox": [ + 0.529, + 0.186, + 0.787, + 0.266 + ], + "angle": 0, + "content": "
| Model | CBSD68 | Kodak24 | McMaster | Urban100 |
| --- | --- | --- | --- | --- |
| FFDNet | 27.96/- | 28.98/- | 29.18/- | 28.05/- |
| RNAN | 28.27/- | 29.58/- | 29.72/- | 29.08/- |
| RDN | 28.31/- | 29.66/- | -/- | 29.38/- |
| IPT | 28.39/- | 29.64/- | 29.98/- | 29.71/- |
| DRUNet | 28.51/- | 29.86/- | 30.08/- | 29.61/- |
| SwinIR | 28.56/0.8118 | 29.95/0.8221 | 30.20/0.8489 | 29.88/0.8861 |
| Uformer | 28.55/0.8130 | 29.97/0.8244 | 30.16/0.8485 | 29.98/0.8900 |
| Restormer | 28.60/0.8130 | 30.01/0.8237 | 30.30/0.8517 | 30.02/0.8898 |
| X-Restormer | 28.63/0.8138 | 30.05/0.8245 | 30.33/0.8518 | 30.24/0.8928 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.278, + 0.786, + 0.307 + ], + "angle": 0, + "content": "Table 5: Quantitative results on image Table 6: Quantitative results on image deraindeblurring (motion blur). ing." + }, + { + "type": "table", + "bbox": [ + 0.22, + 0.319, + 0.787, + 0.398 + ], + "angle": 0, + "content": "
| Model | GoPro | HIDE | RealBlur-R | RealBlur-J |
| --- | --- | --- | --- | --- |
| SPAIR | 32.06/0.953 | 30.29/0.931 | -/- | 28.81/0.875 |
| MIMO-UNet+ | 32.45/0.957 | 29.99/0.930 | 35.54/0.947 | 27.63/0.837 |
| IPT | 32.52/- | -/- | -/- | -/- |
| MPRNet | 32.66/0.959 | 30.96/0.939 | 35.99/0.952 | 28.70/0.873 |
| Uformer | 33.05/0.942 | 30.89/0.920 | 36.19/0.956 | 29.09/0.886 |
| NAFNet | 33.08/0.942 | 31.22/0.924 | 35.97/0.952 | 28.32/0.857 |
| Restormer | 32.92/0.940 | 31.22/0.923 | 36.19/0.957 | 28.96/0.879 |
| X-Restormer | 33.44/0.946 | 31.76/0.930 | 36.27/0.958 | 28.87/0.878 |
" + }, + { + "type": "table_caption", + "bbox": [ + 0.335, + 0.41, + 0.667, + 0.424 + ], + "angle": 0, + "content": "Table 7: Quantitative results on image dehazing." + }, + { + "type": "table", + "bbox": [ + 0.221, + 0.438, + 0.787, + 0.468 + ], + "angle": 0, + "content": "
| Model | PFDN | FFA-Net | AECR-Net | MAXIM | DehazeFormer | MPRNet | NAFNet | Restormer | X-Restormer |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| SOTS Indoor | 32.68/0.976 | 36.39/0.989 | 37.17/0.990 | 39.72/- | 40.05/0.996 | 40.34/0.994 | 38.97/0.994 | 41.97/0.994 | 42.90/0.995 |
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.494, + 0.788, + 0.722 + ], + "angle": 0, + "content": "up-sampling. To maintain a similar number of parameters as Restormer, from level-1 to level-4 (i.e., \\( L_{1} \\sim L_{4} \\) in the figure) the numbers of consecutive blocks (containing a TSAB and a SSAB) are [2, 4, 4, 4] and the number of refinement blocks (i.e., \\( L_{r} \\)) is 4. Attention heads in TSA and SSA are both [1, 2, 4, 8], and channel numbers are [48, 96, 192, 384]. For OCA, the window size and the overlapping ratio are set to 8 and 0.5 as in HAT. The channel expansion factor in GDFN is 2.66. The overall parameters are 26.06M, slightly less than Restormer of 26.13M. We adopt the same training settings as Restormer in the benchmark experiment to optimize the model. We use the AdamW optimizer with \\( \\beta_{1} = 0.9 \\) and \\( \\beta_{2} = 0.99 \\), utilizing an initial learning rate of \\( 3e^{-4} \\). The learning rate decay follows a cosine scheduler with intervals at 92k and 208k iterations, and the total training iterations are 300K. The input patch size is \\( 256 \\times 256 \\) and the batch size is 32. For data augmentation, we use horizontal and vertical flips. We utilize the \\( L_{1} \\) loss function to train the model. Notably, we do not adopt any training tricks (e.g., \\( \\times 2 \\) SR pretraining or EMA strategy) or testing tricks (e.g., TLC [11])." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.741, + 0.442, + 0.756 + ], + "angle": 0, + "content": "5.2 Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.765, + 0.787, + 0.841 + ], + "angle": 0, + "content": "We compare our X-Restormer with the top three models in the benchmark experiments (based on the same test configurations) as well as several state-of-the-art approaches for each task (based on the reported performance in their papers) in this section. PSNR(dB)/SSIM is provided in following tables. The best and second-best performance results are in **bold** and **underline**." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.374, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "A Comparative Study of Image Restoration Networks" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.116, + 0.786, + 0.127 + ], + "angle": 0, + "content": "13" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.146, + 0.79, + 0.328 + ], + "angle": 0, + "content": "Image SR. In Tab. 3, we present the quantitative results of \\(\\times 4\\) SR on five benchmark datasets: Set5 [2], Set14 [50], BSD100 [31], Urban100 [18] and Manga109 [33]. The state-of-the-art approaches, including RCAN [57], RCAN-it [27], SwinIR [35], IPT [4] and EDT [22] are compared in this experiment. X-Restormer significantly outperforms Restormer by \\(0.22\\mathrm{dB}\\) on Set5, \\(0.34\\mathrm{dB}\\) on Urban100 and \\(0.42\\mathrm{dB}\\) on Manga109. This demonstrates the effectiveness of our design in enhancing the spatial mapping ability of Restormer. Furthermore, X-Restormer surpasses the SOTA method EDT by \\(0.2\\mathrm{dB}\\) on Urban100 and \\(0.35\\mathrm{dB}\\) on Manga109, indicating the effectiveness of X-Restormer on SR. Despite this, we point out that our method still cannot beat the most powerful SR approaches, e.g., HAT. This is due to the inevitable weakening of SR performance for the U-shape architecture. In terms of SR, the plain residual in residual architecture is still more effective." 
+ }, + { + "type": "text", + "bbox": [ + 0.214, + 0.331, + 0.788, + 0.436 + ], + "angle": 0, + "content": "Image denoising. In Tab. 4, we provide the quantitative results of Gaussian denoising with the noise level \\(\\sigma = 50\\) on four benchmark datasets: CBSD68 [32], Kodak24 [13], McMaster [56] and Urban100 [18]. The state-of-the-art methods: FFDNet [55], RNAN [58], RDN [60], IPT [4] and DRUNet [52] are compared in this experiment. X-Restormer achieves the state-of-the-art performance, surpassing SwinIR by 0.36dB and outperforming Restormer by 0.22dB on Urban100. This demonstrates the superiority of X-Restormer on image denoising." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.439, + 0.788, + 0.575 + ], + "angle": 0, + "content": "Image deblurring. In Tab. 5, we compare the results of X-Restormer with the state-of-the-art methods: SPAIR [36], MIMO-UNet+ [10], IPT [4] and MPR-Net [49] on both synthetic datasets (Gopro [34] and HIDE [39]) and real-world datasets (RealBlur-R and RealBlur-J [38]). X-Restormer achieves large performance gains over the other models on synthetic datasets, with an improvement of \\(0.36\\mathrm{dB}\\) on Gopro compared to \\(\\mathrm{NAFNet}^4\\) and \\(0.54\\mathrm{dB}\\) on HIDE compared to Restormer. Besides, our X-Restormer obtains the state-of-the-art performance on RealBlur-R and considerable performance on RealBlur-J, showing the effectiveness of our method on real-world motion deblurring scenarios." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.577, + 0.788, + 0.699 + ], + "angle": 0, + "content": "Image deraining. In Tab. 6, we present the quantitative results of deraining on Test100 [19], Rain100L [47], Rain100H [47], Test1200 [51] and Test2800 [15]. The state-of-the-art methods: PreNet [37], MSPFN [20], MPRNet [49] and SPAIR [36] are compared. X-Restormer outperforms the other models on Test100, Rain100H and Rain100L but performs inferior to Restormer on Test1200 and Test2800. This discrepancy is due to the variations in degradation produced by different rain models. Nonetheless, X-Restormer exhibits comparable performance to state-of-the-art methods, showing its effectiveness on image deraining." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.701, + 0.788, + 0.807 + ], + "angle": 0, + "content": "Image dehazing. In Tab. 7, we provide the quantitative results on SOTS Indoor [21]. We compare the state-of-the-art approaches: PFDN [12], FFA-Net [46], AECR-Net [44], MAXIM [41] and DehazeFormer [40] in this experiment. Notably, X-Restormer model significantly outperforms Restormer by a large margin of \\(0.93\\mathrm{dB}\\). When compared to the state-of-the-art dehazing method DehazeFormer, our method achieves a breakthrough performance gain of \\(2.85\\mathrm{dB}\\). These results demonstrate the superiority of X-Restormer for image dehazing." + }, + { + "type": "page_footnote", + "bbox": [ + 0.218, + 0.825, + 0.782, + 0.84 + ], + "angle": 0, + "content": "4 By using TLC, on Gopro/HIDE, NAFNet: 33.69/31.32, X-Restormer: 33.89/31.87." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "14" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.362, + 0.127 + ], + "angle": 0, + "content": "X.Chen et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.312, + 0.145, + 0.688, + 0.159 + ], + "angle": 0, + "content": "Table 8: Quantitative results on All-in-One restoration." 
+ }, + { + "type": "table", + "bbox": [ + 0.22, + 0.172, + 0.788, + 0.254 + ], + "angle": 0, + "content": "
| Model | SR ×2 | SR ×4 | Denoising σ=15 | Denoising σ=25 | Denoising σ=50 | Deblurring | Deraining | Dehazing |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| MPRNet | 33.68/0.9300 | 28.17/0.8043 | 34.27/0.9280 | 31.82/0.8901 | 28.60/0.8119 | 30.00/0.8812 | 31.20/0.9068 | 35.06/0.9874 |
| SwinIR | 33.83/0.9301 | 28.14/0.8043 | 34.27/0.9283 | 31.83/0.8906 | 28.59/0.8143 | 29.06/0.8519 | 30.03/0.8983 | 31.48/0.9823 |
| Uformer | 29.99/0.8805 | 27.88/0.7949 | 33.86/0.9254 | 31.42/0.8863 | 27.87/0.7891 | 29.64/0.8725 | 27.53/0.8569 | 29.92/0.9714 |
| Restormer | 34.51/0.9341 | 28.70/0.8179 | 34.43/0.9303 | 32.02/0.8942 | 28.87/0.8222 | 30.54/0.8902 | 31.91/0.9134 | 36.95/0.9897 |
| NAFNet | 34.12/0.9314 | 28.17/0.8087 | 34.18/0.9281 | 31.76/0.8908 | 28.64/0.8187 | 30.38/0.8911 | 31.56/0.9149 | 30.84/0.9797 |
| X-Restormer | 34.72/0.9360 | 28.81/0.8217 | 34.67/0.9330 | 32.26/0.8983 | 29.12/0.8293 | 30.85/0.8983 | 32.27/0.9229 | 38.24/0.9914 |
" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.283, + 0.787, + 0.509 + ], + "angle": 0, + "content": "All-in-One Restoration. We conduct experiments on an all-in-one restoration setting to show the effectiveness of different backbone networks in addressing various tasks simultaneously. Networks are trained on five tasks with varying degradation levels (i.e., \\(\\times 2\\), \\(\\times 4\\) for SR and \\(\\sigma \\in (0,50)\\) random level for denoising). The sampling probability for each task is the same, and the average performance on benchmark datasets is calculated. As shown in Tab. 8, with the relatively better task generality among the existing networks, Restormer exhibits better performance on the all-in-one restoration. By overcoming the limitation of Restormer, our X-Restormer demonstrates further advantages in handling multiple tasks concurrently, with its performance far exceeding other networks on all tasks. In contrast, the other networks are more or less affected by optimization conflicts across different tasks (e.g., SwinIR performs inferior to Restormer even on SR). These indicate that a general backbone network is of great significance for building a general model that process multiple image restoration tasks, which can effectively mitigate task conflicts with the performance drops." + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.51, + 0.787, + 0.661 + ], + "angle": 0, + "content": "Summary. With enhanced spatial mapping capability, our X-Restormer can significantly outperform Restormer. Specifically, X-Restormer obtains performance gains against Restormer of 0.42dB (Manga109), 0.22dB (Urban100), 0.54dB (HIDE), 0.61dB (Rain100H) and 0.93dB (SOTS Indoor) on image SR, denoising, deblurring, deraining and dehazing, respectively, showing the effectiveness of our design. Despite its simplicity, X-Restormer obtains state-of-the-art performance on all these five tasks and present the best task generality among the compared methods. Furthermore, we show that a more general backbone network can also better handle multiple restoration tasks simultaneously. We hope it can inspire more works on the general image restoration backbone network design." + }, + { + "type": "title", + "bbox": [ + 0.216, + 0.686, + 0.36, + 0.703 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.72, + 0.787, + 0.841 + ], + "angle": 0, + "content": "In this paper, we conduct a comparative study of existing image restoration backbone networks to design a general backbone network. Five representative networks are chosen for the benchmark experiment across selected five tasks. The results indicate that comprehensive functionality is crucial for designing a general restoration backbone network. We select Restormer as the baseline and introduce spatial self-attention into it to enhance the spatial information interaction capability. Experimental results show that our X-Restormer achieves significant performance improvement and presents the best task generality." 
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.374, + 0.115, + 0.732, + 0.129 + ], + "angle": 0, + "content": "A Comparative Study of Image Restoration Networks" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "15" + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.145, + 0.403, + 0.163 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.174, + 0.787, + 0.234 + ], + "angle": 0, + "content": "This work was partially supported by National Natural Science Foundation of China (Grant No.62276251, 62272450), and the Joint Lab of CAS-HK. This work was also supported in part by Macau Science and Technology Development Fund under SKLIOTSC-2021-2023 and 0022/2022/A." + }, + { + "type": "title", + "bbox": [ + 0.218, + 0.255, + 0.323, + 0.271 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.283, + 0.785, + 0.31 + ], + "angle": 0, + "content": "1. Abuolaim, A., Brown, M.S.: Defocus deblurring using dual-pixel data. In: European Conference on Computer Vision. pp. 111-126. Springer (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.311, + 0.785, + 0.351 + ], + "angle": 0, + "content": "2. Bevilacqua, M., Roumy, A., Guillemot, C., Morel, M.L.A.: Low-complexity single-image super-resolution based on nonnegative neighbor embedding. In: British Machine Vision Conference (BMVC) (2012)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.352, + 0.785, + 0.391 + ], + "angle": 0, + "content": "3. Cai, B., Xu, X., Jia, K., Qing, C., Tao, D.: Dehazenet: An end-to-end system for single image haze removal. IEEE transactions on image processing 25(11), 5187-5198 (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.392, + 0.785, + 0.433 + ], + "angle": 0, + "content": "4. Chen, H., Wang, Y., Guo, T., Xu, C., Deng, Y., Liu, Z., Ma, S., Xu, C., Xu, C., Gao, W.: Pre-trained image processing transformer. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 12299-12310 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.434, + 0.785, + 0.459 + ], + "angle": 0, + "content": "5. Chen, L., Chu, X., Zhang, X., Sun, J.: Simple baselines for image restoration. In: European Conference on Computer Vision. pp. 17-33. Springer (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.46, + 0.785, + 0.5 + ], + "angle": 0, + "content": "6. Chen, L., Lu, X., Zhang, J., Chu, X., Chen, C.: Hinet: Half instance normalization network for image restoration. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 182-192 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.501, + 0.785, + 0.541 + ], + "angle": 0, + "content": "7. Chen, X., Wang, X., Zhang, W., Kong, X., Qiao, Y., Zhou, J., Dong, C.: Hat: Hybrid attention transformer for image restoration. arXiv preprint arXiv:2309.05239 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.542, + 0.785, + 0.581 + ], + "angle": 0, + "content": "8. Chen, X., Wang, X., Zhou, J., Qiao, Y., Dong, C.: Activating more pixels in image super-resolution transformer. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 22367-22377 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.582, + 0.785, + 0.623 + ], + "angle": 0, + "content": "9. 
Chen, Z., Zhang, Y., Gu, J., Kong, L., Yang, X., Yu, F.: Dual aggregation transformer for image super-resolution. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 12312-12321 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.623, + 0.785, + 0.663 + ], + "angle": 0, + "content": "10. Cho, S.J., Ji, S.W., Hong, J.P., Jung, S.W., Ko, S.J.: Rethinking coarse-to-fine approach in single image deblurring. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 4641-4650 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.664, + 0.785, + 0.704 + ], + "angle": 0, + "content": "11. Chu, X., Chen, L., Chen, C., Lu, X.: Improving image restoration by revisiting global information aggregation. In: European Conference on Computer Vision. pp. 53-71. Springer (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.705, + 0.785, + 0.731 + ], + "angle": 0, + "content": "12. Dong, J., Pan, J.: Physics-based feature dehazing networks. In: European Conference on Computer Vision. pp. 188-204. Springer (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.732, + 0.785, + 0.758 + ], + "angle": 0, + "content": "13. Franzen, R.: Kodak lossless true color image suite. source: http://r0k.us/graphics/kodak 4(2) (1999)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.759, + 0.785, + 0.799 + ], + "angle": 0, + "content": "14. Fu, X., Huang, J., Zeng, D., Huang, Y., Ding, X., Paisley, J.: Removing rain from single images via a deep detail network. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3855-3863 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.226, + 0.8, + 0.785, + 0.84 + ], + "angle": 0, + "content": "15. Fu, X., Huang, J., Zeng, D., Huang, Y., Ding, X., Paisley, J.: Removing rain from single images via a deep detail network. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3855-3863 (2017)" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.283, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "16" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.362, + 0.127 + ], + "angle": 0, + "content": "X.Chen et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.148, + 0.785, + 0.189 + ], + "angle": 0, + "content": "16. Gu, J., Dong, C.: Interpreting super-resolution networks with local attribution maps. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9199-9208 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.189, + 0.785, + 0.23 + ], + "angle": 0, + "content": "17. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.23, + 0.785, + 0.271 + ], + "angle": 0, + "content": "18. Huang, J.B., Singh, A., Ahuja, N.: Single image super-resolution from transformed self-exemplars. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 5197-5206 (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.271, + 0.785, + 0.311 + ], + "angle": 0, + "content": "19. Huang, J.B., Singh, A., Ahuja, N.: Single image super-resolution from transformed self-exemplars. 
In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 5197-5206 (2015)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.311, + 0.785, + 0.366 + ], + "angle": 0, + "content": "20. Jiang, K., Wang, Z., Yi, P., Chen, C., Huang, B., Luo, Y., Ma, J., Jiang, J.: Multiscale progressive fusion network for single image deraining. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8346-8355 (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.366, + 0.785, + 0.406 + ], + "angle": 0, + "content": "21. Li, B., Ren, W., Fu, D., Tao, D., Feng, D., Zeng, W., Wang, Z.: Benchmarking single-image dehazing and beyond. IEEE Transactions on Image Processing 28(1), 492-505 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.406, + 0.785, + 0.433 + ], + "angle": 0, + "content": "22. Li, W., Lu, X., Qian, S., Lu, J., Zhang, X., Jia, J.: On efficient transformer-based image pre-training for low-level vision. arXiv preprint arXiv:2112.10175 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.433, + 0.785, + 0.488 + ], + "angle": 0, + "content": "23. Li, Y., Zhang, Y., Timofte, R., Van Gool, L., Tu, Z., Du, K., Wang, H., Chen, H., Li, W., Wang, X., et al.: Ntire 2023 challenge on image denoising: Methods and results. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 1904-1920 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.488, + 0.785, + 0.528 + ], + "angle": 0, + "content": "24. Li, Y., Tan, R.T., Guo, X., Lu, J., Brown, M.S.: Rain streak removal using layer priors. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2736-2744 (2016)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.528, + 0.785, + 0.569 + ], + "angle": 0, + "content": "25. Liang, J., Cao, J., Sun, G., Zhang, K., Van Gool, L., Timofte, R.: Swinir: Image restoration using swin transformer. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 1833-1844 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.569, + 0.785, + 0.609 + ], + "angle": 0, + "content": "26. Lim, B., Son, S., Kim, H., Nah, S., Mu Lee, K.: Enhanced deep residual networks for single image super-resolution. In: Proceedings of the IEEE conference on computer vision and pattern recognition workshops. pp. 136-144 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.609, + 0.785, + 0.65 + ], + "angle": 0, + "content": "27. Lin, Z., Garg, P., Banerjee, A., Magid, S.A., Sun, D., Zhang, Y., Van Gool, L., Wei, D., Pfister, H.: Revisiting rcan: Improved training for image super-resolution (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.65, + 0.785, + 0.691 + ], + "angle": 0, + "content": "28. Liu, J., Yang, W., Yang, S., Guo, Z.: Erase or fill? deep joint recurrent rain removal and reconstruction in videos. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3233-3242 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.691, + 0.785, + 0.732 + ], + "angle": 0, + "content": "29. Liu, L., Xie, L., Zhang, X., Yuan, S., Chen, X., Zhou, W., Li, H., Tian, Q.: Tape: Task-agnostic prior embedding for image restoration. In: European Conference on Computer Vision. pp. 447-464. Springer (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.732, + 0.785, + 0.786 + ], + "angle": 0, + "content": "30. 
Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10012-10022 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.217, + 0.786, + 0.785, + 0.84 + ], + "angle": 0, + "content": "31. Martin, D., Fowlkes, C., Tal, D., Malik, J.: A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In: Proceedings of the IEEE International Conference on Computer Vision. vol. 2, pp. 416-423. IEEE (2001)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.148, + 0.785, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.374, + 0.115, + 0.733, + 0.129 + ], + "angle": 0, + "content": "A Comparative Study of Image Restoration Networks" + }, + { + "type": "page_number", + "bbox": [ + 0.769, + 0.117, + 0.785, + 0.127 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.204 + ], + "angle": 0, + "content": "32. Martin, D., Fowlkes, C., Tal, D., Malik, J.: A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In: Proceedings of the IEEE International Conference on Computer Vision. vol. 2, pp. 416-423. IEEE (2001)" + }, + { + "type": "ref_text", + "bbox": [ + 0.214, + 0.205, + 0.787, + 0.245 + ], + "angle": 0, + "content": "33. Matsui, Y., Ito, K., Aramaki, Y., Fujimoto, A., Ogawa, T., Yamasaki, T., Aizawa, K.: Sketch-based manga retrieval using manga109 dataset. Multimedia Tools and Applications 76(20), 21811-21838 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.246, + 0.786, + 0.286 + ], + "angle": 0, + "content": "34. Nah, S., Hyun Kim, T., Mu Lee, K.: Deep multi-scale convolutional neural network for dynamic scene deblurring. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3883-3891 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.287, + 0.787, + 0.328 + ], + "angle": 0, + "content": "35. Niu, B., Wen, W., Ren, W., Zhang, X., Yang, L., Wang, S., Zhang, K., Cao, X., Shen, H.: Single image super-resolution via a holistic attention network. In: European Conference on Computer Vision. pp. 191-207. Springer (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.328, + 0.787, + 0.37 + ], + "angle": 0, + "content": "36. Purohit, K., Suin, M., Rajagopalan, A., Boddeti, V.N.: Spatially-adaptive image restoration using distortion-guided networks. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 2309-2319 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.371, + 0.787, + 0.412 + ], + "angle": 0, + "content": "37. Ren, D., Zuo, W., Hu, Q., Zhu, P., Meng, D.: Progressive image deraining networks: A better and simpler baseline. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 3937-3946 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.412, + 0.787, + 0.453 + ], + "angle": 0, + "content": "38. Rim, J., Lee, H., Won, J., Cho, S.: Real-world blur dataset for learning and benchmarking deblurring algorithms. In: European conference on computer vision. pp. 184-201. Springer (2020)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.453, + 0.787, + 0.495 + ], + "angle": 0, + "content": "39. 
Shen, Z., Wang, W., Lu, X., Shen, J., Ling, H., Xu, T., Shao, L.: Human-aware motion deblurring. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5572-5581 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.495, + 0.787, + 0.522 + ], + "angle": 0, + "content": "40. Song, Y., He, Z., Qian, H., Du, X.: Vision transformers for single image dehazing. IEEE Transactions on Image Processing 32, 1927-1941 (2023)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.522, + 0.787, + 0.564 + ], + "angle": 0, + "content": "41. Tu, Z., Talebi, H., Zhang, H., Yang, F., Milanfar, P., Bovik, A., Li, Y.: Maxim: Multi-axis mlp for image processing. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5769-5780 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.565, + 0.787, + 0.605 + ], + "angle": 0, + "content": "42. Wang, X., Xie, L., Yu, K., Chan, K.C., Loy, C.C., Dong, C.: BasicSR: Open source image and video restoration toolbox. https://github.com/XPixelGroup/BasicSR (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.606, + 0.787, + 0.647 + ], + "angle": 0, + "content": "43. Wang, Z., Cun, X., Bao, J., Zhou, W., Liu, J., Li, H.: Uformer: A general u-shaped transformer for image restoration. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 17683-17693 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.647, + 0.787, + 0.688 + ], + "angle": 0, + "content": "44. Wu, H., Qu, Y., Lin, S., Zhou, J., Qiao, R., Zhang, Z., Xie, Y., Ma, L.: Contrastive learning for compact single image dehazing. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 10551-10560 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.689, + 0.787, + 0.73 + ], + "angle": 0, + "content": "45. Yang, W., Tan, R.T., Feng, J., Liu, J., Guo, Z., Yan, S.: Deep joint rain detection and removal from a single image. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1357-1366 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.73, + 0.787, + 0.771 + ], + "angle": 0, + "content": "46. Yang, W., Tan, R.T., Feng, J., Liu, J., Guo, Z., Yan, S.: Deep joint rain detection and removal from a single image. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1357-1366 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.772, + 0.787, + 0.813 + ], + "angle": 0, + "content": "47. Yang, W., Tan, R.T., Feng, J., Liu, J., Guo, Z., Yan, S.: Deep joint rain detection and removal from a single image. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1357-1366 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.216, + 0.813, + 0.787, + 0.84 + ], + "angle": 0, + "content": "48. Zamir, S.W., Arora, A., Khan, S., Hayat, M., Khan, F.S., Yang, M.H.: Restormer: Efficient transformer for high-resolution image restoration. In: Proceedings of the" + }, + { + "type": "list", + "bbox": [ + 0.214, + 0.147, + 0.788, + 0.84 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.218, + 0.116, + 0.236, + 0.127 + ], + "angle": 0, + "content": "18" + }, + { + "type": "header", + "bbox": [ + 0.272, + 0.115, + 0.362, + 0.127 + ], + "angle": 0, + "content": "X.Chen et al." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.245, + 0.148, + 0.785, + 0.175 + ], + "angle": 0, + "content": "IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5728-5739 (2022)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.177, + 0.785, + 0.217 + ], + "angle": 0, + "content": "49. Zamir, S.W., Arora, A., Khan, S., Hayat, M., Khan, F.S., Yang, M.H., Shao, L.: Multi-stage progressive image restoration. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 14821-14831 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.218, + 0.785, + 0.259 + ], + "angle": 0, + "content": "50. Zeyde, R., Elad, M., Protter, M.: On single image scale-up using sparse-representations. In: International conference on curves and surfaces. pp. 711-730. Springer (2010)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.26, + 0.785, + 0.3 + ], + "angle": 0, + "content": "51. Zhang, H., Patel, V.M.: Density-aware single image de-raining using a multi-stream dense network. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 695-704 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.301, + 0.785, + 0.342 + ], + "angle": 0, + "content": "52. Zhang, K., Li, Y., Zuo, W., Zhang, L., Van Gool, L., Timofte, R.: Plug-and-play image restoration with deep denoiser prior. IEEE Transactions on Pattern Analysis and Machine Intelligence 44(10), 6360-6376 (2021)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.343, + 0.785, + 0.383 + ], + "angle": 0, + "content": "53. Zhang, K., Zuo, W., Chen, Y., Meng, D., Zhang, L.: Beyond a gaussian denoiser: Residual learning of deep cnn for image denoising. IEEE transactions on image processing 26(7), 3142-3155 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.384, + 0.785, + 0.424 + ], + "angle": 0, + "content": "54. Zhang, K., Zuo, W., Gu, S., Zhang, L.: Learning deep cnn denoiser prior for image restoration. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3929-3938 (2017)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.425, + 0.785, + 0.466 + ], + "angle": 0, + "content": "55. Zhang, K., Zuo, W., Zhang, L.: Ffdnet: Toward a fast and flexible solution for cnn-based image denoising. IEEE Transactions on Image Processing 27(9), 4608-4622 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.467, + 0.785, + 0.508 + ], + "angle": 0, + "content": "56. Zhang, L., Wu, X., Buades, A., Li, X.: Color demosaicking by local directional interpolation and nonlocal adaptive thresholding. Journal of Electronic imaging 20(2), 023016-023016 (2011)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.509, + 0.785, + 0.55 + ], + "angle": 0, + "content": "57. Zhang, Y., Li, K., Li, K., Wang, L., Zhong, B., Fu, Y.: Image super-resolution using very deep residual channel attention networks. In: European conference on computer vision. pp. 286-301. Springer (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.551, + 0.785, + 0.577 + ], + "angle": 0, + "content": "58. Zhang, Y., Li, K., Li, K., Zhong, B., Fu, Y.: Residual non-local attention networks for image restoration. arXiv preprint arXiv:1903.10082 (2019)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.578, + 0.785, + 0.619 + ], + "angle": 0, + "content": "59. Zhang, Y., Tian, Y., Kong, Y., Zhong, B., Fu, Y.: Residual dense network for image super-resolution. 
In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2472-2481 (2018)" + }, + { + "type": "ref_text", + "bbox": [ + 0.218, + 0.62, + 0.785, + 0.66 + ], + "angle": 0, + "content": "60. Zhang, Y., Tian, Y., Kong, Y., Zhong, B., Fu, Y.: Residual dense network for image restoration. IEEE transactions on pattern analysis and machine intelligence 43(7), 2480-2495 (2020)" + }, + { + "type": "list", + "bbox": [ + 0.218, + 0.148, + 0.785, + 0.66 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/76c19445-7741-420c-b1a4-d913d41c13ff_origin.pdf b/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/76c19445-7741-420c-b1a4-d913d41c13ff_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d285da310ce57f755bc5400519d751b8981d9fd4 --- /dev/null +++ b/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/76c19445-7741-420c-b1a4-d913d41c13ff_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55c4f9a9365ba60c0c0a55531f14fb9cb48100b94584b16fe26dc89a476b832b +size 3503580 diff --git a/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/full.md b/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/full.md new file mode 100644 index 0000000000000000000000000000000000000000..59311bc05d1f0cf75f3c247cad4f5a2ed322dcb4 --- /dev/null +++ b/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/full.md @@ -0,0 +1,305 @@ +# A Comparative Study of Image Restoration Networks for General Backbone Network Design + +Xiangyu Chen $^{1,2,3*}$ Zheyuan Li $^{2,1*}$ Yuandong $\mathrm{Pu}^{3,4*}$ Yihao Liu $^{2,3}$ +Jiantao Zhou $^{1\dagger}$ Yu Qiao $^{2,3}$ Chao Dong $^{2,3,5\dagger}$ + +1University of Macau 2Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences 3Shanghai Artificial Intelligence Laboratory 4Shanghai Jiao Tong University 5Shenzhen University of Advanced Technology https://github.com/Andrew0613/X-Restormer + +Abstract. Despite the significant progress made by deep models in various image restoration tasks, existing image restoration networks still face challenges in terms of task generality. An intuitive manifestation is that networks which excel in certain tasks often fail to deliver satisfactory results in others. To illustrate this point, we select five representative networks and conduct a comparative study on five classic image restoration tasks. First, we provide a detailed explanation of the characteristics of different image restoration tasks and backbone networks. Following this, we present the benchmark results and analyze the reasons behind the performance disparity of different models across various tasks. Drawing from this comparative study, we propose that a general image restoration backbone network needs to meet the functional requirements of diverse tasks. Based on this principle, we design a new general image restoration backbone network, X-Restormer. Extensive experiments demonstrate that X-Restormer possesses good task generality and achieves state-of-the-art performance across a variety of tasks. + +# 1 Introduction + +Image restoration aims to generate high-quality images from degraded images. 
In recent years, deep learning has achieved great success in this field, with numerous networks being proposed to address various image restoration tasks. Initially, networks are primarily designed to solve specific restoration tasks and are typically validated only on selected tasks. As deep learning techniques have continued to evolve, there has been an increasing focus on the development of general-purpose networks that can be applied to a broad range of tasks. This trend is particularly evident in the high-level vision field, where new backbone networks are being designed to support multiple tasks [17, 30], including classification, detection and segmentation. For image restoration, although more and more backbone networks can handle multiple restoration tasks, their task generality is still limited, as illustrated in Fig. 1. For instance, SwinIR [25] achieves + +![](images/5121f81d60afe460e046d73c19696fcb689f97a14c00e0a08e365e2b499eb441.jpg) +Fig. 1: Relative performance difference of different backbone networks on five image restoration tasks1. The existing representative networks exhibit diverse performance on these tasks, while our method presents superior task generality. + +state-of-the-art performance on image super-resolution (SR) but falls short on image deblurring and dehazing. Conversely, Restormer [48] performs exceptionally well on image dehazing and deraining but is less effective on image SR. This discrepancy can be attributed to the fact that the characteristics of image degradation vary across different image restoration tasks. While all image restoration tasks involve mapping degraded images to clean images, the requirements for the capability of backbone networks differ depending on specific tasks. + +Designing a general image restoration backbone network presents a significant challenge. However, the development of such a network holds considerable value, as it has the potential to greatly reduce costs associated with research and application. To achieve this goal, we first conduct a comparative study of mainstream backbone networks on the representative tasks, including image SR, denoising, deblurring, deraining and dehazing. These five tasks are chosen due to the distinct characteristics of their degradation. Five representative backbone networks are selected in the study, including MPRNet [49], Uformer [43], SwinIR [25], Restormer [48] and NAFNet [5]. These five networks encompass classic architectures such as U-shape architecture, plain residual-in-residual architecture and multi-stage progressive architecture. They also employ several common operators, including convolution, spatial self-attention and transposed self-attention [48]. We benchmark the five representative methods on the selected five tasks. The experimental results clearly reflect the performance disparity of different backbone networks on different tasks. We then conduct a detailed anal + +ysis of the characteristics of these tasks and these backbone networks to explain the reasons behind the performance differences. Based on the comparative study, we propose that a general backbone network must be highly comprehensive in terms of functionality that meets the diverse needs of various tasks. + +It is noteworthy that Restormer stands out in the comparative study, ranking within the top two across all five tasks. This superior performance can be attributed to several key designs. 
First, Restormer's U-shape architecture allows it to process large-size inputs, which is crucial for the tasks that deal with large areas of degradation. Then, the network employs transposed self-attention that utilizes channel-wise features as tokens, achieving the information interaction among channels and enabling the mapping with a global receptive field. Additionally, the incorporation of numerous depth-wise convolutions activates the considerable spatial information interaction ability of the network. From a functional perspective, Restormer integrates the key capabilities of the other compared networks, thereby exhibiting commendable task generality in the comparative study. However, the spatial mapping ability of Restormer still appears to be somewhat deficient, as indicated by its quantitatively and qualitatively subpar performance in comparison to SwinIR for $\mathrm{SR}^2$ . + +This inferiority is hypothesized to originate from the inherent challenge of detail reconstruction posed by the U-shape architecture, coupled with the relatively weak spatial mapping capability of depth-wise convolution, particularly when compared to spatial self-attention (i.e., window-based self-attention in SwinIR). To address this limitation, a plausible solution is the introduction of spatial self-attention to Restormer. To achieve this design, we alternately replace half of transposed self-attention blocks with overlapping cross-attention blocks [8], which are proven to have strong spatial information interaction capability, to construct a new network, X-Restormer. Extensive experiments show that this simple modification can significantly enhance the performance of Restormer without increasing the number of parameters. Moreover, our X-Restormer obtains state-of-the-art performance on all five tasks, exhibiting the best task generality. + +Our main contributions can be summarized as follows: + +- We conduct a comparative study by constructing an image restoration benchmark, highlighting the challenges faced by existing image restoration backbone networks in task generality. +- Based on the benchmark results, we perform a detailed analysis of the characteristics of different degradations and networks. We emphasize that the general image restoration backbone network design must meet the functional requirements of diverse tasks. +- By further enhancing the spatial mapping ability of Restormer, we design a preliminary general backbone network, X-Restormer. Without additional parameters, X-Restormer achieves significant performance improvement over existing networks and exhibits superior task generality. + +![](images/dfce3516a7c62209483df252920268c964d654ff7cf1841b229c8360d8213d50.jpg) +Ground Truth Degraded Image + +![](images/60c46dfaace279ff5bd78cde89737b63a4c2c285b32ffced04b6e6750ab47a73.jpg) +Fig. 2: Selected five representative image restoration tasks with various degradation. + +![](images/9631d7f38300254ed04c362a007b078bee3f608514120ff2beb444ddfced14a2.jpg) + +![](images/2c69e4e3833e263071b317e9764f710a62b21f367f88e1916d30200daf536809.jpg) + +![](images/1669b773fca3c5f36a3e5cd4fd774c31497adf8c3ab666d54383587de80b88ac.jpg) + +# 2 Related Work + +Image restoration networks. In the past years, numerous deep networks have been proposed for various image restoration tasks such as image SR [8,26,59], denoising [43,53,54], deblurring [1,6], deraining [4,29,45] and dehazing [40,41,46]. Initially, most deep networks are designed for specific tasks [3,14,34,55,57]. 
Recently, with increasing attention to the task generality of networks, more and more methods have been developed to tackle multiple image restoration tasks. For instance, Zamir et al. [49] builds a multi-stage CNN for deraining, deblurring and denoising. Wang et al. [43] designs a U-shape Transformer for deraining, deblurring and denoising. Liang et al. [25] implements a Swin Transformer-based network that achieves state-of-the-art performance on SR, denoising and compression artifact reduction. Zamir et al. [48] proposes a novel transposed self-attention to build a U-shape network for deraining, deblurring and denoising. Chen et al. [5] constructs a U-shape CNN for denoising and deblurring. While existing methods have demonstrated some ability to generalize across several restoration tasks, their task generality remains limited. + +Difference from the previous network design research. While previous works have proposed networks that excel in various image restoration tasks, their primary focus is on constructing stronger networks to achieve performance breakthroughs on specific tasks. In contrast, this work pays more attention to the task generality of the backbone network, possessing a vision different from previous works. More specifically, our objective is to explore the design principles and directions of general image restoration networks. We are not seeking to create powerful networks for peak performance on a single or some specific tasks, but rather to ensure satisfactory performance across a diverse range of tasks. Regarding the concrete implementation, we do not intend to construct complex network architectures or modules. Our preference, rather, is to enhance task generality through the use of the simplest methodology available. + +There are concurrent works that adopt similar ideas for specific image restoration tasks. DAT [9] combines spatial-window self-attention and channel-wise self-attention to handle image SR. IPT-V2 [23] designs a spatial-channel Transformer block to build a denoising network and obtains the winner award in the NTIRE 2023 image denoising challenge [23]. However, the motivation and specific network implementation of our work are distinct from these studies. + +# 3 Image Restoration Benchmark + +In this section, we first briefly introduce several image restoration tasks, each with its own representative degradation characteristics. Subsequently, we classify mainstream image restoration networks based on two key aspects: architecture and core operator. On this basis, we select five representative networks and conduct a benchmark experiment across five different tasks. We describe the experimental setup and explain its rationality. Finally, we present the benchmark results and conduct a detailed analysis of them. + +# 3.1 Overview of Image Restoration Tasks + +We select five representative tasks for the benchmark experiments. These tasks, exemplified in Fig. 2, are chosen based on two primary reasons. First, they are very common image restoration tasks with widely accepted evaluation schemes. Second, the degradation characteristics of these tasks are diverse and differ greatly from each other. As such, they can provide a robust way to evaluate the task generality of image restoration backbone networks. + +Let $I_{GT}$ denote the ground truth image and $I_{LQ}$ denote the degraded image, where $I_{GT} \in \mathbb{R}^{H \times W \times 3}$ . 
The degradation model of classic image SR can be represented as:

$$
I_{LQ} = (I_{GT} \otimes k)\downarrow_{s}, \tag{1}
$$

where $I_{LQ} \in \mathbb{R}^{\frac{H}{s} \times \frac{W}{s} \times 3}$ represents the low-resolution image, $k$ denotes the bicubic downsampling kernel and $\downarrow_{s}$ denotes downsampling with scale factor $s$. This degradation is highly correlated with local information and leads to a significant loss of high-frequency information. Thus, SR networks emphasize strong spatial information interaction capability to reconstruct as many details as possible.

The degradation model of image denoising can be denoted as:

$$
I_{LQ} = I_{GT} + n, \tag{2}
$$

where $n \in \mathbb{R}^{H \times W \times 3}$ represents the noise map. For Gaussian denoising, the noise values are content-independent. The downsampling-upsampling process of the U-shape architecture inherently aids noise removal. Besides, strong spatial information interaction capability can also enhance high-frequency content reconstruction for denoising networks.

The degradation model of image deblurring (for motion deblurring) can be denoted as:

$$
I_{LQ} = \sum_{t} f_{\mathrm{motion}}^{t}(I_{GT}), \tag{3}
$$

where $f_{\mathrm{motion}}^{t}(\cdot)$ represents the motion function at different moments of a continuous exposure. This degradation is related to the global motion offset of the image. Therefore, the ability to utilize large-range and even global information is important for deblurring networks.

![](images/b3157a9bd87d6feb1c78d54c7598778996395ea992c1fbae68ae01a9d0a75670.jpg)
Fig. 3: The core operators in image restoration networks.

The degradation model of image deraining can be simply denoted as:

$$
I_{LQ} = I_{GT} + R, \tag{4}
$$

where $R$ denotes the additive rain streaks, simulated by physics-based models such as [24, 28]. The difference between this degradation and Gaussian noise is that the added $R$ is not evenly distributed over the image and is correlated with the image content. Complicated rain streaks also place high demands on the complexity of deraining networks.

The degradation model of image dehazing, based on the atmospheric scattering model, can be denoted as:

$$
I_{LQ} = I_{GT} \cdot t(I_{GT}) + A\,(1 - t(I_{GT})), \tag{5}
$$

where $t(\cdot)$ represents the transmission function and $t(I_{GT})$ is associated with the distance from the scene point to the camera. This degradation is intrinsically linked to the depth information within the image. Consequently, the incorporation of global information is important for dehazing networks.
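For concreteness, the five degradation models above can be written as a small synthesis sketch. The NumPy snippet below is only an illustration of Eqs. (1)-(5): the box-average kernel, motion offsets, rain map and transmission map are stand-in assumptions, not the actual generators used to build the benchmark data.

```python
import numpy as np


def sr_degrade(gt: np.ndarray, scale: int = 4) -> np.ndarray:
    """Eq. (1): (I_GT ⊗ k) ↓_s. A box-average kernel stands in for the bicubic kernel."""
    h, w, c = gt.shape
    h, w = h - h % scale, w - w % scale
    return gt[:h, :w].reshape(h // scale, scale, w // scale, scale, c).mean(axis=(1, 3))


def add_gaussian_noise(gt: np.ndarray, sigma: float = 50.0) -> np.ndarray:
    """Eq. (2): I_GT + n, content-independent Gaussian noise (images assumed in [0, 1])."""
    return gt + np.random.normal(0.0, sigma / 255.0, size=gt.shape)


def motion_blur(gt: np.ndarray, offsets=((0, 0), (0, 1), (0, 2), (0, 3))) -> np.ndarray:
    """Eq. (3): average of the scene along a motion trajectory (placeholder integer offsets)."""
    return np.mean([np.roll(gt, shift, axis=(0, 1)) for shift in offsets], axis=0)


def add_rain(gt: np.ndarray, rain_map: np.ndarray) -> np.ndarray:
    """Eq. (4): I_GT + R, where R is a spatially non-uniform rain-streak map."""
    return gt + rain_map


def add_haze(gt: np.ndarray, transmission: np.ndarray, airlight: float = 1.0) -> np.ndarray:
    """Eq. (5): I_GT * t + A * (1 - t); transmission has shape (H, W, 1) and depends on depth."""
    return gt * transmission + airlight * (1.0 - transmission)
```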
# 3.2 Characteristics of Typical Backbone Networks

The architectures of mainstream image restoration networks can be broadly classified into three categories: U-shape encoder-decoder, plain residual-in-residual and multi-stage progressive. Schematic diagrams of these architectures are provided in the Supp. The U-shape encoder-decoder architecture performs down-sampling and up-sampling operations on features, enabling networks to handle features of varying scales. This architecture allows networks to accept large-size inputs, and the effective receptive field of the network expands rapidly with down-sampling. Typical U-shape networks include Uformer [43] and Restormer [48]. The multi-stage architecture divides the entire network into several sub-networks and progressively processes features; networks of this type are primarily used for image deraining and deblurring. Common networks based on this architecture include MPRNet [49] and HINet [6]. The plain residual-in-residual architecture is composed of several residual groups, each of which consists of several residual blocks. This architecture maintains the original feature size during processing, which is favorable for the reconstruction of high-frequency information but comes at a high computational cost. Typical networks include RCAN [57] and SwinIR [25].

The core operators for constructing an image restoration network can be mainly categorized into three types: convolution, spatial self-attention and transposed self-attention. These operators are shown in Fig. 3. The convolution applies a fixed-size filter over the entire feature map through a sliding window and is the major component of many networks, such as RDN [60]. Spatial self-attention is typically implemented as window self-attention in image restoration tasks. It calculates the attention matrix within a fixed window size, generating content-aware weights that are functionally similar to a large-kernel dynamic filter. This operator has strong local fitting ability and shows superior advantages on SR and denoising [7]. Transposed self-attention treats the entire feature of each channel as a token to calculate the attention matrix on the channel dimension. This operator directly deals with global features, and when combined with depth-wise convolution, it shows remarkable performance in multiple restoration tasks [48]. The five representative backbone networks selected for the benchmark experiment encompass the abovementioned architectures and core operators, as presented in Tab. 1.

Table 1: Architectures and core operators of the five selected backbone networks.

<table>
<tr><th>Network</th><th>Architecture</th><th>Core operator</th></tr>
<tr><td>MPRNet</td><td>Multi-Stage</td><td>Convolution</td></tr>
<tr><td>Uformer</td><td>U-Shape</td><td>Spatial self-attention</td></tr>
<tr><td>SwinIR</td><td>Plain residual-in-residual</td><td>Spatial self-attention</td></tr>
<tr><td>Restormer</td><td>U-Shape</td><td>Transposed self-attention</td></tr>
<tr><td>NAFNet</td><td>U-Shape</td><td>Convolution</td></tr>
</table>

# 3.3 Experimental Settings

For image SR, we conduct experiments on the upscaling factor $\times 4$. We use the DF2K dataset (the same as SwinIR [25]) to train models. Low-resolution images are generated from the ground truth images using bicubic downsampling in MATLAB. For U-shape networks, we first up-sample the input low-resolution images through bilinear interpolation. The performance is reported on the Y channel. For denoising, we adopt the DFWB dataset for training. Noisy images are generated by adding Gaussian noise with a noise level of 50. For deblurring, we use the motion deblurring dataset GoPro [34] to train the models. For deraining, we conduct experiments using the synthetic rain dataset Rain13K and calculate the performance on the Y channel, following Restormer [48]. For dehazing, we use the indoor training set (ITS) of the RESIDE dataset [21], the same as [40].

To maximize the capability of these networks, we use the official codes and training configurations provided by the different methods to train the models3. Note that all models are trained without using any pre-training strategy (e.g., $\times 2$ pretraining for SR) or special tricks (e.g., EMA in SwinIR and TLC in NAFNet) for a fair comparison. In addition, we find that different methods may not use exactly the same test sets and the same metrics calculation in their papers to report performance. Therefore, we retest all models based on exactly the same data and calculate metrics using the popular open-source toolbox BasicSR [42].
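As an illustration of this evaluation protocol, the sketch below computes PSNR on the Y channel after a BT.601 RGB-to-luma conversion, which is the convention used for the SR and deraining results. It is a self-contained simplification, not BasicSR's exact implementation, and the `crop_border` default is an assumption (for SR it usually equals the scale factor).

```python
import numpy as np


def rgb_to_y(img: np.ndarray) -> np.ndarray:
    """BT.601 luma of an RGB image in [0, 1], shape (H, W, 3) -> (H, W), still in [0, 1]."""
    r, g, b = img[..., 0], img[..., 1], img[..., 2]
    return (65.481 * r + 128.553 * g + 24.966 * b + 16.0) / 255.0


def psnr_y(pred: np.ndarray, gt: np.ndarray, crop_border: int = 4) -> float:
    """PSNR (dB) on the Y channel; a border is cropped before scoring, as is common for SR."""
    pred_y, gt_y = rgb_to_y(pred), rgb_to_y(gt)
    if crop_border > 0:
        pred_y = pred_y[crop_border:-crop_border, crop_border:-crop_border]
        gt_y = gt_y[crop_border:-crop_border, crop_border:-crop_border]
    mse = float(np.mean((pred_y - gt_y) ** 2))
    return float("inf") if mse == 0 else 10.0 * np.log10(1.0 / mse)
```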
Table 2: Quantitative results on PSNR (dB) of the benchmark experiments. The best and second-best performance results are in **bold** and **underline**.

<table>
<tr><th rowspan="2">Method</th><th colspan="2">SR</th><th colspan="2">Denoising</th><th colspan="2">Deblurring</th><th colspan="2">Deraining</th><th>Dehazing</th></tr>
<tr><th>Set14</th><th>Urban100</th><th>CBSD68</th><th>Urban100</th><th>GoPro</th><th>HIDE</th><th>Test100</th><th>Rain100H</th><th>SOTS Indoor</th></tr>
<tr><td>MPRNet</td><td>28.90</td><td>26.88</td><td>28.48</td><td>29.71</td><td>32.66</td><td>30.96</td><td>30.29</td><td>30.43</td><td>40.34</td></tr>
<tr><td>SwinIR</td><td>29.07</td><td>27.47</td><td>28.56</td><td>29.88</td><td>31.66</td><td>29.41</td><td>30.05</td><td>30.45</td><td>29.14</td></tr>
<tr><td>Uformer</td><td>27.14</td><td>25.60</td><td>28.55</td><td>29.98</td><td>33.05</td><td>30.89</td><td>27.93</td><td>24.06</td><td>33.58</td></tr>
<tr><td>Restormer</td><td>29.06</td><td>27.32</td><td>28.60</td><td>30.02</td><td>32.92</td><td>31.22</td><td>32.03</td><td>31.48</td><td>41.87</td></tr>
<tr><td>NAFNet</td><td>29.03</td><td>27.00</td><td>28.52</td><td>29.65</td><td>33.08</td><td>31.22</td><td>30.33</td><td>32.83</td><td>38.97</td></tr>
+ +# 3.4 Benchmark Results + +We present the quantitative results of the benchmark experiments in Tab. 2. (Due to space constraints, complete results are provided in $\text{Supp.}$ ) Several important observations can be made from the results: 1) Different networks exhibit varying performance on different tasks. For instance, SwinIR performs best on SR but worst on deblurring and dehazing. Uformer excels on denoising and deblurring but performs poorly on deraining and SR. 2) Networks with U-shape and multi-stage architectures present clear advantages on deblurring and dehazing. 3) MPRNet and NAFNet, which are mainly based on convolution operators, exhibit moderate performance across all tasks without outstanding results. 4) SwinIR, which employs plain architecture and spatial self-attention operators, outperforms other networks by a significant margin on SR. 5) The overall performance of Restormer is outstanding. Except for consistently being weaker than SwinIR on SR, it obtains considerable performance on almost all other tasks. + +# 3.5 Analysis + +In this section, we explain the above observations by analyzing the characteristics of different tasks and backbone networks. + +The degradation of SR lies in the compression of local information, resulting in a large loss of high-frequency details. Therefore, SR networks often require strong spatial information interaction capability, or even generative capability. The U-shape architecture, which incorporates multiple downsampling operations, may undermine the reconstruction of high-frequency information and intuitively escalates the difficulty of detail reconstruction. In contrast, the plain architecture that maintains feature sizes benefits SR. Besides, window self-attention has demonstrated a superior local fitting ability than convolution [8]. As a result, SwinIR, which is based on a plain structure and employs spatial self-attention operators, exhibits a distinct advantage on SR. + +Denoising entails smoothing the image to eliminate high-frequency noise and integrating low-frequency information to reconstruct a clear image. This task places no explicit unique requirement for the network, while its performance intuitively benefits from effective spatial information interaction. It is conjectured that the high performance of Restormer on denoising can be attributed to its ability to better smooth noise through channel-wise processing, akin to operating + +![](images/3494763775ec3beba53de9575a31a98f5ecf262539990d3a8ea3e5232d45557d.jpg) +Fig. 4: Visual and LAM [16] comparisons between Restormer and SwinIR. The LAM results and DI values indicate that Restormer exploits significantly more information than SwinIR. However, SwinIR reconstructs much more details than Restormer. + +in the frequency domain. In contrast, SwinIR and Uformer perform well due to their robust spatial information interaction ability of the spatial self-attention. + +Deblurring (specifically for motion blur here) involves addressing global motion shifts in the image. As a result, the ability to handle large-size inputs and the use of global or multi-scale information are necessary for deblurring networks. Thus, the networks based on the U-shape architecture all perform well on this task. Conversely, SwinIR, which employs the plain architecture and focuses more on local information processing, performs much worse than other networks. + +Similar phenomena can be observed for dehazing. 
Due to the involvement of the depth information in the haze model, the ability to use large-range or even global information is crucial. Besides, dehazing networks are required to handle low-frequency transformations, including alterations in color and contrast, both of which constitute global mappings. Therefore, SwinIR and Uformer, which rely more on local spatial information interaction, perform poorly on this task. On the contrary, Restormer exhibits exceptional performance. + +Deraining is relatively unique in that the rain is unevenly distributed in images, with significant differences between different raindrops and streaks. Thus, there is no clear pattern in the performance of different networks on deraining. Nevertheless, networks with higher complexity present better performance. + +Based on the above results and analysis, we can infer that the acceptable performance of a backbone network on a specific task is predicated on meeting the functional requirements of that task. It is notable that Restormer obtains exceptional task generality. This can be attributed to several factors: 1) The U-shape architecture enables the network to accommodate large-size input. 2) The transposed self-attention allows direct interaction of global information. 3) The presence of depth-wise convolution enables the network to process spatial information effectively. In summary, due to Restormer's comprehensive functionality, it is capable of meeting the diverse requirements of different tasks. + +![](images/bc5a579a8cac12e0125a4ebb36e809b789459ae709e31330f172fa66f04d43b8.jpg) +Fig. 5: The network structure of X-Restormer. To enhance the spatial mapping ability of Restormer and create a more general network, we replace half of the transposed self-attention blocks in Restormer with spatial self-attention blocks. For TSA, we retain the preliminary multi-Dconv head transposed attention (MDTA) used in Restormer. For SSA, we adopt the overlapping cross-attention (OCA) in HAT [8]. + +# 4 General Backbone Network Design + +Based on the benchmark experiments, we believe that the principle of designing a general backbone network should be to ensure that the network can fulfill the functional requirements of all tasks. As Restormer shows relatively good task generality, we select it as the starting point to design a more general network. By pinpointing and addressing the limitation of Restormer, we present an initial version of a general image restoration backbone network in this section. + +Limitation of Restormer. In the benchmark experiments, Restormer shows inferior performance to SwinIR on SR, particularly on Urban100. The qualitative comparisons also indicate this phenomenon in Fig. 4. From the visual and LAM [16] results, We can observe that Restormer can exploit large-range and even global information for the reconstruction. However, compared to SwinIR, it fails to reconstruct fine textures, even for self-repeated patterns. This discrepancy can be attributed to the U-shape architecture adopted by Restormer on the one hand, which increases the difficulty of reconstructing high-frequency information. On the other hand, Restormer relies on depth-wise convolution for spatial information interaction, whose spatial mapping capability is relatively weaker than the spatial self-attention in SwinIR. Considering that the U-shape architecture is indispensable for some tasks, we still need to retain this architectural design for task generality. 
To overcome the limitation of Restormer and design a more powerful backbone network, we choose to further enhance its spatial information interaction ability. An intuitive and feasible solution is to incorporate the spatial self-attention module into Restormer.

Network structure. In Fig. 5, we present the structure of our proposed backbone network, denoted as X-Restormer. We choose the U-shape architecture to build the network. In contrast to Restormer, we replace half of the transposed self-attention blocks (TSAB) with spatial self-attention blocks (SSAB) to enhance the ability of spatial information interaction. Given an input feature $F_{in}$, the two blocks process it alternately as:

$$
F_{t} = F_{in} + TSA(LN(F_{in})), \tag{6}
$$

$$
F_{t\_out} = F_{t} + FFN(LN(F_{t})), \tag{7}
$$

$$
F_{s} = F_{t\_out} + SSA(LN(F_{t\_out})), \tag{8}
$$

$$
F_{out} = F_{s} + FFN(LN(F_{s})), \tag{9}
$$

where $F_{t}$, $F_{t\_out}$, $F_{s}$ and $F_{out}$ represent the intermediate feature in TSAB, the output of TSAB, the intermediate feature in SSAB and the output of SSAB, respectively. $F_{out}$, the output of the two consecutive blocks, also serves as the input for the following two blocks. $TSA(\cdot)$ and $SSA(\cdot)$ indicate the transposed self-attention (TSA) and spatial self-attention (SSA) modules. $LN(\cdot)$ denotes layer normalization and $FFN(\cdot)$ represents the feed-forward network.

Specifically, we adopt the Multi-Dconv Head Transposed Attention (MDTA) as the TSA module. It first generates the query $(Q)$, key $(K)$ and value $(V)$ by applying $1 \times 1$ convolutions followed by $3 \times 3$ depth-wise convolutions. Then, the channel attention matrix in $\mathbb{R}^{C \times C}$ is calculated by the dot-product of the reshaped $Q$ and $K$ followed by a Softmax function. The schematic of TSA is shown in Fig. 3. Finally, the result is generated by the dot-product of the attention matrix and $V$. For SSA, we adopt the Overlapping Cross-Attention (OCA) introduced in the HAT model [8]. We choose OCA because the shifted window mechanism in SwinIR is not intuitively suitable for our TSA-SSA consecutive blocks, and HAT demonstrates the effectiveness and superiority of OCA. For the specific calculation, $Q$ is produced by partitioning the input into non-overlapping windows, while $K$ and $V$ are generated by partitioning the input into overlapping windows with a manually set overlapping size. Apart from the different window partition methods, the calculation of OCA is essentially identical to that of standard window self-attention. For the FFN, we employ the Gated-Dconv Feed-forward Network (GDFN), as used in Restormer. Instead of using two $1 \times 1$ convolutions to construct an MLP, GDFN first processes the input features through $1 \times 1$ convolutions and $3 \times 3$ depth-wise convolutions to form two parallel branches. Then, the resulting features are combined via element-wise multiplication and passed through another $1 \times 1$ convolution to produce the final output.

We have also tried multiple design choices for SSAB and TSAB. Experiments can be found in the Supp. We emphasize that our design of X-Restormer is not to develop novel architectures or modules to improve the performance on certain tasks, but to enhance the task generality of the network according to the principle of general backbone network design through as simple means as possible.
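To make these components concrete, here is a simplified PyTorch sketch of the two Restormer-style modules retained in X-Restormer: a transposed self-attention in the spirit of MDTA and a gated feed-forward network in the spirit of GDFN. It is a stripped-down illustration rather than the official implementation; bias handling and other details are omitted.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class TransposedSelfAttention(nn.Module):
    """MDTA-style attention: tokens are channel slices, so the attention map is C' x C'."""

    def __init__(self, dim: int, num_heads: int = 1):
        super().__init__()
        self.num_heads = num_heads
        self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
        self.qkv = nn.Conv2d(dim, dim * 3, kernel_size=1)
        self.qkv_dw = nn.Conv2d(dim * 3, dim * 3, kernel_size=3, padding=1, groups=dim * 3)
        self.project_out = nn.Conv2d(dim, dim, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        b, c, h, w = x.shape
        q, k, v = self.qkv_dw(self.qkv(x)).chunk(3, dim=1)
        # reshape to (b, heads, c/heads, h*w): each channel slice is one token
        q = q.reshape(b, self.num_heads, c // self.num_heads, h * w)
        k = k.reshape(b, self.num_heads, c // self.num_heads, h * w)
        v = v.reshape(b, self.num_heads, c // self.num_heads, h * w)
        q, k = F.normalize(q, dim=-1), F.normalize(k, dim=-1)
        attn = (q @ k.transpose(-2, -1)) * self.temperature   # (b, heads, c', c')
        out = attn.softmax(dim=-1) @ v                         # (b, heads, c', h*w)
        return self.project_out(out.reshape(b, c, h, w))


class GatedFeedForward(nn.Module):
    """GDFN-style FFN: two depth-wise branches combined by element-wise gating."""

    def __init__(self, dim: int, expansion: float = 2.66):
        super().__init__()
        hidden = int(dim * expansion)
        self.project_in = nn.Conv2d(dim, hidden * 2, kernel_size=1)
        self.dwconv = nn.Conv2d(hidden * 2, hidden * 2, kernel_size=3, padding=1, groups=hidden * 2)
        self.project_out = nn.Conv2d(hidden, dim, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x1, x2 = self.dwconv(self.project_in(x)).chunk(2, dim=1)
        return self.project_out(F.gelu(x1) * x2)
```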
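Building on the previous sketch (it reuses TransposedSelfAttention and GatedFeedForward defined above), the alternating arrangement of Eqs. (6)-(9) can be outlined as follows. Plain non-overlapping window attention stands in for OCA (the overlapping key/value windows are omitted for brevity), and layer normalization is applied over the channel dimension of the 4D feature; both simplifications are assumptions of this sketch, as are the class names.

```python
import torch
import torch.nn as nn


class WindowSelfAttention(nn.Module):
    """Simplified SSA: plain window attention standing in for OCA (no overlapping K/V windows)."""

    def __init__(self, dim: int, window_size: int = 8, num_heads: int = 1):
        super().__init__()
        self.ws, self.heads = window_size, num_heads
        self.qkv = nn.Conv2d(dim, dim * 3, kernel_size=1)
        self.proj = nn.Conv2d(dim, dim, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # assumes H and W are multiples of window_size
        b, c, h, w = x.shape
        ws, d = self.ws, c // self.heads
        q, k, v = self.qkv(x).chunk(3, dim=1)

        def to_windows(t):
            t = t.reshape(b, self.heads, d, h // ws, ws, w // ws, ws)
            return t.permute(0, 3, 5, 1, 4, 6, 2).reshape(-1, self.heads, ws * ws, d)

        q, k, v = map(to_windows, (q, k, v))
        attn = (q @ k.transpose(-2, -1)) * d ** -0.5
        out = attn.softmax(dim=-1) @ v                      # (b*num_windows, heads, ws*ws, d)
        out = out.reshape(b, h // ws, w // ws, self.heads, ws, ws, d)
        out = out.permute(0, 3, 6, 1, 4, 2, 5).reshape(b, c, h, w)
        return self.proj(out)


class ChannelLayerNorm(nn.Module):
    """LayerNorm over the channel dimension of a (B, C, H, W) feature."""

    def __init__(self, dim: int):
        super().__init__()
        self.norm = nn.LayerNorm(dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)


class ConsecutiveBlock(nn.Module):
    """One TSAB followed by one SSAB, i.e. the pattern of Eqs. (6)-(9)."""

    def __init__(self, dim: int, num_heads: int = 1, window_size: int = 8):
        super().__init__()
        self.tsa = TransposedSelfAttention(dim, num_heads)
        self.ssa = WindowSelfAttention(dim, window_size, num_heads)
        self.ffn1, self.ffn2 = GatedFeedForward(dim), GatedFeedForward(dim)
        self.n1, self.n2, self.n3, self.n4 = (ChannelLayerNorm(dim) for _ in range(4))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = x + self.tsa(self.n1(x))    # Eq. (6)
        x = x + self.ffn1(self.n2(x))   # Eq. (7)
        x = x + self.ssa(self.n3(x))    # Eq. (8)
        x = x + self.ffn2(self.n4(x))   # Eq. (9)
        return x
```

In the full network, such consecutive blocks are stacked inside the 4-level U-shape encoder-decoder, with block numbers [2, 4, 4, 4] from level-1 to level-4 as described in Sec. 5.1.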
# 5 Experiments

# 5.1 Experimental Setup

We evaluate the proposed X-Restormer on the same datasets used in the benchmark experiment. For the network implementation, the network employs a 4-level encoder-decoder with three times down-sampling and

Table 3: Quantitative results on $\times 4$ image SR. * means the model pretrained on $\times 2$ SR.

<table>
<tr><th>Model</th><th>Set5</th><th>Set14</th><th>BSD100</th><th>Urban100</th><th>Manga109</th></tr>
<tr><td>RCAN</td><td>32.63/0.9002</td><td>28.87/0.7889</td><td>27.77/0.7436</td><td>26.82/0.8087</td><td>31.22/0.9173</td></tr>
<tr><td>RCAN-it</td><td>32.69/0.9007</td><td>28.99/0.7922</td><td>27.87/0.7459</td><td>27.16/0.8168</td><td>31.78/0.9217</td></tr>
<tr><td>SwinIR*</td><td>32.92/0.9044</td><td>29.09/0.7950</td><td>27.92/0.7489</td><td>27.45/0.8254</td><td>32.03/0.9260</td></tr>
<tr><td>IPT</td><td>32.64/-</td><td>29.01/-</td><td>27.82/-</td><td>27.26/-</td><td>-/-</td></tr>
<tr><td>EDT</td><td>32.82/0.9031</td><td>29.09/0.7939</td><td>27.91/0.7483</td><td>27.46/0.8246</td><td>32.05/0.9254</td></tr>
<tr><td>NAFNet</td><td>32.79/0.9010</td><td>29.03/0.7919</td><td>27.86/0.7463</td><td>27.00/0.8112</td><td>31.77/0.9216</td></tr>
<tr><td>SwinIR</td><td>32.88/0.9041</td><td>29.07/0.7944</td><td>27.93/0.7490</td><td>27.47/0.8258</td><td>31.96/0.9255</td></tr>
<tr><td>Restormer</td><td>32.94/0.9039</td><td>29.06/0.7934</td><td>27.91/0.7482</td><td>27.32/0.8199</td><td>31.96/0.9244</td></tr>
<tr><td>X-Restormer</td><td>33.16/0.9058</td><td>29.17/0.7963</td><td>28.00/0.7512</td><td>27.66/0.8291</td><td>32.38/0.9279</td></tr>
+ +Table 4: Quantitative results on image denoising with the noise level $\sigma = 50$ + +
<tr><th>Model</th><th>CBSD68</th><th>Kodak24</th><th>McMaster</th><th>Urban100</th></tr>
<tr><td>FFDNet</td><td>27.96/-</td><td>28.98/-</td><td>29.18/-</td><td>28.05/-</td></tr>
<tr><td>RNAN</td><td>28.27/-</td><td>29.58/-</td><td>29.72/-</td><td>29.08/-</td></tr>
<tr><td>RDN</td><td>28.31/-</td><td>29.66/-</td><td>-/-</td><td>29.38/-</td></tr>
<tr><td>IPT</td><td>28.39/-</td><td>29.64/-</td><td>29.98/-</td><td>29.71/-</td></tr>
<tr><td>DRUNet</td><td>28.51/-</td><td>29.86/-</td><td>30.08/-</td><td>29.61/-</td></tr>
<tr><td>SwinIR</td><td>28.56/0.8118</td><td>29.95/0.8221</td><td>30.20/0.8489</td><td>29.88/0.8861</td></tr>
<tr><td>Uformer</td><td>28.55/0.8130</td><td>29.97/0.8244</td><td>30.16/0.8485</td><td>29.98/0.8900</td></tr>
<tr><td>Restormer</td><td>28.60/0.8130</td><td>30.01/0.8237</td><td>30.30/0.8517</td><td>30.02/0.8898</td></tr>
<tr><td>X-Restormer</td><td>28.63/0.8138</td><td>30.05/0.8245</td><td>30.33/0.8518</td><td>30.24/0.8928</td></tr>
+ +Table 5: Quantitative results on image Table 6: Quantitative results on image deraindeblurring (motion blur). ing. + +
<tr><th>Model</th><th>GoPro</th><th>HIDE</th><th>RealBlur-R</th><th>RealBlur-J</th></tr>
<tr><td>SPAIR</td><td>32.06/0.953</td><td>30.29/0.931</td><td>-/-</td><td>28.81/0.875</td></tr>
<tr><td>MIMO-UNet+</td><td>32.45/0.957</td><td>29.99/0.930</td><td>35.54/0.947</td><td>27.63/0.837</td></tr>
<tr><td>IPT</td><td>32.52/-</td><td>-/-</td><td>-/-</td><td>-/-</td></tr>
<tr><td>MPRNet</td><td>32.66/0.959</td><td>30.96/0.939</td><td>35.99/0.952</td><td>28.70/0.873</td></tr>
<tr><td>Uformer</td><td>33.05/0.942</td><td>30.89/0.920</td><td>36.19/0.956</td><td>29.09/0.886</td></tr>
<tr><td>NAFNet</td><td>33.08/0.942</td><td>31.22/0.924</td><td>35.97/0.952</td><td>28.32/0.857</td></tr>
<tr><td>Restormer</td><td>32.92/0.940</td><td>31.22/0.923</td><td>36.19/0.957</td><td>28.96/0.879</td></tr>
<tr><td>X-Restormer</td><td>33.44/0.946</td><td>31.76/0.930</td><td>36.27/0.958</td><td>28.87/0.878</td></tr>
+ +Table 7: Quantitative results on image dehazing. + +
<tr><th>Model</th><th>PFDN</th><th>FFA-Net</th><th>AECR-Net</th><th>MAXIM</th><th>DehazeFormer</th><th>MPRNet</th><th>NAFNet</th><th>Restormer</th><th>X-Restormer</th></tr>
<tr><td>SOTS Indoor</td><td>32.68/0.976</td><td>36.39/0.989</td><td>37.17/0.990</td><td>39.72/-</td><td>40.05/0.996</td><td>40.34/0.994</td><td>38.97/0.994</td><td>41.97/0.994</td><td>42.90/0.995</td></tr>
+ +up-sampling. To maintain a similar number of parameters as Restormer, from level-1 to level-4 (i.e., $L_{1} \sim L_{4}$ in the figure) the numbers of consecutive blocks (containing a TSAB and a SSAB) are [2, 4, 4, 4] and the number of refinement blocks (i.e., $L_{r}$ ) is 4. Attention heads in TSA and SSA are both [1, 2, 4, 8], and channel numbers are [48, 96, 192, 384]. For OCA, the window size and the overlapping ratio are set to 8 and 0.5 as in HAT. The channel expansion factor in GDFN is 2.66. The overall parameters are 26.06M, slightly less than Restormer of 26.13M. We adopt the same training settings as Restormer in the benchmark experiment to optimize the model. We use the AdamW optimizer with $\beta_{1} = 0.9$ and $\beta_{2} = 0.99$ , utilizing an initial learning rate of $3e^{-4}$ . The learning rate decay follows a cosine scheduler with intervals at 92k and 208k iterations, and the total training iterations are 300K. The input patch size is $256 \times 256$ and the batch size is 32. For data augmentation, we use horizontal and vertical flips. We utilize the $L_{1}$ loss function to train the model. Notably, we do not adopt any training tricks (e.g., $\times 2$ SR pretraining or EMA strategy) or testing tricks (e.g., TLC [11]). + +# 5.2 Experimental Results + +We compare our X-Restormer with the top three models in the benchmark experiments (based on the same test configurations) as well as several state-of-the-art approaches for each task (based on the reported performance in their papers) in this section. PSNR(dB)/SSIM is provided in following tables. The best and second-best performance results are in **bold** and **underline**. + +Image SR. In Tab. 3, we present the quantitative results of $\times 4$ SR on five benchmark datasets: Set5 [2], Set14 [50], BSD100 [31], Urban100 [18] and Manga109 [33]. The state-of-the-art approaches, including RCAN [57], RCAN-it [27], SwinIR [35], IPT [4] and EDT [22] are compared in this experiment. X-Restormer significantly outperforms Restormer by $0.22\mathrm{dB}$ on Set5, $0.34\mathrm{dB}$ on Urban100 and $0.42\mathrm{dB}$ on Manga109. This demonstrates the effectiveness of our design in enhancing the spatial mapping ability of Restormer. Furthermore, X-Restormer surpasses the SOTA method EDT by $0.2\mathrm{dB}$ on Urban100 and $0.35\mathrm{dB}$ on Manga109, indicating the effectiveness of X-Restormer on SR. Despite this, we point out that our method still cannot beat the most powerful SR approaches, e.g., HAT. This is due to the inevitable weakening of SR performance for the U-shape architecture. In terms of SR, the plain residual in residual architecture is still more effective. + +Image denoising. In Tab. 4, we provide the quantitative results of Gaussian denoising with the noise level $\sigma = 50$ on four benchmark datasets: CBSD68 [32], Kodak24 [13], McMaster [56] and Urban100 [18]. The state-of-the-art methods: FFDNet [55], RNAN [58], RDN [60], IPT [4] and DRUNet [52] are compared in this experiment. X-Restormer achieves the state-of-the-art performance, surpassing SwinIR by 0.36dB and outperforming Restormer by 0.22dB on Urban100. This demonstrates the superiority of X-Restormer on image denoising. + +Image deblurring. In Tab. 5, we compare the results of X-Restormer with the state-of-the-art methods: SPAIR [36], MIMO-UNet+ [10], IPT [4] and MPR-Net [49] on both synthetic datasets (Gopro [34] and HIDE [39]) and real-world datasets (RealBlur-R and RealBlur-J [38]). 
X-Restormer achieves large performance gains over the other models on synthetic datasets, with an improvement of $0.36\mathrm{dB}$ on Gopro compared to $\mathrm{NAFNet}^4$ and $0.54\mathrm{dB}$ on HIDE compared to Restormer. Besides, our X-Restormer obtains the state-of-the-art performance on RealBlur-R and considerable performance on RealBlur-J, showing the effectiveness of our method on real-world motion deblurring scenarios. + +Image deraining. In Tab. 6, we present the quantitative results of deraining on Test100 [19], Rain100L [47], Rain100H [47], Test1200 [51] and Test2800 [15]. The state-of-the-art methods: PreNet [37], MSPFN [20], MPRNet [49] and SPAIR [36] are compared. X-Restormer outperforms the other models on Test100, Rain100H and Rain100L but performs inferior to Restormer on Test1200 and Test2800. This discrepancy is due to the variations in degradation produced by different rain models. Nonetheless, X-Restormer exhibits comparable performance to state-of-the-art methods, showing its effectiveness on image deraining. + +Image dehazing. In Tab. 7, we provide the quantitative results on SOTS Indoor [21]. We compare the state-of-the-art approaches: PFDN [12], FFA-Net [46], AECR-Net [44], MAXIM [41] and DehazeFormer [40] in this experiment. Notably, X-Restormer model significantly outperforms Restormer by a large margin of $0.93\mathrm{dB}$ . When compared to the state-of-the-art dehazing method DehazeFormer, our method achieves a breakthrough performance gain of $2.85\mathrm{dB}$ . These results demonstrate the superiority of X-Restormer for image dehazing. + +Table 8: Quantitative results on All-in-One restoration. + +
<tr><th rowspan="2">Model</th><th colspan="2">SR</th><th colspan="3">Denoising</th><th rowspan="2">Deblurring</th><th rowspan="2">Deraining</th><th rowspan="2">Dehazing</th></tr>
<tr><th>×2</th><th>×4</th><th>σ = 15</th><th>σ = 25</th><th>σ = 50</th></tr>
<tr><td>MPRNet</td><td>33.68/0.9300</td><td>28.17/0.8043</td><td>34.27/0.9280</td><td>31.82/0.8901</td><td>28.60/0.8119</td><td>30.00/0.8812</td><td>31.20/0.9068</td><td>35.06/0.9874</td></tr>
<tr><td>SwinIR</td><td>33.83/0.9301</td><td>28.14/0.8043</td><td>34.27/0.9283</td><td>31.83/0.8906</td><td>28.59/0.8143</td><td>29.06/0.8519</td><td>30.03/0.8983</td><td>31.48/0.9823</td></tr>
<tr><td>Uformer</td><td>29.99/0.8805</td><td>27.88/0.7949</td><td>33.86/0.9254</td><td>31.42/0.8863</td><td>27.87/0.7891</td><td>29.64/0.8725</td><td>27.53/0.8569</td><td>29.92/0.9714</td></tr>
<tr><td>Restormer</td><td>34.51/0.9341</td><td>28.70/0.8179</td><td>34.43/0.9303</td><td>32.02/0.8942</td><td>28.87/0.8222</td><td>30.54/0.8902</td><td>31.91/0.9134</td><td>36.95/0.9897</td></tr>
<tr><td>NAFNet</td><td>34.12/0.9314</td><td>28.17/0.8087</td><td>34.18/0.9281</td><td>31.76/0.8908</td><td>28.64/0.8187</td><td>30.38/0.8911</td><td>31.56/0.9149</td><td>30.84/0.9797</td></tr>
<tr><td>X-Restormer</td><td>34.72/0.9360</td><td>28.81/0.8217</td><td>34.67/0.9330</td><td>32.26/0.8983</td><td>29.12/0.8293</td><td>30.85/0.8983</td><td>32.27/0.9229</td><td>38.24/0.9914</td></tr>
+ +All-in-One Restoration. We conduct experiments on an all-in-one restoration setting to show the effectiveness of different backbone networks in addressing various tasks simultaneously. Networks are trained on five tasks with varying degradation levels (i.e., $\times 2$ , $\times 4$ for SR and $\sigma \in (0,50)$ random level for denoising). The sampling probability for each task is the same, and the average performance on benchmark datasets is calculated. As shown in Tab. 8, with the relatively better task generality among the existing networks, Restormer exhibits better performance on the all-in-one restoration. By overcoming the limitation of Restormer, our X-Restormer demonstrates further advantages in handling multiple tasks concurrently, with its performance far exceeding other networks on all tasks. In contrast, the other networks are more or less affected by optimization conflicts across different tasks (e.g., SwinIR performs inferior to Restormer even on SR). These indicate that a general backbone network is of great significance for building a general model that process multiple image restoration tasks, which can effectively mitigate task conflicts with the performance drops. + +Summary. With enhanced spatial mapping capability, our X-Restormer can significantly outperform Restormer. Specifically, X-Restormer obtains performance gains against Restormer of 0.42dB (Manga109), 0.22dB (Urban100), 0.54dB (HIDE), 0.61dB (Rain100H) and 0.93dB (SOTS Indoor) on image SR, denoising, deblurring, deraining and dehazing, respectively, showing the effectiveness of our design. Despite its simplicity, X-Restormer obtains state-of-the-art performance on all these five tasks and present the best task generality among the compared methods. Furthermore, we show that a more general backbone network can also better handle multiple restoration tasks simultaneously. We hope it can inspire more works on the general image restoration backbone network design. + +# 6 Conclusion + +In this paper, we conduct a comparative study of existing image restoration backbone networks to design a general backbone network. Five representative networks are chosen for the benchmark experiment across selected five tasks. The results indicate that comprehensive functionality is crucial for designing a general restoration backbone network. We select Restormer as the baseline and introduce spatial self-attention into it to enhance the spatial information interaction capability. Experimental results show that our X-Restormer achieves significant performance improvement and presents the best task generality. + +# Acknowledgements + +This work was partially supported by National Natural Science Foundation of China (Grant No.62276251, 62272450), and the Joint Lab of CAS-HK. This work was also supported in part by Macau Science and Technology Development Fund under SKLIOTSC-2021-2023 and 0022/2022/A. + +# References + +1. Abuolaim, A., Brown, M.S.: Defocus deblurring using dual-pixel data. In: European Conference on Computer Vision. pp. 111-126. Springer (2020) +2. Bevilacqua, M., Roumy, A., Guillemot, C., Morel, M.L.A.: Low-complexity single-image super-resolution based on nonnegative neighbor embedding. In: British Machine Vision Conference (BMVC) (2012) +3. Cai, B., Xu, X., Jia, K., Qing, C., Tao, D.: Dehazenet: An end-to-end system for single image haze removal. IEEE transactions on image processing 25(11), 5187-5198 (2016) +4. 
Chen, H., Wang, Y., Guo, T., Xu, C., Deng, Y., Liu, Z., Ma, S., Xu, C., Xu, C., Gao, W.: Pre-trained image processing transformer. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 12299-12310 (2021)
5. Chen, L., Chu, X., Zhang, X., Sun, J.: Simple baselines for image restoration. In: European Conference on Computer Vision. pp. 17-33. Springer (2022)
6. Chen, L., Lu, X., Zhang, J., Chu, X., Chen, C.: Hinet: Half instance normalization network for image restoration. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 182-192 (2021)
7. Chen, X., Wang, X., Zhang, W., Kong, X., Qiao, Y., Zhou, J., Dong, C.: Hat: Hybrid attention transformer for image restoration. arXiv preprint arXiv:2309.05239 (2023)
8. Chen, X., Wang, X., Zhou, J., Qiao, Y., Dong, C.: Activating more pixels in image super-resolution transformer. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 22367-22377 (2023)
9. Chen, Z., Zhang, Y., Gu, J., Kong, L., Yang, X., Yu, F.: Dual aggregation transformer for image super-resolution. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 12312-12321 (2023)
10. Cho, S.J., Ji, S.W., Hong, J.P., Jung, S.W., Ko, S.J.: Rethinking coarse-to-fine approach in single image deblurring. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 4641-4650 (2021)
11. Chu, X., Chen, L., Chen, C., Lu, X.: Improving image restoration by revisiting global information aggregation. In: European Conference on Computer Vision. pp. 53-71. Springer (2022)
12. Dong, J., Pan, J.: Physics-based feature dehazing networks. In: European Conference on Computer Vision. pp. 188-204. Springer (2020)
13. Franzen, R.: Kodak lossless true color image suite. source: http://r0k.us/graphics/kodak 4(2) (1999)
14. Fu, X., Huang, J., Zeng, D., Huang, Y., Ding, X., Paisley, J.: Removing rain from single images via a deep detail network. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3855-3863 (2017)
15. Fu, X., Huang, J., Zeng, D., Huang, Y., Ding, X., Paisley, J.: Removing rain from single images via a deep detail network. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3855-3863 (2017)

16. Gu, J., Dong, C.: Interpreting super-resolution networks with local attribution maps. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9199-9208 (2021)
17. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)
18. Huang, J.B., Singh, A., Ahuja, N.: Single image super-resolution from transformed self-exemplars. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 5197-5206 (2015)
19. Huang, J.B., Singh, A., Ahuja, N.: Single image super-resolution from transformed self-exemplars. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 5197-5206 (2015)
20. Jiang, K., Wang, Z., Yi, P., Chen, C., Huang, B., Luo, Y., Ma, J., Jiang, J.: Multiscale progressive fusion network for single image deraining. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8346-8355 (2020)
21. Li, B., Ren, W., Fu, D., Tao, D., Feng, D., Zeng, W., Wang, Z.: Benchmarking single-image dehazing and beyond.
IEEE Transactions on Image Processing 28(1), 492-505 (2018) +22. Li, W., Lu, X., Qian, S., Lu, J., Zhang, X., Jia, J.: On efficient transformer-based image pre-training for low-level vision. arXiv preprint arXiv:2112.10175 (2021) +23. Li, Y., Zhang, Y., Timofte, R., Van Gool, L., Tu, Z., Du, K., Wang, H., Chen, H., Li, W., Wang, X., et al.: Ntire 2023 challenge on image denoising: Methods and results. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 1904-1920 (2023) +24. Li, Y., Tan, R.T., Guo, X., Lu, J., Brown, M.S.: Rain streak removal using layer priors. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2736-2744 (2016) +25. Liang, J., Cao, J., Sun, G., Zhang, K., Van Gool, L., Timofte, R.: Swinir: Image restoration using swin transformer. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 1833-1844 (2021) +26. Lim, B., Son, S., Kim, H., Nah, S., Mu Lee, K.: Enhanced deep residual networks for single image super-resolution. In: Proceedings of the IEEE conference on computer vision and pattern recognition workshops. pp. 136-144 (2017) +27. Lin, Z., Garg, P., Banerjee, A., Magid, S.A., Sun, D., Zhang, Y., Van Gool, L., Wei, D., Pfister, H.: Revisiting rcan: Improved training for image super-resolution (2022) +28. Liu, J., Yang, W., Yang, S., Guo, Z.: Erase or fill? deep joint recurrent rain removal and reconstruction in videos. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3233-3242 (2018) +29. Liu, L., Xie, L., Zhang, X., Yuan, S., Chen, X., Zhou, W., Li, H., Tian, Q.: Tape: Task-agnostic prior embedding for image restoration. In: European Conference on Computer Vision. pp. 447-464. Springer (2022) +30. Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10012-10022 (2021) +31. Martin, D., Fowlkes, C., Tal, D., Malik, J.: A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In: Proceedings of the IEEE International Conference on Computer Vision. vol. 2, pp. 416-423. IEEE (2001) + +32. Martin, D., Fowlkes, C., Tal, D., Malik, J.: A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In: Proceedings of the IEEE International Conference on Computer Vision. vol. 2, pp. 416-423. IEEE (2001) +33. Matsui, Y., Ito, K., Aramaki, Y., Fujimoto, A., Ogawa, T., Yamasaki, T., Aizawa, K.: Sketch-based manga retrieval using manga109 dataset. Multimedia Tools and Applications 76(20), 21811-21838 (2017) +34. Nah, S., Hyun Kim, T., Mu Lee, K.: Deep multi-scale convolutional neural network for dynamic scene deblurring. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3883-3891 (2017) +35. Niu, B., Wen, W., Ren, W., Zhang, X., Yang, L., Wang, S., Zhang, K., Cao, X., Shen, H.: Single image super-resolution via a holistic attention network. In: European Conference on Computer Vision. pp. 191-207. Springer (2020) +36. Purohit, K., Suin, M., Rajagopalan, A., Boddeti, V.N.: Spatially-adaptive image restoration using distortion-guided networks. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 2309-2319 (2021) +37. 
Ren, D., Zuo, W., Hu, Q., Zhu, P., Meng, D.: Progressive image deraining networks: A better and simpler baseline. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 3937-3946 (2019) +38. Rim, J., Lee, H., Won, J., Cho, S.: Real-world blur dataset for learning and benchmarking deblurring algorithms. In: European conference on computer vision. pp. 184-201. Springer (2020) +39. Shen, Z., Wang, W., Lu, X., Shen, J., Ling, H., Xu, T., Shao, L.: Human-aware motion deblurring. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 5572-5581 (2019) +40. Song, Y., He, Z., Qian, H., Du, X.: Vision transformers for single image dehazing. IEEE Transactions on Image Processing 32, 1927-1941 (2023) +41. Tu, Z., Talebi, H., Zhang, H., Yang, F., Milanfar, P., Bovik, A., Li, Y.: Maxim: Multi-axis mlp for image processing. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5769-5780 (2022) +42. Wang, X., Xie, L., Yu, K., Chan, K.C., Loy, C.C., Dong, C.: BasicSR: Open source image and video restoration toolbox. https://github.com/XPixelGroup/BasicSR (2022) +43. Wang, Z., Cun, X., Bao, J., Zhou, W., Liu, J., Li, H.: Uformer: A general u-shaped transformer for image restoration. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 17683-17693 (2022) +44. Wu, H., Qu, Y., Lin, S., Zhou, J., Qiao, R., Zhang, Z., Xie, Y., Ma, L.: Contrastive learning for compact single image dehazing. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 10551-10560 (2021) +45. Yang, W., Tan, R.T., Feng, J., Liu, J., Guo, Z., Yan, S.: Deep joint rain detection and removal from a single image. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1357-1366 (2017) +46. Yang, W., Tan, R.T., Feng, J., Liu, J., Guo, Z., Yan, S.: Deep joint rain detection and removal from a single image. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1357-1366 (2017) +47. Yang, W., Tan, R.T., Feng, J., Liu, J., Guo, Z., Yan, S.: Deep joint rain detection and removal from a single image. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1357-1366 (2017) +48. Zamir, S.W., Arora, A., Khan, S., Hayat, M., Khan, F.S., Yang, M.H.: Restormer: Efficient transformer for high-resolution image restoration. In: Proceedings of the + +IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5728-5739 (2022) +49. Zamir, S.W., Arora, A., Khan, S., Hayat, M., Khan, F.S., Yang, M.H., Shao, L.: Multi-stage progressive image restoration. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 14821-14831 (2021) +50. Zeyde, R., Elad, M., Protter, M.: On single image scale-up using sparse-representations. In: International conference on curves and surfaces. pp. 711-730. Springer (2010) +51. Zhang, H., Patel, V.M.: Density-aware single image de-raining using a multi-stream dense network. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 695-704 (2018) +52. Zhang, K., Li, Y., Zuo, W., Zhang, L., Van Gool, L., Timofte, R.: Plug-and-play image restoration with deep denoiser prior. IEEE Transactions on Pattern Analysis and Machine Intelligence 44(10), 6360-6376 (2021) +53. Zhang, K., Zuo, W., Chen, Y., Meng, D., Zhang, L.: Beyond a gaussian denoiser: Residual learning of deep cnn for image denoising. 
IEEE transactions on image processing 26(7), 3142-3155 (2017) +54. Zhang, K., Zuo, W., Gu, S., Zhang, L.: Learning deep cnn denoiser prior for image restoration. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3929-3938 (2017) +55. Zhang, K., Zuo, W., Zhang, L.: Ffdnet: Toward a fast and flexible solution for cnn-based image denoising. IEEE Transactions on Image Processing 27(9), 4608-4622 (2018) +56. Zhang, L., Wu, X., Buades, A., Li, X.: Color demosaicking by local directional interpolation and nonlocal adaptive thresholding. Journal of Electronic imaging 20(2), 023016-023016 (2011) +57. Zhang, Y., Li, K., Li, K., Wang, L., Zhong, B., Fu, Y.: Image super-resolution using very deep residual channel attention networks. In: European conference on computer vision. pp. 286-301. Springer (2018) +58. Zhang, Y., Li, K., Li, K., Zhong, B., Fu, Y.: Residual non-local attention networks for image restoration. arXiv preprint arXiv:1903.10082 (2019) +59. Zhang, Y., Tian, Y., Kong, Y., Zhong, B., Fu, Y.: Residual dense network for image super-resolution. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2472-2481 (2018) +60. Zhang, Y., Tian, Y., Kong, Y., Zhong, B., Fu, Y.: Residual dense network for image restoration. IEEE transactions on pattern analysis and machine intelligence 43(7), 2480-2495 (2020) \ No newline at end of file diff --git a/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/images.zip b/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..4c0edac1f8f0e3c789003dfd6538f043f5d284ba --- /dev/null +++ b/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a8614e16717c14394dc1ecb58017aa0fb188130fd8e45f9bc2d2125bb1b9e98 +size 475175 diff --git a/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/layout.json b/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..5ef730a4c43258216084bcf2d1eeea70fc219996 --- /dev/null +++ b/2024/A Comparative Study of Image Restoration Networks for General Backbone Network Design/layout.json @@ -0,0 +1,9177 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 137, + 112, + 477, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 112, + 477, + 148 + ], + "spans": [ + { + "bbox": [ + 137, + 112, + 477, + 148 + ], + "type": "text", + "content": "A Comparative Study of Image Restoration Networks for General Backbone Network Design" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 162, + 167, + 451, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 167, + 451, + 193 + ], + "spans": [ + { + "bbox": [ + 162, + 167, + 451, + 193 + ], + "type": "text", + "content": "Xiangyu Chen" + }, + { + "bbox": [ + 162, + 167, + 451, + 193 + ], + "type": "inline_equation", + "content": "^{1,2,3*}" + }, + { + "bbox": [ + 162, + 167, + 451, + 193 + ], + "type": "text", + "content": " Zheyuan Li" + }, + { + "bbox": [ + 162, + 167, + 451, + 193 + ], + "type": "inline_equation", + "content": "^{2,1*}" + }, + { + "bbox": [ + 162, + 167, + 451, + 193 + ], + "type": "text", + "content": " Yuandong " + }, + { 
+ "bbox": [ + 162, + 167, + 451, + 193 + ], + "type": "inline_equation", + "content": "\\mathrm{Pu}^{3,4*}" + }, + { + "bbox": [ + 162, + 167, + 451, + 193 + ], + "type": "text", + "content": " Yihao Liu" + }, + { + "bbox": [ + 162, + 167, + 451, + 193 + ], + "type": "inline_equation", + "content": "^{2,3}" + }, + { + "bbox": [ + 162, + 167, + 451, + 193 + ], + "type": "text", + "content": " \nJiantao Zhou" + }, + { + "bbox": [ + 162, + 167, + 451, + 193 + ], + "type": "inline_equation", + "content": "^{1\\dagger}" + }, + { + "bbox": [ + 162, + 167, + 451, + 193 + ], + "type": "text", + "content": " Yu Qiao" + }, + { + "bbox": [ + 162, + 167, + 451, + 193 + ], + "type": "inline_equation", + "content": "^{2,3}" + }, + { + "bbox": [ + 162, + 167, + 451, + 193 + ], + "type": "text", + "content": " Chao Dong" + }, + { + "bbox": [ + 162, + 167, + 451, + 193 + ], + "type": "inline_equation", + "content": "^{2,3,5\\dagger}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 146, + 201, + 468, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 201, + 468, + 247 + ], + "spans": [ + { + "bbox": [ + 146, + 201, + 468, + 247 + ], + "type": "text", + "content": "1University of Macau 2Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences 3Shanghai Artificial Intelligence Laboratory 4Shanghai Jiao Tong University 5Shenzhen University of Advanced Technology https://github.com/Andrew0613/X-Restormer" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 160, + 278, + 455, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 278, + 455, + 453 + ], + "spans": [ + { + "bbox": [ + 160, + 278, + 455, + 453 + ], + "type": "text", + "content": "Abstract. Despite the significant progress made by deep models in various image restoration tasks, existing image restoration networks still face challenges in terms of task generality. An intuitive manifestation is that networks which excel in certain tasks often fail to deliver satisfactory results in others. To illustrate this point, we select five representative networks and conduct a comparative study on five classic image restoration tasks. First, we provide a detailed explanation of the characteristics of different image restoration tasks and backbone networks. Following this, we present the benchmark results and analyze the reasons behind the performance disparity of different models across various tasks. Drawing from this comparative study, we propose that a general image restoration backbone network needs to meet the functional requirements of diverse tasks. Based on this principle, we design a new general image restoration backbone network, X-Restormer. Extensive experiments demonstrate that X-Restormer possesses good task generality and achieves state-of-the-art performance across a variety of tasks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 475, + 230, + 488 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 475, + 230, + 488 + ], + "spans": [ + { + "bbox": [ + 132, + 475, + 230, + 488 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 502, + 482, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 502, + 482, + 646 + ], + "spans": [ + { + "bbox": [ + 130, + 502, + 482, + 646 + ], + "type": "text", + "content": "Image restoration aims to generate high-quality images from degraded images. 
In recent years, deep learning has achieved great success in this field, with numerous networks being proposed to address various image restoration tasks. Initially, networks are primarily designed to solve specific restoration tasks and are typically validated only on selected tasks. As deep learning techniques have continued to evolve, there has been an increasing focus on the development of general-purpose networks that can be applied to a broad range of tasks. This trend is particularly evident in the high-level vision field, where new backbone networks are being designed to support multiple tasks [17, 30], including classification, detection and segmentation. For image restoration, although more and more backbone networks can handle multiple restoration tasks, their task generality is still limited, as illustrated in Fig. 1. For instance, SwinIR [25] achieves" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 141, + 653, + 337, + 665 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 653, + 337, + 665 + ], + "spans": [ + { + "bbox": [ + 141, + 653, + 337, + 665 + ], + "type": "text", + "content": "* Equal contributions, † Corresponding author." + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 161, + 114, + 451, + 284 + ], + "blocks": [ + { + "bbox": [ + 161, + 114, + 451, + 284 + ], + "lines": [ + { + "bbox": [ + 161, + 114, + 451, + 284 + ], + "spans": [ + { + "bbox": [ + 161, + 114, + 451, + 284 + ], + "type": "image", + "image_path": "5121f81d60afe460e046d73c19696fcb689f97a14c00e0a08e365e2b499eb441.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 290, + 482, + 324 + ], + "lines": [ + { + "bbox": [ + 130, + 290, + 482, + 324 + ], + "spans": [ + { + "bbox": [ + 130, + 290, + 482, + 324 + ], + "type": "text", + "content": "Fig. 1: Relative performance difference of different backbone networks on five image restoration tasks1. The existing representative networks exhibit diverse performance on these tasks, while our method presents superior task generality." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 347, + 482, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 347, + 482, + 432 + ], + "spans": [ + { + "bbox": [ + 130, + 347, + 482, + 432 + ], + "type": "text", + "content": "state-of-the-art performance on image super-resolution (SR) but falls short on image deblurring and dehazing. Conversely, Restormer [48] performs exceptionally well on image dehazing and deraining but is less effective on image SR. This discrepancy can be attributed to the fact that the characteristics of image degradation vary across different image restoration tasks. While all image restoration tasks involve mapping degraded images to clean images, the requirements for the capability of backbone networks differ depending on specific tasks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 433, + 482, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 433, + 482, + 612 + ], + "spans": [ + { + "bbox": [ + 130, + 433, + 482, + 612 + ], + "type": "text", + "content": "Designing a general image restoration backbone network presents a significant challenge. 
However, the development of such a network holds considerable value, as it has the potential to greatly reduce costs associated with research and application. To achieve this goal, we first conduct a comparative study of mainstream backbone networks on the representative tasks, including image SR, denoising, deblurring, deraining and dehazing. These five tasks are chosen due to the distinct characteristics of their degradation. Five representative backbone networks are selected in the study, including MPRNet [49], Uformer [43], SwinIR [25], Restormer [48] and NAFNet [5]. These five networks encompass classic architectures such as U-shape architecture, plain residual-in-residual architecture and multi-stage progressive architecture. They also employ several common operators, including convolution, spatial self-attention and transposed self-attention [48]. We benchmark the five representative methods on the selected five tasks. The experimental results clearly reflect the performance disparity of different backbone networks on different tasks. We then conduct a detailed anal" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 140, + 100 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 222, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 222, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 222, + 100 + ], + "type": "text", + "content": "X.Chen et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 133, + 617, + 482, + 667 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 617, + 482, + 667 + ], + "spans": [ + { + "bbox": [ + 133, + 617, + 482, + 667 + ], + "type": "text", + "content": "1 We set the minimum average performance of the networks on test sets in Tab. 2 for the task (i) as the lower bound " + }, + { + "bbox": [ + 133, + 617, + 482, + 667 + ], + "type": "inline_equation", + "content": "P_{lower}^{(i)}" + }, + { + "bbox": [ + 133, + 617, + 482, + 667 + ], + "type": "text", + "content": ", and set the average performance of X-Restormer for each task as the upper bound " + }, + { + "bbox": [ + 133, + 617, + 482, + 667 + ], + "type": "inline_equation", + "content": "P_{upper}^{(i)}" + }, + { + "bbox": [ + 133, + 617, + 482, + 667 + ], + "type": "text", + "content": ". The ordinate of each point in the figure with performance " + }, + { + "bbox": [ + 133, + 617, + 482, + 667 + ], + "type": "inline_equation", + "content": "P^{(i)}" + }, + { + "bbox": [ + 133, + 617, + 482, + 667 + ], + "type": "text", + "content": " is calculated by " + }, + { + "bbox": [ + 133, + 617, + 482, + 667 + ], + "type": "inline_equation", + "content": "(P^{(i)} - P_{lower}^{(i)}) / P_{upper}^{(i)}" + }, + { + "bbox": [ + 133, + 617, + 482, + 667 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 479, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 479, + 163 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 479, + 163 + ], + "type": "text", + "content": "ysis of the characteristics of these tasks and these backbone networks to explain the reasons behind the performance differences. 
It is noteworthy that Restormer stands out in the comparative study, ranking within the top two across all five tasks. This superior performance can be attributed to several key designs. First, Restormer's U-shape architecture allows it to process large-size inputs, which is crucial for tasks that deal with large areas of degradation. Second, the network employs transposed self-attention that uses channel-wise features as tokens, achieving information interaction among channels and enabling mappings with a global receptive field. Additionally, the incorporation of numerous depth-wise convolutions gives the network considerable spatial information interaction ability. From a functional perspective, Restormer integrates the key capabilities of the other compared networks, thereby exhibiting commendable task generality in the comparative study. However, the spatial mapping ability of Restormer still appears somewhat deficient, as indicated by its quantitatively and qualitatively subpar performance in comparison to SwinIR on SR².

² In general, models' SR performance is highly related to their spatial mapping ability.

This inferiority is hypothesized to originate from the inherent challenge of detail reconstruction posed by the U-shape architecture, coupled with the relatively weak spatial mapping capability of depth-wise convolution, particularly when compared to spatial self-attention (i.e., window-based self-attention in SwinIR). To address this limitation, a plausible solution is the introduction of spatial self-attention into Restormer. To achieve this design, we alternately replace half of the transposed self-attention blocks with overlapping cross-attention blocks [8], which are proven to have strong spatial information interaction capability, to construct a new network, X-Restormer. Extensive experiments show that this simple modification significantly enhances the performance of Restormer without increasing the number of parameters. Moreover, our X-Restormer obtains state-of-the-art performance on all five tasks, exhibiting the best task generality.
Our main contributions can be summarized as follows:

- We conduct a comparative study by constructing an image restoration benchmark, highlighting the challenges faced by existing image restoration backbone networks in task generality.
- Based on the benchmark results, we perform a detailed analysis of the characteristics of different degradations and networks. We emphasize that the general image restoration backbone network design must meet the functional requirements of diverse tasks.
- By further enhancing the spatial mapping ability of Restormer, we design a preliminary general backbone network, X-Restormer. Without additional parameters, X-Restormer achieves significant performance improvement over existing networks and exhibits superior task generality.
Fig. 2: Selected five representative image restoration tasks with various degradation.

2 Related Work

Image restoration networks. In the past years, numerous deep networks have been proposed for various image restoration tasks such as image SR [8,26,59], denoising [43,53,54], deblurring [1,6], deraining [4,29,45] and dehazing [40,41,46]. Initially, most deep networks were designed for specific tasks [3,14,34,55,57]. Recently, with increasing attention to the task generality of networks, more and more methods have been developed to tackle multiple image restoration tasks. For instance, Zamir et al. [49] build a multi-stage CNN for deraining, deblurring and denoising. Wang et al. [43] design a U-shape Transformer for deraining, deblurring and denoising. Liang et al. [25] implement a Swin Transformer-based network that achieves state-of-the-art performance on SR, denoising and compression artifact reduction. Zamir et al. [48] propose a novel transposed self-attention to build a U-shape network for deraining, deblurring and denoising. Chen et al. [5] construct a U-shape CNN for denoising and deblurring. While existing methods have demonstrated some ability to generalize across several restoration tasks, their task generality remains limited.

Difference from previous network design research. While previous works have proposed networks that excel in various image restoration tasks, their primary focus is on constructing stronger networks to achieve performance breakthroughs on specific tasks. In contrast, this work pays more attention to the task generality of the backbone network, a vision different from previous works. More specifically, our objective is to explore the design principles and directions of general image restoration networks. We are not seeking to create powerful networks for peak performance on a single task or a few specific tasks, but rather to ensure satisfactory performance across a diverse range of tasks. Regarding the concrete implementation, we do not intend to construct complex network architectures or modules. Our preference, rather, is to enhance task generality through the simplest methodology available.

There are concurrent works that adopt similar ideas for specific image restoration tasks. DAT [9] combines spatial-window self-attention and channel-wise self-attention to handle image SR. IPT-V2 [23] designs a spatial-channel Transformer block to build a denoising network and won the NTIRE 2023 image denoising challenge [23]. However, the motivation and specific network implementation of our work are distinct from these studies.
3 Image Restoration Benchmark

In this section, we first briefly introduce several image restoration tasks, each with its own representative degradation characteristics. Subsequently, we classify mainstream image restoration networks based on two key aspects: architecture and core operator. On this basis, we select five representative networks and conduct a benchmark experiment across five different tasks. We describe the experimental setup and explain its rationale. Finally, we present the benchmark results and conduct a detailed analysis of them.

3.1 Overview of Image Restoration Tasks

We select five representative tasks for the benchmark experiments. These tasks, exemplified in Fig. 2, are chosen for two primary reasons. First, they are very common image restoration tasks with widely accepted evaluation schemes. Second, the degradation characteristics of these tasks are diverse and differ greatly from each other. As such, they provide a robust way to evaluate the task generality of image restoration backbone networks.
Let $I_{GT}$ denote the ground truth image and $I_{LQ}$ denote the degraded image, where $I_{GT} \in \mathbb{R}^{H \times W \times 3}$. The degradation model of classic image SR can be represented as:

$$I_{LQ} = (I_{GT} \otimes k)\downarrow_{s}, \tag{1}$$

where $I_{LQ} \in \mathbb{R}^{\frac{H}{s} \times \frac{W}{s} \times 3}$ represents the low-resolution image, $k$ denotes the bicubic downsampling kernel and $\downarrow_{s}$ represents downscaling with factor $s$. This degradation is highly correlated with local information and leads to a significant loss of high-frequency information. Thus, SR networks require strong spatial information interaction capability to reconstruct as many details as possible.
The degradation model of image denoising can be denoted as:

$$I_{LQ} = I_{GT} + n, \tag{2}$$

where $n \in \mathbb{R}^{H \times W \times 3}$ represents the noise map. For Gaussian denoising, the noise values are content-independent. The downsampling-upsampling process of the U-shape architecture inherently aids noise removal. Besides, strong spatial information interaction capability can also enhance high-frequency content reconstruction for denoising networks.

The degradation model of image deblurring (for motion deblurring) can be denoted as:

$$I_{LQ} = \sum_{t} f_{motion}^{t}(I_{GT}), \tag{3}$$

where $f_{motion}^{t}(\cdot)$ represents the motion function at different instants of the continuous exposure time. This degradation is related to the global motion offset of the image. Therefore, the ability to utilize large-range and even global information is important for deblurring networks.
Fig. 3: The core operators in image restoration networks.

The degradation model of image deraining can be simply denoted as:

$$I_{LQ} = I_{GT} + R, \tag{4}$$

where $R$ denotes the additive rain streak layer, simulated by physics models such as [24, 28]. The difference between this degradation and Gaussian noise is that the added $R$ is not evenly distributed over the image and is correlated with the image content. Complicated rain streaks also place high demands on the complexity of deraining networks.
The degradation model of image dehazing, based on the atmospheric scattering model, can be denoted as:

$$I_{LQ} = I_{GT} \cdot t(I_{GT}) + A\,(1 - t(I_{GT})), \tag{5}$$

where $t(\cdot)$ represents the transmission function, $t(I_{GT})$ is associated with the distance from the scene point to the camera, and $A$ is the global atmospheric light. This degradation is intrinsically linked to the depth information within the image. Consequently, the incorporation of global information is important for dehazing networks.
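To make the five degradation models concrete, here is a minimal NumPy sketch of each. All kernels, streak maps and constants are illustrative stand-ins, not the synthesis pipelines (e.g., MATLAB bicubic downsampling or Rain13K streaks) used in the benchmark:

```python
import numpy as np

rng = np.random.default_rng(0)

def downsample(img, s=4):
    """Eq. (1), simplified: plain s-fold decimation stands in for (I_GT ⊗ k)↓s."""
    return img[::s, ::s]

def add_gaussian_noise(img, sigma=50 / 255):
    """Eq. (2): content-independent additive Gaussian noise (level 50 on [0, 255])."""
    return img + rng.normal(0.0, sigma, img.shape)

def motion_blur(frames):
    """Eq. (3), discretized: average of frames shifted over the exposure time
    (a normalized version of the sum in the equation)."""
    return np.mean(frames, axis=0)

def add_rain(img, R):
    """Eq. (4): additive rain layer R, unevenly distributed over the image."""
    return img + R

def add_haze(img, depth, beta=1.0, A=0.8):
    """Eq. (5): atmospheric scattering, with transmission t = exp(-beta * depth)."""
    t = np.exp(-beta * depth)[..., None]
    return img * t + A * (1.0 - t)

gt = rng.random((64, 64, 3))                          # toy ground-truth image in [0, 1]
shifted = [np.roll(gt, d, axis=1) for d in range(8)]  # horizontal camera motion
R = 0.8 * (rng.random((64, 64, 1)) > 0.97)            # sparse, uneven "rain" mask
depth = np.tile(np.linspace(0.0, 2.0, 64), (64, 1))   # toy depth map
outputs = [downsample(gt), add_gaussian_noise(gt), motion_blur(shifted),
           add_rain(gt, R), add_haze(gt, depth)]
print([o.shape for o in outputs])
```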
3.2 Characteristics of Typical Backbone Networks

The architectures of mainstream image restoration networks can be broadly classified into three categories: U-shape encoder-decoder, plain residual-in-residual and multi-stage progressive. Schematic diagrams of these architectures are provided in the supplementary material. The U-shape encoder-decoder architecture performs down-sampling and up-sampling operations on features, enabling networks to handle features of varying scales. This architecture allows networks to accept large-size input, and the effective receptive field of the network expands rapidly with down-sampling; typical U-shape networks include Uformer [43] and Restormer [48]. The multi-stage architecture divides the entire network into several sub-networks and progressively processes features; it is primarily used for image deraining and deblurring, and common networks based on this architecture include MPRNet [49] and HINet [6]. The plain residual-in-residual architecture is composed of several residual groups, each of which consists of several residual blocks. This architecture maintains the original feature size throughout, which is favorable for the reconstruction of high-frequency information but comes at a high computational cost; typical networks include RCAN [57] and SwinIR [25].
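As an illustration of the first category, here is a toy U-shape encoder-decoder in PyTorch. `TinyUShape` is a hypothetical, minimal stand-in for the down-sample/process/up-sample-with-skips pattern, not the actual Uformer or Restormer design:

```python
import torch
import torch.nn as nn

class TinyUShape(nn.Module):
    """Minimal U-shape encoder-decoder: features are down-sampled, processed,
    then up-sampled, with a skip connection restoring high-resolution detail."""
    def __init__(self, dim=16):
        super().__init__()
        self.enc1 = nn.Conv2d(3, dim, 3, padding=1)
        self.down = nn.Conv2d(dim, dim * 2, 3, stride=2, padding=1)  # halves H, W
        self.body = nn.Conv2d(dim * 2, dim * 2, 3, padding=1)
        self.up = nn.ConvTranspose2d(dim * 2, dim, 2, stride=2)      # restores H, W
        self.dec1 = nn.Conv2d(dim * 2, 3, 3, padding=1)

    def forward(self, x):
        e1 = self.enc1(x)
        b = self.body(self.down(e1))
        d1 = torch.cat([self.up(b), e1], dim=1)  # skip connection
        return self.dec1(d1)

print(TinyUShape()(torch.randn(1, 3, 64, 64)).shape)  # torch.Size([1, 3, 64, 64])
```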
Table 1: Architectures and core operators of the five selected backbone networks.

| Network | Architecture | Core operator |
| --- | --- | --- |
| MPRNet | Multi-stage | Convolution |
| Uformer | U-shape | Spatial self-attention |
| SwinIR | Plain residual-in-residual | Spatial self-attention |
| Restormer | U-shape | Transposed self-attention |
| NAFNet | U-shape | Convolution |

The core operators for constructing an image restoration network can be mainly categorized into three types: convolution, spatial self-attention and transposed self-attention. These operators are shown in Fig. 3. Convolution computes a fixed-size filter and processes the entire feature map through a sliding window; it is the major component of many networks, such as RDN [60]. Spatial self-attention is typically implemented as window self-attention in image restoration tasks. It calculates the attention matrix within a fixed window size, generating content-aware weights that are functionally similar to a large-kernel dynamic filter. This operator has strong local fitting ability and shows clear advantages on SR and denoising [7]. Transposed self-attention treats the entire feature map of each channel as a token and calculates the attention matrix along the channel dimension. This operator directly deals with global features, and when combined with depth-wise convolution, it shows remarkable performance on multiple restoration tasks [48]. The five representative backbone networks selected for the benchmark experiment cover the abovementioned architectures and core operators, as presented in Tab. 1.
3.3 Experimental Settings

For image SR, we conduct experiments at upscaling factor ×4. We use the DF2K dataset (the same as SwinIR [25]) to train the models. Low-resolution images are generated from the ground truth images using bicubic downsampling in MATLAB. For U-shape networks, we first up-sample the input low-resolution images through bilinear interpolation. The performance is reported on the Y channel. For denoising, we adopt the DFWB dataset for training. Noisy images are generated by adding Gaussian noise with a noise level of 50. For deblurring, we use the motion deblurring dataset GoPro [34] to train the models. For deraining, we conduct experiments using the synthetic rain dataset Rain13K and calculate the performance on the Y channel, following Restormer [48]. For dehazing, we use the indoor training set (ITS) of the RESIDE dataset [21], the same as [40].

To maximize the capability of these networks, we use the official codes and training configurations provided by the different methods to train the models³. Note that all models are trained without using any pre-training strategy (e.g., ×2 pretraining for SR) or special tricks (e.g., EMA in SwinIR and TLC in NAFNet) for fair comparison. In addition, we find that different methods may not use exactly the same test sets and metric calculations in their papers to report performance. Therefore, we retest all models on exactly the same data and calculate metrics using the popular open-source toolbox BasicSR [42].

³ We tried to train all networks with a unified configuration, but found it unreasonable. The performance of networks may vary greatly with different training configurations and optimization strategies, making it difficult to determine a fair unified setting.
Table 2: Quantitative results (PSNR, dB) of the benchmark experiments. The best and second-best results per column are in **bold** and *italics*.

| Method | SR Set14 | SR Urban100 | Denoising CBSD68 | Denoising Urban100 | Deblurring GoPro | Deblurring HIDE | Deraining Test100 | Deraining Rain100H | Dehazing SOTS Indoor |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| MPRNet | 28.90 | 26.88 | 28.48 | 29.71 | 32.66 | *30.96* | 30.29 | 30.43 | *40.34* |
| SwinIR | **29.07** | **27.47** | *28.56* | 29.88 | 31.66 | 29.41 | 30.05 | 30.45 | 29.14 |
| Uformer | 27.14 | 25.60 | 28.55 | *29.98* | *33.05* | 30.89 | 27.93 | 24.06 | 33.58 |
| Restormer | *29.06* | *27.32* | **28.60** | **30.02** | 32.92 | **31.22** | **32.03** | *31.48* | **41.87** |
| NAFNet | 29.03 | 27.00 | 28.52 | 29.65 | **33.08** | **31.22** | *30.33* | **32.83** | 38.97 |
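For reference, here is a minimal sketch of the PSNR metric reported above, computed on plain full-image RGB arrays; the benchmark's exact conventions (e.g., Y-channel evaluation for SR and deraining via BasicSR) differ:

```python
import numpy as np

def psnr(ref, out, peak=1.0):
    """Peak signal-to-noise ratio in dB for images scaled to [0, peak]."""
    mse = np.mean((np.asarray(ref, np.float64) - np.asarray(out, np.float64)) ** 2)
    return 10.0 * np.log10(peak ** 2 / mse)
```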
", + "image_path": "d23af968c0ed91f6d0b19eadaec21574784afea58425bd08c08f6151f101c394.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 240, + 258, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 240, + 258, + 251 + ], + "spans": [ + { + "bbox": [ + 132, + 240, + 258, + 251 + ], + "type": "text", + "content": "3.4 Benchmark Results" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 259, + 482, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 259, + 482, + 415 + ], + "spans": [ + { + "bbox": [ + 130, + 259, + 482, + 415 + ], + "type": "text", + "content": "We present the quantitative results of the benchmark experiments in Tab. 2. (Due to space constraints, complete results are provided in " + }, + { + "bbox": [ + 130, + 259, + 482, + 415 + ], + "type": "inline_equation", + "content": "\\text{Supp.}" + }, + { + "bbox": [ + 130, + 259, + 482, + 415 + ], + "type": "text", + "content": ") Several important observations can be made from the results: 1) Different networks exhibit varying performance on different tasks. For instance, SwinIR performs best on SR but worst on deblurring and dehazing. Uformer excels on denoising and deblurring but performs poorly on deraining and SR. 2) Networks with U-shape and multi-stage architectures present clear advantages on deblurring and dehazing. 3) MPRNet and NAFNet, which are mainly based on convolution operators, exhibit moderate performance across all tasks without outstanding results. 4) SwinIR, which employs plain architecture and spatial self-attention operators, outperforms other networks by a significant margin on SR. 5) The overall performance of Restormer is outstanding. Except for consistently being weaker than SwinIR on SR, it obtains considerable performance on almost all other tasks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 431, + 204, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 431, + 204, + 443 + ], + "spans": [ + { + "bbox": [ + 132, + 431, + 204, + 443 + ], + "type": "text", + "content": "3.5 Analysis" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 450, + 481, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 450, + 481, + 472 + ], + "spans": [ + { + "bbox": [ + 130, + 450, + 481, + 472 + ], + "type": "text", + "content": "In this section, we explain the above observations by analyzing the characteristics of different tasks and backbone networks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 474, + 482, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 474, + 482, + 593 + ], + "spans": [ + { + "bbox": [ + 130, + 474, + 482, + 593 + ], + "type": "text", + "content": "The degradation of SR lies in the compression of local information, resulting in a large loss of high-frequency details. Therefore, SR networks often require strong spatial information interaction capability, or even generative capability. The U-shape architecture, which incorporates multiple downsampling operations, may undermine the reconstruction of high-frequency information and intuitively escalates the difficulty of detail reconstruction. In contrast, the plain architecture that maintains feature sizes benefits SR. Besides, window self-attention has demonstrated a superior local fitting ability than convolution [8]. 
As a result, SwinIR, which is based on a plain structure and employs spatial self-attention operators, exhibits a distinct advantage on SR.

Fig. 4: Visual and LAM [16] comparisons between Restormer and SwinIR. The LAM results and DI values indicate that Restormer exploits significantly more information than SwinIR. However, SwinIR reconstructs many more details than Restormer.

Denoising entails smoothing the image to eliminate high-frequency noise and integrating low-frequency information to reconstruct a clear image. This task places no explicit unique requirement on the network, while its performance intuitively benefits from effective spatial information interaction. It is conjectured that the high performance of Restormer on denoising can be attributed to its ability to better smooth noise through channel-wise processing, akin to operating in the frequency domain. In contrast, SwinIR and Uformer perform well due to the robust spatial information interaction ability of their spatial self-attention.
Deblurring (specifically motion blur here) involves addressing global motion shifts in the image. As a result, the ability to handle large-size inputs and to use global or multi-scale information is necessary for deblurring networks. Thus, the networks based on the U-shape architecture all perform well on this task. Conversely, SwinIR, which employs the plain architecture and focuses more on local information processing, performs much worse than the other networks.

Similar phenomena can be observed for dehazing. Due to the involvement of depth information in the haze model, the ability to use large-range or even global information is crucial. Besides, dehazing networks are required to handle low-frequency transformations, including alterations in color and contrast, both of which constitute global mappings. Therefore, SwinIR and Uformer, which rely more on local spatial information interaction, perform poorly on this task. On the contrary, Restormer exhibits exceptional performance.

Deraining is relatively unique in that rain is unevenly distributed in images, with significant differences between different raindrops and streaks. Thus, there is no clear pattern in the performance of different networks on deraining. Nevertheless, networks with higher complexity present better performance.

Based on the above results and analysis, we can infer that acceptable performance of a backbone network on a specific task is predicated on meeting the functional requirements of that task. It is notable that Restormer obtains exceptional task generality. This can be attributed to several factors: 1) the U-shape architecture enables the network to accommodate large-size input; 2) the transposed self-attention allows direct interaction of global information; 3) the presence of depth-wise convolution enables the network to process spatial information effectively. In summary, owing to its comprehensive functionality, Restormer is capable of meeting the diverse requirements of different tasks.
Fig. 5: The network structure of X-Restormer. To enhance the spatial mapping ability of Restormer and create a more general network, we replace half of the transposed self-attention blocks in Restormer with spatial self-attention blocks. For TSA, we retain the multi-Dconv head transposed attention (MDTA) used in Restormer. For SSA, we adopt the overlapping cross-attention (OCA) in HAT [8].

4 General Backbone Network Design

Based on the benchmark experiments, we believe that the principle of designing a general backbone network should be to ensure that the network can fulfill the functional requirements of all tasks. As Restormer shows relatively good task generality, we select it as the starting point to design a more general network. By pinpointing and addressing the limitations of Restormer, we present an initial version of a general image restoration backbone network in this section.

Limitation of Restormer. In the benchmark experiments, Restormer shows inferior performance to SwinIR on SR, particularly on Urban100. The qualitative comparisons in Fig. 4 also reveal this phenomenon. From the visual and LAM [16] results, we can observe that Restormer can exploit large-range and even global information for the reconstruction.
However, compared to SwinIR, it fails to reconstruct fine textures, even for self-repeated patterns. This discrepancy can be attributed, on the one hand, to the U-shape architecture adopted by Restormer, which increases the difficulty of reconstructing high-frequency information. On the other hand, Restormer relies on depth-wise convolution for spatial information interaction, whose spatial mapping capability is relatively weaker than the spatial self-attention in SwinIR. Considering that the U-shape architecture is indispensable for some tasks, we retain this architectural design for task generality. To overcome the limitation of Restormer and design a more powerful backbone network, we choose to further enhance its spatial information interaction ability. An intuitive and feasible solution is to incorporate a spatial self-attention module into Restormer.

Network structure. In Fig. 5, we present the structure of our proposed backbone network, denoted as X-Restormer. We choose the U-shape architecture to build the network. In contrast to Restormer, we replace half of the transposed self-attention blocks (TSAB) with spatial self-attention blocks (SSAB) to
Given an input feature " + }, + { + "bbox": [ + 132, + 116, + 479, + 140 + ], + "type": "inline_equation", + "content": "F_{in}" + }, + { + "bbox": [ + 132, + 116, + 479, + 140 + ], + "type": "text", + "content": ", the two blocks process it alternately as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 247, + 146, + 480, + 158 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 146, + 480, + 158 + ], + "spans": [ + { + "bbox": [ + 247, + 146, + 480, + 158 + ], + "type": "interline_equation", + "content": "F _ {t} = F _ {i n} + T S A \\left(L N \\left(F _ {i n}\\right)\\right), \\tag {6}", + "image_path": "4433c98376a9f803374b3ae536297bb1ef7f3717852021648203859b8fed8ffc.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 242, + 160, + 480, + 172 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 160, + 480, + 172 + ], + "spans": [ + { + "bbox": [ + 242, + 160, + 480, + 172 + ], + "type": "interline_equation", + "content": "F _ {t \\_ o u t} = F _ {t} + F F N (L N (F _ {t})), \\tag {7}", + "image_path": "f50d07ed5afea0e80eeb9939ecbcf4a3c54eb8ff130dbdccd3612e8e5714f1a3.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 236, + 175, + 479, + 188 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 175, + 479, + 188 + ], + "spans": [ + { + "bbox": [ + 236, + 175, + 479, + 188 + ], + "type": "interline_equation", + "content": "F _ {s} = F _ {t \\_ o u t} + S S A (L N \\left(F _ {t \\_ o u t}\\right)), \\tag {8}", + "image_path": "4fbc23ea72076af7ef67b74bdeb33a213a7e4f767bbf0e26aa6b31a9fbd6a2a6.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 245, + 190, + 479, + 202 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 245, + 190, + 479, + 202 + ], + "spans": [ + { + "bbox": [ + 245, + 190, + 479, + 202 + ], + "type": "interline_equation", + "content": "F _ {o u t} = F _ {s} + F F N \\left(L N \\left(F _ {s}\\right)\\right), \\tag {9}", + "image_path": "30ea8f55dd745d90396095c3906e22a52a4a466bdf02cd376472e853b88ae8bc.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "spans": [ + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "inline_equation", + "content": "F_{t}" + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "inline_equation", + "content": "F_{t\\_out}" + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "inline_equation", + "content": "F_{s}" + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "inline_equation", + "content": "F_{s\\_out}" + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "text", + "content": " represent the intermediate feature in TSAB, the output of TSAB, the intermediate feature in SSAB and the output of SSAB. 
" + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "inline_equation", + "content": "F_{out}" + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "text", + "content": " means the output of the two consecutive blocks, and also serves as the input for the following two blocks. " + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "inline_equation", + "content": "TSA(\\cdot)" + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "inline_equation", + "content": "SSA(\\cdot)" + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "text", + "content": " indicate transposed self-attention (TSA) and spatial self-attention (SSA) modules. " + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "inline_equation", + "content": "LN(\\cdot)" + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "text", + "content": " denotes layer normalization and " + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "inline_equation", + "content": "FFN(\\cdot)" + }, + { + "bbox": [ + 130, + 206, + 480, + 277 + ], + "type": "text", + "content": " represents the feed-forward network." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "spans": [ + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "content": "Specifically, we adopt the Multi-Dconv Transpose Attention (MDTA) as the TSA module. It first generates query " + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "inline_equation", + "content": "(Q)" + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "content": ", key " + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "inline_equation", + "content": "(K)" + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "content": " and value " + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "inline_equation", + "content": "(V)" + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "content": " by applying " + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "content": " convolutions followed by " + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "content": " depth-wise convolutions. Then, the channel attention matrix of size " + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{C \\times C}" + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "content": " is calculated by the dot-product of reshaped " + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "content": " followed by a Softmax function. The schematic of TSA is shown in Fig. 3. 
Finally, the result is generated by the dot-product of the attention matrix and " + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "content": ". For SSA, we adopt the Overlapping Cross-Attention (OCA) introduced in the HAT model [8]. We choose OCA because the shifted window mechanism in SwinIR is not intuitively suitable for our TSA-SSA consecutive blocks, and HAT demonstrates the effectiveness and superiority of OCA. For the specific calculation, " + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "content": " is produced by partitioning the input into non-overlapping windows, while " + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "content": " are generated by partitioning the input into overlapping windows with a manually set overlapping size. Apart from the different window partition methods, the calculation of OCA is essentially identical to that of standard window self-attention. For FFN, we employ the Gated-Dconv Feed-forward Network (GDFN) architecture, as used in Restormer. Instead of using two " + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "content": " convolutions to construct an MLP, GDFN first processes input features through two " + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "content": " depth-wise convolutions and " + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "content": " convolutions. Then, the resulting features are combined via element-wise multiplication and pass through another " + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 130, + 278, + 481, + 517 + ], + "type": "text", + "content": " convolution to produce the final output." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 130, + 517, + 480, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 517, + 480, + 578 + ], + "spans": [ + { + "bbox": [ + 130, + 517, + 480, + 578 + ], + "type": "text", + "content": "We have also tried multiple design choices for SSAB and TSAB. Experiments can be found in " + }, + { + "bbox": [ + 130, + 517, + 480, + 578 + ], + "type": "inline_equation", + "content": "\\text{Supp}" + }, + { + "bbox": [ + 130, + 517, + 480, + 578 + ], + "type": "text", + "content": ". We emphasize that our design of X-Restormer is not to develop novel architectures or modules to improve the performance on certain tasks, but to enhance the task generality of the network according to the principle of general backbone network design through as simple means as possible." 
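As a reading aid, the following is a minimal PyTorch-style sketch (not the authors' released code) of the alternating structure in Eqs. (6)-(9). Here `tsa`, `ssa`, and `make_ffn` are placeholders standing in for the MDTA, OCA, and GDFN modules described above, and the token layout plus plain `nn.LayerNorm` are simplifying assumptions.

```python
import torch.nn as nn

class TransformerBlock(nn.Module):
    """Pre-norm residual block: x + Attn(LN(x)) followed by x + FFN(LN(x))."""
    def __init__(self, dim, attn, ffn):
        super().__init__()
        self.norm1, self.attn = nn.LayerNorm(dim), attn
        self.norm2, self.ffn = nn.LayerNorm(dim), ffn

    def forward(self, x):                 # x: (B, N, C) tokens (simplification)
        x = x + self.attn(self.norm1(x))  # Eq. (6) for TSAB / Eq. (8) for SSAB
        x = x + self.ffn(self.norm2(x))   # Eq. (7) for TSAB / Eq. (9) for SSAB
        return x

class ConsecutivePair(nn.Module):
    """A TSAB followed by an SSAB, the repeated unit of X-Restormer."""
    def __init__(self, dim, tsa, ssa, make_ffn):
        super().__init__()
        self.tsab = TransformerBlock(dim, tsa, make_ffn())  # channel (transposed) attention slot
        self.ssab = TransformerBlock(dim, ssa, make_ffn())  # spatial (window) attention slot

    def forward(self, f_in):
        f_t_out = self.tsab(f_in)    # Eqs. (6)-(7)
        f_out = self.ssab(f_t_out)   # Eqs. (8)-(9)
        return f_out
```

Instantiating the pair with `nn.Identity()` placeholders reproduces only the residual and normalization wiring; the real attention and gated feed-forward computations live in the MDTA/OCA/GDFN modules referenced in the text.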
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 592, + 230, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 592, + 230, + 605 + ], + "spans": [ + { + "bbox": [ + 132, + 592, + 230, + 605 + ], + "type": "text", + "content": "5 Experiments" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 613, + 261, + 625 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 613, + 261, + 625 + ], + "spans": [ + { + "bbox": [ + 132, + 613, + 261, + 625 + ], + "type": "text", + "content": "5.1 Experimental Setup" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 629, + 480, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 629, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 130, + 629, + 480, + 665 + ], + "type": "text", + "content": "We conduct experiments of the proposed X-Restormer on the same datasets used in the benchmark experiment. For the network implementation, the network employs a 4-level encoder-decoder with three times down-sampling and" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 228, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 228, + 91, + 448, + 102 + ], + "type": "text", + "content": "A Comparative Study of Image Restoration Networks" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 479, + 100 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 134, + 147, + 321, + 210 + ], + "blocks": [ + { + "bbox": [ + 132, + 114, + 321, + 137 + ], + "lines": [ + { + "bbox": [ + 132, + 114, + 321, + 137 + ], + "spans": [ + { + "bbox": [ + 132, + 114, + 321, + 137 + ], + "type": "text", + "content": "Table 3: Quantitative results on " + }, + { + "bbox": [ + 132, + 114, + 321, + 137 + ], + "type": "inline_equation", + "content": "\\times 4" + }, + { + "bbox": [ + 132, + 114, + 321, + 137 + ], + "type": "text", + "content": " image SR. * means the model pretrained on " + }, + { + "bbox": [ + 132, + 114, + 321, + 137 + ], + "type": "inline_equation", + "content": "\\times 2\\mathrm{{SR}}" + }, + { + "bbox": [ + 132, + 114, + 321, + 137 + ], + "type": "text", + "content": " ." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 134, + 147, + 321, + 210 + ], + "lines": [ + { + "bbox": [ + 134, + 147, + 321, + 210 + ], + "spans": [ + { + "bbox": [ + 134, + 147, + 321, + 210 + ], + "type": "table", + "html": "
Model | Set5 | Set14 | BSD100 | Urban100 | Manga109
RCAN | 32.63/0.9002 | 28.87/0.7889 | 27.77/0.7436 | 26.82/0.8087 | 31.22/0.9173
RCAN-it | 32.69/0.9007 | 28.99/0.7922 | 27.87/0.7459 | 27.16/0.8168 | 31.78/0.9217
SwinIR* | 32.92/0.9044 | 29.09/0.7950 | 27.92/0.7489 | 27.45/0.8254 | 32.03/0.9260
IPT | 32.64/- | 29.01/- | 27.82/- | 27.26/- | -/-
EDT | 32.82/0.9031 | 29.09/0.7939 | 27.91/0.7483 | 27.46/0.8246 | 32.05/0.9254
NAFNet | 32.79/0.9010 | 29.03/0.7919 | 27.86/0.7463 | 27.00/0.8112 | 31.77/0.9216
SwinIR | 32.88/0.9041 | 29.07/0.7944 | 27.93/0.7490 | 27.47/0.8258 | 31.96/0.9255
Restormer | 32.94/0.9039 | 29.06/0.7934 | 27.91/0.7482 | 27.32/0.8199 | 31.96/0.9244
X-Restormer | 33.16/0.9058 | 29.17/0.7963 | 28.00/0.7512 | 27.66/0.8291 | 32.38/0.9279
", + "image_path": "6a556fb570e72e6956897830b6b5d907dadbc2bcc360e0588e4e8fa37436de28.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 323, + 147, + 481, + 210 + ], + "blocks": [ + { + "bbox": [ + 322, + 114, + 481, + 137 + ], + "lines": [ + { + "bbox": [ + 322, + 114, + 481, + 137 + ], + "spans": [ + { + "bbox": [ + 322, + 114, + 481, + 137 + ], + "type": "text", + "content": "Table 4: Quantitative results on image denoising with the noise level " + }, + { + "bbox": [ + 322, + 114, + 481, + 137 + ], + "type": "inline_equation", + "content": "\\sigma = 50" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 323, + 147, + 481, + 210 + ], + "lines": [ + { + "bbox": [ + 323, + 147, + 481, + 210 + ], + "spans": [ + { + "bbox": [ + 323, + 147, + 481, + 210 + ], + "type": "table", + "html": "
Model | CBSD68 | Kodak24 | McMaster | Urban100
FFDNet | 27.96/- | 28.98/- | 29.18/- | 28.05/-
RNAN | 28.27/- | 29.58/- | 29.72/- | 29.08/-
RDN | 28.31/- | 29.66/- | -/- | 29.38/-
IPT | 28.39/- | 29.64/- | 29.98/- | 29.71/-
DRUNet | 28.51/- | 29.86/- | 30.08/- | 29.61/-
SwinIR | 28.56/0.8118 | 29.95/0.8221 | 30.20/0.8489 | 29.88/0.8861
Uformer | 28.55/0.8130 | 29.97/0.8244 | 30.16/0.8485 | 29.98/0.8900
Restormer | 28.60/0.8130 | 30.01/0.8237 | 30.30/0.8517 | 30.02/0.8898
X-Restormer | 28.63/0.8138 | 30.05/0.8245 | 30.33/0.8518 | 30.24/0.8928
", + "image_path": "360f87f234d35f297f415b082aba6ed13fd7e22e41404a2bd8faed090b8a1856.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 134, + 252, + 481, + 315 + ], + "blocks": [ + { + "bbox": [ + 132, + 220, + 481, + 243 + ], + "lines": [ + { + "bbox": [ + 132, + 220, + 481, + 243 + ], + "spans": [ + { + "bbox": [ + 132, + 220, + 481, + 243 + ], + "type": "text", + "content": "Table 5: Quantitative results on image Table 6: Quantitative results on image deraindeblurring (motion blur). ing." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 134, + 252, + 481, + 315 + ], + "lines": [ + { + "bbox": [ + 134, + 252, + 481, + 315 + ], + "spans": [ + { + "bbox": [ + 134, + 252, + 481, + 315 + ], + "type": "table", + "html": "
Model | GoPro | HIDE | RealBlur-R | RealBlur-J
SPAIR | 32.06/0.953 | 30.29/0.931 | -/- | 28.81/0.875
MIMO-UNet+ | 32.45/0.957 | 29.99/0.930 | 35.54/0.947 | 27.63/0.837
IPT | 32.52/- | -/- | -/- | -/-
MPRNet | 32.66/0.959 | 30.96/0.939 | 35.99/0.952 | 28.70/0.873
Uformer | 33.05/0.942 | 30.89/0.920 | 36.19/0.956 | 29.09/0.886
NAFNet | 33.08/0.942 | 31.22/0.924 | 35.97/0.952 | 28.32/0.857
Restormer | 32.92/0.940 | 31.22/0.923 | 36.19/0.957 | 28.96/0.879
X-Restormer | 33.44/0.946 | 31.76/0.930 | 36.27/0.958 | 28.87/0.878
", + "image_path": "16d32f30c1df4b972783ec42405c5c8c2c8029720a133721b1ae340ca2287c35.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 135, + 346, + 481, + 370 + ], + "blocks": [ + { + "bbox": [ + 205, + 324, + 408, + 335 + ], + "lines": [ + { + "bbox": [ + 205, + 324, + 408, + 335 + ], + "spans": [ + { + "bbox": [ + 205, + 324, + 408, + 335 + ], + "type": "text", + "content": "Table 7: Quantitative results on image dehazing." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 135, + 346, + 481, + 370 + ], + "lines": [ + { + "bbox": [ + 135, + 346, + 481, + 370 + ], + "spans": [ + { + "bbox": [ + 135, + 346, + 481, + 370 + ], + "type": "table", + "html": "
Model | PFDN | FFA-Net | AECR-Net | MAXIM | DehazeFormer | MPRNet | NAFNet | Restormer | X-Restormer
SOTS Indoor | 32.68/0.976 | 36.39/0.989 | 37.17/0.990 | 39.72/- | 40.05/0.996 | 40.34/0.994 | 38.97/0.994 | 41.97/0.994 | 42.90/0.995
", + "image_path": "89f4a8927a215e30130e8db74b87425cd2de4f8ea4b71ae98519145785c77361.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "spans": [ + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "text", + "content": "up-sampling. To maintain a similar number of parameters as Restormer, from level-1 to level-4 (i.e., " + }, + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "inline_equation", + "content": "L_{1} \\sim L_{4}" + }, + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "text", + "content": " in the figure) the numbers of consecutive blocks (containing a TSAB and a SSAB) are [2, 4, 4, 4] and the number of refinement blocks (i.e., " + }, + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "inline_equation", + "content": "L_{r}" + }, + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "text", + "content": ") is 4. Attention heads in TSA and SSA are both [1, 2, 4, 8], and channel numbers are [48, 96, 192, 384]. For OCA, the window size and the overlapping ratio are set to 8 and 0.5 as in HAT. The channel expansion factor in GDFN is 2.66. The overall parameters are 26.06M, slightly less than Restormer of 26.13M. We adopt the same training settings as Restormer in the benchmark experiment to optimize the model. We use the AdamW optimizer with " + }, + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.9" + }, + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 0.99" + }, + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "text", + "content": ", utilizing an initial learning rate of " + }, + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "inline_equation", + "content": "3e^{-4}" + }, + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "text", + "content": ". The learning rate decay follows a cosine scheduler with intervals at 92k and 208k iterations, and the total training iterations are 300K. The input patch size is " + }, + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "text", + "content": " and the batch size is 32. For data augmentation, we use horizontal and vertical flips. We utilize the " + }, + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "inline_equation", + "content": "L_{1}" + }, + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "text", + "content": " loss function to train the model. Notably, we do not adopt any training tricks (e.g., " + }, + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "inline_equation", + "content": "\\times 2" + }, + { + "bbox": [ + 130, + 391, + 482, + 571 + ], + "type": "text", + "content": " SR pretraining or EMA strategy) or testing tricks (e.g., TLC [11])." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 586, + 270, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 586, + 270, + 598 + ], + "spans": [ + { + "bbox": [ + 132, + 586, + 270, + 598 + ], + "type": "text", + "content": "5.2 Experimental Results" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 130, + 605, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 605, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 605, + 481, + 666 + ], + "type": "text", + "content": "We compare our X-Restormer with the top three models in the benchmark experiments (based on the same test configurations) as well as several state-of-the-art approaches for each task (based on the reported performance in their papers) in this section. PSNR(dB)/SSIM is provided in following tables. The best and second-best performance results are in **bold** and **underline**." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 222, + 101 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 222, + 101 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 222, + 101 + ], + "type": "text", + "content": "X.Chen et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 115, + 483, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 115, + 483, + 259 + ], + "spans": [ + { + "bbox": [ + 130, + 115, + 483, + 259 + ], + "type": "text", + "content": "Image SR. In Tab. 3, we present the quantitative results of " + }, + { + "bbox": [ + 130, + 115, + 483, + 259 + ], + "type": "inline_equation", + "content": "\\times 4" + }, + { + "bbox": [ + 130, + 115, + 483, + 259 + ], + "type": "text", + "content": " SR on five benchmark datasets: Set5 [2], Set14 [50], BSD100 [31], Urban100 [18] and Manga109 [33]. The state-of-the-art approaches, including RCAN [57], RCAN-it [27], SwinIR [35], IPT [4] and EDT [22] are compared in this experiment. X-Restormer significantly outperforms Restormer by " + }, + { + "bbox": [ + 130, + 115, + 483, + 259 + ], + "type": "inline_equation", + "content": "0.22\\mathrm{dB}" + }, + { + "bbox": [ + 130, + 115, + 483, + 259 + ], + "type": "text", + "content": " on Set5, " + }, + { + "bbox": [ + 130, + 115, + 483, + 259 + ], + "type": "inline_equation", + "content": "0.34\\mathrm{dB}" + }, + { + "bbox": [ + 130, + 115, + 483, + 259 + ], + "type": "text", + "content": " on Urban100 and " + }, + { + "bbox": [ + 130, + 115, + 483, + 259 + ], + "type": "inline_equation", + "content": "0.42\\mathrm{dB}" + }, + { + "bbox": [ + 130, + 115, + 483, + 259 + ], + "type": "text", + "content": " on Manga109. This demonstrates the effectiveness of our design in enhancing the spatial mapping ability of Restormer. 
Furthermore, X-Restormer surpasses the SOTA method EDT by " + }, + { + "bbox": [ + 130, + 115, + 483, + 259 + ], + "type": "inline_equation", + "content": "0.2\\mathrm{dB}" + }, + { + "bbox": [ + 130, + 115, + 483, + 259 + ], + "type": "text", + "content": " on Urban100 and " + }, + { + "bbox": [ + 130, + 115, + 483, + 259 + ], + "type": "inline_equation", + "content": "0.35\\mathrm{dB}" + }, + { + "bbox": [ + 130, + 115, + 483, + 259 + ], + "type": "text", + "content": " on Manga109, indicating the effectiveness of X-Restormer on SR. Despite this, we point out that our method still cannot beat the most powerful SR approaches, e.g., HAT. This is due to the inevitable weakening of SR performance for the U-shape architecture. In terms of SR, the plain residual in residual architecture is still more effective." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 262, + 482, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 262, + 482, + 345 + ], + "spans": [ + { + "bbox": [ + 130, + 262, + 482, + 345 + ], + "type": "text", + "content": "Image denoising. In Tab. 4, we provide the quantitative results of Gaussian denoising with the noise level " + }, + { + "bbox": [ + 130, + 262, + 482, + 345 + ], + "type": "inline_equation", + "content": "\\sigma = 50" + }, + { + "bbox": [ + 130, + 262, + 482, + 345 + ], + "type": "text", + "content": " on four benchmark datasets: CBSD68 [32], Kodak24 [13], McMaster [56] and Urban100 [18]. The state-of-the-art methods: FFDNet [55], RNAN [58], RDN [60], IPT [4] and DRUNet [52] are compared in this experiment. X-Restormer achieves the state-of-the-art performance, surpassing SwinIR by 0.36dB and outperforming Restormer by 0.22dB on Urban100. This demonstrates the superiority of X-Restormer on image denoising." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 347, + 482, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 347, + 482, + 455 + ], + "spans": [ + { + "bbox": [ + 130, + 347, + 482, + 455 + ], + "type": "text", + "content": "Image deblurring. In Tab. 5, we compare the results of X-Restormer with the state-of-the-art methods: SPAIR [36], MIMO-UNet+ [10], IPT [4] and MPR-Net [49] on both synthetic datasets (Gopro [34] and HIDE [39]) and real-world datasets (RealBlur-R and RealBlur-J [38]). X-Restormer achieves large performance gains over the other models on synthetic datasets, with an improvement of " + }, + { + "bbox": [ + 130, + 347, + 482, + 455 + ], + "type": "inline_equation", + "content": "0.36\\mathrm{dB}" + }, + { + "bbox": [ + 130, + 347, + 482, + 455 + ], + "type": "text", + "content": " on Gopro compared to " + }, + { + "bbox": [ + 130, + 347, + 482, + 455 + ], + "type": "inline_equation", + "content": "\\mathrm{NAFNet}^4" + }, + { + "bbox": [ + 130, + 347, + 482, + 455 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 130, + 347, + 482, + 455 + ], + "type": "inline_equation", + "content": "0.54\\mathrm{dB}" + }, + { + "bbox": [ + 130, + 347, + 482, + 455 + ], + "type": "text", + "content": " on HIDE compared to Restormer. Besides, our X-Restormer obtains the state-of-the-art performance on RealBlur-R and considerable performance on RealBlur-J, showing the effectiveness of our method on real-world motion deblurring scenarios." 
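For reference, the PSNR values reported in dB throughout Tables 3-8 follow the standard definition sketched below; whether a given task is evaluated on the Y channel or in RGB is not restated in this section, so the snippet is illustrative only.

```python
import numpy as np

def psnr(ref, test, data_range=255.0):
    """Peak signal-to-noise ratio in dB between two same-shaped images."""
    ref = np.asarray(ref, dtype=np.float64)
    test = np.asarray(test, dtype=np.float64)
    mse = np.mean((ref - test) ** 2)
    if mse == 0:
        return float("inf")
    return 10.0 * np.log10((data_range ** 2) / mse)
```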
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 456, + 482, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 456, + 482, + 553 + ], + "spans": [ + { + "bbox": [ + 130, + 456, + 482, + 553 + ], + "type": "text", + "content": "Image deraining. In Tab. 6, we present the quantitative results of deraining on Test100 [19], Rain100L [47], Rain100H [47], Test1200 [51] and Test2800 [15]. The state-of-the-art methods: PreNet [37], MSPFN [20], MPRNet [49] and SPAIR [36] are compared. X-Restormer outperforms the other models on Test100, Rain100H and Rain100L but performs inferior to Restormer on Test1200 and Test2800. This discrepancy is due to the variations in degradation produced by different rain models. Nonetheless, X-Restormer exhibits comparable performance to state-of-the-art methods, showing its effectiveness on image deraining." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 130, + 555, + 482, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 555, + 482, + 639 + ], + "spans": [ + { + "bbox": [ + 130, + 555, + 482, + 639 + ], + "type": "text", + "content": "Image dehazing. In Tab. 7, we provide the quantitative results on SOTS Indoor [21]. We compare the state-of-the-art approaches: PFDN [12], FFA-Net [46], AECR-Net [44], MAXIM [41] and DehazeFormer [40] in this experiment. Notably, X-Restormer model significantly outperforms Restormer by a large margin of " + }, + { + "bbox": [ + 130, + 555, + 482, + 639 + ], + "type": "inline_equation", + "content": "0.93\\mathrm{dB}" + }, + { + "bbox": [ + 130, + 555, + 482, + 639 + ], + "type": "text", + "content": ". When compared to the state-of-the-art dehazing method DehazeFormer, our method achieves a breakthrough performance gain of " + }, + { + "bbox": [ + 130, + 555, + 482, + 639 + ], + "type": "inline_equation", + "content": "2.85\\mathrm{dB}" + }, + { + "bbox": [ + 130, + 555, + 482, + 639 + ], + "type": "text", + "content": ". These results demonstrate the superiority of X-Restormer for image dehazing." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 228, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 228, + 91, + 447, + 102 + ], + "type": "text", + "content": "A Comparative Study of Image Restoration Networks" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 91, + 481, + 100 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 133, + 653, + 478, + 665 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 653, + 478, + 665 + ], + "spans": [ + { + "bbox": [ + 133, + 653, + 478, + 665 + ], + "type": "text", + "content": "4 By using TLC, on Gopro/HIDE, NAFNet: 33.69/31.32, X-Restormer: 33.89/31.87." + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 134, + 136, + 482, + 201 + ], + "blocks": [ + { + "bbox": [ + 190, + 114, + 421, + 125 + ], + "lines": [ + { + "bbox": [ + 190, + 114, + 421, + 125 + ], + "spans": [ + { + "bbox": [ + 190, + 114, + 421, + 125 + ], + "type": "text", + "content": "Table 8: Quantitative results on All-in-One restoration." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 134, + 136, + 482, + 201 + ], + "lines": [ + { + "bbox": [ + 134, + 136, + 482, + 201 + ], + "spans": [ + { + "bbox": [ + 134, + 136, + 482, + 201 + ], + "type": "table", + "html": "
Model | SR ×2 | SR ×4 | Denoising σ = 15 | Denoising σ = 25 | Denoising σ = 50 | Deblurring | Deraining | Dehazing
MPRNet | 33.68/0.9300 | 28.17/0.8043 | 34.27/0.9280 | 31.82/0.8901 | 28.60/0.8119 | 30.00/0.8812 | 31.20/0.9068 | 35.06/0.9874
SwinIR | 33.83/0.9301 | 28.14/0.8043 | 34.27/0.9283 | 31.83/0.8906 | 28.59/0.8143 | 29.06/0.8519 | 30.03/0.8983 | 31.48/0.9823
Uformer | 29.99/0.8805 | 27.88/0.7949 | 33.86/0.9254 | 31.42/0.8863 | 27.87/0.7891 | 29.64/0.8725 | 27.53/0.8569 | 29.92/0.9714
Restormer | 34.51/0.9341 | 28.70/0.8179 | 34.43/0.9303 | 32.02/0.8942 | 28.87/0.8222 | 30.54/0.8902 | 31.91/0.9134 | 36.95/0.9897
NAFNet | 34.12/0.9314 | 28.17/0.8087 | 34.18/0.9281 | 31.76/0.8908 | 28.64/0.8187 | 30.38/0.8911 | 31.56/0.9149 | 30.84/0.9797
X-Restormer | 34.72/0.9360 | 28.81/0.8217 | 34.67/0.9330 | 32.26/0.8983 | 29.12/0.8293 | 30.85/0.8983 | 32.27/0.9229 | 38.24/0.9914
", + "image_path": "90710e3a2bff86da7730c6d009a9eb241699bbbf6e525cde0b7894789accdcbc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 130, + 224, + 481, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 224, + 481, + 403 + ], + "spans": [ + { + "bbox": [ + 130, + 224, + 481, + 403 + ], + "type": "text", + "content": "All-in-One Restoration. We conduct experiments on an all-in-one restoration setting to show the effectiveness of different backbone networks in addressing various tasks simultaneously. Networks are trained on five tasks with varying degradation levels (i.e., " + }, + { + "bbox": [ + 130, + 224, + 481, + 403 + ], + "type": "inline_equation", + "content": "\\times 2" + }, + { + "bbox": [ + 130, + 224, + 481, + 403 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 130, + 224, + 481, + 403 + ], + "type": "inline_equation", + "content": "\\times 4" + }, + { + "bbox": [ + 130, + 224, + 481, + 403 + ], + "type": "text", + "content": " for SR and " + }, + { + "bbox": [ + 130, + 224, + 481, + 403 + ], + "type": "inline_equation", + "content": "\\sigma \\in (0,50)" + }, + { + "bbox": [ + 130, + 224, + 481, + 403 + ], + "type": "text", + "content": " random level for denoising). The sampling probability for each task is the same, and the average performance on benchmark datasets is calculated. As shown in Tab. 8, with the relatively better task generality among the existing networks, Restormer exhibits better performance on the all-in-one restoration. By overcoming the limitation of Restormer, our X-Restormer demonstrates further advantages in handling multiple tasks concurrently, with its performance far exceeding other networks on all tasks. In contrast, the other networks are more or less affected by optimization conflicts across different tasks (e.g., SwinIR performs inferior to Restormer even on SR). These indicate that a general backbone network is of great significance for building a general model that process multiple image restoration tasks, which can effectively mitigate task conflicts with the performance drops." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 130, + 403, + 481, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 403, + 481, + 523 + ], + "spans": [ + { + "bbox": [ + 130, + 403, + 481, + 523 + ], + "type": "text", + "content": "Summary. With enhanced spatial mapping capability, our X-Restormer can significantly outperform Restormer. Specifically, X-Restormer obtains performance gains against Restormer of 0.42dB (Manga109), 0.22dB (Urban100), 0.54dB (HIDE), 0.61dB (Rain100H) and 0.93dB (SOTS Indoor) on image SR, denoising, deblurring, deraining and dehazing, respectively, showing the effectiveness of our design. Despite its simplicity, X-Restormer obtains state-of-the-art performance on all these five tasks and present the best task generality among the compared methods. Furthermore, we show that a more general backbone network can also better handle multiple restoration tasks simultaneously. We hope it can inspire more works on the general image restoration backbone network design." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 543, + 220, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 543, + 220, + 556 + ], + "spans": [ + { + "bbox": [ + 132, + 543, + 220, + 556 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 130, + 570, + 481, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 570, + 481, + 666 + ], + "spans": [ + { + "bbox": [ + 130, + 570, + 481, + 666 + ], + "type": "text", + "content": "In this paper, we conduct a comparative study of existing image restoration backbone networks to design a general backbone network. Five representative networks are chosen for the benchmark experiment across selected five tasks. The results indicate that comprehensive functionality is crucial for designing a general restoration backbone network. We select Restormer as the baseline and introduce spatial self-attention into it to enhance the spatial information interaction capability. Experimental results show that our X-Restormer achieves significant performance improvement and presents the best task generality." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "type": "text", + "content": "X.Chen et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "spans": [ + { + "bbox": [ + 133, + 114, + 246, + 129 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 137, + 481, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 137, + 481, + 185 + ], + "spans": [ + { + "bbox": [ + 132, + 137, + 481, + 185 + ], + "type": "text", + "content": "This work was partially supported by National Natural Science Foundation of China (Grant No.62276251, 62272450), and the Joint Lab of CAS-HK. This work was also supported in part by Macau Science and Technology Development Fund under SKLIOTSC-2021-2023 and 0022/2022/A." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 201, + 197, + 214 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 201, + 197, + 214 + ], + "spans": [ + { + "bbox": [ + 133, + 201, + 197, + 214 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 224, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 138, + 224, + 480, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 224, + 480, + 245 + ], + "spans": [ + { + "bbox": [ + 138, + 224, + 480, + 245 + ], + "type": "text", + "content": "1. Abuolaim, A., Brown, M.S.: Defocus deblurring using dual-pixel data. In: European Conference on Computer Vision. pp. 111-126. 
Springer (2020)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 138, + 246, + 480, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 246, + 480, + 277 + ], + "spans": [ + { + "bbox": [ + 138, + 246, + 480, + 277 + ], + "type": "text", + "content": "2. Bevilacqua, M., Roumy, A., Guillemot, C., Morel, M.L.A.: Low-complexity single-image super-resolution based on nonnegative neighbor embedding. In: British Machine Vision Conference (BMVC) (2012)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 278, + 480, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 278, + 480, + 309 + ], + "spans": [ + { + "bbox": [ + 138, + 278, + 480, + 309 + ], + "type": "text", + "content": "3. Cai, B., Xu, X., Jia, K., Qing, C., Tao, D.: Dehazenet: An end-to-end system for single image haze removal. IEEE transactions on image processing 25(11), 5187-5198 (2016)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 310, + 480, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 310, + 480, + 342 + ], + "spans": [ + { + "bbox": [ + 138, + 310, + 480, + 342 + ], + "type": "text", + "content": "4. Chen, H., Wang, Y., Guo, T., Xu, C., Deng, Y., Liu, Z., Ma, S., Xu, C., Xu, C., Gao, W.: Pre-trained image processing transformer. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 12299-12310 (2021)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 138, + 343, + 480, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 343, + 480, + 363 + ], + "spans": [ + { + "bbox": [ + 138, + 343, + 480, + 363 + ], + "type": "text", + "content": "5. Chen, L., Chu, X., Zhang, X., Sun, J.: Simple baselines for image restoration. In: European Conference on Computer Vision. pp. 17-33. Springer (2022)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 138, + 364, + 480, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 364, + 480, + 396 + ], + "spans": [ + { + "bbox": [ + 138, + 364, + 480, + 396 + ], + "type": "text", + "content": "6. Chen, L., Lu, X., Zhang, J., Chu, X., Chen, C.: Hinet: Half instance normalization network for image restoration. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 182-192 (2021)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 396, + 480, + 428 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 396, + 480, + 428 + ], + "spans": [ + { + "bbox": [ + 138, + 396, + 480, + 428 + ], + "type": "text", + "content": "7. Chen, X., Wang, X., Zhang, W., Kong, X., Qiao, Y., Zhou, J., Dong, C.: Hat: Hybrid attention transformer for image restoration. arXiv preprint arXiv:2309.05239 (2023)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 429, + 480, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 429, + 480, + 460 + ], + "spans": [ + { + "bbox": [ + 138, + 429, + 480, + 460 + ], + "type": "text", + "content": "8. Chen, X., Wang, X., Zhou, J., Qiao, Y., Dong, C.: Activating more pixels in image super-resolution transformer. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 
22367-22377 (2023)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 138, + 460, + 480, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 460, + 480, + 493 + ], + "spans": [ + { + "bbox": [ + 138, + 460, + 480, + 493 + ], + "type": "text", + "content": "9. Chen, Z., Zhang, Y., Gu, J., Kong, L., Yang, X., Yu, F.: Dual aggregation transformer for image super-resolution. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 12312-12321 (2023)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 138, + 493, + 480, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 493, + 480, + 525 + ], + "spans": [ + { + "bbox": [ + 138, + 493, + 480, + 525 + ], + "type": "text", + "content": "0. Cho, S.J., Ji, S.W., Hong, J.P., Jung, S.W., Ko, S.J.: Rethinking coarse-to-fine approach in single image deblurring. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 4641-4650 (2021)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 525, + 480, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 525, + 480, + 557 + ], + "spans": [ + { + "bbox": [ + 138, + 525, + 480, + 557 + ], + "type": "text", + "content": "1. Chu, X., Chen, L., Chen, C., Lu, X.: Improving image restoration by revisiting global information aggregation. In: European Conference on Computer Vision. pp. 53-71. Springer (2022)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 558, + 480, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 558, + 480, + 578 + ], + "spans": [ + { + "bbox": [ + 138, + 558, + 480, + 578 + ], + "type": "text", + "content": "2. Dong, J., Pan, J.: Physics-based feature dehazing networks. In: European Conference on Computer Vision. pp. 188-204. Springer (2020)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 138, + 579, + 480, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 579, + 480, + 600 + ], + "spans": [ + { + "bbox": [ + 138, + 579, + 480, + 600 + ], + "type": "text", + "content": "3. Franzen, R.: Kodak lossless true color image suite. source: http://r0k.us/graphics/kodak 4(2) (1999)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 138, + 601, + 480, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 601, + 480, + 632 + ], + "spans": [ + { + "bbox": [ + 138, + 601, + 480, + 632 + ], + "type": "text", + "content": "4. Fu, X., Huang, J., Zeng, D., Huang, Y., Ding, X., Paisley, J.: Removing rain from single images via a deep detail network. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3855-3863 (2017)" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 138, + 633, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 633, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 138, + 633, + 480, + 665 + ], + "type": "text", + "content": "5. Fu, X., Huang, J., Zeng, D., Huang, Y., Ding, X., Paisley, J.: Removing rain from single images via a deep detail network. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 
3855-3863 (2017)" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 228, + 91, + 447, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 91, + 447, + 102 + ], + "spans": [ + { + "bbox": [ + 228, + 91, + 447, + 102 + ], + "type": "text", + "content": "A Comparative Study of Image Restoration Networks" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 117, + 480, + 665 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 133, + 117, + 480, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 117, + 480, + 149 + ], + "spans": [ + { + "bbox": [ + 133, + 117, + 480, + 149 + ], + "type": "text", + "content": "16. Gu, J., Dong, C.: Interpreting super-resolution networks with local attribution maps. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9199-9208 (2021)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 149, + 480, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 149, + 480, + 182 + ], + "spans": [ + { + "bbox": [ + 133, + 149, + 480, + 182 + ], + "type": "text", + "content": "17. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 182, + 480, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 182, + 480, + 214 + ], + "spans": [ + { + "bbox": [ + 133, + 182, + 480, + 214 + ], + "type": "text", + "content": "18. Huang, J.B., Singh, A., Ahuja, N.: Single image super-resolution from transformed self-exemplars. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 5197-5206 (2015)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 133, + 214, + 480, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 214, + 480, + 246 + ], + "spans": [ + { + "bbox": [ + 133, + 214, + 480, + 246 + ], + "type": "text", + "content": "19. Huang, J.B., Singh, A., Ahuja, N.: Single image super-resolution from transformed self-exemplars. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 5197-5206 (2015)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 246, + 480, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 246, + 480, + 289 + ], + "spans": [ + { + "bbox": [ + 132, + 246, + 480, + 289 + ], + "type": "text", + "content": "20. Jiang, K., Wang, Z., Yi, P., Chen, C., Huang, B., Luo, Y., Ma, J., Jiang, J.: Multiscale progressive fusion network for single image deraining. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 
8346-8355 (2020)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 289, + 480, + 321 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 289, + 480, + 321 + ], + "spans": [ + { + "bbox": [ + 132, + 289, + 480, + 321 + ], + "type": "text", + "content": "21. Li, B., Ren, W., Fu, D., Tao, D., Feng, D., Zeng, W., Wang, Z.: Benchmarking single-image dehazing and beyond. IEEE Transactions on Image Processing 28(1), 492-505 (2018)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 321, + 480, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 321, + 480, + 342 + ], + "spans": [ + { + "bbox": [ + 132, + 321, + 480, + 342 + ], + "type": "text", + "content": "22. Li, W., Lu, X., Qian, S., Lu, J., Zhang, X., Jia, J.: On efficient transformer-based image pre-training for low-level vision. arXiv preprint arXiv:2112.10175 (2021)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 342, + 480, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 342, + 480, + 386 + ], + "spans": [ + { + "bbox": [ + 132, + 342, + 480, + 386 + ], + "type": "text", + "content": "23. Li, Y., Zhang, Y., Timofte, R., Van Gool, L., Tu, Z., Du, K., Wang, H., Chen, H., Li, W., Wang, X., et al.: Ntire 2023 challenge on image denoising: Methods and results. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 1904-1920 (2023)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 386, + 480, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 386, + 480, + 418 + ], + "spans": [ + { + "bbox": [ + 132, + 386, + 480, + 418 + ], + "type": "text", + "content": "24. Li, Y., Tan, R.T., Guo, X., Lu, J., Brown, M.S.: Rain streak removal using layer priors. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2736-2744 (2016)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 418, + 480, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 418, + 480, + 450 + ], + "spans": [ + { + "bbox": [ + 132, + 418, + 480, + 450 + ], + "type": "text", + "content": "25. Liang, J., Cao, J., Sun, G., Zhang, K., Van Gool, L., Timofte, R.: Swinir: Image restoration using swin transformer. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 1833-1844 (2021)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 450, + 480, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 450, + 480, + 482 + ], + "spans": [ + { + "bbox": [ + 132, + 450, + 480, + 482 + ], + "type": "text", + "content": "26. Lim, B., Son, S., Kim, H., Nah, S., Mu Lee, K.: Enhanced deep residual networks for single image super-resolution. In: Proceedings of the IEEE conference on computer vision and pattern recognition workshops. pp. 136-144 (2017)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 482, + 480, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 482, + 480, + 514 + ], + "spans": [ + { + "bbox": [ + 132, + 482, + 480, + 514 + ], + "type": "text", + "content": "27. 
Lin, Z., Garg, P., Banerjee, A., Magid, S.A., Sun, D., Zhang, Y., Van Gool, L., Wei, D., Pfister, H.: Revisiting rcan: Improved training for image super-resolution (2022)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 514, + 480, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 514, + 480, + 547 + ], + "spans": [ + { + "bbox": [ + 132, + 514, + 480, + 547 + ], + "type": "text", + "content": "28. Liu, J., Yang, W., Yang, S., Guo, Z.: Erase or fill? deep joint recurrent rain removal and reconstruction in videos. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3233-3242 (2018)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 547, + 480, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 547, + 480, + 579 + ], + "spans": [ + { + "bbox": [ + 132, + 547, + 480, + 579 + ], + "type": "text", + "content": "29. Liu, L., Xie, L., Zhang, X., Yuan, S., Chen, X., Zhou, W., Li, H., Tian, Q.: Tape: Task-agnostic prior embedding for image restoration. In: European Conference on Computer Vision. pp. 447-464. Springer (2022)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 579, + 480, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 579, + 480, + 622 + ], + "spans": [ + { + "bbox": [ + 132, + 579, + 480, + 622 + ], + "type": "text", + "content": "30. Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10012-10022 (2021)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 622, + 480, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 622, + 480, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 622, + 480, + 665 + ], + "type": "text", + "content": "31. Martin, D., Fowlkes, C., Tal, D., Malik, J.: A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In: Proceedings of the IEEE International Conference on Computer Vision. vol. 2, pp. 416-423. IEEE (2001)" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "type": "text", + "content": "X.Chen et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 665 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 130, + 116, + 482, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 482, + 161 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 482, + 161 + ], + "type": "text", + "content": "32. Martin, D., Fowlkes, C., Tal, D., Malik, J.: A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. 
In: Proceedings of the IEEE International Conference on Computer Vision. vol. 2, pp. 416-423. IEEE (2001)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 130, + 162, + 481, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 162, + 481, + 194 + ], + "spans": [ + { + "bbox": [ + 130, + 162, + 481, + 194 + ], + "type": "text", + "content": "33. Matsui, Y., Ito, K., Aramaki, Y., Fujimoto, A., Ogawa, T., Yamasaki, T., Aizawa, K.: Sketch-based manga retrieval using manga109 dataset. Multimedia Tools and Applications 76(20), 21811-21838 (2017)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 194, + 481, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 194, + 481, + 226 + ], + "spans": [ + { + "bbox": [ + 132, + 194, + 481, + 226 + ], + "type": "text", + "content": "34. Nah, S., Hyun Kim, T., Mu Lee, K.: Deep multi-scale convolutional neural network for dynamic scene deblurring. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3883-3891 (2017)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 227, + 481, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 227, + 481, + 259 + ], + "spans": [ + { + "bbox": [ + 132, + 227, + 481, + 259 + ], + "type": "text", + "content": "35. Niu, B., Wen, W., Ren, W., Zhang, X., Yang, L., Wang, S., Zhang, K., Cao, X., Shen, H.: Single image super-resolution via a holistic attention network. In: European Conference on Computer Vision. pp. 191-207. Springer (2020)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 259, + 481, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 259, + 481, + 293 + ], + "spans": [ + { + "bbox": [ + 132, + 259, + 481, + 293 + ], + "type": "text", + "content": "36. Purohit, K., Suin, M., Rajagopalan, A., Boddeti, V.N.: Spatially-adaptive image restoration using distortion-guided networks. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 2309-2319 (2021)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 293, + 481, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 293, + 481, + 326 + ], + "spans": [ + { + "bbox": [ + 132, + 293, + 481, + 326 + ], + "type": "text", + "content": "37. Ren, D., Zuo, W., Hu, Q., Zhu, P., Meng, D.: Progressive image deraining networks: A better and simpler baseline. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 3937-3946 (2019)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 326, + 481, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 326, + 481, + 358 + ], + "spans": [ + { + "bbox": [ + 132, + 326, + 481, + 358 + ], + "type": "text", + "content": "38. Rim, J., Lee, H., Won, J., Cho, S.: Real-world blur dataset for learning and benchmarking deblurring algorithms. In: European conference on computer vision. pp. 184-201. Springer (2020)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 358, + 481, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 358, + 481, + 392 + ], + "spans": [ + { + "bbox": [ + 132, + 358, + 481, + 392 + ], + "type": "text", + "content": "39. Shen, Z., Wang, W., Lu, X., Shen, J., Ling, H., Xu, T., Shao, L.: Human-aware motion deblurring. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 
5572-5581 (2019)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 392, + 481, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 392, + 481, + 413 + ], + "spans": [ + { + "bbox": [ + 132, + 392, + 481, + 413 + ], + "type": "text", + "content": "40. Song, Y., He, Z., Qian, H., Du, X.: Vision transformers for single image dehazing. IEEE Transactions on Image Processing 32, 1927-1941 (2023)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 413, + 481, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 413, + 481, + 446 + ], + "spans": [ + { + "bbox": [ + 132, + 413, + 481, + 446 + ], + "type": "text", + "content": "41. Tu, Z., Talebi, H., Zhang, H., Yang, F., Milanfar, P., Bovik, A., Li, Y.: Maxim: Multi-axis mlp for image processing. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5769-5780 (2022)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 447, + 481, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 447, + 481, + 479 + ], + "spans": [ + { + "bbox": [ + 132, + 447, + 481, + 479 + ], + "type": "text", + "content": "42. Wang, X., Xie, L., Yu, K., Chan, K.C., Loy, C.C., Dong, C.: BasicSR: Open source image and video restoration toolbox. https://github.com/XPixelGroup/BasicSR (2022)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 479, + 481, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 479, + 481, + 512 + ], + "spans": [ + { + "bbox": [ + 132, + 479, + 481, + 512 + ], + "type": "text", + "content": "43. Wang, Z., Cun, X., Bao, J., Zhou, W., Liu, J., Li, H.: Uformer: A general u-shaped transformer for image restoration. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 17683-17693 (2022)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 512, + 481, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 512, + 481, + 544 + ], + "spans": [ + { + "bbox": [ + 132, + 512, + 481, + 544 + ], + "type": "text", + "content": "44. Wu, H., Qu, Y., Lin, S., Zhou, J., Qiao, R., Zhang, Z., Xie, Y., Ma, L.: Contrastive learning for compact single image dehazing. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 10551-10560 (2021)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 545, + 481, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 545, + 481, + 578 + ], + "spans": [ + { + "bbox": [ + 132, + 545, + 481, + 578 + ], + "type": "text", + "content": "45. Yang, W., Tan, R.T., Feng, J., Liu, J., Guo, Z., Yan, S.: Deep joint rain detection and removal from a single image. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1357-1366 (2017)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 578, + 481, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 578, + 481, + 610 + ], + "spans": [ + { + "bbox": [ + 132, + 578, + 481, + 610 + ], + "type": "text", + "content": "46. Yang, W., Tan, R.T., Feng, J., Liu, J., Guo, Z., Yan, S.: Deep joint rain detection and removal from a single image. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 
1357-1366 (2017)" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 611, + 481, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 611, + 481, + 643 + ], + "spans": [ + { + "bbox": [ + 132, + 611, + 481, + 643 + ], + "type": "text", + "content": "47. Yang, W., Tan, R.T., Feng, J., Liu, J., Guo, Z., Yan, S.: Deep joint rain detection and removal from a single image. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 1357-1366 (2017)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 643, + 481, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 643, + 481, + 665 + ], + "spans": [ + { + "bbox": [ + 132, + 643, + 481, + 665 + ], + "type": "text", + "content": "48. Zamir, S.W., Arora, A., Khan, S., Hayat, M., Khan, F.S., Yang, M.H.: Restormer: Efficient transformer for high-resolution image restoration. In: Proceedings of the" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 228, + 91, + 448, + 102 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 91, + 448, + 102 + ], + "spans": [ + { + "bbox": [ + 228, + 91, + 448, + 102 + ], + "type": "text", + "content": "A Comparative Study of Image Restoration Networks" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "spans": [ + { + "bbox": [ + 470, + 92, + 480, + 100 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 133, + 117, + 480, + 522 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 149, + 117, + 480, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 117, + 480, + 138 + ], + "spans": [ + { + "bbox": [ + 149, + 117, + 480, + 138 + ], + "type": "text", + "content": "IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5728-5739 (2022)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 140, + 480, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 140, + 480, + 171 + ], + "spans": [ + { + "bbox": [ + 133, + 140, + 480, + 171 + ], + "type": "text", + "content": "49. Zamir, S.W., Arora, A., Khan, S., Hayat, M., Khan, F.S., Yang, M.H., Shao, L.: Multi-stage progressive image restoration. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 14821-14831 (2021)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 133, + 172, + 480, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 172, + 480, + 205 + ], + "spans": [ + { + "bbox": [ + 133, + 172, + 480, + 205 + ], + "type": "text", + "content": "50. Zeyde, R., Elad, M., Protter, M.: On single image scale-up using sparse-representations. In: International conference on curves and surfaces. pp. 711-730. Springer (2010)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 133, + 205, + 480, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 205, + 480, + 237 + ], + "spans": [ + { + "bbox": [ + 133, + 205, + 480, + 237 + ], + "type": "text", + "content": "51. Zhang, H., Patel, V.M.: Density-aware single image de-raining using a multi-stream dense network. 
In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 695-704 (2018)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 133, + 238, + 480, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 238, + 480, + 270 + ], + "spans": [ + { + "bbox": [ + 133, + 238, + 480, + 270 + ], + "type": "text", + "content": "52. Zhang, K., Li, Y., Zuo, W., Zhang, L., Van Gool, L., Timofte, R.: Plug-and-play image restoration with deep denoiser prior. IEEE Transactions on Pattern Analysis and Machine Intelligence 44(10), 6360-6376 (2021)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 133, + 271, + 480, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 271, + 480, + 303 + ], + "spans": [ + { + "bbox": [ + 133, + 271, + 480, + 303 + ], + "type": "text", + "content": "53. Zhang, K., Zuo, W., Chen, Y., Meng, D., Zhang, L.: Beyond a gaussian denoiser: Residual learning of deep cnn for image denoising. IEEE transactions on image processing 26(7), 3142-3155 (2017)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 133, + 304, + 480, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 304, + 480, + 335 + ], + "spans": [ + { + "bbox": [ + 133, + 304, + 480, + 335 + ], + "type": "text", + "content": "54. Zhang, K., Zuo, W., Gu, S., Zhang, L.: Learning deep cnn denoiser prior for image restoration. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3929-3938 (2017)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 133, + 336, + 480, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 336, + 480, + 369 + ], + "spans": [ + { + "bbox": [ + 133, + 336, + 480, + 369 + ], + "type": "text", + "content": "55. Zhang, K., Zuo, W., Zhang, L.: Ffdnet: Toward a fast and flexible solution for cnn-based image denoising. IEEE Transactions on Image Processing 27(9), 4608-4622 (2018)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 133, + 369, + 480, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 369, + 480, + 402 + ], + "spans": [ + { + "bbox": [ + 133, + 369, + 480, + 402 + ], + "type": "text", + "content": "56. Zhang, L., Wu, X., Buades, A., Li, X.: Color demosaicking by local directional interpolation and nonlocal adaptive thresholding. Journal of Electronic imaging 20(2), 023016-023016 (2011)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 133, + 403, + 480, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 403, + 480, + 435 + ], + "spans": [ + { + "bbox": [ + 133, + 403, + 480, + 435 + ], + "type": "text", + "content": "57. Zhang, Y., Li, K., Li, K., Wang, L., Zhong, B., Fu, Y.: Image super-resolution using very deep residual channel attention networks. In: European conference on computer vision. pp. 286-301. Springer (2018)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 133, + 436, + 480, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 436, + 480, + 456 + ], + "spans": [ + { + "bbox": [ + 133, + 436, + 480, + 456 + ], + "type": "text", + "content": "58. Zhang, Y., Li, K., Li, K., Zhong, B., Fu, Y.: Residual non-local attention networks for image restoration. 
arXiv preprint arXiv:1903.10082 (2019)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 133, + 457, + 480, + 490 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 457, + 480, + 490 + ], + "spans": [ + { + "bbox": [ + 133, + 457, + 480, + 490 + ], + "type": "text", + "content": "59. Zhang, Y., Tian, Y., Kong, Y., Zhong, B., Fu, Y.: Residual dense network for image super-resolution. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2472-2481 (2018)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 133, + 491, + 480, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 491, + 480, + 522 + ], + "spans": [ + { + "bbox": [ + 133, + 491, + 480, + 522 + ], + "type": "text", + "content": "60. Zhang, Y., Tian, Y., Kong, Y., Zhong, B., Fu, Y.: Residual dense network for image restoration. IEEE transactions on pattern analysis and machine intelligence 43(7), 2480-2495 (2020)" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "spans": [ + { + "bbox": [ + 133, + 91, + 144, + 100 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "spans": [ + { + "bbox": [ + 166, + 91, + 221, + 100 + ], + "type": "text", + "content": "X.Chen et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file
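The JSON that ends here is the tail of a machine-generated layout file: a list of pages, each with para_blocks (here list blocks of ref_text items) that nest lines and spans with the extracted text, plus per-page discarded_blocks for running headers and page numbers, and a top-level "_backend"/"_version_name" footer. As a rough illustration only, the sketch below shows one way to walk that nesting and print just the reference strings. It is a minimal sketch under stated assumptions: the filename layout.json is hypothetical, and the top-level key holding the page list is not visible in this excerpt, so the script locates that list structurally instead of by name.

import json

def iter_ref_text(page):
    """Yield the text of every 'ref_text' block on one page dict."""
    for para in page.get("para_blocks", []):
        # A block of type "list" nests its items under "blocks";
        # other blocks carry their "lines" directly.
        blocks = para.get("blocks") or [para]
        for block in blocks:
            if block.get("type") != "ref_text":
                continue
            for line in block.get("lines", []):
                for span in line.get("spans", []):
                    if span.get("type") == "text":
                        yield span["content"]

# "layout.json" is a hypothetical local copy of the file shown above.
with open("layout.json", encoding="utf-8") as fh:
    data = json.load(fh)

# The key holding the per-page list is not shown in this excerpt, so find
# the first top-level value that is a list of dicts carrying "para_blocks".
pages = next(
    v for v in data.values()
    if isinstance(v, list) and v and isinstance(v[0], dict) and "para_blocks" in v[0]
)

for page in pages:
    for ref in iter_ref_text(page):
        print(ref)

Run against the pages shown here (page_idx 16 and 17), this would print the bibliography entries 40-60 of the "A Comparative Study of Image Restoration Networks" paper while skipping the discarded header and page-number blocks.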